]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-3.0-3.17.5-201412071006.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.0-3.17.5-201412071006.patch
CommitLineData
061f5a26
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index 764f599..c600e2f 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -642,7 +643,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -665,7 +688,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -693,7 +716,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -724,7 +747,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 1edd5fd..107ff46 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1155,6 +1155,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+
327 hashdist= [KNL,NUMA] Large hashes allocated during boot
328 are distributed across NUMA nodes. Defaults on
329 for 64-bit NUMA, off otherwise.
330@@ -2175,6 +2179,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
331 noexec=on: enable non-executable mappings (default)
332 noexec=off: disable non-executable mappings
333
334+ nopcid [X86-64]
335+ Disable PCID (Process-Context IDentifier) even if it
336+ is supported by the processor.
337+
338 nosmap [X86]
339 Disable SMAP (Supervisor Mode Access Prevention)
340 even if it is supported by processor.
341@@ -2467,6 +2475,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
342 the specified number of seconds. This is to be used if
343 your oopses keep scrolling off the screen.
344
345+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
346+ virtualization environments that don't cope well with the
347+ expand down segment used by UDEREF on X86-32 or the frequent
348+ page table updates on X86-64.
349+
350+ pax_sanitize_slab=
351+ Format: { 0 | 1 | off | fast | full }
352+ Options '0' and '1' are only provided for backward
353+ compatibility, 'off' or 'fast' should be used instead.
354+ 0|off : disable slab object sanitization
355+ 1|fast: enable slab object sanitization excluding
356+ whitelisted slabs (default)
357+ full : sanitize all slabs, even the whitelisted ones
358+
359+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
360+
361+ pax_extra_latent_entropy
362+ Enable a very simple form of latent entropy extraction
363+ from the first 4GB of memory as the bootmem allocator
364+ passes the memory pages to the buddy allocator.
365+
366+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
367+ when the processor supports PCID.
368+
369 pcbit= [HW,ISDN]
370
371 pcd. [PARIDE]
372diff --git a/Makefile b/Makefile
373index 42585f6..80391c0 100644
374--- a/Makefile
375+++ b/Makefile
376@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
377
378 HOSTCC = gcc
379 HOSTCXX = g++
380-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
381-HOSTCXXFLAGS = -O2
382+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
383+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
384+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
385
386 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
387 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
388@@ -450,8 +451,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
389 # Rules shared between *config targets and build targets
390
391 # Basic helpers built in scripts/
392-PHONY += scripts_basic
393-scripts_basic:
394+PHONY += scripts_basic gcc-plugins
395+scripts_basic: gcc-plugins
396 $(Q)$(MAKE) $(build)=scripts/basic
397 $(Q)rm -f .tmp_quiet_recordmcount
398
399@@ -625,6 +626,72 @@ endif
400 # Tell gcc to never replace conditional load with a non-conditional one
401 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
402
403+ifndef DISABLE_PAX_PLUGINS
404+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
405+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
406+else
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
408+endif
409+ifneq ($(PLUGINCC),)
410+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
411+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
412+endif
413+ifdef CONFIG_PAX_MEMORY_STACKLEAK
414+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
415+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
416+endif
417+ifdef CONFIG_KALLOCSTAT_PLUGIN
418+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
419+endif
420+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
421+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
422+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
424+endif
425+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
426+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
428+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
429+endif
430+endif
431+ifdef CONFIG_CHECKER_PLUGIN
432+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
433+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
434+endif
435+endif
436+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
437+ifdef CONFIG_PAX_SIZE_OVERFLOW
438+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
439+endif
440+ifdef CONFIG_PAX_LATENT_ENTROPY
441+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
442+endif
443+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
444+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
445+endif
446+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
447+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
448+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
449+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
450+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
451+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
452+ifeq ($(KBUILD_EXTMOD),)
453+gcc-plugins:
454+ $(Q)$(MAKE) $(build)=tools/gcc
455+else
456+gcc-plugins: ;
457+endif
458+else
459+gcc-plugins:
460+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
461+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
462+else
463+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
464+endif
465+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
466+endif
467+endif
468+
469 ifdef CONFIG_READABLE_ASM
470 # Disable optimizations that make assembler listings hard to read.
471 # reorder blocks reorders the control in the function
472@@ -717,7 +784,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
473 else
474 KBUILD_CFLAGS += -g
475 endif
476-KBUILD_AFLAGS += -Wa,-gdwarf-2
477+KBUILD_AFLAGS += -Wa,--gdwarf-2
478 endif
479 ifdef CONFIG_DEBUG_INFO_DWARF4
480 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
481@@ -867,7 +934,7 @@ export mod_sign_cmd
482
483
484 ifeq ($(KBUILD_EXTMOD),)
485-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
486+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
487
488 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
489 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
490@@ -916,6 +983,8 @@ endif
491
492 # The actual objects are generated when descending,
493 # make sure no implicit rule kicks in
494+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
495+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
496 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
497
498 # Handle descending into subdirectories listed in $(vmlinux-dirs)
499@@ -925,7 +994,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
500 # Error messages still appears in the original language
501
502 PHONY += $(vmlinux-dirs)
503-$(vmlinux-dirs): prepare scripts
504+$(vmlinux-dirs): gcc-plugins prepare scripts
505 $(Q)$(MAKE) $(build)=$@
506
507 define filechk_kernel.release
508@@ -968,10 +1037,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
509
510 archprepare: archheaders archscripts prepare1 scripts_basic
511
512+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
513+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
514 prepare0: archprepare FORCE
515 $(Q)$(MAKE) $(build)=.
516
517 # All the preparing..
518+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
519 prepare: prepare0
520
521 # Generate some files
522@@ -1086,6 +1158,8 @@ all: modules
523 # using awk while concatenating to the final file.
524
525 PHONY += modules
526+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
527+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
528 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
529 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
530 @$(kecho) ' Building modules, stage 2.';
531@@ -1101,7 +1175,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
532
533 # Target to prepare building external modules
534 PHONY += modules_prepare
535-modules_prepare: prepare scripts
536+modules_prepare: gcc-plugins prepare scripts
537
538 # Target to install modules
539 PHONY += modules_install
540@@ -1167,7 +1241,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
541 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
542 signing_key.priv signing_key.x509 x509.genkey \
543 extra_certificates signing_key.x509.keyid \
544- signing_key.x509.signer include/linux/version.h
545+ signing_key.x509.signer include/linux/version.h \
546+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
547+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
548+ tools/gcc/randomize_layout_seed.h
549
550 # clean - Delete most, but leave enough to build external modules
551 #
552@@ -1206,7 +1283,7 @@ distclean: mrproper
553 @find $(srctree) $(RCS_FIND_IGNORE) \
554 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
555 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
556- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
557+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
558 -type f -print | xargs rm -f
559
560
561@@ -1372,6 +1449,8 @@ PHONY += $(module-dirs) modules
562 $(module-dirs): crmodverdir $(objtree)/Module.symvers
563 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
564
565+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
566+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
567 modules: $(module-dirs)
568 @$(kecho) ' Building modules, stage 2.';
569 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
570@@ -1512,17 +1591,21 @@ else
571 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
572 endif
573
574-%.s: %.c prepare scripts FORCE
575+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
576+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
577+%.s: %.c gcc-plugins prepare scripts FORCE
578 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
579 %.i: %.c prepare scripts FORCE
580 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
581-%.o: %.c prepare scripts FORCE
582+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
583+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
584+%.o: %.c gcc-plugins prepare scripts FORCE
585 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
586 %.lst: %.c prepare scripts FORCE
587 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
588-%.s: %.S prepare scripts FORCE
589+%.s: %.S gcc-plugins prepare scripts FORCE
590 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
591-%.o: %.S prepare scripts FORCE
592+%.o: %.S gcc-plugins prepare scripts FORCE
593 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
594 %.symtypes: %.c prepare scripts FORCE
595 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
596@@ -1532,11 +1615,15 @@ endif
597 $(cmd_crmodverdir)
598 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
599 $(build)=$(build-dir)
600-%/: prepare scripts FORCE
601+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
602+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
603+%/: gcc-plugins prepare scripts FORCE
604 $(cmd_crmodverdir)
605 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
606 $(build)=$(build-dir)
607-%.ko: prepare scripts FORCE
608+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
609+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
610+%.ko: gcc-plugins prepare scripts FORCE
611 $(cmd_crmodverdir)
612 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
613 $(build)=$(build-dir) $(@:.ko=.o)
614diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
615index ed60a1e..47f1a55 100644
616--- a/arch/alpha/include/asm/atomic.h
617+++ b/arch/alpha/include/asm/atomic.h
618@@ -292,4 +292,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
619 #define atomic_dec(v) atomic_sub(1,(v))
620 #define atomic64_dec(v) atomic64_sub(1,(v))
621
622+#define atomic64_read_unchecked(v) atomic64_read(v)
623+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
624+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
625+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
626+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
627+#define atomic64_inc_unchecked(v) atomic64_inc(v)
628+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
629+#define atomic64_dec_unchecked(v) atomic64_dec(v)
630+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
631+
632 #endif /* _ALPHA_ATOMIC_H */
633diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
634index ad368a9..fbe0f25 100644
635--- a/arch/alpha/include/asm/cache.h
636+++ b/arch/alpha/include/asm/cache.h
637@@ -4,19 +4,19 @@
638 #ifndef __ARCH_ALPHA_CACHE_H
639 #define __ARCH_ALPHA_CACHE_H
640
641+#include <linux/const.h>
642
643 /* Bytes per L1 (data) cache line. */
644 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
645-# define L1_CACHE_BYTES 64
646 # define L1_CACHE_SHIFT 6
647 #else
648 /* Both EV4 and EV5 are write-through, read-allocate,
649 direct-mapped, physical.
650 */
651-# define L1_CACHE_BYTES 32
652 # define L1_CACHE_SHIFT 5
653 #endif
654
655+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
656 #define SMP_CACHE_BYTES L1_CACHE_BYTES
657
658 #endif
659diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
660index 968d999..d36b2df 100644
661--- a/arch/alpha/include/asm/elf.h
662+++ b/arch/alpha/include/asm/elf.h
663@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
664
665 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
666
667+#ifdef CONFIG_PAX_ASLR
668+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
669+
670+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
671+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
672+#endif
673+
674 /* $0 is set by ld.so to a pointer to a function which might be
675 registered using atexit. This provides a mean for the dynamic
676 linker to call DT_FINI functions for shared libraries that have
677diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
678index aab14a0..b4fa3e7 100644
679--- a/arch/alpha/include/asm/pgalloc.h
680+++ b/arch/alpha/include/asm/pgalloc.h
681@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
682 pgd_set(pgd, pmd);
683 }
684
685+static inline void
686+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
687+{
688+ pgd_populate(mm, pgd, pmd);
689+}
690+
691 extern pgd_t *pgd_alloc(struct mm_struct *mm);
692
693 static inline void
694diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
695index d8f9b7e..f6222fa 100644
696--- a/arch/alpha/include/asm/pgtable.h
697+++ b/arch/alpha/include/asm/pgtable.h
698@@ -102,6 +102,17 @@ struct vm_area_struct;
699 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
700 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
701 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
702+
703+#ifdef CONFIG_PAX_PAGEEXEC
704+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
705+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
706+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
707+#else
708+# define PAGE_SHARED_NOEXEC PAGE_SHARED
709+# define PAGE_COPY_NOEXEC PAGE_COPY
710+# define PAGE_READONLY_NOEXEC PAGE_READONLY
711+#endif
712+
713 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
714
715 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
716diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
717index 2fd00b7..cfd5069 100644
718--- a/arch/alpha/kernel/module.c
719+++ b/arch/alpha/kernel/module.c
720@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
721
722 /* The small sections were sorted to the end of the segment.
723 The following should definitely cover them. */
724- gp = (u64)me->module_core + me->core_size - 0x8000;
725+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
726 got = sechdrs[me->arch.gotsecindex].sh_addr;
727
728 for (i = 0; i < n; i++) {
729diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
730index 1402fcc..0b1abd2 100644
731--- a/arch/alpha/kernel/osf_sys.c
732+++ b/arch/alpha/kernel/osf_sys.c
733@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
734 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
735
736 static unsigned long
737-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
738- unsigned long limit)
739+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
740+ unsigned long limit, unsigned long flags)
741 {
742 struct vm_unmapped_area_info info;
743+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
744
745 info.flags = 0;
746 info.length = len;
747@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
748 info.high_limit = limit;
749 info.align_mask = 0;
750 info.align_offset = 0;
751+ info.threadstack_offset = offset;
752 return vm_unmapped_area(&info);
753 }
754
755@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
756 merely specific addresses, but regions of memory -- perhaps
757 this feature should be incorporated into all ports? */
758
759+#ifdef CONFIG_PAX_RANDMMAP
760+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
761+#endif
762+
763 if (addr) {
764- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
765+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
766 if (addr != (unsigned long) -ENOMEM)
767 return addr;
768 }
769
770 /* Next, try allocating at TASK_UNMAPPED_BASE. */
771- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
772- len, limit);
773+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
774+
775 if (addr != (unsigned long) -ENOMEM)
776 return addr;
777
778 /* Finally, try allocating in low memory. */
779- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
780+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
781
782 return addr;
783 }
784diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
785index 98838a0..b304fb4 100644
786--- a/arch/alpha/mm/fault.c
787+++ b/arch/alpha/mm/fault.c
788@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
789 __reload_thread(pcb);
790 }
791
792+#ifdef CONFIG_PAX_PAGEEXEC
793+/*
794+ * PaX: decide what to do with offenders (regs->pc = fault address)
795+ *
796+ * returns 1 when task should be killed
797+ * 2 when patched PLT trampoline was detected
798+ * 3 when unpatched PLT trampoline was detected
799+ */
800+static int pax_handle_fetch_fault(struct pt_regs *regs)
801+{
802+
803+#ifdef CONFIG_PAX_EMUPLT
804+ int err;
805+
806+ do { /* PaX: patched PLT emulation #1 */
807+ unsigned int ldah, ldq, jmp;
808+
809+ err = get_user(ldah, (unsigned int *)regs->pc);
810+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
811+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
812+
813+ if (err)
814+ break;
815+
816+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
817+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
818+ jmp == 0x6BFB0000U)
819+ {
820+ unsigned long r27, addr;
821+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
822+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
823+
824+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
825+ err = get_user(r27, (unsigned long *)addr);
826+ if (err)
827+ break;
828+
829+ regs->r27 = r27;
830+ regs->pc = r27;
831+ return 2;
832+ }
833+ } while (0);
834+
835+ do { /* PaX: patched PLT emulation #2 */
836+ unsigned int ldah, lda, br;
837+
838+ err = get_user(ldah, (unsigned int *)regs->pc);
839+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
840+ err |= get_user(br, (unsigned int *)(regs->pc+8));
841+
842+ if (err)
843+ break;
844+
845+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
846+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
847+ (br & 0xFFE00000U) == 0xC3E00000U)
848+ {
849+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
850+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
851+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
852+
853+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
854+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
855+ return 2;
856+ }
857+ } while (0);
858+
859+ do { /* PaX: unpatched PLT emulation */
860+ unsigned int br;
861+
862+ err = get_user(br, (unsigned int *)regs->pc);
863+
864+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
865+ unsigned int br2, ldq, nop, jmp;
866+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
867+
868+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
869+ err = get_user(br2, (unsigned int *)addr);
870+ err |= get_user(ldq, (unsigned int *)(addr+4));
871+ err |= get_user(nop, (unsigned int *)(addr+8));
872+ err |= get_user(jmp, (unsigned int *)(addr+12));
873+ err |= get_user(resolver, (unsigned long *)(addr+16));
874+
875+ if (err)
876+ break;
877+
878+ if (br2 == 0xC3600000U &&
879+ ldq == 0xA77B000CU &&
880+ nop == 0x47FF041FU &&
881+ jmp == 0x6B7B0000U)
882+ {
883+ regs->r28 = regs->pc+4;
884+ regs->r27 = addr+16;
885+ regs->pc = resolver;
886+ return 3;
887+ }
888+ }
889+ } while (0);
890+#endif
891+
892+ return 1;
893+}
894+
895+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
896+{
897+ unsigned long i;
898+
899+ printk(KERN_ERR "PAX: bytes at PC: ");
900+ for (i = 0; i < 5; i++) {
901+ unsigned int c;
902+ if (get_user(c, (unsigned int *)pc+i))
903+ printk(KERN_CONT "???????? ");
904+ else
905+ printk(KERN_CONT "%08x ", c);
906+ }
907+ printk("\n");
908+}
909+#endif
910
911 /*
912 * This routine handles page faults. It determines the address,
913@@ -133,8 +251,29 @@ retry:
914 good_area:
915 si_code = SEGV_ACCERR;
916 if (cause < 0) {
917- if (!(vma->vm_flags & VM_EXEC))
918+ if (!(vma->vm_flags & VM_EXEC)) {
919+
920+#ifdef CONFIG_PAX_PAGEEXEC
921+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
922+ goto bad_area;
923+
924+ up_read(&mm->mmap_sem);
925+ switch (pax_handle_fetch_fault(regs)) {
926+
927+#ifdef CONFIG_PAX_EMUPLT
928+ case 2:
929+ case 3:
930+ return;
931+#endif
932+
933+ }
934+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
935+ do_group_exit(SIGKILL);
936+#else
937 goto bad_area;
938+#endif
939+
940+ }
941 } else if (!cause) {
942 /* Allow reads even for write-only mappings */
943 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
944diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
945index a2ff5c5..ecf6a78 100644
946--- a/arch/arc/kernel/kgdb.c
947+++ b/arch/arc/kernel/kgdb.c
948@@ -158,11 +158,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
949 return -1;
950 }
951
952-unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
953-{
954- return instruction_pointer(regs);
955-}
956-
957 int kgdb_arch_init(void)
958 {
959 single_step_data.armed = 0;
960diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
961index 32cbbd5..c102df9 100644
962--- a/arch/arm/Kconfig
963+++ b/arch/arm/Kconfig
964@@ -1719,7 +1719,7 @@ config ALIGNMENT_TRAP
965
966 config UACCESS_WITH_MEMCPY
967 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
968- depends on MMU
969+ depends on MMU && !PAX_MEMORY_UDEREF
970 default y if CPU_FEROCEON
971 help
972 Implement faster copy_to_user and clear_user methods for CPU
973@@ -1983,6 +1983,7 @@ config XIP_PHYS_ADDR
974 config KEXEC
975 bool "Kexec system call (EXPERIMENTAL)"
976 depends on (!SMP || PM_SLEEP_SMP)
977+ depends on !GRKERNSEC_KMEM
978 help
979 kexec is a system call that implements the ability to shutdown your
980 current kernel, and to start another kernel. It is like a reboot
981diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
982index 3040359..a494fa3 100644
983--- a/arch/arm/include/asm/atomic.h
984+++ b/arch/arm/include/asm/atomic.h
985@@ -18,17 +18,41 @@
986 #include <asm/barrier.h>
987 #include <asm/cmpxchg.h>
988
989+#ifdef CONFIG_GENERIC_ATOMIC64
990+#include <asm-generic/atomic64.h>
991+#endif
992+
993 #define ATOMIC_INIT(i) { (i) }
994
995 #ifdef __KERNEL__
996
997+#ifdef CONFIG_THUMB2_KERNEL
998+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
999+#else
1000+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
1001+#endif
1002+
1003+#define _ASM_EXTABLE(from, to) \
1004+" .pushsection __ex_table,\"a\"\n"\
1005+" .align 3\n" \
1006+" .long " #from ", " #to"\n" \
1007+" .popsection"
1008+
1009 /*
1010 * On ARM, ordinary assignment (str instruction) doesn't clear the local
1011 * strex/ldrex monitor on some implementations. The reason we can use it for
1012 * atomic_set() is the clrex or dummy strex done on every exception return.
1013 */
1014 #define atomic_read(v) (*(volatile int *)&(v)->counter)
1015+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1016+{
1017+ return v->counter;
1018+}
1019 #define atomic_set(v,i) (((v)->counter) = (i))
1020+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1021+{
1022+ v->counter = i;
1023+}
1024
1025 #if __LINUX_ARM_ARCH__ >= 6
1026
1027@@ -44,6 +68,36 @@ static inline void atomic_add(int i, atomic_t *v)
1028
1029 prefetchw(&v->counter);
1030 __asm__ __volatile__("@ atomic_add\n"
1031+"1: ldrex %1, [%3]\n"
1032+" adds %0, %1, %4\n"
1033+
1034+#ifdef CONFIG_PAX_REFCOUNT
1035+" bvc 3f\n"
1036+"2: " REFCOUNT_TRAP_INSN "\n"
1037+"3:\n"
1038+#endif
1039+
1040+" strex %1, %0, [%3]\n"
1041+" teq %1, #0\n"
1042+" bne 1b"
1043+
1044+#ifdef CONFIG_PAX_REFCOUNT
1045+"\n4:\n"
1046+ _ASM_EXTABLE(2b, 4b)
1047+#endif
1048+
1049+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1050+ : "r" (&v->counter), "Ir" (i)
1051+ : "cc");
1052+}
1053+
1054+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1055+{
1056+ unsigned long tmp;
1057+ int result;
1058+
1059+ prefetchw(&v->counter);
1060+ __asm__ __volatile__("@ atomic_add_unchecked\n"
1061 "1: ldrex %0, [%3]\n"
1062 " add %0, %0, %4\n"
1063 " strex %1, %0, [%3]\n"
1064@@ -63,6 +117,43 @@ static inline int atomic_add_return(int i, atomic_t *v)
1065 prefetchw(&v->counter);
1066
1067 __asm__ __volatile__("@ atomic_add_return\n"
1068+"1: ldrex %1, [%3]\n"
1069+" adds %0, %1, %4\n"
1070+
1071+#ifdef CONFIG_PAX_REFCOUNT
1072+" bvc 3f\n"
1073+" mov %0, %1\n"
1074+"2: " REFCOUNT_TRAP_INSN "\n"
1075+"3:\n"
1076+#endif
1077+
1078+" strex %1, %0, [%3]\n"
1079+" teq %1, #0\n"
1080+" bne 1b"
1081+
1082+#ifdef CONFIG_PAX_REFCOUNT
1083+"\n4:\n"
1084+ _ASM_EXTABLE(2b, 4b)
1085+#endif
1086+
1087+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1088+ : "r" (&v->counter), "Ir" (i)
1089+ : "cc");
1090+
1091+ smp_mb();
1092+
1093+ return result;
1094+}
1095+
1096+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1097+{
1098+ unsigned long tmp;
1099+ int result;
1100+
1101+ smp_mb();
1102+ prefetchw(&v->counter);
1103+
1104+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
1105 "1: ldrex %0, [%3]\n"
1106 " add %0, %0, %4\n"
1107 " strex %1, %0, [%3]\n"
1108@@ -84,6 +175,36 @@ static inline void atomic_sub(int i, atomic_t *v)
1109
1110 prefetchw(&v->counter);
1111 __asm__ __volatile__("@ atomic_sub\n"
1112+"1: ldrex %1, [%3]\n"
1113+" subs %0, %1, %4\n"
1114+
1115+#ifdef CONFIG_PAX_REFCOUNT
1116+" bvc 3f\n"
1117+"2: " REFCOUNT_TRAP_INSN "\n"
1118+"3:\n"
1119+#endif
1120+
1121+" strex %1, %0, [%3]\n"
1122+" teq %1, #0\n"
1123+" bne 1b"
1124+
1125+#ifdef CONFIG_PAX_REFCOUNT
1126+"\n4:\n"
1127+ _ASM_EXTABLE(2b, 4b)
1128+#endif
1129+
1130+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1131+ : "r" (&v->counter), "Ir" (i)
1132+ : "cc");
1133+}
1134+
1135+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1136+{
1137+ unsigned long tmp;
1138+ int result;
1139+
1140+ prefetchw(&v->counter);
1141+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
1142 "1: ldrex %0, [%3]\n"
1143 " sub %0, %0, %4\n"
1144 " strex %1, %0, [%3]\n"
1145@@ -103,11 +224,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1146 prefetchw(&v->counter);
1147
1148 __asm__ __volatile__("@ atomic_sub_return\n"
1149-"1: ldrex %0, [%3]\n"
1150-" sub %0, %0, %4\n"
1151+"1: ldrex %1, [%3]\n"
1152+" subs %0, %1, %4\n"
1153+
1154+#ifdef CONFIG_PAX_REFCOUNT
1155+" bvc 3f\n"
1156+" mov %0, %1\n"
1157+"2: " REFCOUNT_TRAP_INSN "\n"
1158+"3:\n"
1159+#endif
1160+
1161 " strex %1, %0, [%3]\n"
1162 " teq %1, #0\n"
1163 " bne 1b"
1164+
1165+#ifdef CONFIG_PAX_REFCOUNT
1166+"\n4:\n"
1167+ _ASM_EXTABLE(2b, 4b)
1168+#endif
1169+
1170 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1171 : "r" (&v->counter), "Ir" (i)
1172 : "cc");
1173@@ -152,12 +287,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1174 __asm__ __volatile__ ("@ atomic_add_unless\n"
1175 "1: ldrex %0, [%4]\n"
1176 " teq %0, %5\n"
1177-" beq 2f\n"
1178-" add %1, %0, %6\n"
1179+" beq 4f\n"
1180+" adds %1, %0, %6\n"
1181+
1182+#ifdef CONFIG_PAX_REFCOUNT
1183+" bvc 3f\n"
1184+"2: " REFCOUNT_TRAP_INSN "\n"
1185+"3:\n"
1186+#endif
1187+
1188 " strex %2, %1, [%4]\n"
1189 " teq %2, #0\n"
1190 " bne 1b\n"
1191-"2:"
1192+"4:"
1193+
1194+#ifdef CONFIG_PAX_REFCOUNT
1195+ _ASM_EXTABLE(2b, 4b)
1196+#endif
1197+
1198 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1199 : "r" (&v->counter), "r" (u), "r" (a)
1200 : "cc");
1201@@ -168,6 +315,28 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1202 return oldval;
1203 }
1204
1205+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1206+{
1207+ unsigned long oldval, res;
1208+
1209+ smp_mb();
1210+
1211+ do {
1212+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1213+ "ldrex %1, [%3]\n"
1214+ "mov %0, #0\n"
1215+ "teq %1, %4\n"
1216+ "strexeq %0, %5, [%3]\n"
1217+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1218+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1219+ : "cc");
1220+ } while (res);
1221+
1222+ smp_mb();
1223+
1224+ return oldval;
1225+}
1226+
1227 #else /* ARM_ARCH_6 */
1228
1229 #ifdef CONFIG_SMP
1230@@ -186,7 +355,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1231
1232 return val;
1233 }
1234+
1235+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1236+{
1237+ return atomic_add_return(i, v);
1238+}
1239+
1240 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1241+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1242+{
1243+ (void) atomic_add_return(i, v);
1244+}
1245
1246 static inline int atomic_sub_return(int i, atomic_t *v)
1247 {
1248@@ -201,6 +380,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1249 return val;
1250 }
1251 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1252+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1253+{
1254+ (void) atomic_sub_return(i, v);
1255+}
1256
1257 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1258 {
1259@@ -216,6 +399,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1260 return ret;
1261 }
1262
1263+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1264+{
1265+ return atomic_cmpxchg(v, old, new);
1266+}
1267+
1268 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1269 {
1270 int c, old;
1271@@ -229,13 +417,33 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1272 #endif /* __LINUX_ARM_ARCH__ */
1273
1274 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1275+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1276+{
1277+ return xchg(&v->counter, new);
1278+}
1279
1280 #define atomic_inc(v) atomic_add(1, v)
1281+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1282+{
1283+ atomic_add_unchecked(1, v);
1284+}
1285 #define atomic_dec(v) atomic_sub(1, v)
1286+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1287+{
1288+ atomic_sub_unchecked(1, v);
1289+}
1290
1291 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1292+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1293+{
1294+ return atomic_add_return_unchecked(1, v) == 0;
1295+}
1296 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1297 #define atomic_inc_return(v) (atomic_add_return(1, v))
1298+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1299+{
1300+ return atomic_add_return_unchecked(1, v);
1301+}
1302 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1303 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1304
1305@@ -246,6 +454,14 @@ typedef struct {
1306 long long counter;
1307 } atomic64_t;
1308
1309+#ifdef CONFIG_PAX_REFCOUNT
1310+typedef struct {
1311+ long long counter;
1312+} atomic64_unchecked_t;
1313+#else
1314+typedef atomic64_t atomic64_unchecked_t;
1315+#endif
1316+
1317 #define ATOMIC64_INIT(i) { (i) }
1318
1319 #ifdef CONFIG_ARM_LPAE
1320@@ -262,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1321 return result;
1322 }
1323
1324+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1325+{
1326+ long long result;
1327+
1328+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1329+" ldrd %0, %H0, [%1]"
1330+ : "=&r" (result)
1331+ : "r" (&v->counter), "Qo" (v->counter)
1332+ );
1333+
1334+ return result;
1335+}
1336+
1337 static inline void atomic64_set(atomic64_t *v, long long i)
1338 {
1339 __asm__ __volatile__("@ atomic64_set\n"
1340@@ -270,6 +499,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1341 : "r" (&v->counter), "r" (i)
1342 );
1343 }
1344+
1345+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1346+{
1347+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1348+" strd %2, %H2, [%1]"
1349+ : "=Qo" (v->counter)
1350+ : "r" (&v->counter), "r" (i)
1351+ );
1352+}
1353 #else
1354 static inline long long atomic64_read(const atomic64_t *v)
1355 {
1356@@ -284,6 +522,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1357 return result;
1358 }
1359
1360+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1361+{
1362+ long long result;
1363+
1364+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1365+" ldrexd %0, %H0, [%1]"
1366+ : "=&r" (result)
1367+ : "r" (&v->counter), "Qo" (v->counter)
1368+ );
1369+
1370+ return result;
1371+}
1372+
1373 static inline void atomic64_set(atomic64_t *v, long long i)
1374 {
1375 long long tmp;
1376@@ -298,6 +549,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1377 : "r" (&v->counter), "r" (i)
1378 : "cc");
1379 }
1380+
1381+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1382+{
1383+ long long tmp;
1384+
1385+ prefetchw(&v->counter);
1386+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1387+"1: ldrexd %0, %H0, [%2]\n"
1388+" strexd %0, %3, %H3, [%2]\n"
1389+" teq %0, #0\n"
1390+" bne 1b"
1391+ : "=&r" (tmp), "=Qo" (v->counter)
1392+ : "r" (&v->counter), "r" (i)
1393+ : "cc");
1394+}
1395 #endif
1396
1397 static inline void atomic64_add(long long i, atomic64_t *v)
1398@@ -309,6 +575,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
1399 __asm__ __volatile__("@ atomic64_add\n"
1400 "1: ldrexd %0, %H0, [%3]\n"
1401 " adds %Q0, %Q0, %Q4\n"
1402+" adcs %R0, %R0, %R4\n"
1403+
1404+#ifdef CONFIG_PAX_REFCOUNT
1405+" bvc 3f\n"
1406+"2: " REFCOUNT_TRAP_INSN "\n"
1407+"3:\n"
1408+#endif
1409+
1410+" strexd %1, %0, %H0, [%3]\n"
1411+" teq %1, #0\n"
1412+" bne 1b"
1413+
1414+#ifdef CONFIG_PAX_REFCOUNT
1415+"\n4:\n"
1416+ _ASM_EXTABLE(2b, 4b)
1417+#endif
1418+
1419+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1420+ : "r" (&v->counter), "r" (i)
1421+ : "cc");
1422+}
1423+
1424+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
1425+{
1426+ long long result;
1427+ unsigned long tmp;
1428+
1429+ prefetchw(&v->counter);
1430+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1431+"1: ldrexd %0, %H0, [%3]\n"
1432+" adds %Q0, %Q0, %Q4\n"
1433 " adc %R0, %R0, %R4\n"
1434 " strexd %1, %0, %H0, [%3]\n"
1435 " teq %1, #0\n"
1436@@ -329,6 +626,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
1437 __asm__ __volatile__("@ atomic64_add_return\n"
1438 "1: ldrexd %0, %H0, [%3]\n"
1439 " adds %Q0, %Q0, %Q4\n"
1440+" adcs %R0, %R0, %R4\n"
1441+
1442+#ifdef CONFIG_PAX_REFCOUNT
1443+" bvc 3f\n"
1444+" mov %0, %1\n"
1445+" mov %H0, %H1\n"
1446+"2: " REFCOUNT_TRAP_INSN "\n"
1447+"3:\n"
1448+#endif
1449+
1450+" strexd %1, %0, %H0, [%3]\n"
1451+" teq %1, #0\n"
1452+" bne 1b"
1453+
1454+#ifdef CONFIG_PAX_REFCOUNT
1455+"\n4:\n"
1456+ _ASM_EXTABLE(2b, 4b)
1457+#endif
1458+
1459+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1460+ : "r" (&v->counter), "r" (i)
1461+ : "cc");
1462+
1463+ smp_mb();
1464+
1465+ return result;
1466+}
1467+
1468+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
1469+{
1470+ long long result;
1471+ unsigned long tmp;
1472+
1473+ smp_mb();
1474+
1475+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1476+"1: ldrexd %0, %H0, [%3]\n"
1477+" adds %Q0, %Q0, %Q4\n"
1478 " adc %R0, %R0, %R4\n"
1479 " strexd %1, %0, %H0, [%3]\n"
1480 " teq %1, #0\n"
1481@@ -351,6 +686,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
1482 __asm__ __volatile__("@ atomic64_sub\n"
1483 "1: ldrexd %0, %H0, [%3]\n"
1484 " subs %Q0, %Q0, %Q4\n"
1485+" sbcs %R0, %R0, %R4\n"
1486+
1487+#ifdef CONFIG_PAX_REFCOUNT
1488+" bvc 3f\n"
1489+"2: " REFCOUNT_TRAP_INSN "\n"
1490+"3:\n"
1491+#endif
1492+
1493+" strexd %1, %0, %H0, [%3]\n"
1494+" teq %1, #0\n"
1495+" bne 1b"
1496+
1497+#ifdef CONFIG_PAX_REFCOUNT
1498+"\n4:\n"
1499+ _ASM_EXTABLE(2b, 4b)
1500+#endif
1501+
1502+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1503+ : "r" (&v->counter), "r" (i)
1504+ : "cc");
1505+}
1506+
1507+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
1508+{
1509+ long long result;
1510+ unsigned long tmp;
1511+
1512+ prefetchw(&v->counter);
1513+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1514+"1: ldrexd %0, %H0, [%3]\n"
1515+" subs %Q0, %Q0, %Q4\n"
1516 " sbc %R0, %R0, %R4\n"
1517 " strexd %1, %0, %H0, [%3]\n"
1518 " teq %1, #0\n"
1519@@ -371,10 +737,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
1520 __asm__ __volatile__("@ atomic64_sub_return\n"
1521 "1: ldrexd %0, %H0, [%3]\n"
1522 " subs %Q0, %Q0, %Q4\n"
1523-" sbc %R0, %R0, %R4\n"
1524+" sbcs %R0, %R0, %R4\n"
1525+
1526+#ifdef CONFIG_PAX_REFCOUNT
1527+" bvc 3f\n"
1528+" mov %0, %1\n"
1529+" mov %H0, %H1\n"
1530+"2: " REFCOUNT_TRAP_INSN "\n"
1531+"3:\n"
1532+#endif
1533+
1534 " strexd %1, %0, %H0, [%3]\n"
1535 " teq %1, #0\n"
1536 " bne 1b"
1537+
1538+#ifdef CONFIG_PAX_REFCOUNT
1539+"\n4:\n"
1540+ _ASM_EXTABLE(2b, 4b)
1541+#endif
1542+
1543 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1544 : "r" (&v->counter), "r" (i)
1545 : "cc");
1546@@ -410,6 +791,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1547 return oldval;
1548 }
1549
1550+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1551+ long long new)
1552+{
1553+ long long oldval;
1554+ unsigned long res;
1555+
1556+ smp_mb();
1557+
1558+ do {
1559+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1560+ "ldrexd %1, %H1, [%3]\n"
1561+ "mov %0, #0\n"
1562+ "teq %1, %4\n"
1563+ "teqeq %H1, %H4\n"
1564+ "strexdeq %0, %5, %H5, [%3]"
1565+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1566+ : "r" (&ptr->counter), "r" (old), "r" (new)
1567+ : "cc");
1568+ } while (res);
1569+
1570+ smp_mb();
1571+
1572+ return oldval;
1573+}
1574+
1575 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1576 {
1577 long long result;
1578@@ -435,21 +841,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1579 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1580 {
1581 long long result;
1582- unsigned long tmp;
1583+ u64 tmp;
1584
1585 smp_mb();
1586 prefetchw(&v->counter);
1587
1588 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1589-"1: ldrexd %0, %H0, [%3]\n"
1590-" subs %Q0, %Q0, #1\n"
1591-" sbc %R0, %R0, #0\n"
1592+"1: ldrexd %1, %H1, [%3]\n"
1593+" subs %Q0, %Q1, #1\n"
1594+" sbcs %R0, %R1, #0\n"
1595+
1596+#ifdef CONFIG_PAX_REFCOUNT
1597+" bvc 3f\n"
1598+" mov %Q0, %Q1\n"
1599+" mov %R0, %R1\n"
1600+"2: " REFCOUNT_TRAP_INSN "\n"
1601+"3:\n"
1602+#endif
1603+
1604 " teq %R0, #0\n"
1605-" bmi 2f\n"
1606+" bmi 4f\n"
1607 " strexd %1, %0, %H0, [%3]\n"
1608 " teq %1, #0\n"
1609 " bne 1b\n"
1610-"2:"
1611+"4:\n"
1612+
1613+#ifdef CONFIG_PAX_REFCOUNT
1614+ _ASM_EXTABLE(2b, 4b)
1615+#endif
1616+
1617 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1618 : "r" (&v->counter)
1619 : "cc");
1620@@ -473,13 +893,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1621 " teq %0, %5\n"
1622 " teqeq %H0, %H5\n"
1623 " moveq %1, #0\n"
1624-" beq 2f\n"
1625+" beq 4f\n"
1626 " adds %Q0, %Q0, %Q6\n"
1627-" adc %R0, %R0, %R6\n"
1628+" adcs %R0, %R0, %R6\n"
1629+
1630+#ifdef CONFIG_PAX_REFCOUNT
1631+" bvc 3f\n"
1632+"2: " REFCOUNT_TRAP_INSN "\n"
1633+"3:\n"
1634+#endif
1635+
1636 " strexd %2, %0, %H0, [%4]\n"
1637 " teq %2, #0\n"
1638 " bne 1b\n"
1639-"2:"
1640+"4:\n"
1641+
1642+#ifdef CONFIG_PAX_REFCOUNT
1643+ _ASM_EXTABLE(2b, 4b)
1644+#endif
1645+
1646 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1647 : "r" (&v->counter), "r" (u), "r" (a)
1648 : "cc");
1649@@ -492,10 +924,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1650
1651 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1652 #define atomic64_inc(v) atomic64_add(1LL, (v))
1653+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1654 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1655+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1656 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1657 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1658 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1659+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1660 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1661 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1662 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1663diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1664index c6a3e73..35cca85 100644
1665--- a/arch/arm/include/asm/barrier.h
1666+++ b/arch/arm/include/asm/barrier.h
1667@@ -63,7 +63,7 @@
1668 do { \
1669 compiletime_assert_atomic_type(*p); \
1670 smp_mb(); \
1671- ACCESS_ONCE(*p) = (v); \
1672+ ACCESS_ONCE_RW(*p) = (v); \
1673 } while (0)
1674
1675 #define smp_load_acquire(p) \
1676diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1677index 75fe66b..ba3dee4 100644
1678--- a/arch/arm/include/asm/cache.h
1679+++ b/arch/arm/include/asm/cache.h
1680@@ -4,8 +4,10 @@
1681 #ifndef __ASMARM_CACHE_H
1682 #define __ASMARM_CACHE_H
1683
1684+#include <linux/const.h>
1685+
1686 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1687-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1688+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1689
1690 /*
1691 * Memory returned by kmalloc() may be used for DMA, so we must make
1692@@ -24,5 +26,6 @@
1693 #endif
1694
1695 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1696+#define __read_only __attribute__ ((__section__(".data..read_only")))
1697
1698 #endif
1699diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1700index 10e78d0..dc8505d 100644
1701--- a/arch/arm/include/asm/cacheflush.h
1702+++ b/arch/arm/include/asm/cacheflush.h
1703@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1704 void (*dma_unmap_area)(const void *, size_t, int);
1705
1706 void (*dma_flush_range)(const void *, const void *);
1707-};
1708+} __no_const;
1709
1710 /*
1711 * Select the calling method
1712diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1713index 5233151..87a71fa 100644
1714--- a/arch/arm/include/asm/checksum.h
1715+++ b/arch/arm/include/asm/checksum.h
1716@@ -37,7 +37,19 @@ __wsum
1717 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1718
1719 __wsum
1720-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1721+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1722+
1723+static inline __wsum
1724+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1725+{
1726+ __wsum ret;
1727+ pax_open_userland();
1728+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1729+ pax_close_userland();
1730+ return ret;
1731+}
1732+
1733+
1734
1735 /*
1736 * Fold a partial checksum without adding pseudo headers
1737diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1738index abb2c37..96db950 100644
1739--- a/arch/arm/include/asm/cmpxchg.h
1740+++ b/arch/arm/include/asm/cmpxchg.h
1741@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1742
1743 #define xchg(ptr,x) \
1744 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1745+#define xchg_unchecked(ptr,x) \
1746+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1747
1748 #include <asm-generic/cmpxchg-local.h>
1749
1750diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1751index 6ddbe44..b5e38b1 100644
1752--- a/arch/arm/include/asm/domain.h
1753+++ b/arch/arm/include/asm/domain.h
1754@@ -48,18 +48,37 @@
1755 * Domain types
1756 */
1757 #define DOMAIN_NOACCESS 0
1758-#define DOMAIN_CLIENT 1
1759 #ifdef CONFIG_CPU_USE_DOMAINS
1760+#define DOMAIN_USERCLIENT 1
1761+#define DOMAIN_KERNELCLIENT 1
1762 #define DOMAIN_MANAGER 3
1763+#define DOMAIN_VECTORS DOMAIN_USER
1764 #else
1765+
1766+#ifdef CONFIG_PAX_KERNEXEC
1767 #define DOMAIN_MANAGER 1
1768+#define DOMAIN_KERNEXEC 3
1769+#else
1770+#define DOMAIN_MANAGER 1
1771+#endif
1772+
1773+#ifdef CONFIG_PAX_MEMORY_UDEREF
1774+#define DOMAIN_USERCLIENT 0
1775+#define DOMAIN_UDEREF 1
1776+#define DOMAIN_VECTORS DOMAIN_KERNEL
1777+#else
1778+#define DOMAIN_USERCLIENT 1
1779+#define DOMAIN_VECTORS DOMAIN_USER
1780+#endif
1781+#define DOMAIN_KERNELCLIENT 1
1782+
1783 #endif
1784
1785 #define domain_val(dom,type) ((type) << (2*(dom)))
1786
1787 #ifndef __ASSEMBLY__
1788
1789-#ifdef CONFIG_CPU_USE_DOMAINS
1790+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1791 static inline void set_domain(unsigned val)
1792 {
1793 asm volatile(
1794@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1795 isb();
1796 }
1797
1798-#define modify_domain(dom,type) \
1799- do { \
1800- struct thread_info *thread = current_thread_info(); \
1801- unsigned int domain = thread->cpu_domain; \
1802- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1803- thread->cpu_domain = domain | domain_val(dom, type); \
1804- set_domain(thread->cpu_domain); \
1805- } while (0)
1806-
1807+extern void modify_domain(unsigned int dom, unsigned int type);
1808 #else
1809 static inline void set_domain(unsigned val) { }
1810 static inline void modify_domain(unsigned dom, unsigned type) { }
1811diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1812index afb9caf..9a0bac0 100644
1813--- a/arch/arm/include/asm/elf.h
1814+++ b/arch/arm/include/asm/elf.h
1815@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1816 the loader. We need to make sure that it is out of the way of the program
1817 that it will "exec", and that there is sufficient room for the brk. */
1818
1819-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1820+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1821+
1822+#ifdef CONFIG_PAX_ASLR
1823+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1824+
1825+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1826+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1827+#endif
1828
1829 /* When the program starts, a1 contains a pointer to a function to be
1830 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1831@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1832 extern void elf_set_personality(const struct elf32_hdr *);
1833 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1834
1835-struct mm_struct;
1836-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1837-#define arch_randomize_brk arch_randomize_brk
1838-
1839 #ifdef CONFIG_MMU
1840 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1841 struct linux_binprm;
1842diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1843index de53547..52b9a28 100644
1844--- a/arch/arm/include/asm/fncpy.h
1845+++ b/arch/arm/include/asm/fncpy.h
1846@@ -81,7 +81,9 @@
1847 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1848 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1849 \
1850+ pax_open_kernel(); \
1851 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1852+ pax_close_kernel(); \
1853 flush_icache_range((unsigned long)(dest_buf), \
1854 (unsigned long)(dest_buf) + (size)); \
1855 \
1856diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1857index 53e69da..3fdc896 100644
1858--- a/arch/arm/include/asm/futex.h
1859+++ b/arch/arm/include/asm/futex.h
1860@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1861 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1862 return -EFAULT;
1863
1864+ pax_open_userland();
1865+
1866 smp_mb();
1867 /* Prefetching cannot fault */
1868 prefetchw(uaddr);
1869@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1870 : "cc", "memory");
1871 smp_mb();
1872
1873+ pax_close_userland();
1874+
1875 *uval = val;
1876 return ret;
1877 }
1878@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1879 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1880 return -EFAULT;
1881
1882+ pax_open_userland();
1883+
1884 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1885 "1: " TUSER(ldr) " %1, [%4]\n"
1886 " teq %1, %2\n"
1887@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1888 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1889 : "cc", "memory");
1890
1891+ pax_close_userland();
1892+
1893 *uval = val;
1894 return ret;
1895 }
1896@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1897 return -EFAULT;
1898
1899 pagefault_disable(); /* implies preempt_disable() */
1900+ pax_open_userland();
1901
1902 switch (op) {
1903 case FUTEX_OP_SET:
1904@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1905 ret = -ENOSYS;
1906 }
1907
1908+ pax_close_userland();
1909 pagefault_enable(); /* subsumes preempt_enable() */
1910
1911 if (!ret) {
1912diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1913index 83eb2f7..ed77159 100644
1914--- a/arch/arm/include/asm/kmap_types.h
1915+++ b/arch/arm/include/asm/kmap_types.h
1916@@ -4,6 +4,6 @@
1917 /*
1918 * This is the "bare minimum". AIO seems to require this.
1919 */
1920-#define KM_TYPE_NR 16
1921+#define KM_TYPE_NR 17
1922
1923 #endif
1924diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1925index 9e614a1..3302cca 100644
1926--- a/arch/arm/include/asm/mach/dma.h
1927+++ b/arch/arm/include/asm/mach/dma.h
1928@@ -22,7 +22,7 @@ struct dma_ops {
1929 int (*residue)(unsigned int, dma_t *); /* optional */
1930 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1931 const char *type;
1932-};
1933+} __do_const;
1934
1935 struct dma_struct {
1936 void *addr; /* single DMA address */
1937diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1938index f98c7f3..e5c626d 100644
1939--- a/arch/arm/include/asm/mach/map.h
1940+++ b/arch/arm/include/asm/mach/map.h
1941@@ -23,17 +23,19 @@ struct map_desc {
1942
1943 /* types 0-3 are defined in asm/io.h */
1944 enum {
1945- MT_UNCACHED = 4,
1946- MT_CACHECLEAN,
1947- MT_MINICLEAN,
1948+ MT_UNCACHED_RW = 4,
1949+ MT_CACHECLEAN_RO,
1950+ MT_MINICLEAN_RO,
1951 MT_LOW_VECTORS,
1952 MT_HIGH_VECTORS,
1953- MT_MEMORY_RWX,
1954+ __MT_MEMORY_RWX,
1955 MT_MEMORY_RW,
1956- MT_ROM,
1957- MT_MEMORY_RWX_NONCACHED,
1958+ MT_MEMORY_RX,
1959+ MT_ROM_RX,
1960+ MT_MEMORY_RW_NONCACHED,
1961+ MT_MEMORY_RX_NONCACHED,
1962 MT_MEMORY_RW_DTCM,
1963- MT_MEMORY_RWX_ITCM,
1964+ MT_MEMORY_RX_ITCM,
1965 MT_MEMORY_RW_SO,
1966 MT_MEMORY_DMA_READY,
1967 };
1968diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1969index 891a56b..48f337e 100644
1970--- a/arch/arm/include/asm/outercache.h
1971+++ b/arch/arm/include/asm/outercache.h
1972@@ -36,7 +36,7 @@ struct outer_cache_fns {
1973
1974 /* This is an ARM L2C thing */
1975 void (*write_sec)(unsigned long, unsigned);
1976-};
1977+} __no_const;
1978
1979 extern struct outer_cache_fns outer_cache;
1980
1981diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1982index 4355f0e..cd9168e 100644
1983--- a/arch/arm/include/asm/page.h
1984+++ b/arch/arm/include/asm/page.h
1985@@ -23,6 +23,7 @@
1986
1987 #else
1988
1989+#include <linux/compiler.h>
1990 #include <asm/glue.h>
1991
1992 /*
1993@@ -114,7 +115,7 @@ struct cpu_user_fns {
1994 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1995 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1996 unsigned long vaddr, struct vm_area_struct *vma);
1997-};
1998+} __no_const;
1999
2000 #ifdef MULTI_USER
2001 extern struct cpu_user_fns cpu_user;
2002diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
2003index 78a7793..e3dc06c 100644
2004--- a/arch/arm/include/asm/pgalloc.h
2005+++ b/arch/arm/include/asm/pgalloc.h
2006@@ -17,6 +17,7 @@
2007 #include <asm/processor.h>
2008 #include <asm/cacheflush.h>
2009 #include <asm/tlbflush.h>
2010+#include <asm/system_info.h>
2011
2012 #define check_pgt_cache() do { } while (0)
2013
2014@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2015 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
2016 }
2017
2018+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2019+{
2020+ pud_populate(mm, pud, pmd);
2021+}
2022+
2023 #else /* !CONFIG_ARM_LPAE */
2024
2025 /*
2026@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2027 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
2028 #define pmd_free(mm, pmd) do { } while (0)
2029 #define pud_populate(mm,pmd,pte) BUG()
2030+#define pud_populate_kernel(mm,pmd,pte) BUG()
2031
2032 #endif /* CONFIG_ARM_LPAE */
2033
2034@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2035 __free_page(pte);
2036 }
2037
2038+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
2039+{
2040+#ifdef CONFIG_ARM_LPAE
2041+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2042+#else
2043+ if (addr & SECTION_SIZE)
2044+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
2045+ else
2046+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2047+#endif
2048+ flush_pmd_entry(pmdp);
2049+}
2050+
2051 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
2052 pmdval_t prot)
2053 {
2054@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
2055 static inline void
2056 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
2057 {
2058- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
2059+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
2060 }
2061 #define pmd_pgtable(pmd) pmd_page(pmd)
2062
2063diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
2064index 5cfba15..f415e1a 100644
2065--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
2066+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
2067@@ -20,12 +20,15 @@
2068 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
2069 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
2070 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
2071+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
2072 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
2073 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
2074 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
2075+
2076 /*
2077 * - section
2078 */
2079+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
2080 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
2081 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
2082 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
2083@@ -37,6 +40,7 @@
2084 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
2085 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
2086 #define PMD_SECT_AF (_AT(pmdval_t, 0))
2087+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
2088
2089 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
2090 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
2091@@ -66,6 +70,7 @@
2092 * - extended small page/tiny page
2093 */
2094 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
2095+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
2096 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
2097 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
2098 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
2099diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
2100index 219ac88..73ec32a 100644
2101--- a/arch/arm/include/asm/pgtable-2level.h
2102+++ b/arch/arm/include/asm/pgtable-2level.h
2103@@ -126,6 +126,9 @@
2104 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
2105 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
2106
2107+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
2108+#define L_PTE_PXN (_AT(pteval_t, 0))
2109+
2110 /*
2111 * These are the memory types, defined to be compatible with
2112 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
2113diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
2114index 9fd61c7..f8f1cff 100644
2115--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
2116+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
2117@@ -76,6 +76,7 @@
2118 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2119 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
2120 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
2121+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2122 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
2123
2124 /*
2125diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2126index 06e0bc0..c65bca8 100644
2127--- a/arch/arm/include/asm/pgtable-3level.h
2128+++ b/arch/arm/include/asm/pgtable-3level.h
2129@@ -81,6 +81,7 @@
2130 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
2131 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2132 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2133+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2134 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2135 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
2136 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
2137@@ -92,10 +93,12 @@
2138 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
2139 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
2140 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
2141+#define PMD_SECT_RDONLY PMD_SECT_AP2
2142
2143 /*
2144 * To be used in assembly code with the upper page attributes.
2145 */
2146+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2147 #define L_PTE_XN_HIGH (1 << (54 - 32))
2148 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2149
2150diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2151index 01baef0..73c156e 100644
2152--- a/arch/arm/include/asm/pgtable.h
2153+++ b/arch/arm/include/asm/pgtable.h
2154@@ -33,6 +33,9 @@
2155 #include <asm/pgtable-2level.h>
2156 #endif
2157
2158+#define ktla_ktva(addr) (addr)
2159+#define ktva_ktla(addr) (addr)
2160+
2161 /*
2162 * Just any arbitrary offset to the start of the vmalloc VM area: the
2163 * current 8MB value just means that there will be a 8MB "hole" after the
2164@@ -48,6 +51,9 @@
2165 #define LIBRARY_TEXT_START 0x0c000000
2166
2167 #ifndef __ASSEMBLY__
2168+extern pteval_t __supported_pte_mask;
2169+extern pmdval_t __supported_pmd_mask;
2170+
2171 extern void __pte_error(const char *file, int line, pte_t);
2172 extern void __pmd_error(const char *file, int line, pmd_t);
2173 extern void __pgd_error(const char *file, int line, pgd_t);
2174@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2175 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2176 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2177
2178+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2179+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2180+
2181+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2182+#include <asm/domain.h>
2183+#include <linux/thread_info.h>
2184+#include <linux/preempt.h>
2185+
2186+static inline int test_domain(int domain, int domaintype)
2187+{
2188+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2189+}
2190+#endif
2191+
2192+#ifdef CONFIG_PAX_KERNEXEC
2193+static inline unsigned long pax_open_kernel(void) {
2194+#ifdef CONFIG_ARM_LPAE
2195+ /* TODO */
2196+#else
2197+ preempt_disable();
2198+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2199+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2200+#endif
2201+ return 0;
2202+}
2203+
2204+static inline unsigned long pax_close_kernel(void) {
2205+#ifdef CONFIG_ARM_LPAE
2206+ /* TODO */
2207+#else
2208+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2209+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2210+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2211+ preempt_enable_no_resched();
2212+#endif
2213+ return 0;
2214+}
2215+#else
2216+static inline unsigned long pax_open_kernel(void) { return 0; }
2217+static inline unsigned long pax_close_kernel(void) { return 0; }
2218+#endif
2219+
2220 /*
2221 * This is the lowest virtual address we can permit any user space
2222 * mapping to be mapped at. This is particularly important for
2223@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2224 /*
2225 * The pgprot_* and protection_map entries will be fixed up in runtime
2226 * to include the cachable and bufferable bits based on memory policy,
2227- * as well as any architecture dependent bits like global/ASID and SMP
2228- * shared mapping bits.
2229+ * as well as any architecture dependent bits like global/ASID, PXN,
2230+ * and SMP shared mapping bits.
2231 */
2232 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2233
2234@@ -269,7 +317,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2235 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2236 {
2237 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2238- L_PTE_NONE | L_PTE_VALID;
2239+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2240 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2241 return pte;
2242 }
2243diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2244index c25ef3e..735f14b 100644
2245--- a/arch/arm/include/asm/psci.h
2246+++ b/arch/arm/include/asm/psci.h
2247@@ -32,7 +32,7 @@ struct psci_operations {
2248 int (*affinity_info)(unsigned long target_affinity,
2249 unsigned long lowest_affinity_level);
2250 int (*migrate_info_type)(void);
2251-};
2252+} __no_const;
2253
2254 extern struct psci_operations psci_ops;
2255 extern struct smp_operations psci_smp_ops;
2256diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2257index 2ec765c..beb1fe16 100644
2258--- a/arch/arm/include/asm/smp.h
2259+++ b/arch/arm/include/asm/smp.h
2260@@ -113,7 +113,7 @@ struct smp_operations {
2261 int (*cpu_disable)(unsigned int cpu);
2262 #endif
2263 #endif
2264-};
2265+} __no_const;
2266
2267 struct of_cpu_method {
2268 const char *method;
2269diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2270index ce73ab6..7310f8a 100644
2271--- a/arch/arm/include/asm/thread_info.h
2272+++ b/arch/arm/include/asm/thread_info.h
2273@@ -78,9 +78,9 @@ struct thread_info {
2274 .flags = 0, \
2275 .preempt_count = INIT_PREEMPT_COUNT, \
2276 .addr_limit = KERNEL_DS, \
2277- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2278- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2279- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2280+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2281+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2282+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2283 .restart_block = { \
2284 .fn = do_no_restart_syscall, \
2285 }, \
2286@@ -154,7 +154,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2287 #define TIF_SYSCALL_AUDIT 9
2288 #define TIF_SYSCALL_TRACEPOINT 10
2289 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2290-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2291+/* within 8 bits of TIF_SYSCALL_TRACE
2292+ * to meet flexible second operand requirements
2293+ */
2294+#define TIF_GRSEC_SETXID 12
2295+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2296 #define TIF_USING_IWMMXT 17
2297 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2298 #define TIF_RESTORE_SIGMASK 20
2299@@ -168,10 +172,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2300 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2301 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2302 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2303+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2304
2305 /* Checks for any syscall work in entry-common.S */
2306 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2307- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2308+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2309
2310 /*
2311 * Change these and you break ASM code in entry-common.S
2312diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2313index 5f833f7..76e6644 100644
2314--- a/arch/arm/include/asm/tls.h
2315+++ b/arch/arm/include/asm/tls.h
2316@@ -3,6 +3,7 @@
2317
2318 #include <linux/compiler.h>
2319 #include <asm/thread_info.h>
2320+#include <asm/pgtable.h>
2321
2322 #ifdef __ASSEMBLY__
2323 #include <asm/asm-offsets.h>
2324@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2325 * at 0xffff0fe0 must be used instead. (see
2326 * entry-armv.S for details)
2327 */
2328+ pax_open_kernel();
2329 *((unsigned int *)0xffff0ff0) = val;
2330+ pax_close_kernel();
2331 #endif
2332 }
2333
2334diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2335index 4767eb9..bf00668 100644
2336--- a/arch/arm/include/asm/uaccess.h
2337+++ b/arch/arm/include/asm/uaccess.h
2338@@ -18,6 +18,7 @@
2339 #include <asm/domain.h>
2340 #include <asm/unified.h>
2341 #include <asm/compiler.h>
2342+#include <asm/pgtable.h>
2343
2344 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2345 #include <asm-generic/uaccess-unaligned.h>
2346@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2347 static inline void set_fs(mm_segment_t fs)
2348 {
2349 current_thread_info()->addr_limit = fs;
2350- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2351+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2352 }
2353
2354 #define segment_eq(a,b) ((a) == (b))
2355
2356+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2357+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2358+
2359+static inline void pax_open_userland(void)
2360+{
2361+
2362+#ifdef CONFIG_PAX_MEMORY_UDEREF
2363+ if (segment_eq(get_fs(), USER_DS)) {
2364+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2365+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2366+ }
2367+#endif
2368+
2369+}
2370+
2371+static inline void pax_close_userland(void)
2372+{
2373+
2374+#ifdef CONFIG_PAX_MEMORY_UDEREF
2375+ if (segment_eq(get_fs(), USER_DS)) {
2376+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2377+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2378+ }
2379+#endif
2380+
2381+}
2382+
2383 #define __addr_ok(addr) ({ \
2384 unsigned long flag; \
2385 __asm__("cmp %2, %0; movlo %0, #0" \
2386@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2387
2388 #define get_user(x,p) \
2389 ({ \
2390+ int __e; \
2391 might_fault(); \
2392- __get_user_check(x,p); \
2393+ pax_open_userland(); \
2394+ __e = __get_user_check(x,p); \
2395+ pax_close_userland(); \
2396+ __e; \
2397 })
2398
2399 extern int __put_user_1(void *, unsigned int);
2400@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2401
2402 #define put_user(x,p) \
2403 ({ \
2404+ int __e; \
2405 might_fault(); \
2406- __put_user_check(x,p); \
2407+ pax_open_userland(); \
2408+ __e = __put_user_check(x,p); \
2409+ pax_close_userland(); \
2410+ __e; \
2411 })
2412
2413 #else /* CONFIG_MMU */
2414@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2415
2416 #endif /* CONFIG_MMU */
2417
2418+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2419 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2420
2421 #define user_addr_max() \
2422@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2423 #define __get_user(x,ptr) \
2424 ({ \
2425 long __gu_err = 0; \
2426+ pax_open_userland(); \
2427 __get_user_err((x),(ptr),__gu_err); \
2428+ pax_close_userland(); \
2429 __gu_err; \
2430 })
2431
2432 #define __get_user_error(x,ptr,err) \
2433 ({ \
2434+ pax_open_userland(); \
2435 __get_user_err((x),(ptr),err); \
2436+ pax_close_userland(); \
2437 (void) 0; \
2438 })
2439
2440@@ -368,13 +409,17 @@ do { \
2441 #define __put_user(x,ptr) \
2442 ({ \
2443 long __pu_err = 0; \
2444+ pax_open_userland(); \
2445 __put_user_err((x),(ptr),__pu_err); \
2446+ pax_close_userland(); \
2447 __pu_err; \
2448 })
2449
2450 #define __put_user_error(x,ptr,err) \
2451 ({ \
2452+ pax_open_userland(); \
2453 __put_user_err((x),(ptr),err); \
2454+ pax_close_userland(); \
2455 (void) 0; \
2456 })
2457
2458@@ -474,11 +519,44 @@ do { \
2459
2460
2461 #ifdef CONFIG_MMU
2462-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2463-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2464+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2465+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2466+
2467+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2468+{
2469+ unsigned long ret;
2470+
2471+ check_object_size(to, n, false);
2472+ pax_open_userland();
2473+ ret = ___copy_from_user(to, from, n);
2474+ pax_close_userland();
2475+ return ret;
2476+}
2477+
2478+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2479+{
2480+ unsigned long ret;
2481+
2482+ check_object_size(from, n, true);
2483+ pax_open_userland();
2484+ ret = ___copy_to_user(to, from, n);
2485+ pax_close_userland();
2486+ return ret;
2487+}
2488+
2489 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2490-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2491+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2492 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2493+
2494+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2495+{
2496+ unsigned long ret;
2497+ pax_open_userland();
2498+ ret = ___clear_user(addr, n);
2499+ pax_close_userland();
2500+ return ret;
2501+}
2502+
2503 #else
2504 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2505 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2506@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2507
2508 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2509 {
2510+ if ((long)n < 0)
2511+ return n;
2512+
2513 if (access_ok(VERIFY_READ, from, n))
2514 n = __copy_from_user(to, from, n);
2515 else /* security hole - plug it */
2516@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2517
2518 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2519 {
2520+ if ((long)n < 0)
2521+ return n;
2522+
2523 if (access_ok(VERIFY_WRITE, to, n))
2524 n = __copy_to_user(to, from, n);
2525 return n;
2526diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2527index 5af0ed1..cea83883 100644
2528--- a/arch/arm/include/uapi/asm/ptrace.h
2529+++ b/arch/arm/include/uapi/asm/ptrace.h
2530@@ -92,7 +92,7 @@
2531 * ARMv7 groups of PSR bits
2532 */
2533 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2534-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2535+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2536 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2537 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2538
2539diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2540index a88671c..1cc895e 100644
2541--- a/arch/arm/kernel/armksyms.c
2542+++ b/arch/arm/kernel/armksyms.c
2543@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2544
2545 /* networking */
2546 EXPORT_SYMBOL(csum_partial);
2547-EXPORT_SYMBOL(csum_partial_copy_from_user);
2548+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2549 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2550 EXPORT_SYMBOL(__csum_ipv6_magic);
2551
2552@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2553 #ifdef CONFIG_MMU
2554 EXPORT_SYMBOL(copy_page);
2555
2556-EXPORT_SYMBOL(__copy_from_user);
2557-EXPORT_SYMBOL(__copy_to_user);
2558-EXPORT_SYMBOL(__clear_user);
2559+EXPORT_SYMBOL(___copy_from_user);
2560+EXPORT_SYMBOL(___copy_to_user);
2561+EXPORT_SYMBOL(___clear_user);
2562
2563 EXPORT_SYMBOL(__get_user_1);
2564 EXPORT_SYMBOL(__get_user_2);
2565diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2566index 36276cd..9d7b13b 100644
2567--- a/arch/arm/kernel/entry-armv.S
2568+++ b/arch/arm/kernel/entry-armv.S
2569@@ -47,6 +47,87 @@
2570 9997:
2571 .endm
2572
2573+ .macro pax_enter_kernel
2574+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2575+ @ make aligned space for saved DACR
2576+ sub sp, sp, #8
2577+ @ save regs
2578+ stmdb sp!, {r1, r2}
2579+ @ read DACR from cpu_domain into r1
2580+ mov r2, sp
2581+ @ assume 8K pages, since we have to split the immediate in two
2582+ bic r2, r2, #(0x1fc0)
2583+ bic r2, r2, #(0x3f)
2584+ ldr r1, [r2, #TI_CPU_DOMAIN]
2585+ @ store old DACR on stack
2586+ str r1, [sp, #8]
2587+#ifdef CONFIG_PAX_KERNEXEC
2588+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2589+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2590+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2591+#endif
2592+#ifdef CONFIG_PAX_MEMORY_UDEREF
2593+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2594+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2595+#endif
2596+ @ write r1 to current_thread_info()->cpu_domain
2597+ str r1, [r2, #TI_CPU_DOMAIN]
2598+ @ write r1 to DACR
2599+ mcr p15, 0, r1, c3, c0, 0
2600+ @ instruction sync
2601+ instr_sync
2602+ @ restore regs
2603+ ldmia sp!, {r1, r2}
2604+#endif
2605+ .endm
2606+
2607+ .macro pax_open_userland
2608+#ifdef CONFIG_PAX_MEMORY_UDEREF
2609+ @ save regs
2610+ stmdb sp!, {r0, r1}
2611+ @ read DACR from cpu_domain into r1
2612+ mov r0, sp
2613+ @ assume 8K pages, since we have to split the immediate in two
2614+ bic r0, r0, #(0x1fc0)
2615+ bic r0, r0, #(0x3f)
2616+ ldr r1, [r0, #TI_CPU_DOMAIN]
2617+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2618+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2619+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2620+ @ write r1 to current_thread_info()->cpu_domain
2621+ str r1, [r0, #TI_CPU_DOMAIN]
2622+ @ write r1 to DACR
2623+ mcr p15, 0, r1, c3, c0, 0
2624+ @ instruction sync
2625+ instr_sync
2626+ @ restore regs
2627+ ldmia sp!, {r0, r1}
2628+#endif
2629+ .endm
2630+
2631+ .macro pax_close_userland
2632+#ifdef CONFIG_PAX_MEMORY_UDEREF
2633+ @ save regs
2634+ stmdb sp!, {r0, r1}
2635+ @ read DACR from cpu_domain into r1
2636+ mov r0, sp
2637+ @ assume 8K pages, since we have to split the immediate in two
2638+ bic r0, r0, #(0x1fc0)
2639+ bic r0, r0, #(0x3f)
2640+ ldr r1, [r0, #TI_CPU_DOMAIN]
2641+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2642+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2643+ @ write r1 to current_thread_info()->cpu_domain
2644+ str r1, [r0, #TI_CPU_DOMAIN]
2645+ @ write r1 to DACR
2646+ mcr p15, 0, r1, c3, c0, 0
2647+ @ instruction sync
2648+ instr_sync
2649+ @ restore regs
2650+ ldmia sp!, {r0, r1}
2651+#endif
2652+ .endm
2653+
2654 .macro pabt_helper
2655 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2656 #ifdef MULTI_PABORT
2657@@ -89,11 +170,15 @@
2658 * Invalid mode handlers
2659 */
2660 .macro inv_entry, reason
2661+
2662+ pax_enter_kernel
2663+
2664 sub sp, sp, #S_FRAME_SIZE
2665 ARM( stmib sp, {r1 - lr} )
2666 THUMB( stmia sp, {r0 - r12} )
2667 THUMB( str sp, [sp, #S_SP] )
2668 THUMB( str lr, [sp, #S_LR] )
2669+
2670 mov r1, #\reason
2671 .endm
2672
2673@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2674 .macro svc_entry, stack_hole=0
2675 UNWIND(.fnstart )
2676 UNWIND(.save {r0 - pc} )
2677+
2678+ pax_enter_kernel
2679+
2680 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2681+
2682 #ifdef CONFIG_THUMB2_KERNEL
2683 SPFIX( str r0, [sp] ) @ temporarily saved
2684 SPFIX( mov r0, sp )
2685@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2686 ldmia r0, {r3 - r5}
2687 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2688 mov r6, #-1 @ "" "" "" ""
2689+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2690+ @ offset sp by 8 as done in pax_enter_kernel
2691+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2692+#else
2693 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2694+#endif
2695 SPFIX( addeq r2, r2, #4 )
2696 str r3, [sp, #-4]! @ save the "real" r0 copied
2697 @ from the exception stack
2698@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
2699 .macro usr_entry
2700 UNWIND(.fnstart )
2701 UNWIND(.cantunwind ) @ don't unwind the user space
2702+
2703+ pax_enter_kernel_user
2704+
2705 sub sp, sp, #S_FRAME_SIZE
2706 ARM( stmib sp, {r1 - r12} )
2707 THUMB( stmia sp, {r0 - r12} )
2708@@ -421,7 +518,9 @@ __und_usr:
2709 tst r3, #PSR_T_BIT @ Thumb mode?
2710 bne __und_usr_thumb
2711 sub r4, r2, #4 @ ARM instr at LR - 4
2712+ pax_open_userland
2713 1: ldrt r0, [r4]
2714+ pax_close_userland
2715 ARM_BE8(rev r0, r0) @ little endian instruction
2716
2717 @ r0 = 32-bit ARM instruction which caused the exception
2718@@ -455,11 +554,15 @@ __und_usr_thumb:
2719 */
2720 .arch armv6t2
2721 #endif
2722+ pax_open_userland
2723 2: ldrht r5, [r4]
2724+ pax_close_userland
2725 ARM_BE8(rev16 r5, r5) @ little endian instruction
2726 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2727 blo __und_usr_fault_16 @ 16bit undefined instruction
2728+ pax_open_userland
2729 3: ldrht r0, [r2]
2730+ pax_close_userland
2731 ARM_BE8(rev16 r0, r0) @ little endian instruction
2732 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2733 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2734@@ -489,7 +592,8 @@ ENDPROC(__und_usr)
2735 */
2736 .pushsection .fixup, "ax"
2737 .align 2
2738-4: str r4, [sp, #S_PC] @ retry current instruction
2739+4: pax_close_userland
2740+ str r4, [sp, #S_PC] @ retry current instruction
2741 ret r9
2742 .popsection
2743 .pushsection __ex_table,"a"
2744@@ -698,7 +802,7 @@ ENTRY(__switch_to)
2745 THUMB( str lr, [ip], #4 )
2746 ldr r4, [r2, #TI_TP_VALUE]
2747 ldr r5, [r2, #TI_TP_VALUE + 4]
2748-#ifdef CONFIG_CPU_USE_DOMAINS
2749+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2750 ldr r6, [r2, #TI_CPU_DOMAIN]
2751 #endif
2752 switch_tls r1, r4, r5, r3, r7
2753@@ -707,7 +811,7 @@ ENTRY(__switch_to)
2754 ldr r8, =__stack_chk_guard
2755 ldr r7, [r7, #TSK_STACK_CANARY]
2756 #endif
2757-#ifdef CONFIG_CPU_USE_DOMAINS
2758+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2759 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2760 #endif
2761 mov r5, r0
2762diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2763index e52fe5a..1b0a924 100644
2764--- a/arch/arm/kernel/entry-common.S
2765+++ b/arch/arm/kernel/entry-common.S
2766@@ -11,18 +11,46 @@
2767 #include <asm/assembler.h>
2768 #include <asm/unistd.h>
2769 #include <asm/ftrace.h>
2770+#include <asm/domain.h>
2771 #include <asm/unwind.h>
2772
2773+#include "entry-header.S"
2774+
2775 #ifdef CONFIG_NEED_RET_TO_USER
2776 #include <mach/entry-macro.S>
2777 #else
2778 .macro arch_ret_to_user, tmp1, tmp2
2779+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2780+ @ save regs
2781+ stmdb sp!, {r1, r2}
2782+ @ read DACR from cpu_domain into r1
2783+ mov r2, sp
2784+ @ assume 8K pages, since we have to split the immediate in two
2785+ bic r2, r2, #(0x1fc0)
2786+ bic r2, r2, #(0x3f)
2787+ ldr r1, [r2, #TI_CPU_DOMAIN]
2788+#ifdef CONFIG_PAX_KERNEXEC
2789+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2790+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2791+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2792+#endif
2793+#ifdef CONFIG_PAX_MEMORY_UDEREF
2794+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2795+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2796+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2797+#endif
2798+ @ write r1 to current_thread_info()->cpu_domain
2799+ str r1, [r2, #TI_CPU_DOMAIN]
2800+ @ write r1 to DACR
2801+ mcr p15, 0, r1, c3, c0, 0
2802+ @ instruction sync
2803+ instr_sync
2804+ @ restore regs
2805+ ldmia sp!, {r1, r2}
2806+#endif
2807 .endm
2808 #endif
2809
2810-#include "entry-header.S"
2811-
2812-
2813 .align 5
2814 /*
2815 * This is the fast syscall return path. We do as little as
2816@@ -406,6 +434,12 @@ ENTRY(vector_swi)
2817 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2818 #endif
2819
2820+ /*
2821+ * do this here to avoid a performance hit of wrapping the code above
2822+ * that directly dereferences userland to parse the SWI instruction
2823+ */
2824+ pax_enter_kernel_user
2825+
2826 adr tbl, sys_call_table @ load syscall table pointer
2827
2828 #if defined(CONFIG_OABI_COMPAT)
2829diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2830index 2fdf867..6e909e4 100644
2831--- a/arch/arm/kernel/entry-header.S
2832+++ b/arch/arm/kernel/entry-header.S
2833@@ -188,6 +188,60 @@
2834 msr cpsr_c, \rtemp @ switch back to the SVC mode
2835 .endm
2836
2837+ .macro pax_enter_kernel_user
2838+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2839+ @ save regs
2840+ stmdb sp!, {r0, r1}
2841+ @ read DACR from cpu_domain into r1
2842+ mov r0, sp
2843+ @ assume 8K pages, since we have to split the immediate in two
2844+ bic r0, r0, #(0x1fc0)
2845+ bic r0, r0, #(0x3f)
2846+ ldr r1, [r0, #TI_CPU_DOMAIN]
2847+#ifdef CONFIG_PAX_MEMORY_UDEREF
2848+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2849+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2850+#endif
2851+#ifdef CONFIG_PAX_KERNEXEC
2852+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2853+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2854+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2855+#endif
2856+ @ write r1 to current_thread_info()->cpu_domain
2857+ str r1, [r0, #TI_CPU_DOMAIN]
2858+ @ write r1 to DACR
2859+ mcr p15, 0, r1, c3, c0, 0
2860+ @ instruction sync
2861+ instr_sync
2862+ @ restore regs
2863+ ldmia sp!, {r0, r1}
2864+#endif
2865+ .endm
2866+
2867+ .macro pax_exit_kernel
2868+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2869+ @ save regs
2870+ stmdb sp!, {r0, r1}
2871+ @ read old DACR from stack into r1
2872+ ldr r1, [sp, #(8 + S_SP)]
2873+ sub r1, r1, #8
2874+ ldr r1, [r1]
2875+
2876+ @ write r1 to current_thread_info()->cpu_domain
2877+ mov r0, sp
2878+ @ assume 8K pages, since we have to split the immediate in two
2879+ bic r0, r0, #(0x1fc0)
2880+ bic r0, r0, #(0x3f)
2881+ str r1, [r0, #TI_CPU_DOMAIN]
2882+ @ write r1 to DACR
2883+ mcr p15, 0, r1, c3, c0, 0
2884+ @ instruction sync
2885+ instr_sync
2886+ @ restore regs
2887+ ldmia sp!, {r0, r1}
2888+#endif
2889+ .endm
2890+
2891 #ifndef CONFIG_THUMB2_KERNEL
2892 .macro svc_exit, rpsr, irq = 0
2893 .if \irq != 0
2894@@ -207,6 +261,9 @@
2895 blne trace_hardirqs_off
2896 #endif
2897 .endif
2898+
2899+ pax_exit_kernel
2900+
2901 msr spsr_cxsf, \rpsr
2902 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2903 @ We must avoid clrex due to Cortex-A15 erratum #830321
2904@@ -254,6 +311,9 @@
2905 blne trace_hardirqs_off
2906 #endif
2907 .endif
2908+
2909+ pax_exit_kernel
2910+
2911 ldr lr, [sp, #S_SP] @ top of the stack
2912 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2913
2914diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2915index 918875d..cd5fa27 100644
2916--- a/arch/arm/kernel/fiq.c
2917+++ b/arch/arm/kernel/fiq.c
2918@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
2919 void *base = vectors_page;
2920 unsigned offset = FIQ_OFFSET;
2921
2922+ pax_open_kernel();
2923 memcpy(base + offset, start, length);
2924+ pax_close_kernel();
2925+
2926 if (!cache_is_vipt_nonaliasing())
2927 flush_icache_range((unsigned long)base + offset, offset +
2928 length);
2929diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2930index 664eee8..f470938 100644
2931--- a/arch/arm/kernel/head.S
2932+++ b/arch/arm/kernel/head.S
2933@@ -437,7 +437,7 @@ __enable_mmu:
2934 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2935 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2936 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2937- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2938+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2939 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2940 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2941 #endif
2942diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2943index 6a4dffe..4a86a70 100644
2944--- a/arch/arm/kernel/module.c
2945+++ b/arch/arm/kernel/module.c
2946@@ -38,12 +38,39 @@
2947 #endif
2948
2949 #ifdef CONFIG_MMU
2950-void *module_alloc(unsigned long size)
2951+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2952 {
2953+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2954+ return NULL;
2955 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2956- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2957+ GFP_KERNEL, prot, NUMA_NO_NODE,
2958 __builtin_return_address(0));
2959 }
2960+
2961+void *module_alloc(unsigned long size)
2962+{
2963+
2964+#ifdef CONFIG_PAX_KERNEXEC
2965+ return __module_alloc(size, PAGE_KERNEL);
2966+#else
2967+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2968+#endif
2969+
2970+}
2971+
2972+#ifdef CONFIG_PAX_KERNEXEC
2973+void module_free_exec(struct module *mod, void *module_region)
2974+{
2975+ module_free(mod, module_region);
2976+}
2977+EXPORT_SYMBOL(module_free_exec);
2978+
2979+void *module_alloc_exec(unsigned long size)
2980+{
2981+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2982+}
2983+EXPORT_SYMBOL(module_alloc_exec);
2984+#endif
2985 #endif
2986
2987 int
2988diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2989index 07314af..c46655c 100644
2990--- a/arch/arm/kernel/patch.c
2991+++ b/arch/arm/kernel/patch.c
2992@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2993 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2994 int size;
2995
2996+ pax_open_kernel();
2997 if (thumb2 && __opcode_is_thumb16(insn)) {
2998 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2999 size = sizeof(u16);
3000@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3001 *(u32 *)addr = insn;
3002 size = sizeof(u32);
3003 }
3004+ pax_close_kernel();
3005
3006 flush_icache_range((uintptr_t)(addr),
3007 (uintptr_t)(addr) + size);
3008diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
3009index a35f6eb..7af43a0 100644
3010--- a/arch/arm/kernel/process.c
3011+++ b/arch/arm/kernel/process.c
3012@@ -212,6 +212,7 @@ void machine_power_off(void)
3013
3014 if (pm_power_off)
3015 pm_power_off();
3016+ BUG();
3017 }
3018
3019 /*
3020@@ -225,7 +226,7 @@ void machine_power_off(void)
3021 * executing pre-reset code, and using RAM that the primary CPU's code wishes
3022 * to use. Implementing such co-ordination would be essentially impossible.
3023 */
3024-void machine_restart(char *cmd)
3025+__noreturn void machine_restart(char *cmd)
3026 {
3027 local_irq_disable();
3028 smp_send_stop();
3029@@ -248,8 +249,8 @@ void __show_regs(struct pt_regs *regs)
3030
3031 show_regs_print_info(KERN_DEFAULT);
3032
3033- print_symbol("PC is at %s\n", instruction_pointer(regs));
3034- print_symbol("LR is at %s\n", regs->ARM_lr);
3035+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
3036+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
3037 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
3038 "sp : %08lx ip : %08lx fp : %08lx\n",
3039 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
3040@@ -427,12 +428,6 @@ unsigned long get_wchan(struct task_struct *p)
3041 return 0;
3042 }
3043
3044-unsigned long arch_randomize_brk(struct mm_struct *mm)
3045-{
3046- unsigned long range_end = mm->brk + 0x02000000;
3047- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
3048-}
3049-
3050 #ifdef CONFIG_MMU
3051 #ifdef CONFIG_KUSER_HELPERS
3052 /*
3053@@ -448,7 +443,7 @@ static struct vm_area_struct gate_vma = {
3054
3055 static int __init gate_vma_init(void)
3056 {
3057- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
3058+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
3059 return 0;
3060 }
3061 arch_initcall(gate_vma_init);
3062@@ -474,41 +469,16 @@ int in_gate_area_no_mm(unsigned long addr)
3063
3064 const char *arch_vma_name(struct vm_area_struct *vma)
3065 {
3066- return is_gate_vma(vma) ? "[vectors]" :
3067- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
3068- "[sigpage]" : NULL;
3069+ return is_gate_vma(vma) ? "[vectors]" : NULL;
3070 }
3071
3072-static struct page *signal_page;
3073-extern struct page *get_signal_page(void);
3074-
3075 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3076 {
3077 struct mm_struct *mm = current->mm;
3078- unsigned long addr;
3079- int ret;
3080-
3081- if (!signal_page)
3082- signal_page = get_signal_page();
3083- if (!signal_page)
3084- return -ENOMEM;
3085
3086 down_write(&mm->mmap_sem);
3087- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
3088- if (IS_ERR_VALUE(addr)) {
3089- ret = addr;
3090- goto up_fail;
3091- }
3092-
3093- ret = install_special_mapping(mm, addr, PAGE_SIZE,
3094- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3095- &signal_page);
3096-
3097- if (ret == 0)
3098- mm->context.sigpage = addr;
3099-
3100- up_fail:
3101+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3102 up_write(&mm->mmap_sem);
3103- return ret;
3104+ return 0;
3105 }
3106 #endif
3107diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3108index f73891b..cf3004e 100644
3109--- a/arch/arm/kernel/psci.c
3110+++ b/arch/arm/kernel/psci.c
3111@@ -28,7 +28,7 @@
3112 #include <asm/psci.h>
3113 #include <asm/system_misc.h>
3114
3115-struct psci_operations psci_ops;
3116+struct psci_operations psci_ops __read_only;
3117
3118 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3119 typedef int (*psci_initcall_t)(const struct device_node *);
3120diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3121index 0c27ed6..b67388e 100644
3122--- a/arch/arm/kernel/ptrace.c
3123+++ b/arch/arm/kernel/ptrace.c
3124@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3125 regs->ARM_ip = ip;
3126 }
3127
3128+#ifdef CONFIG_GRKERNSEC_SETXID
3129+extern void gr_delayed_cred_worker(void);
3130+#endif
3131+
3132 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3133 {
3134 current_thread_info()->syscall = scno;
3135
3136+#ifdef CONFIG_GRKERNSEC_SETXID
3137+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3138+ gr_delayed_cred_worker();
3139+#endif
3140+
3141 /* Do the secure computing check first; failures should be fast. */
3142 if (secure_computing(scno) == -1)
3143 return -1;
3144diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3145index 84db893d..bd8213a 100644
3146--- a/arch/arm/kernel/setup.c
3147+++ b/arch/arm/kernel/setup.c
3148@@ -104,21 +104,23 @@ EXPORT_SYMBOL(elf_hwcap);
3149 unsigned int elf_hwcap2 __read_mostly;
3150 EXPORT_SYMBOL(elf_hwcap2);
3151
3152+pteval_t __supported_pte_mask __read_only;
3153+pmdval_t __supported_pmd_mask __read_only;
3154
3155 #ifdef MULTI_CPU
3156-struct processor processor __read_mostly;
3157+struct processor processor __read_only;
3158 #endif
3159 #ifdef MULTI_TLB
3160-struct cpu_tlb_fns cpu_tlb __read_mostly;
3161+struct cpu_tlb_fns cpu_tlb __read_only;
3162 #endif
3163 #ifdef MULTI_USER
3164-struct cpu_user_fns cpu_user __read_mostly;
3165+struct cpu_user_fns cpu_user __read_only;
3166 #endif
3167 #ifdef MULTI_CACHE
3168-struct cpu_cache_fns cpu_cache __read_mostly;
3169+struct cpu_cache_fns cpu_cache __read_only;
3170 #endif
3171 #ifdef CONFIG_OUTER_CACHE
3172-struct outer_cache_fns outer_cache __read_mostly;
3173+struct outer_cache_fns outer_cache __read_only;
3174 EXPORT_SYMBOL(outer_cache);
3175 #endif
3176
3177@@ -251,9 +253,13 @@ static int __get_cpu_architecture(void)
3178 asm("mrc p15, 0, %0, c0, c1, 4"
3179 : "=r" (mmfr0));
3180 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3181- (mmfr0 & 0x000000f0) >= 0x00000030)
3182+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3183 cpu_arch = CPU_ARCH_ARMv7;
3184- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3185+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3186+ __supported_pte_mask |= L_PTE_PXN;
3187+ __supported_pmd_mask |= PMD_PXNTABLE;
3188+ }
3189+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3190 (mmfr0 & 0x000000f0) == 0x00000020)
3191 cpu_arch = CPU_ARCH_ARMv6;
3192 else
3193diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3194index bd19834..e4d8c66 100644
3195--- a/arch/arm/kernel/signal.c
3196+++ b/arch/arm/kernel/signal.c
3197@@ -24,8 +24,6 @@
3198
3199 extern const unsigned long sigreturn_codes[7];
3200
3201-static unsigned long signal_return_offset;
3202-
3203 #ifdef CONFIG_CRUNCH
3204 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3205 {
3206@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3207 * except when the MPU has protected the vectors
3208 * page from PL0
3209 */
3210- retcode = mm->context.sigpage + signal_return_offset +
3211- (idx << 2) + thumb;
3212+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3213 } else
3214 #endif
3215 {
3216@@ -604,33 +601,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3217 } while (thread_flags & _TIF_WORK_MASK);
3218 return 0;
3219 }
3220-
3221-struct page *get_signal_page(void)
3222-{
3223- unsigned long ptr;
3224- unsigned offset;
3225- struct page *page;
3226- void *addr;
3227-
3228- page = alloc_pages(GFP_KERNEL, 0);
3229-
3230- if (!page)
3231- return NULL;
3232-
3233- addr = page_address(page);
3234-
3235- /* Give the signal return code some randomness */
3236- offset = 0x200 + (get_random_int() & 0x7fc);
3237- signal_return_offset = offset;
3238-
3239- /*
3240- * Copy signal return handlers into the vector page, and
3241- * set sigreturn to be a pointer to these.
3242- */
3243- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3244-
3245- ptr = (unsigned long)addr + offset;
3246- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3247-
3248- return page;
3249-}
3250diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3251index bbe22fc..d7737f5 100644
3252--- a/arch/arm/kernel/smp.c
3253+++ b/arch/arm/kernel/smp.c
3254@@ -76,7 +76,7 @@ enum ipi_msg_type {
3255
3256 static DECLARE_COMPLETION(cpu_running);
3257
3258-static struct smp_operations smp_ops;
3259+static struct smp_operations smp_ops __read_only;
3260
3261 void __init smp_set_ops(struct smp_operations *ops)
3262 {
3263diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3264index 7a3be1d..b00c7de 100644
3265--- a/arch/arm/kernel/tcm.c
3266+++ b/arch/arm/kernel/tcm.c
3267@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3268 .virtual = ITCM_OFFSET,
3269 .pfn = __phys_to_pfn(ITCM_OFFSET),
3270 .length = 0,
3271- .type = MT_MEMORY_RWX_ITCM,
3272+ .type = MT_MEMORY_RX_ITCM,
3273 }
3274 };
3275
3276@@ -267,7 +267,9 @@ no_dtcm:
3277 start = &__sitcm_text;
3278 end = &__eitcm_text;
3279 ram = &__itcm_start;
3280+ pax_open_kernel();
3281 memcpy(start, ram, itcm_code_sz);
3282+ pax_close_kernel();
3283 pr_debug("CPU ITCM: copied code from %p - %p\n",
3284 start, end);
3285 itcm_present = true;
3286diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3287index bea63f5..bc660a7 100644
3288--- a/arch/arm/kernel/traps.c
3289+++ b/arch/arm/kernel/traps.c
3290@@ -64,7 +64,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3291 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3292 {
3293 #ifdef CONFIG_KALLSYMS
3294- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3295+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3296 #else
3297 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3298 #endif
3299@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3300 static int die_owner = -1;
3301 static unsigned int die_nest_count;
3302
3303+extern void gr_handle_kernel_exploit(void);
3304+
3305 static unsigned long oops_begin(void)
3306 {
3307 int cpu;
3308@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3309 panic("Fatal exception in interrupt");
3310 if (panic_on_oops)
3311 panic("Fatal exception");
3312+
3313+ gr_handle_kernel_exploit();
3314+
3315 if (signr)
3316 do_exit(signr);
3317 }
3318@@ -860,7 +865,11 @@ void __init early_trap_init(void *vectors_base)
3319 kuser_init(vectors_base);
3320
3321 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3322- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3323+
3324+#ifndef CONFIG_PAX_MEMORY_UDEREF
3325+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3326+#endif
3327+
3328 #else /* ifndef CONFIG_CPU_V7M */
3329 /*
3330 * on V7-M there is no need to copy the vector table to a dedicated
3331diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3332index 6f57cb9..645f8c4 100644
3333--- a/arch/arm/kernel/vmlinux.lds.S
3334+++ b/arch/arm/kernel/vmlinux.lds.S
3335@@ -8,7 +8,11 @@
3336 #include <asm/thread_info.h>
3337 #include <asm/memory.h>
3338 #include <asm/page.h>
3339-
3340+
3341+#ifdef CONFIG_PAX_KERNEXEC
3342+#include <asm/pgtable.h>
3343+#endif
3344+
3345 #define PROC_INFO \
3346 . = ALIGN(4); \
3347 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3348@@ -34,7 +38,7 @@
3349 #endif
3350
3351 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3352- defined(CONFIG_GENERIC_BUG)
3353+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3354 #define ARM_EXIT_KEEP(x) x
3355 #define ARM_EXIT_DISCARD(x)
3356 #else
3357@@ -90,6 +94,11 @@ SECTIONS
3358 _text = .;
3359 HEAD_TEXT
3360 }
3361+
3362+#ifdef CONFIG_PAX_KERNEXEC
3363+ . = ALIGN(1<<SECTION_SHIFT);
3364+#endif
3365+
3366 .text : { /* Real text segment */
3367 _stext = .; /* Text and read-only data */
3368 __exception_text_start = .;
3369@@ -112,6 +121,8 @@ SECTIONS
3370 ARM_CPU_KEEP(PROC_INFO)
3371 }
3372
3373+ _etext = .; /* End of text section */
3374+
3375 RO_DATA(PAGE_SIZE)
3376
3377 . = ALIGN(4);
3378@@ -142,7 +153,9 @@ SECTIONS
3379
3380 NOTES
3381
3382- _etext = .; /* End of text and rodata section */
3383+#ifdef CONFIG_PAX_KERNEXEC
3384+ . = ALIGN(1<<SECTION_SHIFT);
3385+#endif
3386
3387 #ifndef CONFIG_XIP_KERNEL
3388 . = ALIGN(PAGE_SIZE);
3389@@ -220,6 +233,11 @@ SECTIONS
3390 . = PAGE_OFFSET + TEXT_OFFSET;
3391 #else
3392 __init_end = .;
3393+
3394+#ifdef CONFIG_PAX_KERNEXEC
3395+ . = ALIGN(1<<SECTION_SHIFT);
3396+#endif
3397+
3398 . = ALIGN(THREAD_SIZE);
3399 __data_loc = .;
3400 #endif
3401diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3402index a99e0cd..ab56421d 100644
3403--- a/arch/arm/kvm/arm.c
3404+++ b/arch/arm/kvm/arm.c
3405@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3406 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3407
3408 /* The VMID used in the VTTBR */
3409-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3410+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3411 static u8 kvm_next_vmid;
3412 static DEFINE_SPINLOCK(kvm_vmid_lock);
3413
3414@@ -372,7 +372,7 @@ void force_vm_exit(const cpumask_t *mask)
3415 */
3416 static bool need_new_vmid_gen(struct kvm *kvm)
3417 {
3418- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3419+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3420 }
3421
3422 /**
3423@@ -405,7 +405,7 @@ static void update_vttbr(struct kvm *kvm)
3424
3425 /* First user of a new VMID generation? */
3426 if (unlikely(kvm_next_vmid == 0)) {
3427- atomic64_inc(&kvm_vmid_gen);
3428+ atomic64_inc_unchecked(&kvm_vmid_gen);
3429 kvm_next_vmid = 1;
3430
3431 /*
3432@@ -422,7 +422,7 @@ static void update_vttbr(struct kvm *kvm)
3433 kvm_call_hyp(__kvm_flush_vm_context);
3434 }
3435
3436- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3437+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3438 kvm->arch.vmid = kvm_next_vmid;
3439 kvm_next_vmid++;
3440
3441@@ -997,7 +997,7 @@ static void check_kvm_target_cpu(void *ret)
3442 /**
3443 * Initialize Hyp-mode and memory mappings on all CPUs.
3444 */
3445-int kvm_arch_init(void *opaque)
3446+int kvm_arch_init(const void *opaque)
3447 {
3448 int err;
3449 int ret, cpu;
3450diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3451index 14a0d98..7771a7d 100644
3452--- a/arch/arm/lib/clear_user.S
3453+++ b/arch/arm/lib/clear_user.S
3454@@ -12,14 +12,14 @@
3455
3456 .text
3457
3458-/* Prototype: int __clear_user(void *addr, size_t sz)
3459+/* Prototype: int ___clear_user(void *addr, size_t sz)
3460 * Purpose : clear some user memory
3461 * Params : addr - user memory address to clear
3462 * : sz - number of bytes to clear
3463 * Returns : number of bytes NOT cleared
3464 */
3465 ENTRY(__clear_user_std)
3466-WEAK(__clear_user)
3467+WEAK(___clear_user)
3468 stmfd sp!, {r1, lr}
3469 mov r2, #0
3470 cmp r1, #4
3471@@ -44,7 +44,7 @@ WEAK(__clear_user)
3472 USER( strnebt r2, [r0])
3473 mov r0, #0
3474 ldmfd sp!, {r1, pc}
3475-ENDPROC(__clear_user)
3476+ENDPROC(___clear_user)
3477 ENDPROC(__clear_user_std)
3478
3479 .pushsection .fixup,"ax"
3480diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3481index 66a477a..bee61d3 100644
3482--- a/arch/arm/lib/copy_from_user.S
3483+++ b/arch/arm/lib/copy_from_user.S
3484@@ -16,7 +16,7 @@
3485 /*
3486 * Prototype:
3487 *
3488- * size_t __copy_from_user(void *to, const void *from, size_t n)
3489+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3490 *
3491 * Purpose:
3492 *
3493@@ -84,11 +84,11 @@
3494
3495 .text
3496
3497-ENTRY(__copy_from_user)
3498+ENTRY(___copy_from_user)
3499
3500 #include "copy_template.S"
3501
3502-ENDPROC(__copy_from_user)
3503+ENDPROC(___copy_from_user)
3504
3505 .pushsection .fixup,"ax"
3506 .align 0
3507diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3508index 6ee2f67..d1cce76 100644
3509--- a/arch/arm/lib/copy_page.S
3510+++ b/arch/arm/lib/copy_page.S
3511@@ -10,6 +10,7 @@
3512 * ASM optimised string functions
3513 */
3514 #include <linux/linkage.h>
3515+#include <linux/const.h>
3516 #include <asm/assembler.h>
3517 #include <asm/asm-offsets.h>
3518 #include <asm/cache.h>
3519diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3520index d066df6..df28194 100644
3521--- a/arch/arm/lib/copy_to_user.S
3522+++ b/arch/arm/lib/copy_to_user.S
3523@@ -16,7 +16,7 @@
3524 /*
3525 * Prototype:
3526 *
3527- * size_t __copy_to_user(void *to, const void *from, size_t n)
3528+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3529 *
3530 * Purpose:
3531 *
3532@@ -88,11 +88,11 @@
3533 .text
3534
3535 ENTRY(__copy_to_user_std)
3536-WEAK(__copy_to_user)
3537+WEAK(___copy_to_user)
3538
3539 #include "copy_template.S"
3540
3541-ENDPROC(__copy_to_user)
3542+ENDPROC(___copy_to_user)
3543 ENDPROC(__copy_to_user_std)
3544
3545 .pushsection .fixup,"ax"
3546diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3547index 7d08b43..f7ca7ea 100644
3548--- a/arch/arm/lib/csumpartialcopyuser.S
3549+++ b/arch/arm/lib/csumpartialcopyuser.S
3550@@ -57,8 +57,8 @@
3551 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3552 */
3553
3554-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3555-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3556+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3557+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3558
3559 #include "csumpartialcopygeneric.S"
3560
3561diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3562index 312d43e..21d2322 100644
3563--- a/arch/arm/lib/delay.c
3564+++ b/arch/arm/lib/delay.c
3565@@ -29,7 +29,7 @@
3566 /*
3567 * Default to the loop-based delay implementation.
3568 */
3569-struct arm_delay_ops arm_delay_ops = {
3570+struct arm_delay_ops arm_delay_ops __read_only = {
3571 .delay = __loop_delay,
3572 .const_udelay = __loop_const_udelay,
3573 .udelay = __loop_udelay,
3574diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3575index 3e58d71..029817c 100644
3576--- a/arch/arm/lib/uaccess_with_memcpy.c
3577+++ b/arch/arm/lib/uaccess_with_memcpy.c
3578@@ -136,7 +136,7 @@ out:
3579 }
3580
3581 unsigned long
3582-__copy_to_user(void __user *to, const void *from, unsigned long n)
3583+___copy_to_user(void __user *to, const void *from, unsigned long n)
3584 {
3585 /*
3586 * This test is stubbed out of the main function above to keep
3587@@ -190,7 +190,7 @@ out:
3588 return n;
3589 }
3590
3591-unsigned long __clear_user(void __user *addr, unsigned long n)
3592+unsigned long ___clear_user(void __user *addr, unsigned long n)
3593 {
3594 /* See rational for this in __copy_to_user() above. */
3595 if (n < 64)
3596diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3597index f7a07a5..258e1f7 100644
3598--- a/arch/arm/mach-at91/setup.c
3599+++ b/arch/arm/mach-at91/setup.c
3600@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3601
3602 desc->pfn = __phys_to_pfn(base);
3603 desc->length = length;
3604- desc->type = MT_MEMORY_RWX_NONCACHED;
3605+ desc->type = MT_MEMORY_RW_NONCACHED;
3606
3607 pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3608 base, length, desc->virtual);
3609diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3610index 7f352de..6dc0929 100644
3611--- a/arch/arm/mach-keystone/keystone.c
3612+++ b/arch/arm/mach-keystone/keystone.c
3613@@ -27,7 +27,7 @@
3614
3615 #include "keystone.h"
3616
3617-static struct notifier_block platform_nb;
3618+static notifier_block_no_const platform_nb;
3619 static unsigned long keystone_dma_pfn_offset __read_mostly;
3620
3621 static int keystone_platform_notifier(struct notifier_block *nb,
3622diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3623index 044b511..afd1da8 100644
3624--- a/arch/arm/mach-mvebu/coherency.c
3625+++ b/arch/arm/mach-mvebu/coherency.c
3626@@ -316,7 +316,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3627
3628 /*
3629 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3630- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3631+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3632 * is needed as a workaround for a deadlock issue between the PCIe
3633 * interface and the cache controller.
3634 */
3635@@ -329,7 +329,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3636 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3637
3638 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3639- mtype = MT_UNCACHED;
3640+ mtype = MT_UNCACHED_RW;
3641
3642 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3643 }
3644diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3645index aead77a..a2253fa 100644
3646--- a/arch/arm/mach-omap2/board-n8x0.c
3647+++ b/arch/arm/mach-omap2/board-n8x0.c
3648@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3649 }
3650 #endif
3651
3652-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3653+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3654 .late_init = n8x0_menelaus_late_init,
3655 };
3656
3657diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3658index 2f97228..6ce10e1 100644
3659--- a/arch/arm/mach-omap2/gpmc.c
3660+++ b/arch/arm/mach-omap2/gpmc.c
3661@@ -151,7 +151,6 @@ struct omap3_gpmc_regs {
3662 };
3663
3664 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3665-static struct irq_chip gpmc_irq_chip;
3666 static int gpmc_irq_start;
3667
3668 static struct resource gpmc_mem_root;
3669@@ -736,6 +735,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3670
3671 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3672
3673+static struct irq_chip gpmc_irq_chip = {
3674+ .name = "gpmc",
3675+ .irq_startup = gpmc_irq_noop_ret,
3676+ .irq_enable = gpmc_irq_enable,
3677+ .irq_disable = gpmc_irq_disable,
3678+ .irq_shutdown = gpmc_irq_noop,
3679+ .irq_ack = gpmc_irq_noop,
3680+ .irq_mask = gpmc_irq_noop,
3681+ .irq_unmask = gpmc_irq_noop,
3682+
3683+};
3684+
3685 static int gpmc_setup_irq(void)
3686 {
3687 int i;
3688@@ -750,15 +761,6 @@ static int gpmc_setup_irq(void)
3689 return gpmc_irq_start;
3690 }
3691
3692- gpmc_irq_chip.name = "gpmc";
3693- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3694- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3695- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3696- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3697- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3698- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3699- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3700-
3701 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3702 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3703
3704diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3705index 4001325..b14e2a0 100644
3706--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3707+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3708@@ -84,7 +84,7 @@ struct cpu_pm_ops {
3709 int (*finish_suspend)(unsigned long cpu_state);
3710 void (*resume)(void);
3711 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3712-};
3713+} __no_const;
3714
3715 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3716 static struct powerdomain *mpuss_pd;
3717@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
3718 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3719 {}
3720
3721-struct cpu_pm_ops omap_pm_ops = {
3722+static struct cpu_pm_ops omap_pm_ops __read_only = {
3723 .finish_suspend = default_finish_suspend,
3724 .resume = dummy_cpu_resume,
3725 .scu_prepare = dummy_scu_prepare,
3726diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3727index 37843a7..a98df13 100644
3728--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3729+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3730@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3731 return NOTIFY_OK;
3732 }
3733
3734-static struct notifier_block __refdata irq_hotplug_notifier = {
3735+static struct notifier_block irq_hotplug_notifier = {
3736 .notifier_call = irq_cpu_hotplug_notify,
3737 };
3738
3739diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3740index d22c30d..23697a1 100644
3741--- a/arch/arm/mach-omap2/omap_device.c
3742+++ b/arch/arm/mach-omap2/omap_device.c
3743@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3744 struct platform_device __init *omap_device_build(const char *pdev_name,
3745 int pdev_id,
3746 struct omap_hwmod *oh,
3747- void *pdata, int pdata_len)
3748+ const void *pdata, int pdata_len)
3749 {
3750 struct omap_hwmod *ohs[] = { oh };
3751
3752@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3753 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3754 int pdev_id,
3755 struct omap_hwmod **ohs,
3756- int oh_cnt, void *pdata,
3757+ int oh_cnt, const void *pdata,
3758 int pdata_len)
3759 {
3760 int ret = -ENOMEM;
3761diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3762index 78c02b3..c94109a 100644
3763--- a/arch/arm/mach-omap2/omap_device.h
3764+++ b/arch/arm/mach-omap2/omap_device.h
3765@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3766 /* Core code interface */
3767
3768 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3769- struct omap_hwmod *oh, void *pdata,
3770+ struct omap_hwmod *oh, const void *pdata,
3771 int pdata_len);
3772
3773 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3774 struct omap_hwmod **oh, int oh_cnt,
3775- void *pdata, int pdata_len);
3776+ const void *pdata, int pdata_len);
3777
3778 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3779 struct omap_hwmod **ohs, int oh_cnt);
3780diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3781index 9e91a4e..357ed0d 100644
3782--- a/arch/arm/mach-omap2/omap_hwmod.c
3783+++ b/arch/arm/mach-omap2/omap_hwmod.c
3784@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3785 int (*init_clkdm)(struct omap_hwmod *oh);
3786 void (*update_context_lost)(struct omap_hwmod *oh);
3787 int (*get_context_lost)(struct omap_hwmod *oh);
3788-};
3789+} __no_const;
3790
3791 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3792-static struct omap_hwmod_soc_ops soc_ops;
3793+static struct omap_hwmod_soc_ops soc_ops __read_only;
3794
3795 /* omap_hwmod_list contains all registered struct omap_hwmods */
3796 static LIST_HEAD(omap_hwmod_list);
3797diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3798index 95fee54..cfa9cf1 100644
3799--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3800+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3801@@ -10,6 +10,7 @@
3802
3803 #include <linux/kernel.h>
3804 #include <linux/init.h>
3805+#include <asm/pgtable.h>
3806
3807 #include "powerdomain.h"
3808
3809@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3810
3811 void __init am43xx_powerdomains_init(void)
3812 {
3813- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3814+ pax_open_kernel();
3815+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3816+ pax_close_kernel();
3817 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3818 pwrdm_register_pwrdms(powerdomains_am43xx);
3819 pwrdm_complete_init();
3820diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3821index 97d6607..8429d14 100644
3822--- a/arch/arm/mach-omap2/wd_timer.c
3823+++ b/arch/arm/mach-omap2/wd_timer.c
3824@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3825 struct omap_hwmod *oh;
3826 char *oh_name = "wd_timer2";
3827 char *dev_name = "omap_wdt";
3828- struct omap_wd_timer_platform_data pdata;
3829+ static struct omap_wd_timer_platform_data pdata = {
3830+ .read_reset_sources = prm_read_reset_sources
3831+ };
3832
3833 if (!cpu_class_is_omap2() || of_have_populated_dt())
3834 return 0;
3835@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3836 return -EINVAL;
3837 }
3838
3839- pdata.read_reset_sources = prm_read_reset_sources;
3840-
3841 pdev = omap_device_build(dev_name, id, oh, &pdata,
3842 sizeof(struct omap_wd_timer_platform_data));
3843 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3844diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3845index b30bf5c..d0825bf 100644
3846--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3847+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3848@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3849 bool entered_lp2 = false;
3850
3851 if (tegra_pending_sgi())
3852- ACCESS_ONCE(abort_flag) = true;
3853+ ACCESS_ONCE_RW(abort_flag) = true;
3854
3855 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3856
3857diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3858index 2dea8b5..6499da2 100644
3859--- a/arch/arm/mach-ux500/setup.h
3860+++ b/arch/arm/mach-ux500/setup.h
3861@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3862 .type = MT_DEVICE, \
3863 }
3864
3865-#define __MEM_DEV_DESC(x, sz) { \
3866- .virtual = IO_ADDRESS(x), \
3867- .pfn = __phys_to_pfn(x), \
3868- .length = sz, \
3869- .type = MT_MEMORY_RWX, \
3870-}
3871-
3872 extern struct smp_operations ux500_smp_ops;
3873 extern void ux500_cpu_die(unsigned int cpu);
3874
3875diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3876index 7eb94e6..799ad3e 100644
3877--- a/arch/arm/mm/Kconfig
3878+++ b/arch/arm/mm/Kconfig
3879@@ -446,6 +446,7 @@ config CPU_32v5
3880
3881 config CPU_32v6
3882 bool
3883+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3884 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3885
3886 config CPU_32v6K
3887@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3888
3889 config CPU_USE_DOMAINS
3890 bool
3891+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3892 help
3893 This option enables or disables the use of domain switching
3894 via the set_fs() function.
3895@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3896
3897 config KUSER_HELPERS
3898 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3899- depends on MMU
3900+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3901 default y
3902 help
3903 Warning: disabling this option may break user programs.
3904@@ -812,7 +814,7 @@ config KUSER_HELPERS
3905 See Documentation/arm/kernel_user_helpers.txt for details.
3906
3907 However, the fixed address nature of these helpers can be used
3908- by ROP (return orientated programming) authors when creating
3909+ by ROP (Return Oriented Programming) authors when creating
3910 exploits.
3911
3912 If all of the binaries and libraries which run on your platform
3913diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3914index 83792f4..c25d36b 100644
3915--- a/arch/arm/mm/alignment.c
3916+++ b/arch/arm/mm/alignment.c
3917@@ -216,10 +216,12 @@ union offset_union {
3918 #define __get16_unaligned_check(ins,val,addr) \
3919 do { \
3920 unsigned int err = 0, v, a = addr; \
3921+ pax_open_userland(); \
3922 __get8_unaligned_check(ins,v,a,err); \
3923 val = v << ((BE) ? 8 : 0); \
3924 __get8_unaligned_check(ins,v,a,err); \
3925 val |= v << ((BE) ? 0 : 8); \
3926+ pax_close_userland(); \
3927 if (err) \
3928 goto fault; \
3929 } while (0)
3930@@ -233,6 +235,7 @@ union offset_union {
3931 #define __get32_unaligned_check(ins,val,addr) \
3932 do { \
3933 unsigned int err = 0, v, a = addr; \
3934+ pax_open_userland(); \
3935 __get8_unaligned_check(ins,v,a,err); \
3936 val = v << ((BE) ? 24 : 0); \
3937 __get8_unaligned_check(ins,v,a,err); \
3938@@ -241,6 +244,7 @@ union offset_union {
3939 val |= v << ((BE) ? 8 : 16); \
3940 __get8_unaligned_check(ins,v,a,err); \
3941 val |= v << ((BE) ? 0 : 24); \
3942+ pax_close_userland(); \
3943 if (err) \
3944 goto fault; \
3945 } while (0)
3946@@ -254,6 +258,7 @@ union offset_union {
3947 #define __put16_unaligned_check(ins,val,addr) \
3948 do { \
3949 unsigned int err = 0, v = val, a = addr; \
3950+ pax_open_userland(); \
3951 __asm__( FIRST_BYTE_16 \
3952 ARM( "1: "ins" %1, [%2], #1\n" ) \
3953 THUMB( "1: "ins" %1, [%2]\n" ) \
3954@@ -273,6 +278,7 @@ union offset_union {
3955 " .popsection\n" \
3956 : "=r" (err), "=&r" (v), "=&r" (a) \
3957 : "0" (err), "1" (v), "2" (a)); \
3958+ pax_close_userland(); \
3959 if (err) \
3960 goto fault; \
3961 } while (0)
3962@@ -286,6 +292,7 @@ union offset_union {
3963 #define __put32_unaligned_check(ins,val,addr) \
3964 do { \
3965 unsigned int err = 0, v = val, a = addr; \
3966+ pax_open_userland(); \
3967 __asm__( FIRST_BYTE_32 \
3968 ARM( "1: "ins" %1, [%2], #1\n" ) \
3969 THUMB( "1: "ins" %1, [%2]\n" ) \
3970@@ -315,6 +322,7 @@ union offset_union {
3971 " .popsection\n" \
3972 : "=r" (err), "=&r" (v), "=&r" (a) \
3973 : "0" (err), "1" (v), "2" (a)); \
3974+ pax_close_userland(); \
3975 if (err) \
3976 goto fault; \
3977 } while (0)
3978diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3979index 5f2c988..221412d 100644
3980--- a/arch/arm/mm/cache-l2x0.c
3981+++ b/arch/arm/mm/cache-l2x0.c
3982@@ -41,7 +41,7 @@ struct l2c_init_data {
3983 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3984 void (*save)(void __iomem *);
3985 struct outer_cache_fns outer_cache;
3986-};
3987+} __do_const;
3988
3989 #define CACHE_LINE_SIZE 32
3990
3991diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3992index 6eb97b3..ac509f6 100644
3993--- a/arch/arm/mm/context.c
3994+++ b/arch/arm/mm/context.c
3995@@ -43,7 +43,7 @@
3996 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3997
3998 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3999-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4000+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4001 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
4002
4003 static DEFINE_PER_CPU(atomic64_t, active_asids);
4004@@ -182,7 +182,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4005 {
4006 static u32 cur_idx = 1;
4007 u64 asid = atomic64_read(&mm->context.id);
4008- u64 generation = atomic64_read(&asid_generation);
4009+ u64 generation = atomic64_read_unchecked(&asid_generation);
4010
4011 if (asid != 0 && is_reserved_asid(asid)) {
4012 /*
4013@@ -203,7 +203,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4014 */
4015 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
4016 if (asid == NUM_USER_ASIDS) {
4017- generation = atomic64_add_return(ASID_FIRST_VERSION,
4018+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
4019 &asid_generation);
4020 flush_context(cpu);
4021 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
4022@@ -234,14 +234,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
4023 cpu_set_reserved_ttbr0();
4024
4025 asid = atomic64_read(&mm->context.id);
4026- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
4027+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
4028 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
4029 goto switch_mm_fastpath;
4030
4031 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
4032 /* Check that our ASID belongs to the current generation. */
4033 asid = atomic64_read(&mm->context.id);
4034- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
4035+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
4036 asid = new_context(mm, cpu);
4037 atomic64_set(&mm->context.id, asid);
4038 }
4039diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
4040index eb8830a..e8ff52e 100644
4041--- a/arch/arm/mm/fault.c
4042+++ b/arch/arm/mm/fault.c
4043@@ -25,6 +25,7 @@
4044 #include <asm/system_misc.h>
4045 #include <asm/system_info.h>
4046 #include <asm/tlbflush.h>
4047+#include <asm/sections.h>
4048
4049 #include "fault.h"
4050
4051@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
4052 if (fixup_exception(regs))
4053 return;
4054
4055+#ifdef CONFIG_PAX_MEMORY_UDEREF
4056+ if (addr < TASK_SIZE) {
4057+ if (current->signal->curr_ip)
4058+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4059+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4060+ else
4061+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4062+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4063+ }
4064+#endif
4065+
4066+#ifdef CONFIG_PAX_KERNEXEC
4067+ if ((fsr & FSR_WRITE) &&
4068+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
4069+ (MODULES_VADDR <= addr && addr < MODULES_END)))
4070+ {
4071+ if (current->signal->curr_ip)
4072+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4073+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4074+ else
4075+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
4076+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4077+ }
4078+#endif
4079+
4080 /*
4081 * No handler, we'll have to terminate things with extreme prejudice.
4082 */
4083@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
4084 }
4085 #endif
4086
4087+#ifdef CONFIG_PAX_PAGEEXEC
4088+ if (fsr & FSR_LNX_PF) {
4089+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
4090+ do_group_exit(SIGKILL);
4091+ }
4092+#endif
4093+
4094 tsk->thread.address = addr;
4095 tsk->thread.error_code = fsr;
4096 tsk->thread.trap_no = 14;
4097@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4098 }
4099 #endif /* CONFIG_MMU */
4100
4101+#ifdef CONFIG_PAX_PAGEEXEC
4102+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4103+{
4104+ long i;
4105+
4106+ printk(KERN_ERR "PAX: bytes at PC: ");
4107+ for (i = 0; i < 20; i++) {
4108+ unsigned char c;
4109+ if (get_user(c, (__force unsigned char __user *)pc+i))
4110+ printk(KERN_CONT "?? ");
4111+ else
4112+ printk(KERN_CONT "%02x ", c);
4113+ }
4114+ printk("\n");
4115+
4116+ printk(KERN_ERR "PAX: bytes at SP-4: ");
4117+ for (i = -1; i < 20; i++) {
4118+ unsigned long c;
4119+ if (get_user(c, (__force unsigned long __user *)sp+i))
4120+ printk(KERN_CONT "???????? ");
4121+ else
4122+ printk(KERN_CONT "%08lx ", c);
4123+ }
4124+ printk("\n");
4125+}
4126+#endif
4127+
4128 /*
4129 * First Level Translation Fault Handler
4130 *
4131@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4132 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4133 struct siginfo info;
4134
4135+#ifdef CONFIG_PAX_MEMORY_UDEREF
4136+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4137+ if (current->signal->curr_ip)
4138+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4139+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4140+ else
4141+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4142+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4143+ goto die;
4144+ }
4145+#endif
4146+
4147 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4148 return;
4149
4150+die:
4151 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4152 inf->name, fsr, addr);
4153
4154@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4155 ifsr_info[nr].name = name;
4156 }
4157
4158+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4159+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4160+
4161 asmlinkage void __exception
4162 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4163 {
4164 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4165 struct siginfo info;
4166+ unsigned long pc = instruction_pointer(regs);
4167+
4168+ if (user_mode(regs)) {
4169+ unsigned long sigpage = current->mm->context.sigpage;
4170+
4171+ if (sigpage <= pc && pc < sigpage + 7*4) {
4172+ if (pc < sigpage + 3*4)
4173+ sys_sigreturn(regs);
4174+ else
4175+ sys_rt_sigreturn(regs);
4176+ return;
4177+ }
4178+ if (pc == 0xffff0f60UL) {
4179+ /*
4180+ * PaX: __kuser_cmpxchg64 emulation
4181+ */
4182+ // TODO
4183+ //regs->ARM_pc = regs->ARM_lr;
4184+ //return;
4185+ }
4186+ if (pc == 0xffff0fa0UL) {
4187+ /*
4188+ * PaX: __kuser_memory_barrier emulation
4189+ */
4190+ // dmb(); implied by the exception
4191+ regs->ARM_pc = regs->ARM_lr;
4192+ return;
4193+ }
4194+ if (pc == 0xffff0fc0UL) {
4195+ /*
4196+ * PaX: __kuser_cmpxchg emulation
4197+ */
4198+ // TODO
4199+ //long new;
4200+ //int op;
4201+
4202+ //op = FUTEX_OP_SET << 28;
4203+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4204+ //regs->ARM_r0 = old != new;
4205+ //regs->ARM_pc = regs->ARM_lr;
4206+ //return;
4207+ }
4208+ if (pc == 0xffff0fe0UL) {
4209+ /*
4210+ * PaX: __kuser_get_tls emulation
4211+ */
4212+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4213+ regs->ARM_pc = regs->ARM_lr;
4214+ return;
4215+ }
4216+ }
4217+
4218+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4219+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4220+ if (current->signal->curr_ip)
4221+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4222+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4223+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4224+ else
4225+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4226+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4227+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4228+ goto die;
4229+ }
4230+#endif
4231+
4232+#ifdef CONFIG_PAX_REFCOUNT
4233+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4234+#ifdef CONFIG_THUMB2_KERNEL
4235+ unsigned short bkpt;
4236+
4237+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4238+#else
4239+ unsigned int bkpt;
4240+
4241+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4242+#endif
4243+ current->thread.error_code = ifsr;
4244+ current->thread.trap_no = 0;
4245+ pax_report_refcount_overflow(regs);
4246+ fixup_exception(regs);
4247+ return;
4248+ }
4249+ }
4250+#endif
4251
4252 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4253 return;
4254
4255+die:
4256 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4257 inf->name, ifsr, addr);
4258
4259diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4260index cf08bdf..772656c 100644
4261--- a/arch/arm/mm/fault.h
4262+++ b/arch/arm/mm/fault.h
4263@@ -3,6 +3,7 @@
4264
4265 /*
4266 * Fault status register encodings. We steal bit 31 for our own purposes.
4267+ * Set when the FSR value is from an instruction fault.
4268 */
4269 #define FSR_LNX_PF (1 << 31)
4270 #define FSR_WRITE (1 << 11)
4271@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4272 }
4273 #endif
4274
4275+/* valid for LPAE and !LPAE */
4276+static inline int is_xn_fault(unsigned int fsr)
4277+{
4278+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4279+}
4280+
4281+static inline int is_domain_fault(unsigned int fsr)
4282+{
4283+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4284+}
4285+
4286 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4287 unsigned long search_exception_table(unsigned long addr);
4288
4289diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4290index 659c75d..6f8c029 100644
4291--- a/arch/arm/mm/init.c
4292+++ b/arch/arm/mm/init.c
4293@@ -31,6 +31,8 @@
4294 #include <asm/setup.h>
4295 #include <asm/tlb.h>
4296 #include <asm/fixmap.h>
4297+#include <asm/system_info.h>
4298+#include <asm/cp15.h>
4299
4300 #include <asm/mach/arch.h>
4301 #include <asm/mach/map.h>
4302@@ -619,7 +621,46 @@ void free_initmem(void)
4303 {
4304 #ifdef CONFIG_HAVE_TCM
4305 extern char __tcm_start, __tcm_end;
4306+#endif
4307
4308+#ifdef CONFIG_PAX_KERNEXEC
4309+ unsigned long addr;
4310+ pgd_t *pgd;
4311+ pud_t *pud;
4312+ pmd_t *pmd;
4313+ int cpu_arch = cpu_architecture();
4314+ unsigned int cr = get_cr();
4315+
4316+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4317+ /* make pages tables, etc before .text NX */
4318+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4319+ pgd = pgd_offset_k(addr);
4320+ pud = pud_offset(pgd, addr);
4321+ pmd = pmd_offset(pud, addr);
4322+ __section_update(pmd, addr, PMD_SECT_XN);
4323+ }
4324+ /* make init NX */
4325+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4326+ pgd = pgd_offset_k(addr);
4327+ pud = pud_offset(pgd, addr);
4328+ pmd = pmd_offset(pud, addr);
4329+ __section_update(pmd, addr, PMD_SECT_XN);
4330+ }
4331+ /* make kernel code/rodata RX */
4332+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4333+ pgd = pgd_offset_k(addr);
4334+ pud = pud_offset(pgd, addr);
4335+ pmd = pmd_offset(pud, addr);
4336+#ifdef CONFIG_ARM_LPAE
4337+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4338+#else
4339+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4340+#endif
4341+ }
4342+ }
4343+#endif
4344+
4345+#ifdef CONFIG_HAVE_TCM
4346 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4347 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4348 #endif
4349diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4350index d1e5ad7..84dcbf2 100644
4351--- a/arch/arm/mm/ioremap.c
4352+++ b/arch/arm/mm/ioremap.c
4353@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4354 unsigned int mtype;
4355
4356 if (cached)
4357- mtype = MT_MEMORY_RWX;
4358+ mtype = MT_MEMORY_RX;
4359 else
4360- mtype = MT_MEMORY_RWX_NONCACHED;
4361+ mtype = MT_MEMORY_RX_NONCACHED;
4362
4363 return __arm_ioremap_caller(phys_addr, size, mtype,
4364 __builtin_return_address(0));
4365diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4366index 5e85ed3..b10a7ed 100644
4367--- a/arch/arm/mm/mmap.c
4368+++ b/arch/arm/mm/mmap.c
4369@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4370 struct vm_area_struct *vma;
4371 int do_align = 0;
4372 int aliasing = cache_is_vipt_aliasing();
4373+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4374 struct vm_unmapped_area_info info;
4375
4376 /*
4377@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4378 if (len > TASK_SIZE)
4379 return -ENOMEM;
4380
4381+#ifdef CONFIG_PAX_RANDMMAP
4382+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4383+#endif
4384+
4385 if (addr) {
4386 if (do_align)
4387 addr = COLOUR_ALIGN(addr, pgoff);
4388@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4389 addr = PAGE_ALIGN(addr);
4390
4391 vma = find_vma(mm, addr);
4392- if (TASK_SIZE - len >= addr &&
4393- (!vma || addr + len <= vma->vm_start))
4394+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4395 return addr;
4396 }
4397
4398@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4399 info.high_limit = TASK_SIZE;
4400 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4401 info.align_offset = pgoff << PAGE_SHIFT;
4402+ info.threadstack_offset = offset;
4403 return vm_unmapped_area(&info);
4404 }
4405
4406@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4407 unsigned long addr = addr0;
4408 int do_align = 0;
4409 int aliasing = cache_is_vipt_aliasing();
4410+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4411 struct vm_unmapped_area_info info;
4412
4413 /*
4414@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4415 return addr;
4416 }
4417
4418+#ifdef CONFIG_PAX_RANDMMAP
4419+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4420+#endif
4421+
4422 /* requesting a specific address */
4423 if (addr) {
4424 if (do_align)
4425@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4426 else
4427 addr = PAGE_ALIGN(addr);
4428 vma = find_vma(mm, addr);
4429- if (TASK_SIZE - len >= addr &&
4430- (!vma || addr + len <= vma->vm_start))
4431+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4432 return addr;
4433 }
4434
4435@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4436 info.high_limit = mm->mmap_base;
4437 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4438 info.align_offset = pgoff << PAGE_SHIFT;
4439+ info.threadstack_offset = offset;
4440 addr = vm_unmapped_area(&info);
4441
4442 /*
4443@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4444 {
4445 unsigned long random_factor = 0UL;
4446
4447+#ifdef CONFIG_PAX_RANDMMAP
4448+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4449+#endif
4450+
4451 /* 8 bits of randomness in 20 address space bits */
4452 if ((current->flags & PF_RANDOMIZE) &&
4453 !(current->personality & ADDR_NO_RANDOMIZE))
4454@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4455
4456 if (mmap_is_legacy()) {
4457 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4458+
4459+#ifdef CONFIG_PAX_RANDMMAP
4460+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4461+ mm->mmap_base += mm->delta_mmap;
4462+#endif
4463+
4464 mm->get_unmapped_area = arch_get_unmapped_area;
4465 } else {
4466 mm->mmap_base = mmap_base(random_factor);
4467+
4468+#ifdef CONFIG_PAX_RANDMMAP
4469+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4470+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4471+#endif
4472+
4473 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4474 }
4475 }
4476diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4477index 8348ed6..b73a807 100644
4478--- a/arch/arm/mm/mmu.c
4479+++ b/arch/arm/mm/mmu.c
4480@@ -40,6 +40,22 @@
4481 #include "mm.h"
4482 #include "tcm.h"
4483
4484+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4485+void modify_domain(unsigned int dom, unsigned int type)
4486+{
4487+ struct thread_info *thread = current_thread_info();
4488+ unsigned int domain = thread->cpu_domain;
4489+ /*
4490+ * DOMAIN_MANAGER might be defined to some other value,
4491+ * use the arch-defined constant
4492+ */
4493+ domain &= ~domain_val(dom, 3);
4494+ thread->cpu_domain = domain | domain_val(dom, type);
4495+ set_domain(thread->cpu_domain);
4496+}
4497+EXPORT_SYMBOL(modify_domain);
4498+#endif
4499+
4500 /*
4501 * empty_zero_page is a special page that is used for
4502 * zero-initialized data and COW.
4503@@ -239,7 +255,15 @@ __setup("noalign", noalign_setup);
4504 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4505 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4506
4507-static struct mem_type mem_types[] = {
4508+#ifdef CONFIG_PAX_KERNEXEC
4509+#define L_PTE_KERNEXEC L_PTE_RDONLY
4510+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4511+#else
4512+#define L_PTE_KERNEXEC L_PTE_DIRTY
4513+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4514+#endif
4515+
4516+static struct mem_type mem_types[] __read_only = {
4517 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4518 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4519 L_PTE_SHARED,
4520@@ -268,19 +292,19 @@ static struct mem_type mem_types[] = {
4521 .prot_sect = PROT_SECT_DEVICE,
4522 .domain = DOMAIN_IO,
4523 },
4524- [MT_UNCACHED] = {
4525+ [MT_UNCACHED_RW] = {
4526 .prot_pte = PROT_PTE_DEVICE,
4527 .prot_l1 = PMD_TYPE_TABLE,
4528 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4529 .domain = DOMAIN_IO,
4530 },
4531- [MT_CACHECLEAN] = {
4532- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4533+ [MT_CACHECLEAN_RO] = {
4534+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4535 .domain = DOMAIN_KERNEL,
4536 },
4537 #ifndef CONFIG_ARM_LPAE
4538- [MT_MINICLEAN] = {
4539- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4540+ [MT_MINICLEAN_RO] = {
4541+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4542 .domain = DOMAIN_KERNEL,
4543 },
4544 #endif
4545@@ -288,15 +312,15 @@ static struct mem_type mem_types[] = {
4546 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4547 L_PTE_RDONLY,
4548 .prot_l1 = PMD_TYPE_TABLE,
4549- .domain = DOMAIN_USER,
4550+ .domain = DOMAIN_VECTORS,
4551 },
4552 [MT_HIGH_VECTORS] = {
4553 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4554 L_PTE_USER | L_PTE_RDONLY,
4555 .prot_l1 = PMD_TYPE_TABLE,
4556- .domain = DOMAIN_USER,
4557+ .domain = DOMAIN_VECTORS,
4558 },
4559- [MT_MEMORY_RWX] = {
4560+ [__MT_MEMORY_RWX] = {
4561 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4562 .prot_l1 = PMD_TYPE_TABLE,
4563 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4564@@ -309,17 +333,30 @@ static struct mem_type mem_types[] = {
4565 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4566 .domain = DOMAIN_KERNEL,
4567 },
4568- [MT_ROM] = {
4569- .prot_sect = PMD_TYPE_SECT,
4570+ [MT_MEMORY_RX] = {
4571+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4572+ .prot_l1 = PMD_TYPE_TABLE,
4573+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4574+ .domain = DOMAIN_KERNEL,
4575+ },
4576+ [MT_ROM_RX] = {
4577+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4578 .domain = DOMAIN_KERNEL,
4579 },
4580- [MT_MEMORY_RWX_NONCACHED] = {
4581+ [MT_MEMORY_RW_NONCACHED] = {
4582 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4583 L_PTE_MT_BUFFERABLE,
4584 .prot_l1 = PMD_TYPE_TABLE,
4585 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4586 .domain = DOMAIN_KERNEL,
4587 },
4588+ [MT_MEMORY_RX_NONCACHED] = {
4589+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4590+ L_PTE_MT_BUFFERABLE,
4591+ .prot_l1 = PMD_TYPE_TABLE,
4592+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4593+ .domain = DOMAIN_KERNEL,
4594+ },
4595 [MT_MEMORY_RW_DTCM] = {
4596 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4597 L_PTE_XN,
4598@@ -327,9 +364,10 @@ static struct mem_type mem_types[] = {
4599 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4600 .domain = DOMAIN_KERNEL,
4601 },
4602- [MT_MEMORY_RWX_ITCM] = {
4603- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4604+ [MT_MEMORY_RX_ITCM] = {
4605+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4606 .prot_l1 = PMD_TYPE_TABLE,
4607+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4608 .domain = DOMAIN_KERNEL,
4609 },
4610 [MT_MEMORY_RW_SO] = {
4611@@ -547,9 +585,14 @@ static void __init build_mem_type_table(void)
4612 * Mark cache clean areas and XIP ROM read only
4613 * from SVC mode and no access from userspace.
4614 */
4615- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4616- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4617- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4618+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4619+#ifdef CONFIG_PAX_KERNEXEC
4620+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4621+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4622+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4623+#endif
4624+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4625+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4626 #endif
4627
4628 /*
4629@@ -566,13 +609,17 @@ static void __init build_mem_type_table(void)
4630 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4631 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4632 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4633- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4634- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4635+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4636+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4637 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4638 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4639+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4640+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4641 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4642- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4643- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4644+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4645+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4646+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4647+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4648 }
4649 }
4650
4651@@ -583,15 +630,20 @@ static void __init build_mem_type_table(void)
4652 if (cpu_arch >= CPU_ARCH_ARMv6) {
4653 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4654 /* Non-cacheable Normal is XCB = 001 */
4655- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4656+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4657+ PMD_SECT_BUFFERED;
4658+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4659 PMD_SECT_BUFFERED;
4660 } else {
4661 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4662- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4663+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4664+ PMD_SECT_TEX(1);
4665+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4666 PMD_SECT_TEX(1);
4667 }
4668 } else {
4669- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4670+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4671+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4672 }
4673
4674 #ifdef CONFIG_ARM_LPAE
4675@@ -607,6 +659,8 @@ static void __init build_mem_type_table(void)
4676 vecs_pgprot |= PTE_EXT_AF;
4677 #endif
4678
4679+ user_pgprot |= __supported_pte_mask;
4680+
4681 for (i = 0; i < 16; i++) {
4682 pteval_t v = pgprot_val(protection_map[i]);
4683 protection_map[i] = __pgprot(v | user_pgprot);
4684@@ -624,21 +678,24 @@ static void __init build_mem_type_table(void)
4685
4686 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4687 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4688- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4689- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4690+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4691+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4692 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4693 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4694+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4695+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4696 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4697- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4698- mem_types[MT_ROM].prot_sect |= cp->pmd;
4699+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4700+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4701+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4702
4703 switch (cp->pmd) {
4704 case PMD_SECT_WT:
4705- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4706+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4707 break;
4708 case PMD_SECT_WB:
4709 case PMD_SECT_WBWA:
4710- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4711+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4712 break;
4713 }
4714 pr_info("Memory policy: %sData cache %s\n",
4715@@ -856,7 +913,7 @@ static void __init create_mapping(struct map_desc *md)
4716 return;
4717 }
4718
4719- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4720+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4721 md->virtual >= PAGE_OFFSET &&
4722 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4723 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
4724@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
4725 * called function. This means you can't use any function or debugging
4726 * method which may touch any device, otherwise the kernel _will_ crash.
4727 */
4728+
4729+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4730+
4731 static void __init devicemaps_init(const struct machine_desc *mdesc)
4732 {
4733 struct map_desc map;
4734 unsigned long addr;
4735- void *vectors;
4736
4737- /*
4738- * Allocate the vector page early.
4739- */
4740- vectors = early_alloc(PAGE_SIZE * 2);
4741-
4742- early_trap_init(vectors);
4743+ early_trap_init(&vectors);
4744
4745 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4746 pmd_clear(pmd_off_k(addr));
4747@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4748 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4749 map.virtual = MODULES_VADDR;
4750 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4751- map.type = MT_ROM;
4752+ map.type = MT_ROM_RX;
4753 create_mapping(&map);
4754 #endif
4755
4756@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4757 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4758 map.virtual = FLUSH_BASE;
4759 map.length = SZ_1M;
4760- map.type = MT_CACHECLEAN;
4761+ map.type = MT_CACHECLEAN_RO;
4762 create_mapping(&map);
4763 #endif
4764 #ifdef FLUSH_BASE_MINICACHE
4765 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4766 map.virtual = FLUSH_BASE_MINICACHE;
4767 map.length = SZ_1M;
4768- map.type = MT_MINICLEAN;
4769+ map.type = MT_MINICLEAN_RO;
4770 create_mapping(&map);
4771 #endif
4772
4773@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4774 * location (0xffff0000). If we aren't using high-vectors, also
4775 * create a mapping at the low-vectors virtual address.
4776 */
4777- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4778+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4779 map.virtual = 0xffff0000;
4780 map.length = PAGE_SIZE;
4781 #ifdef CONFIG_KUSER_HELPERS
4782@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
4783 static void __init map_lowmem(void)
4784 {
4785 struct memblock_region *reg;
4786+#ifndef CONFIG_PAX_KERNEXEC
4787 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4788 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4789+#endif
4790
4791 /* Map all the lowmem memory banks. */
4792 for_each_memblock(memory, reg) {
4793@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
4794 if (start >= end)
4795 break;
4796
4797+#ifdef CONFIG_PAX_KERNEXEC
4798+ map.pfn = __phys_to_pfn(start);
4799+ map.virtual = __phys_to_virt(start);
4800+ map.length = end - start;
4801+
4802+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4803+ struct map_desc kernel;
4804+ struct map_desc initmap;
4805+
4806+ /* when freeing initmem we will make this RW */
4807+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4808+ initmap.virtual = (unsigned long)__init_begin;
4809+ initmap.length = _sdata - __init_begin;
4810+ initmap.type = __MT_MEMORY_RWX;
4811+ create_mapping(&initmap);
4812+
4813+ /* when freeing initmem we will make this RX */
4814+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4815+ kernel.virtual = (unsigned long)_stext;
4816+ kernel.length = __init_begin - _stext;
4817+ kernel.type = __MT_MEMORY_RWX;
4818+ create_mapping(&kernel);
4819+
4820+ if (map.virtual < (unsigned long)_stext) {
4821+ map.length = (unsigned long)_stext - map.virtual;
4822+ map.type = __MT_MEMORY_RWX;
4823+ create_mapping(&map);
4824+ }
4825+
4826+ map.pfn = __phys_to_pfn(__pa(_sdata));
4827+ map.virtual = (unsigned long)_sdata;
4828+ map.length = end - __pa(_sdata);
4829+ }
4830+
4831+ map.type = MT_MEMORY_RW;
4832+ create_mapping(&map);
4833+#else
4834 if (end < kernel_x_start || start >= kernel_x_end) {
4835 map.pfn = __phys_to_pfn(start);
4836 map.virtual = __phys_to_virt(start);
4837 map.length = end - start;
4838- map.type = MT_MEMORY_RWX;
4839+ map.type = __MT_MEMORY_RWX;
4840
4841 create_mapping(&map);
4842 } else {
4843@@ -1370,7 +1463,7 @@ static void __init map_lowmem(void)
4844 map.pfn = __phys_to_pfn(kernel_x_start);
4845 map.virtual = __phys_to_virt(kernel_x_start);
4846 map.length = kernel_x_end - kernel_x_start;
4847- map.type = MT_MEMORY_RWX;
4848+ map.type = __MT_MEMORY_RWX;
4849
4850 create_mapping(&map);
4851
4852@@ -1383,6 +1476,7 @@ static void __init map_lowmem(void)
4853 create_mapping(&map);
4854 }
4855 }
4856+#endif
4857 }
4858 }
4859
4860diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4861index a37b989..5c9ae75 100644
4862--- a/arch/arm/net/bpf_jit_32.c
4863+++ b/arch/arm/net/bpf_jit_32.c
4864@@ -71,7 +71,11 @@ struct jit_ctx {
4865 #endif
4866 };
4867
4868+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4869+int bpf_jit_enable __read_only;
4870+#else
4871 int bpf_jit_enable __read_mostly;
4872+#endif
4873
4874 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4875 {
4876@@ -930,5 +934,6 @@ void bpf_jit_free(struct bpf_prog *fp)
4877 {
4878 if (fp->jited)
4879 module_free(NULL, fp->bpf_func);
4880- kfree(fp);
4881+
4882+ bpf_prog_unlock_free(fp);
4883 }
4884diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4885index 5b217f4..c23f40e 100644
4886--- a/arch/arm/plat-iop/setup.c
4887+++ b/arch/arm/plat-iop/setup.c
4888@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4889 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4890 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4891 .length = IOP3XX_PERIPHERAL_SIZE,
4892- .type = MT_UNCACHED,
4893+ .type = MT_UNCACHED_RW,
4894 },
4895 };
4896
4897diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4898index a5bc92d..0bb4730 100644
4899--- a/arch/arm/plat-omap/sram.c
4900+++ b/arch/arm/plat-omap/sram.c
4901@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4902 * Looks like we need to preserve some bootloader code at the
4903 * beginning of SRAM for jumping to flash for reboot to work...
4904 */
4905+ pax_open_kernel();
4906 memset_io(omap_sram_base + omap_sram_skip, 0,
4907 omap_sram_size - omap_sram_skip);
4908+ pax_close_kernel();
4909 }
4910diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4911index ce6d763..cfea917 100644
4912--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4913+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4914@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4915 int (*started)(unsigned ch);
4916 int (*flush)(unsigned ch);
4917 int (*stop)(unsigned ch);
4918-};
4919+} __no_const;
4920
4921 extern void *samsung_dmadev_get_ops(void);
4922 extern void *s3c_dma_get_ops(void);
4923diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4924index 6389d60..b5d3bdd 100644
4925--- a/arch/arm64/include/asm/barrier.h
4926+++ b/arch/arm64/include/asm/barrier.h
4927@@ -41,7 +41,7 @@
4928 do { \
4929 compiletime_assert_atomic_type(*p); \
4930 barrier(); \
4931- ACCESS_ONCE(*p) = (v); \
4932+ ACCESS_ONCE_RW(*p) = (v); \
4933 } while (0)
4934
4935 #define smp_load_acquire(p) \
4936diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4937index 3bf8f4e..5dd5491 100644
4938--- a/arch/arm64/include/asm/uaccess.h
4939+++ b/arch/arm64/include/asm/uaccess.h
4940@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4941 flag; \
4942 })
4943
4944+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4945 #define access_ok(type, addr, size) __range_ok(addr, size)
4946 #define user_addr_max get_fs
4947
4948diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4949index c3a58a1..78fbf54 100644
4950--- a/arch/avr32/include/asm/cache.h
4951+++ b/arch/avr32/include/asm/cache.h
4952@@ -1,8 +1,10 @@
4953 #ifndef __ASM_AVR32_CACHE_H
4954 #define __ASM_AVR32_CACHE_H
4955
4956+#include <linux/const.h>
4957+
4958 #define L1_CACHE_SHIFT 5
4959-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4960+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4961
4962 /*
4963 * Memory returned by kmalloc() may be used for DMA, so we must make
4964diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4965index d232888..87c8df1 100644
4966--- a/arch/avr32/include/asm/elf.h
4967+++ b/arch/avr32/include/asm/elf.h
4968@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4969 the loader. We need to make sure that it is out of the way of the program
4970 that it will "exec", and that there is sufficient room for the brk. */
4971
4972-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4973+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4974
4975+#ifdef CONFIG_PAX_ASLR
4976+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4977+
4978+#define PAX_DELTA_MMAP_LEN 15
4979+#define PAX_DELTA_STACK_LEN 15
4980+#endif
4981
4982 /* This yields a mask that user programs can use to figure out what
4983 instruction set this CPU supports. This could be done in user space,
4984diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4985index 479330b..53717a8 100644
4986--- a/arch/avr32/include/asm/kmap_types.h
4987+++ b/arch/avr32/include/asm/kmap_types.h
4988@@ -2,9 +2,9 @@
4989 #define __ASM_AVR32_KMAP_TYPES_H
4990
4991 #ifdef CONFIG_DEBUG_HIGHMEM
4992-# define KM_TYPE_NR 29
4993+# define KM_TYPE_NR 30
4994 #else
4995-# define KM_TYPE_NR 14
4996+# define KM_TYPE_NR 15
4997 #endif
4998
4999 #endif /* __ASM_AVR32_KMAP_TYPES_H */
5000diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
5001index 0eca933..eb78c7b 100644
5002--- a/arch/avr32/mm/fault.c
5003+++ b/arch/avr32/mm/fault.c
5004@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
5005
5006 int exception_trace = 1;
5007
5008+#ifdef CONFIG_PAX_PAGEEXEC
5009+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5010+{
5011+ unsigned long i;
5012+
5013+ printk(KERN_ERR "PAX: bytes at PC: ");
5014+ for (i = 0; i < 20; i++) {
5015+ unsigned char c;
5016+ if (get_user(c, (unsigned char *)pc+i))
5017+ printk(KERN_CONT "???????? ");
5018+ else
5019+ printk(KERN_CONT "%02x ", c);
5020+ }
5021+ printk("\n");
5022+}
5023+#endif
5024+
5025 /*
5026 * This routine handles page faults. It determines the address and the
5027 * problem, and then passes it off to one of the appropriate routines.
5028@@ -176,6 +193,16 @@ bad_area:
5029 up_read(&mm->mmap_sem);
5030
5031 if (user_mode(regs)) {
5032+
5033+#ifdef CONFIG_PAX_PAGEEXEC
5034+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5035+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
5036+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
5037+ do_group_exit(SIGKILL);
5038+ }
5039+ }
5040+#endif
5041+
5042 if (exception_trace && printk_ratelimit())
5043 printk("%s%s[%d]: segfault at %08lx pc %08lx "
5044 "sp %08lx ecr %lu\n",
5045diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
5046index 568885a..f8008df 100644
5047--- a/arch/blackfin/include/asm/cache.h
5048+++ b/arch/blackfin/include/asm/cache.h
5049@@ -7,6 +7,7 @@
5050 #ifndef __ARCH_BLACKFIN_CACHE_H
5051 #define __ARCH_BLACKFIN_CACHE_H
5052
5053+#include <linux/const.h>
5054 #include <linux/linkage.h> /* for asmlinkage */
5055
5056 /*
5057@@ -14,7 +15,7 @@
5058 * Blackfin loads 32 bytes for cache
5059 */
5060 #define L1_CACHE_SHIFT 5
5061-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5062+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5063 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5064
5065 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5066diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5067index aea2718..3639a60 100644
5068--- a/arch/cris/include/arch-v10/arch/cache.h
5069+++ b/arch/cris/include/arch-v10/arch/cache.h
5070@@ -1,8 +1,9 @@
5071 #ifndef _ASM_ARCH_CACHE_H
5072 #define _ASM_ARCH_CACHE_H
5073
5074+#include <linux/const.h>
5075 /* Etrax 100LX have 32-byte cache-lines. */
5076-#define L1_CACHE_BYTES 32
5077 #define L1_CACHE_SHIFT 5
5078+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5079
5080 #endif /* _ASM_ARCH_CACHE_H */
5081diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5082index 7caf25d..ee65ac5 100644
5083--- a/arch/cris/include/arch-v32/arch/cache.h
5084+++ b/arch/cris/include/arch-v32/arch/cache.h
5085@@ -1,11 +1,12 @@
5086 #ifndef _ASM_CRIS_ARCH_CACHE_H
5087 #define _ASM_CRIS_ARCH_CACHE_H
5088
5089+#include <linux/const.h>
5090 #include <arch/hwregs/dma.h>
5091
5092 /* A cache-line is 32 bytes. */
5093-#define L1_CACHE_BYTES 32
5094 #define L1_CACHE_SHIFT 5
5095+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5096
5097 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5098
5099diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5100index f6c3a16..cd422a4 100644
5101--- a/arch/frv/include/asm/atomic.h
5102+++ b/arch/frv/include/asm/atomic.h
5103@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5104 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5105 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5106
5107+#define atomic64_read_unchecked(v) atomic64_read(v)
5108+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5109+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5110+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5111+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5112+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5113+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5114+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5115+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5116+
5117 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5118 {
5119 int c, old;
5120diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5121index 2797163..c2a401df9 100644
5122--- a/arch/frv/include/asm/cache.h
5123+++ b/arch/frv/include/asm/cache.h
5124@@ -12,10 +12,11 @@
5125 #ifndef __ASM_CACHE_H
5126 #define __ASM_CACHE_H
5127
5128+#include <linux/const.h>
5129
5130 /* bytes per L1 cache line */
5131 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5132-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5133+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5134
5135 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5136 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5137diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5138index 43901f2..0d8b865 100644
5139--- a/arch/frv/include/asm/kmap_types.h
5140+++ b/arch/frv/include/asm/kmap_types.h
5141@@ -2,6 +2,6 @@
5142 #ifndef _ASM_KMAP_TYPES_H
5143 #define _ASM_KMAP_TYPES_H
5144
5145-#define KM_TYPE_NR 17
5146+#define KM_TYPE_NR 18
5147
5148 #endif
5149diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5150index 836f147..4cf23f5 100644
5151--- a/arch/frv/mm/elf-fdpic.c
5152+++ b/arch/frv/mm/elf-fdpic.c
5153@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5154 {
5155 struct vm_area_struct *vma;
5156 struct vm_unmapped_area_info info;
5157+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5158
5159 if (len > TASK_SIZE)
5160 return -ENOMEM;
5161@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5162 if (addr) {
5163 addr = PAGE_ALIGN(addr);
5164 vma = find_vma(current->mm, addr);
5165- if (TASK_SIZE - len >= addr &&
5166- (!vma || addr + len <= vma->vm_start))
5167+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5168 goto success;
5169 }
5170
5171@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5172 info.high_limit = (current->mm->start_stack - 0x00200000);
5173 info.align_mask = 0;
5174 info.align_offset = 0;
5175+ info.threadstack_offset = offset;
5176 addr = vm_unmapped_area(&info);
5177 if (!(addr & ~PAGE_MASK))
5178 goto success;
5179diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5180index 2635117..fa223cb 100644
5181--- a/arch/hexagon/include/asm/cache.h
5182+++ b/arch/hexagon/include/asm/cache.h
5183@@ -21,9 +21,11 @@
5184 #ifndef __ASM_CACHE_H
5185 #define __ASM_CACHE_H
5186
5187+#include <linux/const.h>
5188+
5189 /* Bytes per L1 cache line */
5190-#define L1_CACHE_SHIFT (5)
5191-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5192+#define L1_CACHE_SHIFT 5
5193+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5194
5195 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
5196 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
5197diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5198index c84c88b..2a6e1ba 100644
5199--- a/arch/ia64/Kconfig
5200+++ b/arch/ia64/Kconfig
5201@@ -549,6 +549,7 @@ source "drivers/sn/Kconfig"
5202 config KEXEC
5203 bool "kexec system call"
5204 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5205+ depends on !GRKERNSEC_KMEM
5206 help
5207 kexec is a system call that implements the ability to shutdown your
5208 current kernel, and to start another kernel. It is like a reboot
5209diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5210index 5441b14..039a446 100644
5211--- a/arch/ia64/Makefile
5212+++ b/arch/ia64/Makefile
5213@@ -99,5 +99,6 @@ endef
5214 archprepare: make_nr_irqs_h FORCE
5215 PHONY += make_nr_irqs_h FORCE
5216
5217+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5218 make_nr_irqs_h: FORCE
5219 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5220diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5221index 0f8bf48..40ea950 100644
5222--- a/arch/ia64/include/asm/atomic.h
5223+++ b/arch/ia64/include/asm/atomic.h
5224@@ -209,4 +209,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5225 #define atomic64_inc(v) atomic64_add(1, (v))
5226 #define atomic64_dec(v) atomic64_sub(1, (v))
5227
5228+#define atomic64_read_unchecked(v) atomic64_read(v)
5229+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5230+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5231+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5232+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5233+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5234+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5235+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5236+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5237+
5238 #endif /* _ASM_IA64_ATOMIC_H */
5239diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5240index a48957c..e097b56 100644
5241--- a/arch/ia64/include/asm/barrier.h
5242+++ b/arch/ia64/include/asm/barrier.h
5243@@ -67,7 +67,7 @@
5244 do { \
5245 compiletime_assert_atomic_type(*p); \
5246 barrier(); \
5247- ACCESS_ONCE(*p) = (v); \
5248+ ACCESS_ONCE_RW(*p) = (v); \
5249 } while (0)
5250
5251 #define smp_load_acquire(p) \
5252diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5253index 988254a..e1ee885 100644
5254--- a/arch/ia64/include/asm/cache.h
5255+++ b/arch/ia64/include/asm/cache.h
5256@@ -1,6 +1,7 @@
5257 #ifndef _ASM_IA64_CACHE_H
5258 #define _ASM_IA64_CACHE_H
5259
5260+#include <linux/const.h>
5261
5262 /*
5263 * Copyright (C) 1998-2000 Hewlett-Packard Co
5264@@ -9,7 +10,7 @@
5265
5266 /* Bytes per L1 (data) cache line. */
5267 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5268-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5269+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5270
5271 #ifdef CONFIG_SMP
5272 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5273diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5274index 5a83c5c..4d7f553 100644
5275--- a/arch/ia64/include/asm/elf.h
5276+++ b/arch/ia64/include/asm/elf.h
5277@@ -42,6 +42,13 @@
5278 */
5279 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5280
5281+#ifdef CONFIG_PAX_ASLR
5282+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5283+
5284+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5285+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5286+#endif
5287+
5288 #define PT_IA_64_UNWIND 0x70000001
5289
5290 /* IA-64 relocations: */
5291diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5292index 5767cdf..7462574 100644
5293--- a/arch/ia64/include/asm/pgalloc.h
5294+++ b/arch/ia64/include/asm/pgalloc.h
5295@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5296 pgd_val(*pgd_entry) = __pa(pud);
5297 }
5298
5299+static inline void
5300+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5301+{
5302+ pgd_populate(mm, pgd_entry, pud);
5303+}
5304+
5305 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5306 {
5307 return quicklist_alloc(0, GFP_KERNEL, NULL);
5308@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5309 pud_val(*pud_entry) = __pa(pmd);
5310 }
5311
5312+static inline void
5313+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5314+{
5315+ pud_populate(mm, pud_entry, pmd);
5316+}
5317+
5318 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5319 {
5320 return quicklist_alloc(0, GFP_KERNEL, NULL);
5321diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5322index 7935115..c0eca6a 100644
5323--- a/arch/ia64/include/asm/pgtable.h
5324+++ b/arch/ia64/include/asm/pgtable.h
5325@@ -12,7 +12,7 @@
5326 * David Mosberger-Tang <davidm@hpl.hp.com>
5327 */
5328
5329-
5330+#include <linux/const.h>
5331 #include <asm/mman.h>
5332 #include <asm/page.h>
5333 #include <asm/processor.h>
5334@@ -142,6 +142,17 @@
5335 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5336 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5337 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5338+
5339+#ifdef CONFIG_PAX_PAGEEXEC
5340+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5341+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5342+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5343+#else
5344+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5345+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5346+# define PAGE_COPY_NOEXEC PAGE_COPY
5347+#endif
5348+
5349 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5350 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5351 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5352diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5353index 45698cd..e8e2dbc 100644
5354--- a/arch/ia64/include/asm/spinlock.h
5355+++ b/arch/ia64/include/asm/spinlock.h
5356@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5357 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5358
5359 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5360- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5361+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5362 }
5363
5364 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5365diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5366index 449c8c0..3d4b1e9 100644
5367--- a/arch/ia64/include/asm/uaccess.h
5368+++ b/arch/ia64/include/asm/uaccess.h
5369@@ -70,6 +70,7 @@
5370 && ((segment).seg == KERNEL_DS.seg \
5371 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5372 })
5373+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5374 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5375
5376 /*
5377@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5378 static inline unsigned long
5379 __copy_to_user (void __user *to, const void *from, unsigned long count)
5380 {
5381+ if (count > INT_MAX)
5382+ return count;
5383+
5384+ if (!__builtin_constant_p(count))
5385+ check_object_size(from, count, true);
5386+
5387 return __copy_user(to, (__force void __user *) from, count);
5388 }
5389
5390 static inline unsigned long
5391 __copy_from_user (void *to, const void __user *from, unsigned long count)
5392 {
5393+ if (count > INT_MAX)
5394+ return count;
5395+
5396+ if (!__builtin_constant_p(count))
5397+ check_object_size(to, count, false);
5398+
5399 return __copy_user((__force void __user *) to, from, count);
5400 }
5401
5402@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5403 ({ \
5404 void __user *__cu_to = (to); \
5405 const void *__cu_from = (from); \
5406- long __cu_len = (n); \
5407+ unsigned long __cu_len = (n); \
5408 \
5409- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5410+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5411+ if (!__builtin_constant_p(n)) \
5412+ check_object_size(__cu_from, __cu_len, true); \
5413 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5414+ } \
5415 __cu_len; \
5416 })
5417
5418@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5419 ({ \
5420 void *__cu_to = (to); \
5421 const void __user *__cu_from = (from); \
5422- long __cu_len = (n); \
5423+ unsigned long __cu_len = (n); \
5424 \
5425 __chk_user_ptr(__cu_from); \
5426- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5427+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5428+ if (!__builtin_constant_p(n)) \
5429+ check_object_size(__cu_to, __cu_len, false); \
5430 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5431+ } \
5432 __cu_len; \
5433 })
5434
5435diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5436index 24603be..948052d 100644
5437--- a/arch/ia64/kernel/module.c
5438+++ b/arch/ia64/kernel/module.c
5439@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5440 void
5441 module_free (struct module *mod, void *module_region)
5442 {
5443- if (mod && mod->arch.init_unw_table &&
5444- module_region == mod->module_init) {
5445+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5446 unw_remove_unwind_table(mod->arch.init_unw_table);
5447 mod->arch.init_unw_table = NULL;
5448 }
5449@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5450 }
5451
5452 static inline int
5453+in_init_rx (const struct module *mod, uint64_t addr)
5454+{
5455+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5456+}
5457+
5458+static inline int
5459+in_init_rw (const struct module *mod, uint64_t addr)
5460+{
5461+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5462+}
5463+
5464+static inline int
5465 in_init (const struct module *mod, uint64_t addr)
5466 {
5467- return addr - (uint64_t) mod->module_init < mod->init_size;
5468+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5469+}
5470+
5471+static inline int
5472+in_core_rx (const struct module *mod, uint64_t addr)
5473+{
5474+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5475+}
5476+
5477+static inline int
5478+in_core_rw (const struct module *mod, uint64_t addr)
5479+{
5480+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5481 }
5482
5483 static inline int
5484 in_core (const struct module *mod, uint64_t addr)
5485 {
5486- return addr - (uint64_t) mod->module_core < mod->core_size;
5487+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5488 }
5489
5490 static inline int
5491@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5492 break;
5493
5494 case RV_BDREL:
5495- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5496+ if (in_init_rx(mod, val))
5497+ val -= (uint64_t) mod->module_init_rx;
5498+ else if (in_init_rw(mod, val))
5499+ val -= (uint64_t) mod->module_init_rw;
5500+ else if (in_core_rx(mod, val))
5501+ val -= (uint64_t) mod->module_core_rx;
5502+ else if (in_core_rw(mod, val))
5503+ val -= (uint64_t) mod->module_core_rw;
5504 break;
5505
5506 case RV_LTV:
5507@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5508 * addresses have been selected...
5509 */
5510 uint64_t gp;
5511- if (mod->core_size > MAX_LTOFF)
5512+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5513 /*
5514 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5515 * at the end of the module.
5516 */
5517- gp = mod->core_size - MAX_LTOFF / 2;
5518+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5519 else
5520- gp = mod->core_size / 2;
5521- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5522+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5523+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5524 mod->arch.gp = gp;
5525 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5526 }
5527diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5528index c39c3cd..3c77738 100644
5529--- a/arch/ia64/kernel/palinfo.c
5530+++ b/arch/ia64/kernel/palinfo.c
5531@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5532 return NOTIFY_OK;
5533 }
5534
5535-static struct notifier_block __refdata palinfo_cpu_notifier =
5536+static struct notifier_block palinfo_cpu_notifier =
5537 {
5538 .notifier_call = palinfo_cpu_callback,
5539 .priority = 0,
5540diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5541index 41e33f8..65180b2a 100644
5542--- a/arch/ia64/kernel/sys_ia64.c
5543+++ b/arch/ia64/kernel/sys_ia64.c
5544@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5545 unsigned long align_mask = 0;
5546 struct mm_struct *mm = current->mm;
5547 struct vm_unmapped_area_info info;
5548+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5549
5550 if (len > RGN_MAP_LIMIT)
5551 return -ENOMEM;
5552@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5553 if (REGION_NUMBER(addr) == RGN_HPAGE)
5554 addr = 0;
5555 #endif
5556+
5557+#ifdef CONFIG_PAX_RANDMMAP
5558+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5559+ addr = mm->free_area_cache;
5560+ else
5561+#endif
5562+
5563 if (!addr)
5564 addr = TASK_UNMAPPED_BASE;
5565
5566@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5567 info.high_limit = TASK_SIZE;
5568 info.align_mask = align_mask;
5569 info.align_offset = 0;
5570+ info.threadstack_offset = offset;
5571 return vm_unmapped_area(&info);
5572 }
5573
5574diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5575index 84f8a52..7c76178 100644
5576--- a/arch/ia64/kernel/vmlinux.lds.S
5577+++ b/arch/ia64/kernel/vmlinux.lds.S
5578@@ -192,7 +192,7 @@ SECTIONS {
5579 /* Per-cpu data: */
5580 . = ALIGN(PERCPU_PAGE_SIZE);
5581 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5582- __phys_per_cpu_start = __per_cpu_load;
5583+ __phys_per_cpu_start = per_cpu_load;
5584 /*
5585 * ensure percpu data fits
5586 * into percpu page size
5587diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5588index 7225dad..2a7c8256 100644
5589--- a/arch/ia64/mm/fault.c
5590+++ b/arch/ia64/mm/fault.c
5591@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5592 return pte_present(pte);
5593 }
5594
5595+#ifdef CONFIG_PAX_PAGEEXEC
5596+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5597+{
5598+ unsigned long i;
5599+
5600+ printk(KERN_ERR "PAX: bytes at PC: ");
5601+ for (i = 0; i < 8; i++) {
5602+ unsigned int c;
5603+ if (get_user(c, (unsigned int *)pc+i))
5604+ printk(KERN_CONT "???????? ");
5605+ else
5606+ printk(KERN_CONT "%08x ", c);
5607+ }
5608+ printk("\n");
5609+}
5610+#endif
5611+
5612 # define VM_READ_BIT 0
5613 # define VM_WRITE_BIT 1
5614 # define VM_EXEC_BIT 2
5615@@ -151,8 +168,21 @@ retry:
5616 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5617 goto bad_area;
5618
5619- if ((vma->vm_flags & mask) != mask)
5620+ if ((vma->vm_flags & mask) != mask) {
5621+
5622+#ifdef CONFIG_PAX_PAGEEXEC
5623+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5624+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5625+ goto bad_area;
5626+
5627+ up_read(&mm->mmap_sem);
5628+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5629+ do_group_exit(SIGKILL);
5630+ }
5631+#endif
5632+
5633 goto bad_area;
5634+ }
5635
5636 /*
5637 * If for any reason at all we couldn't handle the fault, make
5638diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5639index 76069c1..c2aa816 100644
5640--- a/arch/ia64/mm/hugetlbpage.c
5641+++ b/arch/ia64/mm/hugetlbpage.c
5642@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5643 unsigned long pgoff, unsigned long flags)
5644 {
5645 struct vm_unmapped_area_info info;
5646+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5647
5648 if (len > RGN_MAP_LIMIT)
5649 return -ENOMEM;
5650@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5651 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5652 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5653 info.align_offset = 0;
5654+ info.threadstack_offset = offset;
5655 return vm_unmapped_area(&info);
5656 }
5657
5658diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5659index 6b33457..88b5124 100644
5660--- a/arch/ia64/mm/init.c
5661+++ b/arch/ia64/mm/init.c
5662@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5663 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5664 vma->vm_end = vma->vm_start + PAGE_SIZE;
5665 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5666+
5667+#ifdef CONFIG_PAX_PAGEEXEC
5668+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5669+ vma->vm_flags &= ~VM_EXEC;
5670+
5671+#ifdef CONFIG_PAX_MPROTECT
5672+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5673+ vma->vm_flags &= ~VM_MAYEXEC;
5674+#endif
5675+
5676+ }
5677+#endif
5678+
5679 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5680 down_write(&current->mm->mmap_sem);
5681 if (insert_vm_struct(current->mm, vma)) {
5682@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5683 gate_vma.vm_start = FIXADDR_USER_START;
5684 gate_vma.vm_end = FIXADDR_USER_END;
5685 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5686- gate_vma.vm_page_prot = __P101;
5687+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5688
5689 return 0;
5690 }
5691diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5692index 40b3ee9..8c2c112 100644
5693--- a/arch/m32r/include/asm/cache.h
5694+++ b/arch/m32r/include/asm/cache.h
5695@@ -1,8 +1,10 @@
5696 #ifndef _ASM_M32R_CACHE_H
5697 #define _ASM_M32R_CACHE_H
5698
5699+#include <linux/const.h>
5700+
5701 /* L1 cache line size */
5702 #define L1_CACHE_SHIFT 4
5703-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5704+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5705
5706 #endif /* _ASM_M32R_CACHE_H */
5707diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5708index 82abd15..d95ae5d 100644
5709--- a/arch/m32r/lib/usercopy.c
5710+++ b/arch/m32r/lib/usercopy.c
5711@@ -14,6 +14,9 @@
5712 unsigned long
5713 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5714 {
5715+ if ((long)n < 0)
5716+ return n;
5717+
5718 prefetch(from);
5719 if (access_ok(VERIFY_WRITE, to, n))
5720 __copy_user(to,from,n);
5721@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5722 unsigned long
5723 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5724 {
5725+ if ((long)n < 0)
5726+ return n;
5727+
5728 prefetchw(to);
5729 if (access_ok(VERIFY_READ, from, n))
5730 __copy_user_zeroing(to,from,n);
5731diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5732index 0395c51..5f26031 100644
5733--- a/arch/m68k/include/asm/cache.h
5734+++ b/arch/m68k/include/asm/cache.h
5735@@ -4,9 +4,11 @@
5736 #ifndef __ARCH_M68K_CACHE_H
5737 #define __ARCH_M68K_CACHE_H
5738
5739+#include <linux/const.h>
5740+
5741 /* bytes per L1 cache line */
5742 #define L1_CACHE_SHIFT 4
5743-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5744+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5745
5746 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5747
5748diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5749index c7591e8..ecef036 100644
5750--- a/arch/metag/include/asm/barrier.h
5751+++ b/arch/metag/include/asm/barrier.h
5752@@ -89,7 +89,7 @@ static inline void fence(void)
5753 do { \
5754 compiletime_assert_atomic_type(*p); \
5755 smp_mb(); \
5756- ACCESS_ONCE(*p) = (v); \
5757+ ACCESS_ONCE_RW(*p) = (v); \
5758 } while (0)
5759
5760 #define smp_load_acquire(p) \
5761diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5762index 3c32075..ae0ae75 100644
5763--- a/arch/metag/mm/hugetlbpage.c
5764+++ b/arch/metag/mm/hugetlbpage.c
5765@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5766 info.high_limit = TASK_SIZE;
5767 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5768 info.align_offset = 0;
5769+ info.threadstack_offset = 0;
5770 return vm_unmapped_area(&info);
5771 }
5772
5773diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5774index 4efe96a..60e8699 100644
5775--- a/arch/microblaze/include/asm/cache.h
5776+++ b/arch/microblaze/include/asm/cache.h
5777@@ -13,11 +13,12 @@
5778 #ifndef _ASM_MICROBLAZE_CACHE_H
5779 #define _ASM_MICROBLAZE_CACHE_H
5780
5781+#include <linux/const.h>
5782 #include <asm/registers.h>
5783
5784 #define L1_CACHE_SHIFT 5
5785 /* word-granular cache in microblaze */
5786-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5787+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5788
5789 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5790
5791diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5792index 574c430..470200d 100644
5793--- a/arch/mips/Kconfig
5794+++ b/arch/mips/Kconfig
5795@@ -2399,6 +2399,7 @@ source "kernel/Kconfig.preempt"
5796
5797 config KEXEC
5798 bool "Kexec system call"
5799+ depends on !GRKERNSEC_KMEM
5800 help
5801 kexec is a system call that implements the ability to shutdown your
5802 current kernel, and to start another kernel. It is like a reboot
5803diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5804index 02f2444..506969c 100644
5805--- a/arch/mips/cavium-octeon/dma-octeon.c
5806+++ b/arch/mips/cavium-octeon/dma-octeon.c
5807@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5808 if (dma_release_from_coherent(dev, order, vaddr))
5809 return;
5810
5811- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5812+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5813 }
5814
5815 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5816diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5817index 37b2bef..02122b8 100644
5818--- a/arch/mips/include/asm/atomic.h
5819+++ b/arch/mips/include/asm/atomic.h
5820@@ -21,15 +21,39 @@
5821 #include <asm/cmpxchg.h>
5822 #include <asm/war.h>
5823
5824+#ifdef CONFIG_GENERIC_ATOMIC64
5825+#include <asm-generic/atomic64.h>
5826+#endif
5827+
5828 #define ATOMIC_INIT(i) { (i) }
5829
5830+#ifdef CONFIG_64BIT
5831+#define _ASM_EXTABLE(from, to) \
5832+" .section __ex_table,\"a\"\n" \
5833+" .dword " #from ", " #to"\n" \
5834+" .previous\n"
5835+#else
5836+#define _ASM_EXTABLE(from, to) \
5837+" .section __ex_table,\"a\"\n" \
5838+" .word " #from ", " #to"\n" \
5839+" .previous\n"
5840+#endif
5841+
5842 /*
5843 * atomic_read - read atomic variable
5844 * @v: pointer of type atomic_t
5845 *
5846 * Atomically reads the value of @v.
5847 */
5848-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5849+static inline int atomic_read(const atomic_t *v)
5850+{
5851+ return (*(volatile const int *) &v->counter);
5852+}
5853+
5854+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5855+{
5856+ return (*(volatile const int *) &v->counter);
5857+}
5858
5859 /*
5860 * atomic_set - set atomic variable
5861@@ -38,7 +62,15 @@
5862 *
5863 * Atomically sets the value of @v to @i.
5864 */
5865-#define atomic_set(v, i) ((v)->counter = (i))
5866+static inline void atomic_set(atomic_t *v, int i)
5867+{
5868+ v->counter = i;
5869+}
5870+
5871+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5872+{
5873+ v->counter = i;
5874+}
5875
5876 /*
5877 * atomic_add - add integer to atomic variable
5878@@ -47,7 +79,67 @@
5879 *
5880 * Atomically adds @i to @v.
5881 */
5882-static __inline__ void atomic_add(int i, atomic_t * v)
5883+static __inline__ void atomic_add(int i, atomic_t *v)
5884+{
5885+ int temp;
5886+
5887+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5888+ __asm__ __volatile__(
5889+ " .set mips3 \n"
5890+ "1: ll %0, %1 # atomic_add \n"
5891+#ifdef CONFIG_PAX_REFCOUNT
5892+ /* Exception on overflow. */
5893+ "2: add %0, %2 \n"
5894+#else
5895+ " addu %0, %2 \n"
5896+#endif
5897+ " sc %0, %1 \n"
5898+ " beqzl %0, 1b \n"
5899+#ifdef CONFIG_PAX_REFCOUNT
5900+ "3: \n"
5901+ _ASM_EXTABLE(2b, 3b)
5902+#endif
5903+ " .set mips0 \n"
5904+ : "=&r" (temp), "+m" (v->counter)
5905+ : "Ir" (i));
5906+ } else if (kernel_uses_llsc) {
5907+ __asm__ __volatile__(
5908+ " .set mips3 \n"
5909+ "1: ll %0, %1 # atomic_add \n"
5910+#ifdef CONFIG_PAX_REFCOUNT
5911+ /* Exception on overflow. */
5912+ "2: add %0, %2 \n"
5913+#else
5914+ " addu %0, %2 \n"
5915+#endif
5916+ " sc %0, %1 \n"
5917+ " beqz %0, 1b \n"
5918+#ifdef CONFIG_PAX_REFCOUNT
5919+ "3: \n"
5920+ _ASM_EXTABLE(2b, 3b)
5921+#endif
5922+ " .set mips0 \n"
5923+ : "=&r" (temp), "+m" (v->counter)
5924+ : "Ir" (i));
5925+ } else {
5926+ unsigned long flags;
5927+
5928+ raw_local_irq_save(flags);
5929+ __asm__ __volatile__(
5930+#ifdef CONFIG_PAX_REFCOUNT
5931+ /* Exception on overflow. */
5932+ "1: add %0, %1 \n"
5933+ "2: \n"
5934+ _ASM_EXTABLE(1b, 2b)
5935+#else
5936+ " addu %0, %1 \n"
5937+#endif
5938+ : "+r" (v->counter) : "Ir" (i));
5939+ raw_local_irq_restore(flags);
5940+ }
5941+}
5942+
5943+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5944 {
5945 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5946 int temp;
5947@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5948 *
5949 * Atomically subtracts @i from @v.
5950 */
5951-static __inline__ void atomic_sub(int i, atomic_t * v)
5952+static __inline__ void atomic_sub(int i, atomic_t *v)
5953+{
5954+ int temp;
5955+
5956+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5957+ __asm__ __volatile__(
5958+ " .set mips3 \n"
5959+ "1: ll %0, %1 # atomic64_sub \n"
5960+#ifdef CONFIG_PAX_REFCOUNT
5961+ /* Exception on overflow. */
5962+ "2: sub %0, %2 \n"
5963+#else
5964+ " subu %0, %2 \n"
5965+#endif
5966+ " sc %0, %1 \n"
5967+ " beqzl %0, 1b \n"
5968+#ifdef CONFIG_PAX_REFCOUNT
5969+ "3: \n"
5970+ _ASM_EXTABLE(2b, 3b)
5971+#endif
5972+ " .set mips0 \n"
5973+ : "=&r" (temp), "+m" (v->counter)
5974+ : "Ir" (i));
5975+ } else if (kernel_uses_llsc) {
5976+ __asm__ __volatile__(
5977+ " .set mips3 \n"
5978+ "1: ll %0, %1 # atomic64_sub \n"
5979+#ifdef CONFIG_PAX_REFCOUNT
5980+ /* Exception on overflow. */
5981+ "2: sub %0, %2 \n"
5982+#else
5983+ " subu %0, %2 \n"
5984+#endif
5985+ " sc %0, %1 \n"
5986+ " beqz %0, 1b \n"
5987+#ifdef CONFIG_PAX_REFCOUNT
5988+ "3: \n"
5989+ _ASM_EXTABLE(2b, 3b)
5990+#endif
5991+ " .set mips0 \n"
5992+ : "=&r" (temp), "+m" (v->counter)
5993+ : "Ir" (i));
5994+ } else {
5995+ unsigned long flags;
5996+
5997+ raw_local_irq_save(flags);
5998+ __asm__ __volatile__(
5999+#ifdef CONFIG_PAX_REFCOUNT
6000+ /* Exception on overflow. */
6001+ "1: sub %0, %1 \n"
6002+ "2: \n"
6003+ _ASM_EXTABLE(1b, 2b)
6004+#else
6005+ " subu %0, %1 \n"
6006+#endif
6007+ : "+r" (v->counter) : "Ir" (i));
6008+ raw_local_irq_restore(flags);
6009+ }
6010+}
6011+
6012+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
6013 {
6014 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6015 int temp;
6016@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
6017 /*
6018 * Same as above, but return the result value
6019 */
6020-static __inline__ int atomic_add_return(int i, atomic_t * v)
6021+static __inline__ int atomic_add_return(int i, atomic_t *v)
6022+{
6023+ int result;
6024+ int temp;
6025+
6026+ smp_mb__before_llsc();
6027+
6028+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6029+ __asm__ __volatile__(
6030+ " .set mips3 \n"
6031+ "1: ll %1, %2 # atomic_add_return \n"
6032+#ifdef CONFIG_PAX_REFCOUNT
6033+ "2: add %0, %1, %3 \n"
6034+#else
6035+ " addu %0, %1, %3 \n"
6036+#endif
6037+ " sc %0, %2 \n"
6038+ " beqzl %0, 1b \n"
6039+#ifdef CONFIG_PAX_REFCOUNT
6040+ " b 4f \n"
6041+ " .set noreorder \n"
6042+ "3: b 5f \n"
6043+ " move %0, %1 \n"
6044+ " .set reorder \n"
6045+ _ASM_EXTABLE(2b, 3b)
6046+#endif
6047+ "4: addu %0, %1, %3 \n"
6048+#ifdef CONFIG_PAX_REFCOUNT
6049+ "5: \n"
6050+#endif
6051+ " .set mips0 \n"
6052+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6053+ : "Ir" (i));
6054+ } else if (kernel_uses_llsc) {
6055+ __asm__ __volatile__(
6056+ " .set mips3 \n"
6057+ "1: ll %1, %2 # atomic_add_return \n"
6058+#ifdef CONFIG_PAX_REFCOUNT
6059+ "2: add %0, %1, %3 \n"
6060+#else
6061+ " addu %0, %1, %3 \n"
6062+#endif
6063+ " sc %0, %2 \n"
6064+ " bnez %0, 4f \n"
6065+ " b 1b \n"
6066+#ifdef CONFIG_PAX_REFCOUNT
6067+ " .set noreorder \n"
6068+ "3: b 5f \n"
6069+ " move %0, %1 \n"
6070+ " .set reorder \n"
6071+ _ASM_EXTABLE(2b, 3b)
6072+#endif
6073+ "4: addu %0, %1, %3 \n"
6074+#ifdef CONFIG_PAX_REFCOUNT
6075+ "5: \n"
6076+#endif
6077+ " .set mips0 \n"
6078+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6079+ : "Ir" (i));
6080+ } else {
6081+ unsigned long flags;
6082+
6083+ raw_local_irq_save(flags);
6084+ __asm__ __volatile__(
6085+ " lw %0, %1 \n"
6086+#ifdef CONFIG_PAX_REFCOUNT
6087+ /* Exception on overflow. */
6088+ "1: add %0, %2 \n"
6089+#else
6090+ " addu %0, %2 \n"
6091+#endif
6092+ " sw %0, %1 \n"
6093+#ifdef CONFIG_PAX_REFCOUNT
6094+ /* Note: Dest reg is not modified on overflow */
6095+ "2: \n"
6096+ _ASM_EXTABLE(1b, 2b)
6097+#endif
6098+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6099+ raw_local_irq_restore(flags);
6100+ }
6101+
6102+ smp_llsc_mb();
6103+
6104+ return result;
6105+}
6106+
6107+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6108 {
6109 int result;
6110
6111@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
6112 return result;
6113 }
6114
6115-static __inline__ int atomic_sub_return(int i, atomic_t * v)
6116+static __inline__ int atomic_sub_return(int i, atomic_t *v)
6117+{
6118+ int result;
6119+ int temp;
6120+
6121+ smp_mb__before_llsc();
6122+
6123+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6124+ __asm__ __volatile__(
6125+ " .set mips3 \n"
6126+ "1: ll %1, %2 # atomic_sub_return \n"
6127+#ifdef CONFIG_PAX_REFCOUNT
6128+ "2: sub %0, %1, %3 \n"
6129+#else
6130+ " subu %0, %1, %3 \n"
6131+#endif
6132+ " sc %0, %2 \n"
6133+ " beqzl %0, 1b \n"
6134+#ifdef CONFIG_PAX_REFCOUNT
6135+ " b 4f \n"
6136+ " .set noreorder \n"
6137+ "3: b 5f \n"
6138+ " move %0, %1 \n"
6139+ " .set reorder \n"
6140+ _ASM_EXTABLE(2b, 3b)
6141+#endif
6142+ "4: subu %0, %1, %3 \n"
6143+#ifdef CONFIG_PAX_REFCOUNT
6144+ "5: \n"
6145+#endif
6146+ " .set mips0 \n"
6147+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6148+ : "Ir" (i), "m" (v->counter)
6149+ : "memory");
6150+ } else if (kernel_uses_llsc) {
6151+ __asm__ __volatile__(
6152+ " .set mips3 \n"
6153+ "1: ll %1, %2 # atomic_sub_return \n"
6154+#ifdef CONFIG_PAX_REFCOUNT
6155+ "2: sub %0, %1, %3 \n"
6156+#else
6157+ " subu %0, %1, %3 \n"
6158+#endif
6159+ " sc %0, %2 \n"
6160+ " bnez %0, 4f \n"
6161+ " b 1b \n"
6162+#ifdef CONFIG_PAX_REFCOUNT
6163+ " .set noreorder \n"
6164+ "3: b 5f \n"
6165+ " move %0, %1 \n"
6166+ " .set reorder \n"
6167+ _ASM_EXTABLE(2b, 3b)
6168+#endif
6169+ "4: subu %0, %1, %3 \n"
6170+#ifdef CONFIG_PAX_REFCOUNT
6171+ "5: \n"
6172+#endif
6173+ " .set mips0 \n"
6174+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6175+ : "Ir" (i));
6176+ } else {
6177+ unsigned long flags;
6178+
6179+ raw_local_irq_save(flags);
6180+ __asm__ __volatile__(
6181+ " lw %0, %1 \n"
6182+#ifdef CONFIG_PAX_REFCOUNT
6183+ /* Exception on overflow. */
6184+ "1: sub %0, %2 \n"
6185+#else
6186+ " subu %0, %2 \n"
6187+#endif
6188+ " sw %0, %1 \n"
6189+#ifdef CONFIG_PAX_REFCOUNT
6190+ /* Note: Dest reg is not modified on overflow */
6191+ "2: \n"
6192+ _ASM_EXTABLE(1b, 2b)
6193+#endif
6194+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6195+ raw_local_irq_restore(flags);
6196+ }
6197+
6198+ smp_llsc_mb();
6199+
6200+ return result;
6201+}
6202+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
6203 {
6204 int result;
6205
6206@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
6207 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6208 * The function returns the old value of @v minus @i.
6209 */
6210-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6211+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6212 {
6213 int result;
6214
6215@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6216 return result;
6217 }
6218
6219-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6220-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6221+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6222+{
6223+ return cmpxchg(&v->counter, old, new);
6224+}
6225+
6226+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6227+ int new)
6228+{
6229+ return cmpxchg(&(v->counter), old, new);
6230+}
6231+
6232+static inline int atomic_xchg(atomic_t *v, int new)
6233+{
6234+ return xchg(&v->counter, new);
6235+}
6236+
6237+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6238+{
6239+ return xchg(&(v->counter), new);
6240+}
6241
6242 /**
6243 * __atomic_add_unless - add unless the number is a given value
6244@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6245
6246 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6247 #define atomic_inc_return(v) atomic_add_return(1, (v))
6248+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6249+{
6250+ return atomic_add_return_unchecked(1, v);
6251+}
6252
6253 /*
6254 * atomic_sub_and_test - subtract value from variable and test result
6255@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6256 * other cases.
6257 */
6258 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6259+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6260+{
6261+ return atomic_add_return_unchecked(1, v) == 0;
6262+}
6263
6264 /*
6265 * atomic_dec_and_test - decrement by 1 and test
6266@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6267 * Atomically increments @v by 1.
6268 */
6269 #define atomic_inc(v) atomic_add(1, (v))
6270+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6271+{
6272+ atomic_add_unchecked(1, v);
6273+}
6274
6275 /*
6276 * atomic_dec - decrement and test
6277@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6278 * Atomically decrements @v by 1.
6279 */
6280 #define atomic_dec(v) atomic_sub(1, (v))
6281+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6282+{
6283+ atomic_sub_unchecked(1, v);
6284+}
6285
6286 /*
6287 * atomic_add_negative - add and test if negative
6288@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6289 * @v: pointer of type atomic64_t
6290 *
6291 */
6292-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
6293+static inline long atomic64_read(const atomic64_t *v)
6294+{
6295+ return (*(volatile const long *) &v->counter);
6296+}
6297+
6298+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6299+{
6300+ return (*(volatile const long *) &v->counter);
6301+}
6302
6303 /*
6304 * atomic64_set - set atomic variable
6305 * @v: pointer of type atomic64_t
6306 * @i: required value
6307 */
6308-#define atomic64_set(v, i) ((v)->counter = (i))
6309+static inline void atomic64_set(atomic64_t *v, long i)
6310+{
6311+ v->counter = i;
6312+}
6313+
6314+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6315+{
6316+ v->counter = i;
6317+}
6318
6319 /*
6320 * atomic64_add - add integer to atomic variable
6321@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6322 *
6323 * Atomically adds @i to @v.
6324 */
6325-static __inline__ void atomic64_add(long i, atomic64_t * v)
6326+static __inline__ void atomic64_add(long i, atomic64_t *v)
6327+{
6328+ long temp;
6329+
6330+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6331+ __asm__ __volatile__(
6332+ " .set mips3 \n"
6333+ "1: lld %0, %1 # atomic64_add \n"
6334+#ifdef CONFIG_PAX_REFCOUNT
6335+ /* Exception on overflow. */
6336+ "2: dadd %0, %2 \n"
6337+#else
6338+ " daddu %0, %2 \n"
6339+#endif
6340+ " scd %0, %1 \n"
6341+ " beqzl %0, 1b \n"
6342+#ifdef CONFIG_PAX_REFCOUNT
6343+ "3: \n"
6344+ _ASM_EXTABLE(2b, 3b)
6345+#endif
6346+ " .set mips0 \n"
6347+ : "=&r" (temp), "+m" (v->counter)
6348+ : "Ir" (i));
6349+ } else if (kernel_uses_llsc) {
6350+ __asm__ __volatile__(
6351+ " .set mips3 \n"
6352+ "1: lld %0, %1 # atomic64_add \n"
6353+#ifdef CONFIG_PAX_REFCOUNT
6354+ /* Exception on overflow. */
6355+ "2: dadd %0, %2 \n"
6356+#else
6357+ " daddu %0, %2 \n"
6358+#endif
6359+ " scd %0, %1 \n"
6360+ " beqz %0, 1b \n"
6361+#ifdef CONFIG_PAX_REFCOUNT
6362+ "3: \n"
6363+ _ASM_EXTABLE(2b, 3b)
6364+#endif
6365+ " .set mips0 \n"
6366+ : "=&r" (temp), "+m" (v->counter)
6367+ : "Ir" (i));
6368+ } else {
6369+ unsigned long flags;
6370+
6371+ raw_local_irq_save(flags);
6372+ __asm__ __volatile__(
6373+#ifdef CONFIG_PAX_REFCOUNT
6374+ /* Exception on overflow. */
6375+ "1: dadd %0, %1 \n"
6376+ "2: \n"
6377+ _ASM_EXTABLE(1b, 2b)
6378+#else
6379+ " daddu %0, %1 \n"
6380+#endif
6381+ : "+r" (v->counter) : "Ir" (i));
6382+ raw_local_irq_restore(flags);
6383+ }
6384+}
6385+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6386 {
6387 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6388 long temp;
6389@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6390 *
6391 * Atomically subtracts @i from @v.
6392 */
6393-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6394+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6395+{
6396+ long temp;
6397+
6398+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6399+ __asm__ __volatile__(
6400+ " .set mips3 \n"
6401+ "1: lld %0, %1 # atomic64_sub \n"
6402+#ifdef CONFIG_PAX_REFCOUNT
6403+ /* Exception on overflow. */
6404+ "2: dsub %0, %2 \n"
6405+#else
6406+ " dsubu %0, %2 \n"
6407+#endif
6408+ " scd %0, %1 \n"
6409+ " beqzl %0, 1b \n"
6410+#ifdef CONFIG_PAX_REFCOUNT
6411+ "3: \n"
6412+ _ASM_EXTABLE(2b, 3b)
6413+#endif
6414+ " .set mips0 \n"
6415+ : "=&r" (temp), "+m" (v->counter)
6416+ : "Ir" (i));
6417+ } else if (kernel_uses_llsc) {
6418+ __asm__ __volatile__(
6419+ " .set mips3 \n"
6420+ "1: lld %0, %1 # atomic64_sub \n"
6421+#ifdef CONFIG_PAX_REFCOUNT
6422+ /* Exception on overflow. */
6423+ "2: dsub %0, %2 \n"
6424+#else
6425+ " dsubu %0, %2 \n"
6426+#endif
6427+ " scd %0, %1 \n"
6428+ " beqz %0, 1b \n"
6429+#ifdef CONFIG_PAX_REFCOUNT
6430+ "3: \n"
6431+ _ASM_EXTABLE(2b, 3b)
6432+#endif
6433+ " .set mips0 \n"
6434+ : "=&r" (temp), "+m" (v->counter)
6435+ : "Ir" (i));
6436+ } else {
6437+ unsigned long flags;
6438+
6439+ raw_local_irq_save(flags);
6440+ __asm__ __volatile__(
6441+#ifdef CONFIG_PAX_REFCOUNT
6442+ /* Exception on overflow. */
6443+ "1: dsub %0, %1 \n"
6444+ "2: \n"
6445+ _ASM_EXTABLE(1b, 2b)
6446+#else
6447+ " dsubu %0, %1 \n"
6448+#endif
6449+ : "+r" (v->counter) : "Ir" (i));
6450+ raw_local_irq_restore(flags);
6451+ }
6452+}
6453+
6454+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6455 {
6456 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6457 long temp;
6458@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6459 /*
6460 * Same as above, but return the result value
6461 */
6462-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6463+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6464+{
6465+ long result;
6466+ long temp;
6467+
6468+ smp_mb__before_llsc();
6469+
6470+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6471+ __asm__ __volatile__(
6472+ " .set mips3 \n"
6473+ "1: lld %1, %2 # atomic64_add_return \n"
6474+#ifdef CONFIG_PAX_REFCOUNT
6475+ "2: dadd %0, %1, %3 \n"
6476+#else
6477+ " daddu %0, %1, %3 \n"
6478+#endif
6479+ " scd %0, %2 \n"
6480+ " beqzl %0, 1b \n"
6481+#ifdef CONFIG_PAX_REFCOUNT
6482+ " b 4f \n"
6483+ " .set noreorder \n"
6484+ "3: b 5f \n"
6485+ " move %0, %1 \n"
6486+ " .set reorder \n"
6487+ _ASM_EXTABLE(2b, 3b)
6488+#endif
6489+ "4: daddu %0, %1, %3 \n"
6490+#ifdef CONFIG_PAX_REFCOUNT
6491+ "5: \n"
6492+#endif
6493+ " .set mips0 \n"
6494+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6495+ : "Ir" (i));
6496+ } else if (kernel_uses_llsc) {
6497+ __asm__ __volatile__(
6498+ " .set mips3 \n"
6499+ "1: lld %1, %2 # atomic64_add_return \n"
6500+#ifdef CONFIG_PAX_REFCOUNT
6501+ "2: dadd %0, %1, %3 \n"
6502+#else
6503+ " daddu %0, %1, %3 \n"
6504+#endif
6505+ " scd %0, %2 \n"
6506+ " bnez %0, 4f \n"
6507+ " b 1b \n"
6508+#ifdef CONFIG_PAX_REFCOUNT
6509+ " .set noreorder \n"
6510+ "3: b 5f \n"
6511+ " move %0, %1 \n"
6512+ " .set reorder \n"
6513+ _ASM_EXTABLE(2b, 3b)
6514+#endif
6515+ "4: daddu %0, %1, %3 \n"
6516+#ifdef CONFIG_PAX_REFCOUNT
6517+ "5: \n"
6518+#endif
6519+ " .set mips0 \n"
6520+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6521+ : "Ir" (i), "m" (v->counter)
6522+ : "memory");
6523+ } else {
6524+ unsigned long flags;
6525+
6526+ raw_local_irq_save(flags);
6527+ __asm__ __volatile__(
6528+ " ld %0, %1 \n"
6529+#ifdef CONFIG_PAX_REFCOUNT
6530+ /* Exception on overflow. */
6531+ "1: dadd %0, %2 \n"
6532+#else
6533+ " daddu %0, %2 \n"
6534+#endif
6535+ " sd %0, %1 \n"
6536+#ifdef CONFIG_PAX_REFCOUNT
6537+ /* Note: Dest reg is not modified on overflow */
6538+ "2: \n"
6539+ _ASM_EXTABLE(1b, 2b)
6540+#endif
6541+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6542+ raw_local_irq_restore(flags);
6543+ }
6544+
6545+ smp_llsc_mb();
6546+
6547+ return result;
6548+}
6549+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6550 {
6551 long result;
6552
6553@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6554 return result;
6555 }
6556
6557-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6558+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6559+{
6560+ long result;
6561+ long temp;
6562+
6563+ smp_mb__before_llsc();
6564+
6565+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6566+ long temp;
6567+
6568+ __asm__ __volatile__(
6569+ " .set mips3 \n"
6570+ "1: lld %1, %2 # atomic64_sub_return \n"
6571+#ifdef CONFIG_PAX_REFCOUNT
6572+ "2: dsub %0, %1, %3 \n"
6573+#else
6574+ " dsubu %0, %1, %3 \n"
6575+#endif
6576+ " scd %0, %2 \n"
6577+ " beqzl %0, 1b \n"
6578+#ifdef CONFIG_PAX_REFCOUNT
6579+ " b 4f \n"
6580+ " .set noreorder \n"
6581+ "3: b 5f \n"
6582+ " move %0, %1 \n"
6583+ " .set reorder \n"
6584+ _ASM_EXTABLE(2b, 3b)
6585+#endif
6586+ "4: dsubu %0, %1, %3 \n"
6587+#ifdef CONFIG_PAX_REFCOUNT
6588+ "5: \n"
6589+#endif
6590+ " .set mips0 \n"
6591+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6592+ : "Ir" (i), "m" (v->counter)
6593+ : "memory");
6594+ } else if (kernel_uses_llsc) {
6595+ __asm__ __volatile__(
6596+ " .set mips3 \n"
6597+ "1: lld %1, %2 # atomic64_sub_return \n"
6598+#ifdef CONFIG_PAX_REFCOUNT
6599+ "2: dsub %0, %1, %3 \n"
6600+#else
6601+ " dsubu %0, %1, %3 \n"
6602+#endif
6603+ " scd %0, %2 \n"
6604+ " bnez %0, 4f \n"
6605+ " b 1b \n"
6606+#ifdef CONFIG_PAX_REFCOUNT
6607+ " .set noreorder \n"
6608+ "3: b 5f \n"
6609+ " move %0, %1 \n"
6610+ " .set reorder \n"
6611+ _ASM_EXTABLE(2b, 3b)
6612+#endif
6613+ "4: dsubu %0, %1, %3 \n"
6614+#ifdef CONFIG_PAX_REFCOUNT
6615+ "5: \n"
6616+#endif
6617+ " .set mips0 \n"
6618+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6619+ : "Ir" (i), "m" (v->counter)
6620+ : "memory");
6621+ } else {
6622+ unsigned long flags;
6623+
6624+ raw_local_irq_save(flags);
6625+ __asm__ __volatile__(
6626+ " ld %0, %1 \n"
6627+#ifdef CONFIG_PAX_REFCOUNT
6628+ /* Exception on overflow. */
6629+ "1: dsub %0, %2 \n"
6630+#else
6631+ " dsubu %0, %2 \n"
6632+#endif
6633+ " sd %0, %1 \n"
6634+#ifdef CONFIG_PAX_REFCOUNT
6635+ /* Note: Dest reg is not modified on overflow */
6636+ "2: \n"
6637+ _ASM_EXTABLE(1b, 2b)
6638+#endif
6639+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6640+ raw_local_irq_restore(flags);
6641+ }
6642+
6643+ smp_llsc_mb();
6644+
6645+ return result;
6646+}
6647+
6648+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6649 {
6650 long result;
6651
6652@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6653 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6654 * The function returns the old value of @v minus @i.
6655 */
6656-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6657+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6658 {
6659 long result;
6660
6661@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6662 return result;
6663 }
6664
6665-#define atomic64_cmpxchg(v, o, n) \
6666- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6667-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6668+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6669+{
6670+ return cmpxchg(&v->counter, old, new);
6671+}
6672+
6673+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6674+ long new)
6675+{
6676+ return cmpxchg(&(v->counter), old, new);
6677+}
6678+
6679+static inline long atomic64_xchg(atomic64_t *v, long new)
6680+{
6681+ return xchg(&v->counter, new);
6682+}
6683+
6684+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6685+{
6686+ return xchg(&(v->counter), new);
6687+}
6688
6689 /**
6690 * atomic64_add_unless - add unless the number is a given value
6691@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6692
6693 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6694 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6695+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6696
6697 /*
6698 * atomic64_sub_and_test - subtract value from variable and test result
6699@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6700 * other cases.
6701 */
6702 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6703+#define atomic64_inc_and_test_unchecked(v) atomic64_add_return_unchecked(1, (v)) == 0)
6704
6705 /*
6706 * atomic64_dec_and_test - decrement by 1 and test
6707@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6708 * Atomically increments @v by 1.
6709 */
6710 #define atomic64_inc(v) atomic64_add(1, (v))
6711+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6712
6713 /*
6714 * atomic64_dec - decrement and test
6715@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6716 * Atomically decrements @v by 1.
6717 */
6718 #define atomic64_dec(v) atomic64_sub(1, (v))
6719+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6720
6721 /*
6722 * atomic64_add_negative - add and test if negative
6723diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6724index d0101dd..266982c 100644
6725--- a/arch/mips/include/asm/barrier.h
6726+++ b/arch/mips/include/asm/barrier.h
6727@@ -184,7 +184,7 @@
6728 do { \
6729 compiletime_assert_atomic_type(*p); \
6730 smp_mb(); \
6731- ACCESS_ONCE(*p) = (v); \
6732+ ACCESS_ONCE_RW(*p) = (v); \
6733 } while (0)
6734
6735 #define smp_load_acquire(p) \
6736diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6737index b4db69f..8f3b093 100644
6738--- a/arch/mips/include/asm/cache.h
6739+++ b/arch/mips/include/asm/cache.h
6740@@ -9,10 +9,11 @@
6741 #ifndef _ASM_CACHE_H
6742 #define _ASM_CACHE_H
6743
6744+#include <linux/const.h>
6745 #include <kmalloc.h>
6746
6747 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6748-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6749+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6750
6751 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6752 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6753diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6754index 1d38fe0..9beabc9 100644
6755--- a/arch/mips/include/asm/elf.h
6756+++ b/arch/mips/include/asm/elf.h
6757@@ -381,13 +381,16 @@ extern const char *__elf_platform;
6758 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6759 #endif
6760
6761+#ifdef CONFIG_PAX_ASLR
6762+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6763+
6764+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6765+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6766+#endif
6767+
6768 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6769 struct linux_binprm;
6770 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6771 int uses_interp);
6772
6773-struct mm_struct;
6774-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6775-#define arch_randomize_brk arch_randomize_brk
6776-
6777 #endif /* _ASM_ELF_H */
6778diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6779index c1f6afa..38cc6e9 100644
6780--- a/arch/mips/include/asm/exec.h
6781+++ b/arch/mips/include/asm/exec.h
6782@@ -12,6 +12,6 @@
6783 #ifndef _ASM_EXEC_H
6784 #define _ASM_EXEC_H
6785
6786-extern unsigned long arch_align_stack(unsigned long sp);
6787+#define arch_align_stack(x) ((x) & ~0xfUL)
6788
6789 #endif /* _ASM_EXEC_H */
6790diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6791index 9e8ef59..1139d6b 100644
6792--- a/arch/mips/include/asm/hw_irq.h
6793+++ b/arch/mips/include/asm/hw_irq.h
6794@@ -10,7 +10,7 @@
6795
6796 #include <linux/atomic.h>
6797
6798-extern atomic_t irq_err_count;
6799+extern atomic_unchecked_t irq_err_count;
6800
6801 /*
6802 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6803diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6804index 46dfc3c..a16b13a 100644
6805--- a/arch/mips/include/asm/local.h
6806+++ b/arch/mips/include/asm/local.h
6807@@ -12,15 +12,25 @@ typedef struct
6808 atomic_long_t a;
6809 } local_t;
6810
6811+typedef struct {
6812+ atomic_long_unchecked_t a;
6813+} local_unchecked_t;
6814+
6815 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6816
6817 #define local_read(l) atomic_long_read(&(l)->a)
6818+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6819 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6820+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6821
6822 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6823+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6824 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6825+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6826 #define local_inc(l) atomic_long_inc(&(l)->a)
6827+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6828 #define local_dec(l) atomic_long_dec(&(l)->a)
6829+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6830
6831 /*
6832 * Same as above, but return the result value
6833@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6834 return result;
6835 }
6836
6837+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6838+{
6839+ unsigned long result;
6840+
6841+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6842+ unsigned long temp;
6843+
6844+ __asm__ __volatile__(
6845+ " .set mips3 \n"
6846+ "1:" __LL "%1, %2 # local_add_return \n"
6847+ " addu %0, %1, %3 \n"
6848+ __SC "%0, %2 \n"
6849+ " beqzl %0, 1b \n"
6850+ " addu %0, %1, %3 \n"
6851+ " .set mips0 \n"
6852+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6853+ : "Ir" (i), "m" (l->a.counter)
6854+ : "memory");
6855+ } else if (kernel_uses_llsc) {
6856+ unsigned long temp;
6857+
6858+ __asm__ __volatile__(
6859+ " .set mips3 \n"
6860+ "1:" __LL "%1, %2 # local_add_return \n"
6861+ " addu %0, %1, %3 \n"
6862+ __SC "%0, %2 \n"
6863+ " beqz %0, 1b \n"
6864+ " addu %0, %1, %3 \n"
6865+ " .set mips0 \n"
6866+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6867+ : "Ir" (i), "m" (l->a.counter)
6868+ : "memory");
6869+ } else {
6870+ unsigned long flags;
6871+
6872+ local_irq_save(flags);
6873+ result = l->a.counter;
6874+ result += i;
6875+ l->a.counter = result;
6876+ local_irq_restore(flags);
6877+ }
6878+
6879+ return result;
6880+}
6881+
6882 static __inline__ long local_sub_return(long i, local_t * l)
6883 {
6884 unsigned long result;
6885@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6886
6887 #define local_cmpxchg(l, o, n) \
6888 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6889+#define local_cmpxchg_unchecked(l, o, n) \
6890+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6891 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6892
6893 /**
6894diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6895index 3be8180..c4798d5 100644
6896--- a/arch/mips/include/asm/page.h
6897+++ b/arch/mips/include/asm/page.h
6898@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6899 #ifdef CONFIG_CPU_MIPS32
6900 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6901 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6902- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6903+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6904 #else
6905 typedef struct { unsigned long long pte; } pte_t;
6906 #define pte_val(x) ((x).pte)
6907diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6908index b336037..5b874cc 100644
6909--- a/arch/mips/include/asm/pgalloc.h
6910+++ b/arch/mips/include/asm/pgalloc.h
6911@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6912 {
6913 set_pud(pud, __pud((unsigned long)pmd));
6914 }
6915+
6916+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6917+{
6918+ pud_populate(mm, pud, pmd);
6919+}
6920 #endif
6921
6922 /*
6923diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6924index df49a30..c0d3dd6 100644
6925--- a/arch/mips/include/asm/pgtable.h
6926+++ b/arch/mips/include/asm/pgtable.h
6927@@ -20,6 +20,9 @@
6928 #include <asm/io.h>
6929 #include <asm/pgtable-bits.h>
6930
6931+#define ktla_ktva(addr) (addr)
6932+#define ktva_ktla(addr) (addr)
6933+
6934 struct mm_struct;
6935 struct vm_area_struct;
6936
6937diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6938index 7de8658..c109224 100644
6939--- a/arch/mips/include/asm/thread_info.h
6940+++ b/arch/mips/include/asm/thread_info.h
6941@@ -105,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6942 #define TIF_SECCOMP 4 /* secure computing */
6943 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6944 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6945+/* li takes a 32bit immediate */
6946+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6947+
6948 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6949 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6950 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6951@@ -138,14 +141,16 @@ static inline struct thread_info *current_thread_info(void)
6952 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6953 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6954 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6955+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6956
6957 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6958 _TIF_SYSCALL_AUDIT | \
6959- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6960+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6961+ _TIF_GRSEC_SETXID)
6962
6963 /* work to do in syscall_trace_leave() */
6964 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6965- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6966+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6967
6968 /* work to do on interrupt/exception return */
6969 #define _TIF_WORK_MASK \
6970@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
6971 /* work to do on any return to u-space */
6972 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6973 _TIF_WORK_SYSCALL_EXIT | \
6974- _TIF_SYSCALL_TRACEPOINT)
6975+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6976
6977 /*
6978 * We stash processor id into a COP0 register to retrieve it fast
6979diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6980index b9ab717..3a15c28 100644
6981--- a/arch/mips/include/asm/uaccess.h
6982+++ b/arch/mips/include/asm/uaccess.h
6983@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6984 __ok == 0; \
6985 })
6986
6987+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6988 #define access_ok(type, addr, size) \
6989 likely(__access_ok((addr), (size), __access_mask))
6990
6991@@ -301,7 +302,8 @@ do { \
6992 __get_kernel_common((x), size, __gu_ptr); \
6993 else \
6994 __get_user_common((x), size, __gu_ptr); \
6995- } \
6996+ } else \
6997+ (x) = 0; \
6998 \
6999 __gu_err; \
7000 })
7001@@ -316,6 +318,7 @@ do { \
7002 " .insn \n" \
7003 " .section .fixup,\"ax\" \n" \
7004 "3: li %0, %4 \n" \
7005+ " move %1, $0 \n" \
7006 " j 2b \n" \
7007 " .previous \n" \
7008 " .section __ex_table,\"a\" \n" \
7009@@ -630,6 +633,7 @@ do { \
7010 " .insn \n" \
7011 " .section .fixup,\"ax\" \n" \
7012 "3: li %0, %4 \n" \
7013+ " move %1, $0 \n" \
7014 " j 2b \n" \
7015 " .previous \n" \
7016 " .section __ex_table,\"a\" \n" \
7017diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
7018index 1188e00..41cf144 100644
7019--- a/arch/mips/kernel/binfmt_elfn32.c
7020+++ b/arch/mips/kernel/binfmt_elfn32.c
7021@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7022 #undef ELF_ET_DYN_BASE
7023 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7024
7025+#ifdef CONFIG_PAX_ASLR
7026+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7027+
7028+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7029+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7030+#endif
7031+
7032 #include <asm/processor.h>
7033 #include <linux/module.h>
7034 #include <linux/elfcore.h>
7035diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
7036index 9287678..f870e47 100644
7037--- a/arch/mips/kernel/binfmt_elfo32.c
7038+++ b/arch/mips/kernel/binfmt_elfo32.c
7039@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7040 #undef ELF_ET_DYN_BASE
7041 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7042
7043+#ifdef CONFIG_PAX_ASLR
7044+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7045+
7046+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7047+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7048+#endif
7049+
7050 #include <asm/processor.h>
7051
7052 #include <linux/module.h>
7053diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
7054index 50b3648..c2f3cec 100644
7055--- a/arch/mips/kernel/i8259.c
7056+++ b/arch/mips/kernel/i8259.c
7057@@ -201,7 +201,7 @@ spurious_8259A_irq:
7058 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
7059 spurious_irq_mask |= irqmask;
7060 }
7061- atomic_inc(&irq_err_count);
7062+ atomic_inc_unchecked(&irq_err_count);
7063 /*
7064 * Theoretically we do not have to handle this IRQ,
7065 * but in Linux this does not cause problems and is
7066diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
7067index 44a1f79..2bd6aa3 100644
7068--- a/arch/mips/kernel/irq-gt641xx.c
7069+++ b/arch/mips/kernel/irq-gt641xx.c
7070@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
7071 }
7072 }
7073
7074- atomic_inc(&irq_err_count);
7075+ atomic_inc_unchecked(&irq_err_count);
7076 }
7077
7078 void __init gt641xx_irq_init(void)
7079diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
7080index d2bfbc2..a8eacd2 100644
7081--- a/arch/mips/kernel/irq.c
7082+++ b/arch/mips/kernel/irq.c
7083@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
7084 printk("unexpected IRQ # %d\n", irq);
7085 }
7086
7087-atomic_t irq_err_count;
7088+atomic_unchecked_t irq_err_count;
7089
7090 int arch_show_interrupts(struct seq_file *p, int prec)
7091 {
7092- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
7093+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
7094 return 0;
7095 }
7096
7097 asmlinkage void spurious_interrupt(void)
7098 {
7099- atomic_inc(&irq_err_count);
7100+ atomic_inc_unchecked(&irq_err_count);
7101 }
7102
7103 void __init init_IRQ(void)
7104@@ -109,7 +109,10 @@ void __init init_IRQ(void)
7105 #endif
7106 }
7107
7108+
7109 #ifdef DEBUG_STACKOVERFLOW
7110+extern void gr_handle_kernel_exploit(void);
7111+
7112 static inline void check_stack_overflow(void)
7113 {
7114 unsigned long sp;
7115@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
7116 printk("do_IRQ: stack overflow: %ld\n",
7117 sp - sizeof(struct thread_info));
7118 dump_stack();
7119+ gr_handle_kernel_exploit();
7120 }
7121 }
7122 #else
7123diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
7124index 0614717..002fa43 100644
7125--- a/arch/mips/kernel/pm-cps.c
7126+++ b/arch/mips/kernel/pm-cps.c
7127@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
7128 nc_core_ready_count = nc_addr;
7129
7130 /* Ensure ready_count is zero-initialised before the assembly runs */
7131- ACCESS_ONCE(*nc_core_ready_count) = 0;
7132+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
7133 coupled_barrier(&per_cpu(pm_barrier, core), online);
7134
7135 /* Run the generated entry code */
7136diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
7137index 636b074..8fbb91f 100644
7138--- a/arch/mips/kernel/process.c
7139+++ b/arch/mips/kernel/process.c
7140@@ -520,15 +520,3 @@ unsigned long get_wchan(struct task_struct *task)
7141 out:
7142 return pc;
7143 }
7144-
7145-/*
7146- * Don't forget that the stack pointer must be aligned on a 8 bytes
7147- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
7148- */
7149-unsigned long arch_align_stack(unsigned long sp)
7150-{
7151- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7152- sp -= get_random_int() & ~PAGE_MASK;
7153-
7154- return sp & ALMASK;
7155-}
7156diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
7157index 645b3c4..909c75a 100644
7158--- a/arch/mips/kernel/ptrace.c
7159+++ b/arch/mips/kernel/ptrace.c
7160@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
7161 return ret;
7162 }
7163
7164+#ifdef CONFIG_GRKERNSEC_SETXID
7165+extern void gr_delayed_cred_worker(void);
7166+#endif
7167+
7168 /*
7169 * Notification of system call entry/exit
7170 * - triggered by current->work.syscall_trace
7171@@ -777,6 +781,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
7172 tracehook_report_syscall_entry(regs))
7173 ret = -1;
7174
7175+#ifdef CONFIG_GRKERNSEC_SETXID
7176+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7177+ gr_delayed_cred_worker();
7178+#endif
7179+
7180 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7181 trace_sys_enter(regs, regs->regs[2]);
7182
7183diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
7184index 07fc524..b9d7f28 100644
7185--- a/arch/mips/kernel/reset.c
7186+++ b/arch/mips/kernel/reset.c
7187@@ -13,6 +13,7 @@
7188 #include <linux/reboot.h>
7189
7190 #include <asm/reboot.h>
7191+#include <asm/bug.h>
7192
7193 /*
7194 * Urgs ... Too many MIPS machines to handle this in a generic way.
7195@@ -29,16 +30,19 @@ void machine_restart(char *command)
7196 {
7197 if (_machine_restart)
7198 _machine_restart(command);
7199+ BUG();
7200 }
7201
7202 void machine_halt(void)
7203 {
7204 if (_machine_halt)
7205 _machine_halt();
7206+ BUG();
7207 }
7208
7209 void machine_power_off(void)
7210 {
7211 if (pm_power_off)
7212 pm_power_off();
7213+ BUG();
7214 }
7215diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
7216index 2242bdd..b284048 100644
7217--- a/arch/mips/kernel/sync-r4k.c
7218+++ b/arch/mips/kernel/sync-r4k.c
7219@@ -18,8 +18,8 @@
7220 #include <asm/mipsregs.h>
7221
7222 static atomic_t count_start_flag = ATOMIC_INIT(0);
7223-static atomic_t count_count_start = ATOMIC_INIT(0);
7224-static atomic_t count_count_stop = ATOMIC_INIT(0);
7225+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
7226+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
7227 static atomic_t count_reference = ATOMIC_INIT(0);
7228
7229 #define COUNTON 100
7230@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
7231
7232 for (i = 0; i < NR_LOOPS; i++) {
7233 /* slaves loop on '!= 2' */
7234- while (atomic_read(&count_count_start) != 1)
7235+ while (atomic_read_unchecked(&count_count_start) != 1)
7236 mb();
7237- atomic_set(&count_count_stop, 0);
7238+ atomic_set_unchecked(&count_count_stop, 0);
7239 smp_wmb();
7240
7241 /* this lets the slaves write their count register */
7242- atomic_inc(&count_count_start);
7243+ atomic_inc_unchecked(&count_count_start);
7244
7245 /*
7246 * Everyone initialises count in the last loop:
7247@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
7248 /*
7249 * Wait for all slaves to leave the synchronization point:
7250 */
7251- while (atomic_read(&count_count_stop) != 1)
7252+ while (atomic_read_unchecked(&count_count_stop) != 1)
7253 mb();
7254- atomic_set(&count_count_start, 0);
7255+ atomic_set_unchecked(&count_count_start, 0);
7256 smp_wmb();
7257- atomic_inc(&count_count_stop);
7258+ atomic_inc_unchecked(&count_count_stop);
7259 }
7260 /* Arrange for an interrupt in a short while */
7261 write_c0_compare(read_c0_count() + COUNTON);
7262@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
7263 initcount = atomic_read(&count_reference);
7264
7265 for (i = 0; i < NR_LOOPS; i++) {
7266- atomic_inc(&count_count_start);
7267- while (atomic_read(&count_count_start) != 2)
7268+ atomic_inc_unchecked(&count_count_start);
7269+ while (atomic_read_unchecked(&count_count_start) != 2)
7270 mb();
7271
7272 /*
7273@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
7274 if (i == NR_LOOPS-1)
7275 write_c0_count(initcount);
7276
7277- atomic_inc(&count_count_stop);
7278- while (atomic_read(&count_count_stop) != 2)
7279+ atomic_inc_unchecked(&count_count_stop);
7280+ while (atomic_read_unchecked(&count_count_stop) != 2)
7281 mb();
7282 }
7283 /* Arrange for an interrupt in a short while */
7284diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7285index 22b19c2..c5cc8c4 100644
7286--- a/arch/mips/kernel/traps.c
7287+++ b/arch/mips/kernel/traps.c
7288@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7289 siginfo_t info;
7290
7291 prev_state = exception_enter();
7292- die_if_kernel("Integer overflow", regs);
7293+ if (unlikely(!user_mode(regs))) {
7294+
7295+#ifdef CONFIG_PAX_REFCOUNT
7296+ if (fixup_exception(regs)) {
7297+ pax_report_refcount_overflow(regs);
7298+ exception_exit(prev_state);
7299+ return;
7300+ }
7301+#endif
7302+
7303+ die("Integer overflow", regs);
7304+ }
7305
7306 info.si_code = FPE_INTOVF;
7307 info.si_signo = SIGFPE;
7308diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
7309index cd71141..e02c4df 100644
7310--- a/arch/mips/kvm/mips.c
7311+++ b/arch/mips/kvm/mips.c
7312@@ -839,7 +839,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
7313 return r;
7314 }
7315
7316-int kvm_arch_init(void *opaque)
7317+int kvm_arch_init(const void *opaque)
7318 {
7319 if (kvm_mips_callbacks) {
7320 kvm_err("kvm: module already exists\n");
7321diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7322index becc42b..9e43d4b 100644
7323--- a/arch/mips/mm/fault.c
7324+++ b/arch/mips/mm/fault.c
7325@@ -28,6 +28,23 @@
7326 #include <asm/highmem.h> /* For VMALLOC_END */
7327 #include <linux/kdebug.h>
7328
7329+#ifdef CONFIG_PAX_PAGEEXEC
7330+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7331+{
7332+ unsigned long i;
7333+
7334+ printk(KERN_ERR "PAX: bytes at PC: ");
7335+ for (i = 0; i < 5; i++) {
7336+ unsigned int c;
7337+ if (get_user(c, (unsigned int *)pc+i))
7338+ printk(KERN_CONT "???????? ");
7339+ else
7340+ printk(KERN_CONT "%08x ", c);
7341+ }
7342+ printk("\n");
7343+}
7344+#endif
7345+
7346 /*
7347 * This routine handles page faults. It determines the address,
7348 * and the problem, and then passes it off to one of the appropriate
7349@@ -199,6 +216,14 @@ bad_area:
7350 bad_area_nosemaphore:
7351 /* User mode accesses just cause a SIGSEGV */
7352 if (user_mode(regs)) {
7353+
7354+#ifdef CONFIG_PAX_PAGEEXEC
7355+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7356+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7357+ do_group_exit(SIGKILL);
7358+ }
7359+#endif
7360+
7361 tsk->thread.cp0_badvaddr = address;
7362 tsk->thread.error_code = write;
7363 #if 0
7364diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7365index f1baadd..5472dca 100644
7366--- a/arch/mips/mm/mmap.c
7367+++ b/arch/mips/mm/mmap.c
7368@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7369 struct vm_area_struct *vma;
7370 unsigned long addr = addr0;
7371 int do_color_align;
7372+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7373 struct vm_unmapped_area_info info;
7374
7375 if (unlikely(len > TASK_SIZE))
7376@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7377 do_color_align = 1;
7378
7379 /* requesting a specific address */
7380+
7381+#ifdef CONFIG_PAX_RANDMMAP
7382+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7383+#endif
7384+
7385 if (addr) {
7386 if (do_color_align)
7387 addr = COLOUR_ALIGN(addr, pgoff);
7388@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7389 addr = PAGE_ALIGN(addr);
7390
7391 vma = find_vma(mm, addr);
7392- if (TASK_SIZE - len >= addr &&
7393- (!vma || addr + len <= vma->vm_start))
7394+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7395 return addr;
7396 }
7397
7398 info.length = len;
7399 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7400 info.align_offset = pgoff << PAGE_SHIFT;
7401+ info.threadstack_offset = offset;
7402
7403 if (dir == DOWN) {
7404 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7405@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7406 {
7407 unsigned long random_factor = 0UL;
7408
7409+#ifdef CONFIG_PAX_RANDMMAP
7410+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7411+#endif
7412+
7413 if (current->flags & PF_RANDOMIZE) {
7414 random_factor = get_random_int();
7415 random_factor = random_factor << PAGE_SHIFT;
7416@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7417
7418 if (mmap_is_legacy()) {
7419 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7420+
7421+#ifdef CONFIG_PAX_RANDMMAP
7422+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7423+ mm->mmap_base += mm->delta_mmap;
7424+#endif
7425+
7426 mm->get_unmapped_area = arch_get_unmapped_area;
7427 } else {
7428 mm->mmap_base = mmap_base(random_factor);
7429+
7430+#ifdef CONFIG_PAX_RANDMMAP
7431+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7432+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7433+#endif
7434+
7435 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7436 }
7437 }
7438
7439-static inline unsigned long brk_rnd(void)
7440-{
7441- unsigned long rnd = get_random_int();
7442-
7443- rnd = rnd << PAGE_SHIFT;
7444- /* 8MB for 32bit, 256MB for 64bit */
7445- if (TASK_IS_32BIT_ADDR)
7446- rnd = rnd & 0x7ffffful;
7447- else
7448- rnd = rnd & 0xffffffful;
7449-
7450- return rnd;
7451-}
7452-
7453-unsigned long arch_randomize_brk(struct mm_struct *mm)
7454-{
7455- unsigned long base = mm->brk;
7456- unsigned long ret;
7457-
7458- ret = PAGE_ALIGN(base + brk_rnd());
7459-
7460- if (ret < mm->brk)
7461- return mm->brk;
7462-
7463- return ret;
7464-}
7465-
7466 int __virt_addr_valid(const volatile void *kaddr)
7467 {
7468 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7469diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
7470index 9f7ecbd..6e370fc 100644
7471--- a/arch/mips/net/bpf_jit.c
7472+++ b/arch/mips/net/bpf_jit.c
7473@@ -1428,5 +1428,6 @@ void bpf_jit_free(struct bpf_prog *fp)
7474 {
7475 if (fp->jited)
7476 module_free(NULL, fp->bpf_func);
7477- kfree(fp);
7478+
7479+ bpf_prog_unlock_free(fp);
7480 }
7481diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7482index 59cccd9..f39ac2f 100644
7483--- a/arch/mips/pci/pci-octeon.c
7484+++ b/arch/mips/pci/pci-octeon.c
7485@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7486
7487
7488 static struct pci_ops octeon_pci_ops = {
7489- octeon_read_config,
7490- octeon_write_config,
7491+ .read = octeon_read_config,
7492+ .write = octeon_write_config,
7493 };
7494
7495 static struct resource octeon_pci_mem_resource = {
7496diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7497index 5e36c33..eb4a17b 100644
7498--- a/arch/mips/pci/pcie-octeon.c
7499+++ b/arch/mips/pci/pcie-octeon.c
7500@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7501 }
7502
7503 static struct pci_ops octeon_pcie0_ops = {
7504- octeon_pcie0_read_config,
7505- octeon_pcie0_write_config,
7506+ .read = octeon_pcie0_read_config,
7507+ .write = octeon_pcie0_write_config,
7508 };
7509
7510 static struct resource octeon_pcie0_mem_resource = {
7511@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7512 };
7513
7514 static struct pci_ops octeon_pcie1_ops = {
7515- octeon_pcie1_read_config,
7516- octeon_pcie1_write_config,
7517+ .read = octeon_pcie1_read_config,
7518+ .write = octeon_pcie1_write_config,
7519 };
7520
7521 static struct resource octeon_pcie1_mem_resource = {
7522@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7523 };
7524
7525 static struct pci_ops octeon_dummy_ops = {
7526- octeon_dummy_read_config,
7527- octeon_dummy_write_config,
7528+ .read = octeon_dummy_read_config,
7529+ .write = octeon_dummy_write_config,
7530 };
7531
7532 static struct resource octeon_dummy_mem_resource = {
7533diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7534index a2358b4..7cead4f 100644
7535--- a/arch/mips/sgi-ip27/ip27-nmi.c
7536+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7537@@ -187,9 +187,9 @@ void
7538 cont_nmi_dump(void)
7539 {
7540 #ifndef REAL_NMI_SIGNAL
7541- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7542+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7543
7544- atomic_inc(&nmied_cpus);
7545+ atomic_inc_unchecked(&nmied_cpus);
7546 #endif
7547 /*
7548 * Only allow 1 cpu to proceed
7549@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7550 udelay(10000);
7551 }
7552 #else
7553- while (atomic_read(&nmied_cpus) != num_online_cpus());
7554+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7555 #endif
7556
7557 /*
7558diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7559index a046b30..6799527 100644
7560--- a/arch/mips/sni/rm200.c
7561+++ b/arch/mips/sni/rm200.c
7562@@ -270,7 +270,7 @@ spurious_8259A_irq:
7563 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7564 spurious_irq_mask |= irqmask;
7565 }
7566- atomic_inc(&irq_err_count);
7567+ atomic_inc_unchecked(&irq_err_count);
7568 /*
7569 * Theoretically we do not have to handle this IRQ,
7570 * but in Linux this does not cause problems and is
7571diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7572index 41e873b..34d33a7 100644
7573--- a/arch/mips/vr41xx/common/icu.c
7574+++ b/arch/mips/vr41xx/common/icu.c
7575@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7576
7577 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7578
7579- atomic_inc(&irq_err_count);
7580+ atomic_inc_unchecked(&irq_err_count);
7581
7582 return -1;
7583 }
7584diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7585index ae0e4ee..e8f0692 100644
7586--- a/arch/mips/vr41xx/common/irq.c
7587+++ b/arch/mips/vr41xx/common/irq.c
7588@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7589 irq_cascade_t *cascade;
7590
7591 if (irq >= NR_IRQS) {
7592- atomic_inc(&irq_err_count);
7593+ atomic_inc_unchecked(&irq_err_count);
7594 return;
7595 }
7596
7597@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7598 ret = cascade->get_irq(irq);
7599 irq = ret;
7600 if (ret < 0)
7601- atomic_inc(&irq_err_count);
7602+ atomic_inc_unchecked(&irq_err_count);
7603 else
7604 irq_dispatch(irq);
7605 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7606diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7607index 967d144..db12197 100644
7608--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7609+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7610@@ -11,12 +11,14 @@
7611 #ifndef _ASM_PROC_CACHE_H
7612 #define _ASM_PROC_CACHE_H
7613
7614+#include <linux/const.h>
7615+
7616 /* L1 cache */
7617
7618 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7619 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7620-#define L1_CACHE_BYTES 16 /* bytes per entry */
7621 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7622+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7623 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7624
7625 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7626diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7627index bcb5df2..84fabd2 100644
7628--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7629+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7630@@ -16,13 +16,15 @@
7631 #ifndef _ASM_PROC_CACHE_H
7632 #define _ASM_PROC_CACHE_H
7633
7634+#include <linux/const.h>
7635+
7636 /*
7637 * L1 cache
7638 */
7639 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7640 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7641-#define L1_CACHE_BYTES 32 /* bytes per entry */
7642 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7643+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7644 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7645
7646 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7647diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7648index 4ce7a01..449202a 100644
7649--- a/arch/openrisc/include/asm/cache.h
7650+++ b/arch/openrisc/include/asm/cache.h
7651@@ -19,11 +19,13 @@
7652 #ifndef __ASM_OPENRISC_CACHE_H
7653 #define __ASM_OPENRISC_CACHE_H
7654
7655+#include <linux/const.h>
7656+
7657 /* FIXME: How can we replace these with values from the CPU...
7658 * they shouldn't be hard-coded!
7659 */
7660
7661-#define L1_CACHE_BYTES 16
7662 #define L1_CACHE_SHIFT 4
7663+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7664
7665 #endif /* __ASM_OPENRISC_CACHE_H */
7666diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7667index 0be2db2..1b0f26d 100644
7668--- a/arch/parisc/include/asm/atomic.h
7669+++ b/arch/parisc/include/asm/atomic.h
7670@@ -248,6 +248,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7671 return dec;
7672 }
7673
7674+#define atomic64_read_unchecked(v) atomic64_read(v)
7675+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7676+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7677+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7678+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7679+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7680+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7681+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7682+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7683+
7684 #endif /* !CONFIG_64BIT */
7685
7686
7687diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7688index 47f11c7..3420df2 100644
7689--- a/arch/parisc/include/asm/cache.h
7690+++ b/arch/parisc/include/asm/cache.h
7691@@ -5,6 +5,7 @@
7692 #ifndef __ARCH_PARISC_CACHE_H
7693 #define __ARCH_PARISC_CACHE_H
7694
7695+#include <linux/const.h>
7696
7697 /*
7698 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7699@@ -15,13 +16,13 @@
7700 * just ruin performance.
7701 */
7702 #ifdef CONFIG_PA20
7703-#define L1_CACHE_BYTES 64
7704 #define L1_CACHE_SHIFT 6
7705 #else
7706-#define L1_CACHE_BYTES 32
7707 #define L1_CACHE_SHIFT 5
7708 #endif
7709
7710+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7711+
7712 #ifndef __ASSEMBLY__
7713
7714 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7715diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7716index 3391d06..c23a2cc 100644
7717--- a/arch/parisc/include/asm/elf.h
7718+++ b/arch/parisc/include/asm/elf.h
7719@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7720
7721 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7722
7723+#ifdef CONFIG_PAX_ASLR
7724+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7725+
7726+#define PAX_DELTA_MMAP_LEN 16
7727+#define PAX_DELTA_STACK_LEN 16
7728+#endif
7729+
7730 /* This yields a mask that user programs can use to figure out what
7731 instruction set this CPU supports. This could be done in user space,
7732 but it's not easy, and we've already done it here. */
7733diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7734index f213f5b..0af3e8e 100644
7735--- a/arch/parisc/include/asm/pgalloc.h
7736+++ b/arch/parisc/include/asm/pgalloc.h
7737@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7738 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7739 }
7740
7741+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7742+{
7743+ pgd_populate(mm, pgd, pmd);
7744+}
7745+
7746 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7747 {
7748 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7749@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7750 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7751 #define pmd_free(mm, x) do { } while (0)
7752 #define pgd_populate(mm, pmd, pte) BUG()
7753+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7754
7755 #endif
7756
7757diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7758index 22b89d1..ce34230 100644
7759--- a/arch/parisc/include/asm/pgtable.h
7760+++ b/arch/parisc/include/asm/pgtable.h
7761@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7762 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7763 #define PAGE_COPY PAGE_EXECREAD
7764 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7765+
7766+#ifdef CONFIG_PAX_PAGEEXEC
7767+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7768+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7769+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7770+#else
7771+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7772+# define PAGE_COPY_NOEXEC PAGE_COPY
7773+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7774+#endif
7775+
7776 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7777 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7778 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7779diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7780index 4006964..fcb3cc2 100644
7781--- a/arch/parisc/include/asm/uaccess.h
7782+++ b/arch/parisc/include/asm/uaccess.h
7783@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7784 const void __user *from,
7785 unsigned long n)
7786 {
7787- int sz = __compiletime_object_size(to);
7788+ size_t sz = __compiletime_object_size(to);
7789 int ret = -EFAULT;
7790
7791- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7792+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7793 ret = __copy_from_user(to, from, n);
7794 else
7795 copy_from_user_overflow();
7796diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7797index 50dfafc..b9fc230 100644
7798--- a/arch/parisc/kernel/module.c
7799+++ b/arch/parisc/kernel/module.c
7800@@ -98,16 +98,38 @@
7801
7802 /* three functions to determine where in the module core
7803 * or init pieces the location is */
7804+static inline int in_init_rx(struct module *me, void *loc)
7805+{
7806+ return (loc >= me->module_init_rx &&
7807+ loc < (me->module_init_rx + me->init_size_rx));
7808+}
7809+
7810+static inline int in_init_rw(struct module *me, void *loc)
7811+{
7812+ return (loc >= me->module_init_rw &&
7813+ loc < (me->module_init_rw + me->init_size_rw));
7814+}
7815+
7816 static inline int in_init(struct module *me, void *loc)
7817 {
7818- return (loc >= me->module_init &&
7819- loc <= (me->module_init + me->init_size));
7820+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7821+}
7822+
7823+static inline int in_core_rx(struct module *me, void *loc)
7824+{
7825+ return (loc >= me->module_core_rx &&
7826+ loc < (me->module_core_rx + me->core_size_rx));
7827+}
7828+
7829+static inline int in_core_rw(struct module *me, void *loc)
7830+{
7831+ return (loc >= me->module_core_rw &&
7832+ loc < (me->module_core_rw + me->core_size_rw));
7833 }
7834
7835 static inline int in_core(struct module *me, void *loc)
7836 {
7837- return (loc >= me->module_core &&
7838- loc <= (me->module_core + me->core_size));
7839+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7840 }
7841
7842 static inline int in_local(struct module *me, void *loc)
7843@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7844 }
7845
7846 /* align things a bit */
7847- me->core_size = ALIGN(me->core_size, 16);
7848- me->arch.got_offset = me->core_size;
7849- me->core_size += gots * sizeof(struct got_entry);
7850+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7851+ me->arch.got_offset = me->core_size_rw;
7852+ me->core_size_rw += gots * sizeof(struct got_entry);
7853
7854- me->core_size = ALIGN(me->core_size, 16);
7855- me->arch.fdesc_offset = me->core_size;
7856- me->core_size += fdescs * sizeof(Elf_Fdesc);
7857+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7858+ me->arch.fdesc_offset = me->core_size_rw;
7859+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7860
7861 me->arch.got_max = gots;
7862 me->arch.fdesc_max = fdescs;
7863@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7864
7865 BUG_ON(value == 0);
7866
7867- got = me->module_core + me->arch.got_offset;
7868+ got = me->module_core_rw + me->arch.got_offset;
7869 for (i = 0; got[i].addr; i++)
7870 if (got[i].addr == value)
7871 goto out;
7872@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7873 #ifdef CONFIG_64BIT
7874 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7875 {
7876- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7877+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7878
7879 if (!value) {
7880 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7881@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7882
7883 /* Create new one */
7884 fdesc->addr = value;
7885- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7886+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7887 return (Elf_Addr)fdesc;
7888 }
7889 #endif /* CONFIG_64BIT */
7890@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7891
7892 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7893 end = table + sechdrs[me->arch.unwind_section].sh_size;
7894- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7895+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7896
7897 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7898 me->arch.unwind_section, table, end, gp);
7899diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7900index e1ffea2..46ed66e 100644
7901--- a/arch/parisc/kernel/sys_parisc.c
7902+++ b/arch/parisc/kernel/sys_parisc.c
7903@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7904 unsigned long task_size = TASK_SIZE;
7905 int do_color_align, last_mmap;
7906 struct vm_unmapped_area_info info;
7907+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7908
7909 if (len > task_size)
7910 return -ENOMEM;
7911@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7912 goto found_addr;
7913 }
7914
7915+#ifdef CONFIG_PAX_RANDMMAP
7916+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7917+#endif
7918+
7919 if (addr) {
7920 if (do_color_align && last_mmap)
7921 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7922@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7923 info.high_limit = mmap_upper_limit();
7924 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7925 info.align_offset = shared_align_offset(last_mmap, pgoff);
7926+ info.threadstack_offset = offset;
7927 addr = vm_unmapped_area(&info);
7928
7929 found_addr:
7930@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7931 unsigned long addr = addr0;
7932 int do_color_align, last_mmap;
7933 struct vm_unmapped_area_info info;
7934+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7935
7936 #ifdef CONFIG_64BIT
7937 /* This should only ever run for 32-bit processes. */
7938@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7939 }
7940
7941 /* requesting a specific address */
7942+#ifdef CONFIG_PAX_RANDMMAP
7943+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7944+#endif
7945+
7946 if (addr) {
7947 if (do_color_align && last_mmap)
7948 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7949@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7950 info.high_limit = mm->mmap_base;
7951 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7952 info.align_offset = shared_align_offset(last_mmap, pgoff);
7953+ info.threadstack_offset = offset;
7954 addr = vm_unmapped_area(&info);
7955 if (!(addr & ~PAGE_MASK))
7956 goto found_addr;
7957@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7958 mm->mmap_legacy_base = mmap_legacy_base();
7959 mm->mmap_base = mmap_upper_limit();
7960
7961+#ifdef CONFIG_PAX_RANDMMAP
7962+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7963+ mm->mmap_legacy_base += mm->delta_mmap;
7964+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7965+ }
7966+#endif
7967+
7968 if (mmap_is_legacy()) {
7969 mm->mmap_base = mm->mmap_legacy_base;
7970 mm->get_unmapped_area = arch_get_unmapped_area;
7971diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7972index 47ee620..1107387 100644
7973--- a/arch/parisc/kernel/traps.c
7974+++ b/arch/parisc/kernel/traps.c
7975@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7976
7977 down_read(&current->mm->mmap_sem);
7978 vma = find_vma(current->mm,regs->iaoq[0]);
7979- if (vma && (regs->iaoq[0] >= vma->vm_start)
7980- && (vma->vm_flags & VM_EXEC)) {
7981-
7982+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7983 fault_address = regs->iaoq[0];
7984 fault_space = regs->iasq[0];
7985
7986diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7987index 3ca9c11..d163ef7 100644
7988--- a/arch/parisc/mm/fault.c
7989+++ b/arch/parisc/mm/fault.c
7990@@ -15,6 +15,7 @@
7991 #include <linux/sched.h>
7992 #include <linux/interrupt.h>
7993 #include <linux/module.h>
7994+#include <linux/unistd.h>
7995
7996 #include <asm/uaccess.h>
7997 #include <asm/traps.h>
7998@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7999 static unsigned long
8000 parisc_acctyp(unsigned long code, unsigned int inst)
8001 {
8002- if (code == 6 || code == 16)
8003+ if (code == 6 || code == 7 || code == 16)
8004 return VM_EXEC;
8005
8006 switch (inst & 0xf0000000) {
8007@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
8008 }
8009 #endif
8010
8011+#ifdef CONFIG_PAX_PAGEEXEC
8012+/*
8013+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
8014+ *
8015+ * returns 1 when task should be killed
8016+ * 2 when rt_sigreturn trampoline was detected
8017+ * 3 when unpatched PLT trampoline was detected
8018+ */
8019+static int pax_handle_fetch_fault(struct pt_regs *regs)
8020+{
8021+
8022+#ifdef CONFIG_PAX_EMUPLT
8023+ int err;
8024+
8025+ do { /* PaX: unpatched PLT emulation */
8026+ unsigned int bl, depwi;
8027+
8028+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
8029+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
8030+
8031+ if (err)
8032+ break;
8033+
8034+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
8035+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
8036+
8037+ err = get_user(ldw, (unsigned int *)addr);
8038+ err |= get_user(bv, (unsigned int *)(addr+4));
8039+ err |= get_user(ldw2, (unsigned int *)(addr+8));
8040+
8041+ if (err)
8042+ break;
8043+
8044+ if (ldw == 0x0E801096U &&
8045+ bv == 0xEAC0C000U &&
8046+ ldw2 == 0x0E881095U)
8047+ {
8048+ unsigned int resolver, map;
8049+
8050+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
8051+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
8052+ if (err)
8053+ break;
8054+
8055+ regs->gr[20] = instruction_pointer(regs)+8;
8056+ regs->gr[21] = map;
8057+ regs->gr[22] = resolver;
8058+ regs->iaoq[0] = resolver | 3UL;
8059+ regs->iaoq[1] = regs->iaoq[0] + 4;
8060+ return 3;
8061+ }
8062+ }
8063+ } while (0);
8064+#endif
8065+
8066+#ifdef CONFIG_PAX_EMUTRAMP
8067+
8068+#ifndef CONFIG_PAX_EMUSIGRT
8069+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
8070+ return 1;
8071+#endif
8072+
8073+ do { /* PaX: rt_sigreturn emulation */
8074+ unsigned int ldi1, ldi2, bel, nop;
8075+
8076+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
8077+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
8078+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
8079+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
8080+
8081+ if (err)
8082+ break;
8083+
8084+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
8085+ ldi2 == 0x3414015AU &&
8086+ bel == 0xE4008200U &&
8087+ nop == 0x08000240U)
8088+ {
8089+ regs->gr[25] = (ldi1 & 2) >> 1;
8090+ regs->gr[20] = __NR_rt_sigreturn;
8091+ regs->gr[31] = regs->iaoq[1] + 16;
8092+ regs->sr[0] = regs->iasq[1];
8093+ regs->iaoq[0] = 0x100UL;
8094+ regs->iaoq[1] = regs->iaoq[0] + 4;
8095+ regs->iasq[0] = regs->sr[2];
8096+ regs->iasq[1] = regs->sr[2];
8097+ return 2;
8098+ }
8099+ } while (0);
8100+#endif
8101+
8102+ return 1;
8103+}
8104+
8105+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8106+{
8107+ unsigned long i;
8108+
8109+ printk(KERN_ERR "PAX: bytes at PC: ");
8110+ for (i = 0; i < 5; i++) {
8111+ unsigned int c;
8112+ if (get_user(c, (unsigned int *)pc+i))
8113+ printk(KERN_CONT "???????? ");
8114+ else
8115+ printk(KERN_CONT "%08x ", c);
8116+ }
8117+ printk("\n");
8118+}
8119+#endif
8120+
8121 int fixup_exception(struct pt_regs *regs)
8122 {
8123 const struct exception_table_entry *fix;
8124@@ -234,8 +345,33 @@ retry:
8125
8126 good_area:
8127
8128- if ((vma->vm_flags & acc_type) != acc_type)
8129+ if ((vma->vm_flags & acc_type) != acc_type) {
8130+
8131+#ifdef CONFIG_PAX_PAGEEXEC
8132+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
8133+ (address & ~3UL) == instruction_pointer(regs))
8134+ {
8135+ up_read(&mm->mmap_sem);
8136+ switch (pax_handle_fetch_fault(regs)) {
8137+
8138+#ifdef CONFIG_PAX_EMUPLT
8139+ case 3:
8140+ return;
8141+#endif
8142+
8143+#ifdef CONFIG_PAX_EMUTRAMP
8144+ case 2:
8145+ return;
8146+#endif
8147+
8148+ }
8149+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
8150+ do_group_exit(SIGKILL);
8151+ }
8152+#endif
8153+
8154 goto bad_area;
8155+ }
8156
8157 /*
8158 * If for any reason at all we couldn't handle the fault, make
8159diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
8160index 4bc7b62..107e0b2 100644
8161--- a/arch/powerpc/Kconfig
8162+++ b/arch/powerpc/Kconfig
8163@@ -399,6 +399,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
8164 config KEXEC
8165 bool "kexec system call"
8166 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
8167+ depends on !GRKERNSEC_KMEM
8168 help
8169 kexec is a system call that implements the ability to shutdown your
8170 current kernel, and to start another kernel. It is like a reboot
8171diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
8172index 28992d0..434c881 100644
8173--- a/arch/powerpc/include/asm/atomic.h
8174+++ b/arch/powerpc/include/asm/atomic.h
8175@@ -12,6 +12,11 @@
8176
8177 #define ATOMIC_INIT(i) { (i) }
8178
8179+#define _ASM_EXTABLE(from, to) \
8180+" .section __ex_table,\"a\"\n" \
8181+ PPC_LONG" " #from ", " #to"\n" \
8182+" .previous\n"
8183+
8184 static __inline__ int atomic_read(const atomic_t *v)
8185 {
8186 int t;
8187@@ -21,16 +26,61 @@ static __inline__ int atomic_read(const atomic_t *v)
8188 return t;
8189 }
8190
8191+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
8192+{
8193+ int t;
8194+
8195+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8196+
8197+ return t;
8198+}
8199+
8200 static __inline__ void atomic_set(atomic_t *v, int i)
8201 {
8202 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8203 }
8204
8205+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8206+{
8207+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8208+}
8209+
8210 static __inline__ void atomic_add(int a, atomic_t *v)
8211 {
8212 int t;
8213
8214 __asm__ __volatile__(
8215+"1: lwarx %0,0,%3 # atomic_add\n"
8216+
8217+#ifdef CONFIG_PAX_REFCOUNT
8218+" mcrxr cr0\n"
8219+" addo. %0,%2,%0\n"
8220+" bf 4*cr0+so, 3f\n"
8221+"2:.long " "0x00c00b00""\n"
8222+#else
8223+" add %0,%2,%0\n"
8224+#endif
8225+
8226+"3:\n"
8227+ PPC405_ERR77(0,%3)
8228+" stwcx. %0,0,%3 \n\
8229+ bne- 1b"
8230+
8231+#ifdef CONFIG_PAX_REFCOUNT
8232+"\n4:\n"
8233+ _ASM_EXTABLE(2b, 4b)
8234+#endif
8235+
8236+ : "=&r" (t), "+m" (v->counter)
8237+ : "r" (a), "r" (&v->counter)
8238+ : "cc");
8239+}
8240+
8241+static __inline__ void atomic_add_unchecked(int a, atomic_unchecked_t *v)
8242+{
8243+ int t;
8244+
8245+ __asm__ __volatile__(
8246 "1: lwarx %0,0,%3 # atomic_add\n\
8247 add %0,%2,%0\n"
8248 PPC405_ERR77(0,%3)
8249@@ -41,12 +91,49 @@ static __inline__ void atomic_add(int a, atomic_t *v)
8250 : "cc");
8251 }
8252
8253+/* Same as atomic_add but return the value */
8254 static __inline__ int atomic_add_return(int a, atomic_t *v)
8255 {
8256 int t;
8257
8258 __asm__ __volatile__(
8259 PPC_ATOMIC_ENTRY_BARRIER
8260+"1: lwarx %0,0,%2 # atomic_add_return\n"
8261+
8262+#ifdef CONFIG_PAX_REFCOUNT
8263+" mcrxr cr0\n"
8264+" addo. %0,%1,%0\n"
8265+" bf 4*cr0+so, 3f\n"
8266+"2:.long " "0x00c00b00""\n"
8267+#else
8268+" add %0,%1,%0\n"
8269+#endif
8270+
8271+"3:\n"
8272+ PPC405_ERR77(0,%2)
8273+" stwcx. %0,0,%2 \n\
8274+ bne- 1b\n"
8275+"4:"
8276+
8277+#ifdef CONFIG_PAX_REFCOUNT
8278+ _ASM_EXTABLE(2b, 4b)
8279+#endif
8280+
8281+ PPC_ATOMIC_EXIT_BARRIER
8282+ : "=&r" (t)
8283+ : "r" (a), "r" (&v->counter)
8284+ : "cc", "memory");
8285+
8286+ return t;
8287+}
8288+
8289+/* Same as atomic_add_unchecked but return the value */
8290+static __inline__ int atomic_add_return_unchecked(int a, atomic_unchecked_t *v)
8291+{
8292+ int t;
8293+
8294+ __asm__ __volatile__(
8295+ PPC_ATOMIC_ENTRY_BARRIER
8296 "1: lwarx %0,0,%2 # atomic_add_return\n\
8297 add %0,%1,%0\n"
8298 PPC405_ERR77(0,%2)
8299@@ -67,6 +154,37 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
8300 int t;
8301
8302 __asm__ __volatile__(
8303+"1: lwarx %0,0,%3 # atomic_sub\n"
8304+
8305+#ifdef CONFIG_PAX_REFCOUNT
8306+" mcrxr cr0\n"
8307+" subfo. %0,%2,%0\n"
8308+" bf 4*cr0+so, 3f\n"
8309+"2:.long " "0x00c00b00""\n"
8310+#else
8311+" subf %0,%2,%0\n"
8312+#endif
8313+
8314+"3:\n"
8315+ PPC405_ERR77(0,%3)
8316+" stwcx. %0,0,%3 \n\
8317+ bne- 1b\n"
8318+"4:"
8319+
8320+#ifdef CONFIG_PAX_REFCOUNT
8321+ _ASM_EXTABLE(2b, 4b)
8322+#endif
8323+
8324+ : "=&r" (t), "+m" (v->counter)
8325+ : "r" (a), "r" (&v->counter)
8326+ : "cc");
8327+}
8328+
8329+static __inline__ void atomic_sub_unchecked(int a, atomic_unchecked_t *v)
8330+{
8331+ int t;
8332+
8333+ __asm__ __volatile__(
8334 "1: lwarx %0,0,%3 # atomic_sub\n\
8335 subf %0,%2,%0\n"
8336 PPC405_ERR77(0,%3)
8337@@ -77,12 +195,49 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
8338 : "cc");
8339 }
8340
8341+/* Same as atomic_sub but return the value */
8342 static __inline__ int atomic_sub_return(int a, atomic_t *v)
8343 {
8344 int t;
8345
8346 __asm__ __volatile__(
8347 PPC_ATOMIC_ENTRY_BARRIER
8348+"1: lwarx %0,0,%2 # atomic_sub_return\n"
8349+
8350+#ifdef CONFIG_PAX_REFCOUNT
8351+" mcrxr cr0\n"
8352+" subfo. %0,%1,%0\n"
8353+" bf 4*cr0+so, 3f\n"
8354+"2:.long " "0x00c00b00""\n"
8355+#else
8356+" subf %0,%1,%0\n"
8357+#endif
8358+
8359+"3:\n"
8360+ PPC405_ERR77(0,%2)
8361+" stwcx. %0,0,%2 \n\
8362+ bne- 1b\n"
8363+ PPC_ATOMIC_EXIT_BARRIER
8364+"4:"
8365+
8366+#ifdef CONFIG_PAX_REFCOUNT
8367+ _ASM_EXTABLE(2b, 4b)
8368+#endif
8369+
8370+ : "=&r" (t)
8371+ : "r" (a), "r" (&v->counter)
8372+ : "cc", "memory");
8373+
8374+ return t;
8375+}
8376+
8377+/* Same as atomic_sub_unchecked but return the value */
8378+static __inline__ int atomic_sub_return_unchecked(int a, atomic_unchecked_t *v)
8379+{
8380+ int t;
8381+
8382+ __asm__ __volatile__(
8383+ PPC_ATOMIC_ENTRY_BARRIER
8384 "1: lwarx %0,0,%2 # atomic_sub_return\n\
8385 subf %0,%1,%0\n"
8386 PPC405_ERR77(0,%2)
8387@@ -96,38 +251,23 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
8388 return t;
8389 }
8390
8391-static __inline__ void atomic_inc(atomic_t *v)
8392-{
8393- int t;
8394+/*
8395+ * atomic_inc - increment atomic variable
8396+ * @v: pointer of type atomic_t
8397+ *
8398+ * Automatically increments @v by 1
8399+ */
8400+#define atomic_inc(v) atomic_add(1, (v))
8401+#define atomic_inc_return(v) atomic_add_return(1, (v))
8402
8403- __asm__ __volatile__(
8404-"1: lwarx %0,0,%2 # atomic_inc\n\
8405- addic %0,%0,1\n"
8406- PPC405_ERR77(0,%2)
8407-" stwcx. %0,0,%2 \n\
8408- bne- 1b"
8409- : "=&r" (t), "+m" (v->counter)
8410- : "r" (&v->counter)
8411- : "cc", "xer");
8412+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
8413+{
8414+ atomic_add_unchecked(1, v);
8415 }
8416
8417-static __inline__ int atomic_inc_return(atomic_t *v)
8418+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8419 {
8420- int t;
8421-
8422- __asm__ __volatile__(
8423- PPC_ATOMIC_ENTRY_BARRIER
8424-"1: lwarx %0,0,%1 # atomic_inc_return\n\
8425- addic %0,%0,1\n"
8426- PPC405_ERR77(0,%1)
8427-" stwcx. %0,0,%1 \n\
8428- bne- 1b"
8429- PPC_ATOMIC_EXIT_BARRIER
8430- : "=&r" (t)
8431- : "r" (&v->counter)
8432- : "cc", "xer", "memory");
8433-
8434- return t;
8435+ return atomic_add_return_unchecked(1, v);
8436 }
8437
8438 /*
8439@@ -140,43 +280,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
8440 */
8441 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8442
8443-static __inline__ void atomic_dec(atomic_t *v)
8444+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8445 {
8446- int t;
8447-
8448- __asm__ __volatile__(
8449-"1: lwarx %0,0,%2 # atomic_dec\n\
8450- addic %0,%0,-1\n"
8451- PPC405_ERR77(0,%2)\
8452-" stwcx. %0,0,%2\n\
8453- bne- 1b"
8454- : "=&r" (t), "+m" (v->counter)
8455- : "r" (&v->counter)
8456- : "cc", "xer");
8457+ return atomic_add_return_unchecked(1, v) == 0;
8458 }
8459
8460-static __inline__ int atomic_dec_return(atomic_t *v)
8461+/*
8462+ * atomic_dec - decrement atomic variable
8463+ * @v: pointer of type atomic_t
8464+ *
8465+ * Atomically decrements @v by 1
8466+ */
8467+#define atomic_dec(v) atomic_sub(1, (v))
8468+#define atomic_dec_return(v) atomic_sub_return(1, (v))
8469+
8470+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
8471 {
8472- int t;
8473-
8474- __asm__ __volatile__(
8475- PPC_ATOMIC_ENTRY_BARRIER
8476-"1: lwarx %0,0,%1 # atomic_dec_return\n\
8477- addic %0,%0,-1\n"
8478- PPC405_ERR77(0,%1)
8479-" stwcx. %0,0,%1\n\
8480- bne- 1b"
8481- PPC_ATOMIC_EXIT_BARRIER
8482- : "=&r" (t)
8483- : "r" (&v->counter)
8484- : "cc", "xer", "memory");
8485-
8486- return t;
8487+ atomic_sub_unchecked(1, v);
8488 }
8489
8490 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8491 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8492
8493+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8494+{
8495+ return cmpxchg(&(v->counter), old, new);
8496+}
8497+
8498+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8499+{
8500+ return xchg(&(v->counter), new);
8501+}
8502+
8503 /**
8504 * __atomic_add_unless - add unless the number is a given value
8505 * @v: pointer of type atomic_t
8506@@ -271,6 +406,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
8507 }
8508 #define atomic_dec_if_positive atomic_dec_if_positive
8509
8510+#define smp_mb__before_atomic_dec() smp_mb()
8511+#define smp_mb__after_atomic_dec() smp_mb()
8512+#define smp_mb__before_atomic_inc() smp_mb()
8513+#define smp_mb__after_atomic_inc() smp_mb()
8514+
8515 #ifdef __powerpc64__
8516
8517 #define ATOMIC64_INIT(i) { (i) }
8518@@ -284,11 +424,25 @@ static __inline__ long atomic64_read(const atomic64_t *v)
8519 return t;
8520 }
8521
8522+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8523+{
8524+ long t;
8525+
8526+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8527+
8528+ return t;
8529+}
8530+
8531 static __inline__ void atomic64_set(atomic64_t *v, long i)
8532 {
8533 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8534 }
8535
8536+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8537+{
8538+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8539+}
8540+
8541 static __inline__ void atomic64_add(long a, atomic64_t *v)
8542 {
8543 long t;
8544@@ -303,12 +457,76 @@ static __inline__ void atomic64_add(long a, atomic64_t *v)
8545 : "cc");
8546 }
8547
8548+static __inline__ void atomic64_add_unchecked(long a, atomic64_unchecked_t *v)
8549+{
8550+ long t;
8551+
8552+ __asm__ __volatile__(
8553+"1: ldarx %0,0,%3 # atomic64_add\n"
8554+
8555+#ifdef CONFIG_PAX_REFCOUNT
8556+" mcrxr cr0\n"
8557+" addo. %0,%2,%0\n"
8558+" bf 4*cr0+so, 3f\n"
8559+"2:.long " "0x00c00b00""\n"
8560+#else
8561+" add %0,%2,%0\n"
8562+#endif
8563+
8564+"3:\n"
8565+" stdcx. %0,0,%3 \n\
8566+ bne- 1b\n"
8567+"4:"
8568+
8569+#ifdef CONFIG_PAX_REFCOUNT
8570+ _ASM_EXTABLE(2b, 4b)
8571+#endif
8572+
8573+ : "=&r" (t), "+m" (v->counter)
8574+ : "r" (a), "r" (&v->counter)
8575+ : "cc");
8576+}
8577+
8578 static __inline__ long atomic64_add_return(long a, atomic64_t *v)
8579 {
8580 long t;
8581
8582 __asm__ __volatile__(
8583 PPC_ATOMIC_ENTRY_BARRIER
8584+"1: ldarx %0,0,%2 # atomic64_add_return\n"
8585+
8586+#ifdef CONFIG_PAX_REFCOUNT
8587+" mcrxr cr0\n"
8588+" addo. %0,%1,%0\n"
8589+" bf 4*cr0+so, 3f\n"
8590+"2:.long " "0x00c00b00""\n"
8591+#else
8592+" add %0,%1,%0\n"
8593+#endif
8594+
8595+"3:\n"
8596+" stdcx. %0,0,%2 \n\
8597+ bne- 1b\n"
8598+ PPC_ATOMIC_EXIT_BARRIER
8599+"4:"
8600+
8601+#ifdef CONFIG_PAX_REFCOUNT
8602+ _ASM_EXTABLE(2b, 4b)
8603+#endif
8604+
8605+ : "=&r" (t)
8606+ : "r" (a), "r" (&v->counter)
8607+ : "cc", "memory");
8608+
8609+ return t;
8610+}
8611+
8612+static __inline__ long atomic64_add_return_unchecked(long a, atomic64_unchecked_t *v)
8613+{
8614+ long t;
8615+
8616+ __asm__ __volatile__(
8617+ PPC_ATOMIC_ENTRY_BARRIER
8618 "1: ldarx %0,0,%2 # atomic64_add_return\n\
8619 add %0,%1,%0\n\
8620 stdcx. %0,0,%2 \n\
8621@@ -328,6 +546,36 @@ static __inline__ void atomic64_sub(long a, atomic64_t *v)
8622 long t;
8623
8624 __asm__ __volatile__(
8625+"1: ldarx %0,0,%3 # atomic64_sub\n"
8626+
8627+#ifdef CONFIG_PAX_REFCOUNT
8628+" mcrxr cr0\n"
8629+" subfo. %0,%2,%0\n"
8630+" bf 4*cr0+so, 3f\n"
8631+"2:.long " "0x00c00b00""\n"
8632+#else
8633+" subf %0,%2,%0\n"
8634+#endif
8635+
8636+"3:\n"
8637+" stdcx. %0,0,%3 \n\
8638+ bne- 1b"
8639+"4:"
8640+
8641+#ifdef CONFIG_PAX_REFCOUNT
8642+ _ASM_EXTABLE(2b, 4b)
8643+#endif
8644+
8645+ : "=&r" (t), "+m" (v->counter)
8646+ : "r" (a), "r" (&v->counter)
8647+ : "cc");
8648+}
8649+
8650+static __inline__ void atomic64_sub_unchecked(long a, atomic64_unchecked_t *v)
8651+{
8652+ long t;
8653+
8654+ __asm__ __volatile__(
8655 "1: ldarx %0,0,%3 # atomic64_sub\n\
8656 subf %0,%2,%0\n\
8657 stdcx. %0,0,%3 \n\
8658@@ -343,6 +591,40 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
8659
8660 __asm__ __volatile__(
8661 PPC_ATOMIC_ENTRY_BARRIER
8662+"1: ldarx %0,0,%2 # atomic64_sub_return\n"
8663+
8664+#ifdef CONFIG_PAX_REFCOUNT
8665+" mcrxr cr0\n"
8666+" subfo. %0,%1,%0\n"
8667+" bf 4*cr0+so, 3f\n"
8668+"2:.long " "0x00c00b00""\n"
8669+#else
8670+" subf %0,%1,%0\n"
8671+#endif
8672+
8673+"3:\n"
8674+" stdcx. %0,0,%2 \n\
8675+ bne- 1b\n"
8676+ PPC_ATOMIC_EXIT_BARRIER
8677+"4:"
8678+
8679+#ifdef CONFIG_PAX_REFCOUNT
8680+ _ASM_EXTABLE(2b, 4b)
8681+#endif
8682+
8683+ : "=&r" (t)
8684+ : "r" (a), "r" (&v->counter)
8685+ : "cc", "memory");
8686+
8687+ return t;
8688+}
8689+
8690+static __inline__ long atomic64_sub_return_unchecked(long a, atomic64_unchecked_t *v)
8691+{
8692+ long t;
8693+
8694+ __asm__ __volatile__(
8695+ PPC_ATOMIC_ENTRY_BARRIER
8696 "1: ldarx %0,0,%2 # atomic64_sub_return\n\
8697 subf %0,%1,%0\n\
8698 stdcx. %0,0,%2 \n\
8699@@ -355,36 +637,23 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
8700 return t;
8701 }
8702
8703-static __inline__ void atomic64_inc(atomic64_t *v)
8704-{
8705- long t;
8706+/*
8707+ * atomic64_inc - increment atomic variable
8708+ * @v: pointer of type atomic64_t
8709+ *
8710+ * Automatically increments @v by 1
8711+ */
8712+#define atomic64_inc(v) atomic64_add(1, (v))
8713+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8714
8715- __asm__ __volatile__(
8716-"1: ldarx %0,0,%2 # atomic64_inc\n\
8717- addic %0,%0,1\n\
8718- stdcx. %0,0,%2 \n\
8719- bne- 1b"
8720- : "=&r" (t), "+m" (v->counter)
8721- : "r" (&v->counter)
8722- : "cc", "xer");
8723+static __inline__ void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8724+{
8725+ atomic64_add_unchecked(1, v);
8726 }
8727
8728-static __inline__ long atomic64_inc_return(atomic64_t *v)
8729+static __inline__ int atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8730 {
8731- long t;
8732-
8733- __asm__ __volatile__(
8734- PPC_ATOMIC_ENTRY_BARRIER
8735-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8736- addic %0,%0,1\n\
8737- stdcx. %0,0,%1 \n\
8738- bne- 1b"
8739- PPC_ATOMIC_EXIT_BARRIER
8740- : "=&r" (t)
8741- : "r" (&v->counter)
8742- : "cc", "xer", "memory");
8743-
8744- return t;
8745+ return atomic64_add_return_unchecked(1, v);
8746 }
8747
8748 /*
8749@@ -397,36 +666,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8750 */
8751 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8752
8753-static __inline__ void atomic64_dec(atomic64_t *v)
8754+/*
8755+ * atomic64_dec - decrement atomic variable
8756+ * @v: pointer of type atomic64_t
8757+ *
8758+ * Atomically decrements @v by 1
8759+ */
8760+#define atomic64_dec(v) atomic64_sub(1, (v))
8761+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8762+
8763+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8764 {
8765- long t;
8766-
8767- __asm__ __volatile__(
8768-"1: ldarx %0,0,%2 # atomic64_dec\n\
8769- addic %0,%0,-1\n\
8770- stdcx. %0,0,%2\n\
8771- bne- 1b"
8772- : "=&r" (t), "+m" (v->counter)
8773- : "r" (&v->counter)
8774- : "cc", "xer");
8775-}
8776-
8777-static __inline__ long atomic64_dec_return(atomic64_t *v)
8778-{
8779- long t;
8780-
8781- __asm__ __volatile__(
8782- PPC_ATOMIC_ENTRY_BARRIER
8783-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8784- addic %0,%0,-1\n\
8785- stdcx. %0,0,%1\n\
8786- bne- 1b"
8787- PPC_ATOMIC_EXIT_BARRIER
8788- : "=&r" (t)
8789- : "r" (&v->counter)
8790- : "cc", "xer", "memory");
8791-
8792- return t;
8793+ atomic64_sub_unchecked(1, v);
8794 }
8795
8796 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8797@@ -459,6 +710,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8798 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8799 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8800
8801+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8802+{
8803+ return cmpxchg(&(v->counter), old, new);
8804+}
8805+
8806+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8807+{
8808+ return xchg(&(v->counter), new);
8809+}
8810+
8811 /**
8812 * atomic64_add_unless - add unless the number is a given value
8813 * @v: pointer of type atomic64_t
8814diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8815index bab79a1..4a3eabc 100644
8816--- a/arch/powerpc/include/asm/barrier.h
8817+++ b/arch/powerpc/include/asm/barrier.h
8818@@ -73,7 +73,7 @@
8819 do { \
8820 compiletime_assert_atomic_type(*p); \
8821 __lwsync(); \
8822- ACCESS_ONCE(*p) = (v); \
8823+ ACCESS_ONCE_RW(*p) = (v); \
8824 } while (0)
8825
8826 #define smp_load_acquire(p) \
8827diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8828index 34a05a1..a1f2c67 100644
8829--- a/arch/powerpc/include/asm/cache.h
8830+++ b/arch/powerpc/include/asm/cache.h
8831@@ -4,6 +4,7 @@
8832 #ifdef __KERNEL__
8833
8834 #include <asm/reg.h>
8835+#include <linux/const.h>
8836
8837 /* bytes per L1 cache line */
8838 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8839@@ -23,7 +24,7 @@
8840 #define L1_CACHE_SHIFT 7
8841 #endif
8842
8843-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8844+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8845
8846 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8847
8848diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8849index 888d8f3..66f581c 100644
8850--- a/arch/powerpc/include/asm/elf.h
8851+++ b/arch/powerpc/include/asm/elf.h
8852@@ -28,8 +28,19 @@
8853 the loader. We need to make sure that it is out of the way of the program
8854 that it will "exec", and that there is sufficient room for the brk. */
8855
8856-extern unsigned long randomize_et_dyn(unsigned long base);
8857-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
8858+#define ELF_ET_DYN_BASE (0x20000000)
8859+
8860+#ifdef CONFIG_PAX_ASLR
8861+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8862+
8863+#ifdef __powerpc64__
8864+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8865+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8866+#else
8867+#define PAX_DELTA_MMAP_LEN 15
8868+#define PAX_DELTA_STACK_LEN 15
8869+#endif
8870+#endif
8871
8872 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8873
8874@@ -129,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8875 (0x7ff >> (PAGE_SHIFT - 12)) : \
8876 (0x3ffff >> (PAGE_SHIFT - 12)))
8877
8878-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8879-#define arch_randomize_brk arch_randomize_brk
8880-
8881-
8882 #ifdef CONFIG_SPU_BASE
8883 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8884 #define NT_SPU 1
8885diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8886index 8196e9c..d83a9f3 100644
8887--- a/arch/powerpc/include/asm/exec.h
8888+++ b/arch/powerpc/include/asm/exec.h
8889@@ -4,6 +4,6 @@
8890 #ifndef _ASM_POWERPC_EXEC_H
8891 #define _ASM_POWERPC_EXEC_H
8892
8893-extern unsigned long arch_align_stack(unsigned long sp);
8894+#define arch_align_stack(x) ((x) & ~0xfUL)
8895
8896 #endif /* _ASM_POWERPC_EXEC_H */
8897diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8898index 5acabbd..7ea14fa 100644
8899--- a/arch/powerpc/include/asm/kmap_types.h
8900+++ b/arch/powerpc/include/asm/kmap_types.h
8901@@ -10,7 +10,7 @@
8902 * 2 of the License, or (at your option) any later version.
8903 */
8904
8905-#define KM_TYPE_NR 16
8906+#define KM_TYPE_NR 17
8907
8908 #endif /* __KERNEL__ */
8909 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8910diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8911index b8da913..c02b593 100644
8912--- a/arch/powerpc/include/asm/local.h
8913+++ b/arch/powerpc/include/asm/local.h
8914@@ -9,21 +9,65 @@ typedef struct
8915 atomic_long_t a;
8916 } local_t;
8917
8918+typedef struct
8919+{
8920+ atomic_long_unchecked_t a;
8921+} local_unchecked_t;
8922+
8923 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8924
8925 #define local_read(l) atomic_long_read(&(l)->a)
8926+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8927 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8928+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8929
8930 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8931+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8932 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8933+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8934 #define local_inc(l) atomic_long_inc(&(l)->a)
8935+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8936 #define local_dec(l) atomic_long_dec(&(l)->a)
8937+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8938
8939 static __inline__ long local_add_return(long a, local_t *l)
8940 {
8941 long t;
8942
8943 __asm__ __volatile__(
8944+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8945+
8946+#ifdef CONFIG_PAX_REFCOUNT
8947+" mcrxr cr0\n"
8948+" addo. %0,%1,%0\n"
8949+" bf 4*cr0+so, 3f\n"
8950+"2:.long " "0x00c00b00""\n"
8951+#else
8952+" add %0,%1,%0\n"
8953+#endif
8954+
8955+"3:\n"
8956+ PPC405_ERR77(0,%2)
8957+ PPC_STLCX "%0,0,%2 \n\
8958+ bne- 1b"
8959+
8960+#ifdef CONFIG_PAX_REFCOUNT
8961+"\n4:\n"
8962+ _ASM_EXTABLE(2b, 4b)
8963+#endif
8964+
8965+ : "=&r" (t)
8966+ : "r" (a), "r" (&(l->a.counter))
8967+ : "cc", "memory");
8968+
8969+ return t;
8970+}
8971+
8972+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8973+{
8974+ long t;
8975+
8976+ __asm__ __volatile__(
8977 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8978 add %0,%1,%0\n"
8979 PPC405_ERR77(0,%2)
8980@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8981
8982 #define local_cmpxchg(l, o, n) \
8983 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8984+#define local_cmpxchg_unchecked(l, o, n) \
8985+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8986 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8987
8988 /**
8989diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8990index 8565c25..2865190 100644
8991--- a/arch/powerpc/include/asm/mman.h
8992+++ b/arch/powerpc/include/asm/mman.h
8993@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8994 }
8995 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8996
8997-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8998+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8999 {
9000 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
9001 }
9002diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
9003index 26fe1ae..987ffc5 100644
9004--- a/arch/powerpc/include/asm/page.h
9005+++ b/arch/powerpc/include/asm/page.h
9006@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
9007 * and needs to be executable. This means the whole heap ends
9008 * up being executable.
9009 */
9010-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
9011- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9012+#define VM_DATA_DEFAULT_FLAGS32 \
9013+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
9014+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9015
9016 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
9017 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9018@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
9019 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
9020 #endif
9021
9022+#define ktla_ktva(addr) (addr)
9023+#define ktva_ktla(addr) (addr)
9024+
9025 #ifndef CONFIG_PPC_BOOK3S_64
9026 /*
9027 * Use the top bit of the higher-level page table entries to indicate whether
9028diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
9029index 88693ce..ac6f9ab 100644
9030--- a/arch/powerpc/include/asm/page_64.h
9031+++ b/arch/powerpc/include/asm/page_64.h
9032@@ -153,15 +153,18 @@ do { \
9033 * stack by default, so in the absence of a PT_GNU_STACK program header
9034 * we turn execute permission off.
9035 */
9036-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
9037- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9038+#define VM_STACK_DEFAULT_FLAGS32 \
9039+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
9040+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9041
9042 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
9043 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9044
9045+#ifndef CONFIG_PAX_PAGEEXEC
9046 #define VM_STACK_DEFAULT_FLAGS \
9047 (is_32bit_task() ? \
9048 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
9049+#endif
9050
9051 #include <asm-generic/getorder.h>
9052
9053diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
9054index 4b0be20..c15a27d 100644
9055--- a/arch/powerpc/include/asm/pgalloc-64.h
9056+++ b/arch/powerpc/include/asm/pgalloc-64.h
9057@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9058 #ifndef CONFIG_PPC_64K_PAGES
9059
9060 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
9061+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
9062
9063 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
9064 {
9065@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
9066 pud_set(pud, (unsigned long)pmd);
9067 }
9068
9069+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
9070+{
9071+ pud_populate(mm, pud, pmd);
9072+}
9073+
9074 #define pmd_populate(mm, pmd, pte_page) \
9075 pmd_populate_kernel(mm, pmd, page_address(pte_page))
9076 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
9077@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
9078 #endif
9079
9080 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
9081+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
9082
9083 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
9084 pte_t *pte)
9085diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
9086index d98c1ec..9f61569 100644
9087--- a/arch/powerpc/include/asm/pgtable.h
9088+++ b/arch/powerpc/include/asm/pgtable.h
9089@@ -2,6 +2,7 @@
9090 #define _ASM_POWERPC_PGTABLE_H
9091 #ifdef __KERNEL__
9092
9093+#include <linux/const.h>
9094 #ifndef __ASSEMBLY__
9095 #include <linux/mmdebug.h>
9096 #include <asm/processor.h> /* For TASK_SIZE */
9097diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
9098index 4aad413..85d86bf 100644
9099--- a/arch/powerpc/include/asm/pte-hash32.h
9100+++ b/arch/powerpc/include/asm/pte-hash32.h
9101@@ -21,6 +21,7 @@
9102 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
9103 #define _PAGE_USER 0x004 /* usermode access allowed */
9104 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
9105+#define _PAGE_EXEC _PAGE_GUARDED
9106 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
9107 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
9108 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
9109diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
9110index 0c05059..7e056e4 100644
9111--- a/arch/powerpc/include/asm/reg.h
9112+++ b/arch/powerpc/include/asm/reg.h
9113@@ -251,6 +251,7 @@
9114 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
9115 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
9116 #define DSISR_NOHPTE 0x40000000 /* no translation found */
9117+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
9118 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
9119 #define DSISR_ISSTORE 0x02000000 /* access was a store */
9120 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
9121diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
9122index 5a6614a..d89995d1 100644
9123--- a/arch/powerpc/include/asm/smp.h
9124+++ b/arch/powerpc/include/asm/smp.h
9125@@ -51,7 +51,7 @@ struct smp_ops_t {
9126 int (*cpu_disable)(void);
9127 void (*cpu_die)(unsigned int nr);
9128 int (*cpu_bootable)(unsigned int nr);
9129-};
9130+} __no_const;
9131
9132 extern void smp_send_debugger_break(void);
9133 extern void start_secondary_resume(void);
9134diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
9135index 4dbe072..b803275 100644
9136--- a/arch/powerpc/include/asm/spinlock.h
9137+++ b/arch/powerpc/include/asm/spinlock.h
9138@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
9139 __asm__ __volatile__(
9140 "1: " PPC_LWARX(%0,0,%1,1) "\n"
9141 __DO_SIGN_EXTEND
9142-" addic. %0,%0,1\n\
9143- ble- 2f\n"
9144+
9145+#ifdef CONFIG_PAX_REFCOUNT
9146+" mcrxr cr0\n"
9147+" addico. %0,%0,1\n"
9148+" bf 4*cr0+so, 3f\n"
9149+"2:.long " "0x00c00b00""\n"
9150+#else
9151+" addic. %0,%0,1\n"
9152+#endif
9153+
9154+"3:\n"
9155+ "ble- 4f\n"
9156 PPC405_ERR77(0,%1)
9157 " stwcx. %0,0,%1\n\
9158 bne- 1b\n"
9159 PPC_ACQUIRE_BARRIER
9160-"2:" : "=&r" (tmp)
9161+"4:"
9162+
9163+#ifdef CONFIG_PAX_REFCOUNT
9164+ _ASM_EXTABLE(2b,4b)
9165+#endif
9166+
9167+ : "=&r" (tmp)
9168 : "r" (&rw->lock)
9169 : "cr0", "xer", "memory");
9170
9171@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
9172 __asm__ __volatile__(
9173 "# read_unlock\n\t"
9174 PPC_RELEASE_BARRIER
9175-"1: lwarx %0,0,%1\n\
9176- addic %0,%0,-1\n"
9177+"1: lwarx %0,0,%1\n"
9178+
9179+#ifdef CONFIG_PAX_REFCOUNT
9180+" mcrxr cr0\n"
9181+" addico. %0,%0,-1\n"
9182+" bf 4*cr0+so, 3f\n"
9183+"2:.long " "0x00c00b00""\n"
9184+#else
9185+" addic. %0,%0,-1\n"
9186+#endif
9187+
9188+"3:\n"
9189 PPC405_ERR77(0,%1)
9190 " stwcx. %0,0,%1\n\
9191 bne- 1b"
9192+
9193+#ifdef CONFIG_PAX_REFCOUNT
9194+"\n4:\n"
9195+ _ASM_EXTABLE(2b, 4b)
9196+#endif
9197+
9198 : "=&r"(tmp)
9199 : "r"(&rw->lock)
9200 : "cr0", "xer", "memory");
9201diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
9202index b034ecd..af7e31f 100644
9203--- a/arch/powerpc/include/asm/thread_info.h
9204+++ b/arch/powerpc/include/asm/thread_info.h
9205@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
9206 #if defined(CONFIG_PPC64)
9207 #define TIF_ELF2ABI 18 /* function descriptors must die! */
9208 #endif
9209+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
9210+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
9211
9212 /* as above, but as bit values */
9213 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
9214@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
9215 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9216 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
9217 #define _TIF_NOHZ (1<<TIF_NOHZ)
9218+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9219 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
9220 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
9221- _TIF_NOHZ)
9222+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
9223
9224 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
9225 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
9226diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
9227index 9485b43..3bd3c16 100644
9228--- a/arch/powerpc/include/asm/uaccess.h
9229+++ b/arch/powerpc/include/asm/uaccess.h
9230@@ -58,6 +58,7 @@
9231
9232 #endif
9233
9234+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9235 #define access_ok(type, addr, size) \
9236 (__chk_user_ptr(addr), \
9237 __access_ok((__force unsigned long)(addr), (size), get_fs()))
9238@@ -318,52 +319,6 @@ do { \
9239 extern unsigned long __copy_tofrom_user(void __user *to,
9240 const void __user *from, unsigned long size);
9241
9242-#ifndef __powerpc64__
9243-
9244-static inline unsigned long copy_from_user(void *to,
9245- const void __user *from, unsigned long n)
9246-{
9247- unsigned long over;
9248-
9249- if (access_ok(VERIFY_READ, from, n))
9250- return __copy_tofrom_user((__force void __user *)to, from, n);
9251- if ((unsigned long)from < TASK_SIZE) {
9252- over = (unsigned long)from + n - TASK_SIZE;
9253- return __copy_tofrom_user((__force void __user *)to, from,
9254- n - over) + over;
9255- }
9256- return n;
9257-}
9258-
9259-static inline unsigned long copy_to_user(void __user *to,
9260- const void *from, unsigned long n)
9261-{
9262- unsigned long over;
9263-
9264- if (access_ok(VERIFY_WRITE, to, n))
9265- return __copy_tofrom_user(to, (__force void __user *)from, n);
9266- if ((unsigned long)to < TASK_SIZE) {
9267- over = (unsigned long)to + n - TASK_SIZE;
9268- return __copy_tofrom_user(to, (__force void __user *)from,
9269- n - over) + over;
9270- }
9271- return n;
9272-}
9273-
9274-#else /* __powerpc64__ */
9275-
9276-#define __copy_in_user(to, from, size) \
9277- __copy_tofrom_user((to), (from), (size))
9278-
9279-extern unsigned long copy_from_user(void *to, const void __user *from,
9280- unsigned long n);
9281-extern unsigned long copy_to_user(void __user *to, const void *from,
9282- unsigned long n);
9283-extern unsigned long copy_in_user(void __user *to, const void __user *from,
9284- unsigned long n);
9285-
9286-#endif /* __powerpc64__ */
9287-
9288 static inline unsigned long __copy_from_user_inatomic(void *to,
9289 const void __user *from, unsigned long n)
9290 {
9291@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
9292 if (ret == 0)
9293 return 0;
9294 }
9295+
9296+ if (!__builtin_constant_p(n))
9297+ check_object_size(to, n, false);
9298+
9299 return __copy_tofrom_user((__force void __user *)to, from, n);
9300 }
9301
9302@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
9303 if (ret == 0)
9304 return 0;
9305 }
9306+
9307+ if (!__builtin_constant_p(n))
9308+ check_object_size(from, n, true);
9309+
9310 return __copy_tofrom_user(to, (__force const void __user *)from, n);
9311 }
9312
9313@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
9314 return __copy_to_user_inatomic(to, from, size);
9315 }
9316
9317+#ifndef __powerpc64__
9318+
9319+static inline unsigned long __must_check copy_from_user(void *to,
9320+ const void __user *from, unsigned long n)
9321+{
9322+ unsigned long over;
9323+
9324+ if ((long)n < 0)
9325+ return n;
9326+
9327+ if (access_ok(VERIFY_READ, from, n)) {
9328+ if (!__builtin_constant_p(n))
9329+ check_object_size(to, n, false);
9330+ return __copy_tofrom_user((__force void __user *)to, from, n);
9331+ }
9332+ if ((unsigned long)from < TASK_SIZE) {
9333+ over = (unsigned long)from + n - TASK_SIZE;
9334+ if (!__builtin_constant_p(n - over))
9335+ check_object_size(to, n - over, false);
9336+ return __copy_tofrom_user((__force void __user *)to, from,
9337+ n - over) + over;
9338+ }
9339+ return n;
9340+}
9341+
9342+static inline unsigned long __must_check copy_to_user(void __user *to,
9343+ const void *from, unsigned long n)
9344+{
9345+ unsigned long over;
9346+
9347+ if ((long)n < 0)
9348+ return n;
9349+
9350+ if (access_ok(VERIFY_WRITE, to, n)) {
9351+ if (!__builtin_constant_p(n))
9352+ check_object_size(from, n, true);
9353+ return __copy_tofrom_user(to, (__force void __user *)from, n);
9354+ }
9355+ if ((unsigned long)to < TASK_SIZE) {
9356+ over = (unsigned long)to + n - TASK_SIZE;
9357+ if (!__builtin_constant_p(n))
9358+ check_object_size(from, n - over, true);
9359+ return __copy_tofrom_user(to, (__force void __user *)from,
9360+ n - over) + over;
9361+ }
9362+ return n;
9363+}
9364+
9365+#else /* __powerpc64__ */
9366+
9367+#define __copy_in_user(to, from, size) \
9368+ __copy_tofrom_user((to), (from), (size))
9369+
9370+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
9371+{
9372+ if ((long)n < 0 || n > INT_MAX)
9373+ return n;
9374+
9375+ if (!__builtin_constant_p(n))
9376+ check_object_size(to, n, false);
9377+
9378+ if (likely(access_ok(VERIFY_READ, from, n)))
9379+ n = __copy_from_user(to, from, n);
9380+ else
9381+ memset(to, 0, n);
9382+ return n;
9383+}
9384+
9385+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
9386+{
9387+ if ((long)n < 0 || n > INT_MAX)
9388+ return n;
9389+
9390+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
9391+ if (!__builtin_constant_p(n))
9392+ check_object_size(from, n, true);
9393+ n = __copy_to_user(to, from, n);
9394+ }
9395+ return n;
9396+}
9397+
9398+extern unsigned long copy_in_user(void __user *to, const void __user *from,
9399+ unsigned long n);
9400+
9401+#endif /* __powerpc64__ */
9402+
9403 extern unsigned long __clear_user(void __user *addr, unsigned long size);
9404
9405 static inline unsigned long clear_user(void __user *addr, unsigned long size)
9406diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
9407index 670c312..60c2b52 100644
9408--- a/arch/powerpc/kernel/Makefile
9409+++ b/arch/powerpc/kernel/Makefile
9410@@ -27,6 +27,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
9411 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
9412 endif
9413
9414+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
9415+
9416 obj-y := cputable.o ptrace.o syscalls.o \
9417 irq.o align.o signal_32.o pmc.o vdso.o \
9418 process.o systbl.o idle.o \
9419diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
9420index bb9cac6..5181202 100644
9421--- a/arch/powerpc/kernel/exceptions-64e.S
9422+++ b/arch/powerpc/kernel/exceptions-64e.S
9423@@ -1010,6 +1010,7 @@ storage_fault_common:
9424 std r14,_DAR(r1)
9425 std r15,_DSISR(r1)
9426 addi r3,r1,STACK_FRAME_OVERHEAD
9427+ bl save_nvgprs
9428 mr r4,r14
9429 mr r5,r15
9430 ld r14,PACA_EXGEN+EX_R14(r13)
9431@@ -1018,8 +1019,7 @@ storage_fault_common:
9432 cmpdi r3,0
9433 bne- 1f
9434 b ret_from_except_lite
9435-1: bl save_nvgprs
9436- mr r5,r3
9437+1: mr r5,r3
9438 addi r3,r1,STACK_FRAME_OVERHEAD
9439 ld r4,_DAR(r1)
9440 bl bad_page_fault
9441diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
9442index 050f79a..f385bfe 100644
9443--- a/arch/powerpc/kernel/exceptions-64s.S
9444+++ b/arch/powerpc/kernel/exceptions-64s.S
9445@@ -1593,10 +1593,10 @@ handle_page_fault:
9446 11: ld r4,_DAR(r1)
9447 ld r5,_DSISR(r1)
9448 addi r3,r1,STACK_FRAME_OVERHEAD
9449+ bl save_nvgprs
9450 bl do_page_fault
9451 cmpdi r3,0
9452 beq+ 12f
9453- bl save_nvgprs
9454 mr r5,r3
9455 addi r3,r1,STACK_FRAME_OVERHEAD
9456 lwz r4,_DAR(r1)
9457diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
9458index 4c5891d..a5d88bb 100644
9459--- a/arch/powerpc/kernel/irq.c
9460+++ b/arch/powerpc/kernel/irq.c
9461@@ -461,6 +461,8 @@ void migrate_irqs(void)
9462 }
9463 #endif
9464
9465+extern void gr_handle_kernel_exploit(void);
9466+
9467 static inline void check_stack_overflow(void)
9468 {
9469 #ifdef CONFIG_DEBUG_STACKOVERFLOW
9470@@ -473,6 +475,7 @@ static inline void check_stack_overflow(void)
9471 printk("do_IRQ: stack overflow: %ld\n",
9472 sp - sizeof(struct thread_info));
9473 dump_stack();
9474+ gr_handle_kernel_exploit();
9475 }
9476 #endif
9477 }
9478diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
9479index 6cff040..74ac5d1b 100644
9480--- a/arch/powerpc/kernel/module_32.c
9481+++ b/arch/powerpc/kernel/module_32.c
9482@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
9483 me->arch.core_plt_section = i;
9484 }
9485 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
9486- printk("Module doesn't contain .plt or .init.plt sections.\n");
9487+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
9488 return -ENOEXEC;
9489 }
9490
9491@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
9492
9493 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
9494 /* Init, or core PLT? */
9495- if (location >= mod->module_core
9496- && location < mod->module_core + mod->core_size)
9497+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
9498+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
9499 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
9500- else
9501+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
9502+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
9503 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
9504+ else {
9505+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
9506+ return ~0UL;
9507+ }
9508
9509 /* Find this entry, or if that fails, the next avail. entry */
9510 while (entry->jump[0]) {
9511@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
9512 }
9513 #ifdef CONFIG_DYNAMIC_FTRACE
9514 module->arch.tramp =
9515- do_plt_call(module->module_core,
9516+ do_plt_call(module->module_core_rx,
9517 (unsigned long)ftrace_caller,
9518 sechdrs, module);
9519 #endif
9520diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
9521index bf44ae9..6d2ce71 100644
9522--- a/arch/powerpc/kernel/process.c
9523+++ b/arch/powerpc/kernel/process.c
9524@@ -1039,8 +1039,8 @@ void show_regs(struct pt_regs * regs)
9525 * Lookup NIP late so we have the best change of getting the
9526 * above info out without failing
9527 */
9528- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
9529- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
9530+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
9531+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
9532 #endif
9533 show_stack(current, (unsigned long *) regs->gpr[1]);
9534 if (!user_mode(regs))
9535@@ -1558,10 +1558,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9536 newsp = stack[0];
9537 ip = stack[STACK_FRAME_LR_SAVE];
9538 if (!firstframe || ip != lr) {
9539- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
9540+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9541 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9542 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
9543- printk(" (%pS)",
9544+ printk(" (%pA)",
9545 (void *)current->ret_stack[curr_frame].ret);
9546 curr_frame--;
9547 }
9548@@ -1581,7 +1581,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9549 struct pt_regs *regs = (struct pt_regs *)
9550 (sp + STACK_FRAME_OVERHEAD);
9551 lr = regs->link;
9552- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
9553+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
9554 regs->trap, (void *)regs->nip, (void *)lr);
9555 firstframe = 1;
9556 }
9557@@ -1617,58 +1617,3 @@ void notrace __ppc64_runlatch_off(void)
9558 mtspr(SPRN_CTRLT, ctrl);
9559 }
9560 #endif /* CONFIG_PPC64 */
9561-
9562-unsigned long arch_align_stack(unsigned long sp)
9563-{
9564- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9565- sp -= get_random_int() & ~PAGE_MASK;
9566- return sp & ~0xf;
9567-}
9568-
9569-static inline unsigned long brk_rnd(void)
9570-{
9571- unsigned long rnd = 0;
9572-
9573- /* 8MB for 32bit, 1GB for 64bit */
9574- if (is_32bit_task())
9575- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9576- else
9577- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9578-
9579- return rnd << PAGE_SHIFT;
9580-}
9581-
9582-unsigned long arch_randomize_brk(struct mm_struct *mm)
9583-{
9584- unsigned long base = mm->brk;
9585- unsigned long ret;
9586-
9587-#ifdef CONFIG_PPC_STD_MMU_64
9588- /*
9589- * If we are using 1TB segments and we are allowed to randomise
9590- * the heap, we can put it above 1TB so it is backed by a 1TB
9591- * segment. Otherwise the heap will be in the bottom 1TB
9592- * which always uses 256MB segments and this may result in a
9593- * performance penalty.
9594- */
9595- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9596- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9597-#endif
9598-
9599- ret = PAGE_ALIGN(base + brk_rnd());
9600-
9601- if (ret < mm->brk)
9602- return mm->brk;
9603-
9604- return ret;
9605-}
9606-
9607-unsigned long randomize_et_dyn(unsigned long base)
9608-{
9609- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
9610-
9611- if (ret < base)
9612- return base;
9613-
9614- return ret;
9615-}
9616diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9617index 2e3d2bf..35df241 100644
9618--- a/arch/powerpc/kernel/ptrace.c
9619+++ b/arch/powerpc/kernel/ptrace.c
9620@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9621 return ret;
9622 }
9623
9624+#ifdef CONFIG_GRKERNSEC_SETXID
9625+extern void gr_delayed_cred_worker(void);
9626+#endif
9627+
9628 /*
9629 * We must return the syscall number to actually look up in the table.
9630 * This can be -1L to skip running any syscall at all.
9631@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9632
9633 secure_computing_strict(regs->gpr[0]);
9634
9635+#ifdef CONFIG_GRKERNSEC_SETXID
9636+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9637+ gr_delayed_cred_worker();
9638+#endif
9639+
9640 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9641 tracehook_report_syscall_entry(regs))
9642 /*
9643@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9644 {
9645 int step;
9646
9647+#ifdef CONFIG_GRKERNSEC_SETXID
9648+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9649+ gr_delayed_cred_worker();
9650+#endif
9651+
9652 audit_syscall_exit(regs);
9653
9654 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9655diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9656index b171001..4ac7ac5 100644
9657--- a/arch/powerpc/kernel/signal_32.c
9658+++ b/arch/powerpc/kernel/signal_32.c
9659@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9660 /* Save user registers on the stack */
9661 frame = &rt_sf->uc.uc_mcontext;
9662 addr = frame;
9663- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9664+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9665 sigret = 0;
9666 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9667 } else {
9668diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9669index 2cb0c94..c0c0bc9 100644
9670--- a/arch/powerpc/kernel/signal_64.c
9671+++ b/arch/powerpc/kernel/signal_64.c
9672@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9673 current->thread.fp_state.fpscr = 0;
9674
9675 /* Set up to return from userspace. */
9676- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9677+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9678 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9679 } else {
9680 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9681diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9682index 0dc43f9..a885d33 100644
9683--- a/arch/powerpc/kernel/traps.c
9684+++ b/arch/powerpc/kernel/traps.c
9685@@ -36,6 +36,7 @@
9686 #include <linux/debugfs.h>
9687 #include <linux/ratelimit.h>
9688 #include <linux/context_tracking.h>
9689+#include <linux/uaccess.h>
9690
9691 #include <asm/emulated_ops.h>
9692 #include <asm/pgtable.h>
9693@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9694 return flags;
9695 }
9696
9697+extern void gr_handle_kernel_exploit(void);
9698+
9699 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9700 int signr)
9701 {
9702@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9703 panic("Fatal exception in interrupt");
9704 if (panic_on_oops)
9705 panic("Fatal exception");
9706+
9707+ gr_handle_kernel_exploit();
9708+
9709 do_exit(signr);
9710 }
9711
9712@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9713 enum ctx_state prev_state = exception_enter();
9714 unsigned int reason = get_reason(regs);
9715
9716+#ifdef CONFIG_PAX_REFCOUNT
9717+ unsigned int bkpt;
9718+ const struct exception_table_entry *entry;
9719+
9720+ if (reason & REASON_ILLEGAL) {
9721+ /* Check if PaX bad instruction */
9722+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9723+ current->thread.trap_nr = 0;
9724+ pax_report_refcount_overflow(regs);
9725+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9726+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9727+ regs->nip = entry->fixup;
9728+ return;
9729+ }
9730+ /* fixup_exception() could not handle */
9731+ goto bail;
9732+ }
9733+ }
9734+#endif
9735+
9736 /* We can now get here via a FP Unavailable exception if the core
9737 * has no FPU, in that case the reason flags will be 0 */
9738
9739diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9740index f174351..5722009 100644
9741--- a/arch/powerpc/kernel/vdso.c
9742+++ b/arch/powerpc/kernel/vdso.c
9743@@ -35,6 +35,7 @@
9744 #include <asm/vdso.h>
9745 #include <asm/vdso_datapage.h>
9746 #include <asm/setup.h>
9747+#include <asm/mman.h>
9748
9749 #undef DEBUG
9750
9751@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9752 vdso_base = VDSO32_MBASE;
9753 #endif
9754
9755- current->mm->context.vdso_base = 0;
9756+ current->mm->context.vdso_base = ~0UL;
9757
9758 /* vDSO has a problem and was disabled, just don't "enable" it for the
9759 * process
9760@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9761 vdso_base = get_unmapped_area(NULL, vdso_base,
9762 (vdso_pages << PAGE_SHIFT) +
9763 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9764- 0, 0);
9765+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9766 if (IS_ERR_VALUE(vdso_base)) {
9767 rc = vdso_base;
9768 goto fail_mmapsem;
9769diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9770index 4c79284..0e462c3 100644
9771--- a/arch/powerpc/kvm/powerpc.c
9772+++ b/arch/powerpc/kvm/powerpc.c
9773@@ -1338,7 +1338,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9774 }
9775 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9776
9777-int kvm_arch_init(void *opaque)
9778+int kvm_arch_init(const void *opaque)
9779 {
9780 return 0;
9781 }
9782diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9783index 5eea6f3..5d10396 100644
9784--- a/arch/powerpc/lib/usercopy_64.c
9785+++ b/arch/powerpc/lib/usercopy_64.c
9786@@ -9,22 +9,6 @@
9787 #include <linux/module.h>
9788 #include <asm/uaccess.h>
9789
9790-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9791-{
9792- if (likely(access_ok(VERIFY_READ, from, n)))
9793- n = __copy_from_user(to, from, n);
9794- else
9795- memset(to, 0, n);
9796- return n;
9797-}
9798-
9799-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9800-{
9801- if (likely(access_ok(VERIFY_WRITE, to, n)))
9802- n = __copy_to_user(to, from, n);
9803- return n;
9804-}
9805-
9806 unsigned long copy_in_user(void __user *to, const void __user *from,
9807 unsigned long n)
9808 {
9809@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9810 return n;
9811 }
9812
9813-EXPORT_SYMBOL(copy_from_user);
9814-EXPORT_SYMBOL(copy_to_user);
9815 EXPORT_SYMBOL(copy_in_user);
9816
9817diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9818index 51ab9e7..7d3c78b 100644
9819--- a/arch/powerpc/mm/fault.c
9820+++ b/arch/powerpc/mm/fault.c
9821@@ -33,6 +33,10 @@
9822 #include <linux/magic.h>
9823 #include <linux/ratelimit.h>
9824 #include <linux/context_tracking.h>
9825+#include <linux/slab.h>
9826+#include <linux/pagemap.h>
9827+#include <linux/compiler.h>
9828+#include <linux/unistd.h>
9829
9830 #include <asm/firmware.h>
9831 #include <asm/page.h>
9832@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9833 }
9834 #endif
9835
9836+#ifdef CONFIG_PAX_PAGEEXEC
9837+/*
9838+ * PaX: decide what to do with offenders (regs->nip = fault address)
9839+ *
9840+ * returns 1 when task should be killed
9841+ */
9842+static int pax_handle_fetch_fault(struct pt_regs *regs)
9843+{
9844+ return 1;
9845+}
9846+
9847+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9848+{
9849+ unsigned long i;
9850+
9851+ printk(KERN_ERR "PAX: bytes at PC: ");
9852+ for (i = 0; i < 5; i++) {
9853+ unsigned int c;
9854+ if (get_user(c, (unsigned int __user *)pc+i))
9855+ printk(KERN_CONT "???????? ");
9856+ else
9857+ printk(KERN_CONT "%08x ", c);
9858+ }
9859+ printk("\n");
9860+}
9861+#endif
9862+
9863 /*
9864 * Check whether the instruction at regs->nip is a store using
9865 * an update addressing form which will update r1.
9866@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9867 * indicate errors in DSISR but can validly be set in SRR1.
9868 */
9869 if (trap == 0x400)
9870- error_code &= 0x48200000;
9871+ error_code &= 0x58200000;
9872 else
9873 is_write = error_code & DSISR_ISSTORE;
9874 #else
9875@@ -378,7 +409,7 @@ good_area:
9876 * "undefined". Of those that can be set, this is the only
9877 * one which seems bad.
9878 */
9879- if (error_code & 0x10000000)
9880+ if (error_code & DSISR_GUARDED)
9881 /* Guarded storage error. */
9882 goto bad_area;
9883 #endif /* CONFIG_8xx */
9884@@ -393,7 +424,7 @@ good_area:
9885 * processors use the same I/D cache coherency mechanism
9886 * as embedded.
9887 */
9888- if (error_code & DSISR_PROTFAULT)
9889+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9890 goto bad_area;
9891 #endif /* CONFIG_PPC_STD_MMU */
9892
9893@@ -483,6 +514,23 @@ bad_area:
9894 bad_area_nosemaphore:
9895 /* User mode accesses cause a SIGSEGV */
9896 if (user_mode(regs)) {
9897+
9898+#ifdef CONFIG_PAX_PAGEEXEC
9899+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9900+#ifdef CONFIG_PPC_STD_MMU
9901+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9902+#else
9903+ if (is_exec && regs->nip == address) {
9904+#endif
9905+ switch (pax_handle_fetch_fault(regs)) {
9906+ }
9907+
9908+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9909+ do_group_exit(SIGKILL);
9910+ }
9911+ }
9912+#endif
9913+
9914 _exception(SIGSEGV, regs, code, address);
9915 goto bail;
9916 }
9917diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9918index cb8bdbe..cde4bc7 100644
9919--- a/arch/powerpc/mm/mmap.c
9920+++ b/arch/powerpc/mm/mmap.c
9921@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9922 return sysctl_legacy_va_layout;
9923 }
9924
9925-static unsigned long mmap_rnd(void)
9926+static unsigned long mmap_rnd(struct mm_struct *mm)
9927 {
9928 unsigned long rnd = 0;
9929
9930+#ifdef CONFIG_PAX_RANDMMAP
9931+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9932+#endif
9933+
9934 if (current->flags & PF_RANDOMIZE) {
9935 /* 8MB for 32bit, 1GB for 64bit */
9936 if (is_32bit_task())
9937@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9938 return rnd << PAGE_SHIFT;
9939 }
9940
9941-static inline unsigned long mmap_base(void)
9942+static inline unsigned long mmap_base(struct mm_struct *mm)
9943 {
9944 unsigned long gap = rlimit(RLIMIT_STACK);
9945
9946@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9947 else if (gap > MAX_GAP)
9948 gap = MAX_GAP;
9949
9950- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9951+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9952 }
9953
9954 /*
9955@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9956 */
9957 if (mmap_is_legacy()) {
9958 mm->mmap_base = TASK_UNMAPPED_BASE;
9959+
9960+#ifdef CONFIG_PAX_RANDMMAP
9961+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9962+ mm->mmap_base += mm->delta_mmap;
9963+#endif
9964+
9965 mm->get_unmapped_area = arch_get_unmapped_area;
9966 } else {
9967- mm->mmap_base = mmap_base();
9968+ mm->mmap_base = mmap_base(mm);
9969+
9970+#ifdef CONFIG_PAX_RANDMMAP
9971+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9972+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9973+#endif
9974+
9975 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9976 }
9977 }
9978diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9979index b0c75cc..ef7fb93 100644
9980--- a/arch/powerpc/mm/slice.c
9981+++ b/arch/powerpc/mm/slice.c
9982@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9983 if ((mm->task_size - len) < addr)
9984 return 0;
9985 vma = find_vma(mm, addr);
9986- return (!vma || (addr + len) <= vma->vm_start);
9987+ return check_heap_stack_gap(vma, addr, len, 0);
9988 }
9989
9990 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9991@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9992 info.align_offset = 0;
9993
9994 addr = TASK_UNMAPPED_BASE;
9995+
9996+#ifdef CONFIG_PAX_RANDMMAP
9997+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9998+ addr += mm->delta_mmap;
9999+#endif
10000+
10001 while (addr < TASK_SIZE) {
10002 info.low_limit = addr;
10003 if (!slice_scan_available(addr, available, 1, &addr))
10004@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
10005 if (fixed && addr > (mm->task_size - len))
10006 return -ENOMEM;
10007
10008+#ifdef CONFIG_PAX_RANDMMAP
10009+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
10010+ addr = 0;
10011+#endif
10012+
10013 /* If hint, make sure it matches our alignment restrictions */
10014 if (!fixed && addr) {
10015 addr = _ALIGN_UP(addr, 1ul << pshift);
10016diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
10017index 3afa6f4..40c53ff 100644
10018--- a/arch/powerpc/net/bpf_jit_comp.c
10019+++ b/arch/powerpc/net/bpf_jit_comp.c
10020@@ -697,5 +697,6 @@ void bpf_jit_free(struct bpf_prog *fp)
10021 {
10022 if (fp->jited)
10023 module_free(NULL, fp->bpf_func);
10024- kfree(fp);
10025+
10026+ bpf_prog_unlock_free(fp);
10027 }
10028diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10029index 4278acf..67fd0e6 100644
10030--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10031+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10032@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
10033 }
10034
10035 static struct pci_ops scc_pciex_pci_ops = {
10036- scc_pciex_read_config,
10037- scc_pciex_write_config,
10038+ .read = scc_pciex_read_config,
10039+ .write = scc_pciex_write_config,
10040 };
10041
10042 static void pciex_clear_intr_all(unsigned int __iomem *base)
10043diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
10044index d966bbe..372124a 100644
10045--- a/arch/powerpc/platforms/cell/spufs/file.c
10046+++ b/arch/powerpc/platforms/cell/spufs/file.c
10047@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10048 return VM_FAULT_NOPAGE;
10049 }
10050
10051-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
10052+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
10053 unsigned long address,
10054- void *buf, int len, int write)
10055+ void *buf, size_t len, int write)
10056 {
10057 struct spu_context *ctx = vma->vm_file->private_data;
10058 unsigned long offset = address - vma->vm_start;
10059diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
10060index fa934fe..c296056 100644
10061--- a/arch/s390/include/asm/atomic.h
10062+++ b/arch/s390/include/asm/atomic.h
10063@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
10064 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
10065 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10066
10067+#define atomic64_read_unchecked(v) atomic64_read(v)
10068+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10069+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10070+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10071+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10072+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10073+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10074+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10075+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10076+
10077 #endif /* __ARCH_S390_ATOMIC__ */
10078diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
10079index 19ff956..8d39cb1 100644
10080--- a/arch/s390/include/asm/barrier.h
10081+++ b/arch/s390/include/asm/barrier.h
10082@@ -37,7 +37,7 @@
10083 do { \
10084 compiletime_assert_atomic_type(*p); \
10085 barrier(); \
10086- ACCESS_ONCE(*p) = (v); \
10087+ ACCESS_ONCE_RW(*p) = (v); \
10088 } while (0)
10089
10090 #define smp_load_acquire(p) \
10091diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
10092index 4d7ccac..d03d0ad 100644
10093--- a/arch/s390/include/asm/cache.h
10094+++ b/arch/s390/include/asm/cache.h
10095@@ -9,8 +9,10 @@
10096 #ifndef __ARCH_S390_CACHE_H
10097 #define __ARCH_S390_CACHE_H
10098
10099-#define L1_CACHE_BYTES 256
10100+#include <linux/const.h>
10101+
10102 #define L1_CACHE_SHIFT 8
10103+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10104 #define NET_SKB_PAD 32
10105
10106 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10107diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
10108index 78f4f87..598ce39 100644
10109--- a/arch/s390/include/asm/elf.h
10110+++ b/arch/s390/include/asm/elf.h
10111@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
10112 the loader. We need to make sure that it is out of the way of the program
10113 that it will "exec", and that there is sufficient room for the brk. */
10114
10115-extern unsigned long randomize_et_dyn(unsigned long base);
10116-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
10117+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
10118+
10119+#ifdef CONFIG_PAX_ASLR
10120+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
10121+
10122+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
10123+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
10124+#endif
10125
10126 /* This yields a mask that user programs can use to figure out what
10127 instruction set this CPU supports. */
10128@@ -222,9 +228,6 @@ struct linux_binprm;
10129 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
10130 int arch_setup_additional_pages(struct linux_binprm *, int);
10131
10132-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10133-#define arch_randomize_brk arch_randomize_brk
10134-
10135 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
10136
10137 #endif
10138diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
10139index c4a93d6..4d2a9b4 100644
10140--- a/arch/s390/include/asm/exec.h
10141+++ b/arch/s390/include/asm/exec.h
10142@@ -7,6 +7,6 @@
10143 #ifndef __ASM_EXEC_H
10144 #define __ASM_EXEC_H
10145
10146-extern unsigned long arch_align_stack(unsigned long sp);
10147+#define arch_align_stack(x) ((x) & ~0xfUL)
10148
10149 #endif /* __ASM_EXEC_H */
10150diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
10151index cd4c68e..6764641 100644
10152--- a/arch/s390/include/asm/uaccess.h
10153+++ b/arch/s390/include/asm/uaccess.h
10154@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
10155 __range_ok((unsigned long)(addr), (size)); \
10156 })
10157
10158+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10159 #define access_ok(type, addr, size) __access_ok(addr, size)
10160
10161 /*
10162@@ -275,6 +276,10 @@ static inline unsigned long __must_check
10163 copy_to_user(void __user *to, const void *from, unsigned long n)
10164 {
10165 might_fault();
10166+
10167+ if ((long)n < 0)
10168+ return n;
10169+
10170 return __copy_to_user(to, from, n);
10171 }
10172
10173@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
10174 static inline unsigned long __must_check
10175 copy_from_user(void *to, const void __user *from, unsigned long n)
10176 {
10177- unsigned int sz = __compiletime_object_size(to);
10178+ size_t sz = __compiletime_object_size(to);
10179
10180 might_fault();
10181- if (unlikely(sz != -1 && sz < n)) {
10182+
10183+ if ((long)n < 0)
10184+ return n;
10185+
10186+ if (unlikely(sz != (size_t)-1 && sz < n)) {
10187 copy_from_user_overflow();
10188 return n;
10189 }
10190diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
10191index b89b591..fd9609d 100644
10192--- a/arch/s390/kernel/module.c
10193+++ b/arch/s390/kernel/module.c
10194@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
10195
10196 /* Increase core size by size of got & plt and set start
10197 offsets for got and plt. */
10198- me->core_size = ALIGN(me->core_size, 4);
10199- me->arch.got_offset = me->core_size;
10200- me->core_size += me->arch.got_size;
10201- me->arch.plt_offset = me->core_size;
10202- me->core_size += me->arch.plt_size;
10203+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
10204+ me->arch.got_offset = me->core_size_rw;
10205+ me->core_size_rw += me->arch.got_size;
10206+ me->arch.plt_offset = me->core_size_rx;
10207+ me->core_size_rx += me->arch.plt_size;
10208 return 0;
10209 }
10210
10211@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10212 if (info->got_initialized == 0) {
10213 Elf_Addr *gotent;
10214
10215- gotent = me->module_core + me->arch.got_offset +
10216+ gotent = me->module_core_rw + me->arch.got_offset +
10217 info->got_offset;
10218 *gotent = val;
10219 info->got_initialized = 1;
10220@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10221 rc = apply_rela_bits(loc, val, 0, 64, 0);
10222 else if (r_type == R_390_GOTENT ||
10223 r_type == R_390_GOTPLTENT) {
10224- val += (Elf_Addr) me->module_core - loc;
10225+ val += (Elf_Addr) me->module_core_rw - loc;
10226 rc = apply_rela_bits(loc, val, 1, 32, 1);
10227 }
10228 break;
10229@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10230 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
10231 if (info->plt_initialized == 0) {
10232 unsigned int *ip;
10233- ip = me->module_core + me->arch.plt_offset +
10234+ ip = me->module_core_rx + me->arch.plt_offset +
10235 info->plt_offset;
10236 #ifndef CONFIG_64BIT
10237 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
10238@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10239 val - loc + 0xffffUL < 0x1ffffeUL) ||
10240 (r_type == R_390_PLT32DBL &&
10241 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
10242- val = (Elf_Addr) me->module_core +
10243+ val = (Elf_Addr) me->module_core_rx +
10244 me->arch.plt_offset +
10245 info->plt_offset;
10246 val += rela->r_addend - loc;
10247@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10248 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
10249 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
10250 val = val + rela->r_addend -
10251- ((Elf_Addr) me->module_core + me->arch.got_offset);
10252+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
10253 if (r_type == R_390_GOTOFF16)
10254 rc = apply_rela_bits(loc, val, 0, 16, 0);
10255 else if (r_type == R_390_GOTOFF32)
10256@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10257 break;
10258 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
10259 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
10260- val = (Elf_Addr) me->module_core + me->arch.got_offset +
10261+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
10262 rela->r_addend - loc;
10263 if (r_type == R_390_GOTPC)
10264 rc = apply_rela_bits(loc, val, 1, 32, 0);
10265diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
10266index 93b9ca4..4ea1454 100644
10267--- a/arch/s390/kernel/process.c
10268+++ b/arch/s390/kernel/process.c
10269@@ -242,37 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
10270 }
10271 return 0;
10272 }
10273-
10274-unsigned long arch_align_stack(unsigned long sp)
10275-{
10276- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10277- sp -= get_random_int() & ~PAGE_MASK;
10278- return sp & ~0xf;
10279-}
10280-
10281-static inline unsigned long brk_rnd(void)
10282-{
10283- /* 8MB for 32bit, 1GB for 64bit */
10284- if (is_32bit_task())
10285- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
10286- else
10287- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
10288-}
10289-
10290-unsigned long arch_randomize_brk(struct mm_struct *mm)
10291-{
10292- unsigned long ret;
10293-
10294- ret = PAGE_ALIGN(mm->brk + brk_rnd());
10295- return (ret > mm->brk) ? ret : mm->brk;
10296-}
10297-
10298-unsigned long randomize_et_dyn(unsigned long base)
10299-{
10300- unsigned long ret;
10301-
10302- if (!(current->flags & PF_RANDOMIZE))
10303- return base;
10304- ret = PAGE_ALIGN(base + brk_rnd());
10305- return (ret > base) ? ret : base;
10306-}
10307diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
10308index 9b436c2..54fbf0a 100644
10309--- a/arch/s390/mm/mmap.c
10310+++ b/arch/s390/mm/mmap.c
10311@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10312 */
10313 if (mmap_is_legacy()) {
10314 mm->mmap_base = mmap_base_legacy();
10315+
10316+#ifdef CONFIG_PAX_RANDMMAP
10317+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10318+ mm->mmap_base += mm->delta_mmap;
10319+#endif
10320+
10321 mm->get_unmapped_area = arch_get_unmapped_area;
10322 } else {
10323 mm->mmap_base = mmap_base();
10324+
10325+#ifdef CONFIG_PAX_RANDMMAP
10326+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10327+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10328+#endif
10329+
10330 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10331 }
10332 }
10333@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10334 */
10335 if (mmap_is_legacy()) {
10336 mm->mmap_base = mmap_base_legacy();
10337+
10338+#ifdef CONFIG_PAX_RANDMMAP
10339+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10340+ mm->mmap_base += mm->delta_mmap;
10341+#endif
10342+
10343 mm->get_unmapped_area = s390_get_unmapped_area;
10344 } else {
10345 mm->mmap_base = mmap_base();
10346+
10347+#ifdef CONFIG_PAX_RANDMMAP
10348+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10349+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10350+#endif
10351+
10352 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
10353 }
10354 }
10355diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
10356index 61e45b7..f2833c5 100644
10357--- a/arch/s390/net/bpf_jit_comp.c
10358+++ b/arch/s390/net/bpf_jit_comp.c
10359@@ -887,5 +887,5 @@ void bpf_jit_free(struct bpf_prog *fp)
10360 module_free(NULL, header);
10361
10362 free_filter:
10363- kfree(fp);
10364+ bpf_prog_unlock_free(fp);
10365 }
10366diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
10367index ae3d59f..f65f075 100644
10368--- a/arch/score/include/asm/cache.h
10369+++ b/arch/score/include/asm/cache.h
10370@@ -1,7 +1,9 @@
10371 #ifndef _ASM_SCORE_CACHE_H
10372 #define _ASM_SCORE_CACHE_H
10373
10374+#include <linux/const.h>
10375+
10376 #define L1_CACHE_SHIFT 4
10377-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10378+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10379
10380 #endif /* _ASM_SCORE_CACHE_H */
10381diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
10382index f9f3cd5..58ff438 100644
10383--- a/arch/score/include/asm/exec.h
10384+++ b/arch/score/include/asm/exec.h
10385@@ -1,6 +1,6 @@
10386 #ifndef _ASM_SCORE_EXEC_H
10387 #define _ASM_SCORE_EXEC_H
10388
10389-extern unsigned long arch_align_stack(unsigned long sp);
10390+#define arch_align_stack(x) (x)
10391
10392 #endif /* _ASM_SCORE_EXEC_H */
10393diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
10394index a1519ad3..e8ac1ff 100644
10395--- a/arch/score/kernel/process.c
10396+++ b/arch/score/kernel/process.c
10397@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
10398
10399 return task_pt_regs(task)->cp0_epc;
10400 }
10401-
10402-unsigned long arch_align_stack(unsigned long sp)
10403-{
10404- return sp;
10405-}
10406diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
10407index ef9e555..331bd29 100644
10408--- a/arch/sh/include/asm/cache.h
10409+++ b/arch/sh/include/asm/cache.h
10410@@ -9,10 +9,11 @@
10411 #define __ASM_SH_CACHE_H
10412 #ifdef __KERNEL__
10413
10414+#include <linux/const.h>
10415 #include <linux/init.h>
10416 #include <cpu/cache.h>
10417
10418-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10419+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10420
10421 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10422
10423diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
10424index 6777177..cb5e44f 100644
10425--- a/arch/sh/mm/mmap.c
10426+++ b/arch/sh/mm/mmap.c
10427@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10428 struct mm_struct *mm = current->mm;
10429 struct vm_area_struct *vma;
10430 int do_colour_align;
10431+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10432 struct vm_unmapped_area_info info;
10433
10434 if (flags & MAP_FIXED) {
10435@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10436 if (filp || (flags & MAP_SHARED))
10437 do_colour_align = 1;
10438
10439+#ifdef CONFIG_PAX_RANDMMAP
10440+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10441+#endif
10442+
10443 if (addr) {
10444 if (do_colour_align)
10445 addr = COLOUR_ALIGN(addr, pgoff);
10446@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10447 addr = PAGE_ALIGN(addr);
10448
10449 vma = find_vma(mm, addr);
10450- if (TASK_SIZE - len >= addr &&
10451- (!vma || addr + len <= vma->vm_start))
10452+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10453 return addr;
10454 }
10455
10456 info.flags = 0;
10457 info.length = len;
10458- info.low_limit = TASK_UNMAPPED_BASE;
10459+ info.low_limit = mm->mmap_base;
10460 info.high_limit = TASK_SIZE;
10461 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
10462 info.align_offset = pgoff << PAGE_SHIFT;
10463@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10464 struct mm_struct *mm = current->mm;
10465 unsigned long addr = addr0;
10466 int do_colour_align;
10467+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10468 struct vm_unmapped_area_info info;
10469
10470 if (flags & MAP_FIXED) {
10471@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10472 if (filp || (flags & MAP_SHARED))
10473 do_colour_align = 1;
10474
10475+#ifdef CONFIG_PAX_RANDMMAP
10476+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10477+#endif
10478+
10479 /* requesting a specific address */
10480 if (addr) {
10481 if (do_colour_align)
10482@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10483 addr = PAGE_ALIGN(addr);
10484
10485 vma = find_vma(mm, addr);
10486- if (TASK_SIZE - len >= addr &&
10487- (!vma || addr + len <= vma->vm_start))
10488+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10489 return addr;
10490 }
10491
10492@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10493 VM_BUG_ON(addr != -ENOMEM);
10494 info.flags = 0;
10495 info.low_limit = TASK_UNMAPPED_BASE;
10496+
10497+#ifdef CONFIG_PAX_RANDMMAP
10498+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10499+ info.low_limit += mm->delta_mmap;
10500+#endif
10501+
10502 info.high_limit = TASK_SIZE;
10503 addr = vm_unmapped_area(&info);
10504 }
10505diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
10506index bb894c8..8141d5c 100644
10507--- a/arch/sparc/include/asm/atomic_64.h
10508+++ b/arch/sparc/include/asm/atomic_64.h
10509@@ -15,18 +15,40 @@
10510 #define ATOMIC64_INIT(i) { (i) }
10511
10512 #define atomic_read(v) (*(volatile int *)&(v)->counter)
10513+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
10514+{
10515+ return v->counter;
10516+}
10517 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
10518+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10519+{
10520+ return v->counter;
10521+}
10522
10523 #define atomic_set(v, i) (((v)->counter) = i)
10524+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
10525+{
10526+ v->counter = i;
10527+}
10528 #define atomic64_set(v, i) (((v)->counter) = i)
10529+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10530+{
10531+ v->counter = i;
10532+}
10533
10534 void atomic_add(int, atomic_t *);
10535+void atomic_add_unchecked(int, atomic_unchecked_t *);
10536 void atomic64_add(long, atomic64_t *);
10537+void atomic64_add_unchecked(long, atomic64_unchecked_t *);
10538 void atomic_sub(int, atomic_t *);
10539+void atomic_sub_unchecked(int, atomic_unchecked_t *);
10540 void atomic64_sub(long, atomic64_t *);
10541+void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
10542
10543 int atomic_add_ret(int, atomic_t *);
10544+int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
10545 long atomic64_add_ret(long, atomic64_t *);
10546+long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
10547 int atomic_sub_ret(int, atomic_t *);
10548 long atomic64_sub_ret(long, atomic64_t *);
10549
10550@@ -34,13 +56,29 @@ long atomic64_sub_ret(long, atomic64_t *);
10551 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
10552
10553 #define atomic_inc_return(v) atomic_add_ret(1, v)
10554+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10555+{
10556+ return atomic_add_ret_unchecked(1, v);
10557+}
10558 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
10559+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10560+{
10561+ return atomic64_add_ret_unchecked(1, v);
10562+}
10563
10564 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
10565 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
10566
10567 #define atomic_add_return(i, v) atomic_add_ret(i, v)
10568+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10569+{
10570+ return atomic_add_ret_unchecked(i, v);
10571+}
10572 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
10573+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10574+{
10575+ return atomic64_add_ret_unchecked(i, v);
10576+}
10577
10578 /*
10579 * atomic_inc_and_test - increment and test
10580@@ -51,6 +89,10 @@ long atomic64_sub_ret(long, atomic64_t *);
10581 * other cases.
10582 */
10583 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
10584+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10585+{
10586+ return atomic_inc_return_unchecked(v) == 0;
10587+}
10588 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
10589
10590 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
10591@@ -60,25 +102,60 @@ long atomic64_sub_ret(long, atomic64_t *);
10592 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
10593
10594 #define atomic_inc(v) atomic_add(1, v)
10595+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10596+{
10597+ atomic_add_unchecked(1, v);
10598+}
10599 #define atomic64_inc(v) atomic64_add(1, v)
10600+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10601+{
10602+ atomic64_add_unchecked(1, v);
10603+}
10604
10605 #define atomic_dec(v) atomic_sub(1, v)
10606+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10607+{
10608+ atomic_sub_unchecked(1, v);
10609+}
10610 #define atomic64_dec(v) atomic64_sub(1, v)
10611+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10612+{
10613+ atomic64_sub_unchecked(1, v);
10614+}
10615
10616 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
10617 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
10618
10619 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
10620+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10621+{
10622+ return cmpxchg(&v->counter, old, new);
10623+}
10624 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10625+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10626+{
10627+ return xchg(&v->counter, new);
10628+}
10629
10630 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10631 {
10632- int c, old;
10633+ int c, old, new;
10634 c = atomic_read(v);
10635 for (;;) {
10636- if (unlikely(c == (u)))
10637+ if (unlikely(c == u))
10638 break;
10639- old = atomic_cmpxchg((v), c, c + (a));
10640+
10641+ asm volatile("addcc %2, %0, %0\n"
10642+
10643+#ifdef CONFIG_PAX_REFCOUNT
10644+ "tvs %%icc, 6\n"
10645+#endif
10646+
10647+ : "=r" (new)
10648+ : "0" (c), "ir" (a)
10649+ : "cc");
10650+
10651+ old = atomic_cmpxchg(v, c, new);
10652 if (likely(old == c))
10653 break;
10654 c = old;
10655@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10656 #define atomic64_cmpxchg(v, o, n) \
10657 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10658 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10659+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10660+{
10661+ return xchg(&v->counter, new);
10662+}
10663
10664 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10665 {
10666- long c, old;
10667+ long c, old, new;
10668 c = atomic64_read(v);
10669 for (;;) {
10670- if (unlikely(c == (u)))
10671+ if (unlikely(c == u))
10672 break;
10673- old = atomic64_cmpxchg((v), c, c + (a));
10674+
10675+ asm volatile("addcc %2, %0, %0\n"
10676+
10677+#ifdef CONFIG_PAX_REFCOUNT
10678+ "tvs %%xcc, 6\n"
10679+#endif
10680+
10681+ : "=r" (new)
10682+ : "0" (c), "ir" (a)
10683+ : "cc");
10684+
10685+ old = atomic64_cmpxchg(v, c, new);
10686 if (likely(old == c))
10687 break;
10688 c = old;
10689 }
10690- return c != (u);
10691+ return c != u;
10692 }
10693
10694 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10695diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10696index 305dcc3..7835030 100644
10697--- a/arch/sparc/include/asm/barrier_64.h
10698+++ b/arch/sparc/include/asm/barrier_64.h
10699@@ -57,7 +57,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10700 do { \
10701 compiletime_assert_atomic_type(*p); \
10702 barrier(); \
10703- ACCESS_ONCE(*p) = (v); \
10704+ ACCESS_ONCE_RW(*p) = (v); \
10705 } while (0)
10706
10707 #define smp_load_acquire(p) \
10708diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10709index 5bb6991..5c2132e 100644
10710--- a/arch/sparc/include/asm/cache.h
10711+++ b/arch/sparc/include/asm/cache.h
10712@@ -7,10 +7,12 @@
10713 #ifndef _SPARC_CACHE_H
10714 #define _SPARC_CACHE_H
10715
10716+#include <linux/const.h>
10717+
10718 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10719
10720 #define L1_CACHE_SHIFT 5
10721-#define L1_CACHE_BYTES 32
10722+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10723
10724 #ifdef CONFIG_SPARC32
10725 #define SMP_CACHE_BYTES_SHIFT 5
10726diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10727index a24e41f..47677ff 100644
10728--- a/arch/sparc/include/asm/elf_32.h
10729+++ b/arch/sparc/include/asm/elf_32.h
10730@@ -114,6 +114,13 @@ typedef struct {
10731
10732 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10733
10734+#ifdef CONFIG_PAX_ASLR
10735+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10736+
10737+#define PAX_DELTA_MMAP_LEN 16
10738+#define PAX_DELTA_STACK_LEN 16
10739+#endif
10740+
10741 /* This yields a mask that user programs can use to figure out what
10742 instruction set this cpu supports. This can NOT be done in userspace
10743 on Sparc. */
10744diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10745index 370ca1e..d4f4a98 100644
10746--- a/arch/sparc/include/asm/elf_64.h
10747+++ b/arch/sparc/include/asm/elf_64.h
10748@@ -189,6 +189,13 @@ typedef struct {
10749 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10750 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10751
10752+#ifdef CONFIG_PAX_ASLR
10753+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10754+
10755+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10756+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10757+#endif
10758+
10759 extern unsigned long sparc64_elf_hwcap;
10760 #define ELF_HWCAP sparc64_elf_hwcap
10761
10762diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10763index a3890da..f6a408e 100644
10764--- a/arch/sparc/include/asm/pgalloc_32.h
10765+++ b/arch/sparc/include/asm/pgalloc_32.h
10766@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10767 }
10768
10769 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10770+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10771
10772 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10773 unsigned long address)
10774diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10775index 5e31871..b71c9d7 100644
10776--- a/arch/sparc/include/asm/pgalloc_64.h
10777+++ b/arch/sparc/include/asm/pgalloc_64.h
10778@@ -38,6 +38,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10779 }
10780
10781 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10782+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10783
10784 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10785 {
10786diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10787index 59ba6f6..4518128 100644
10788--- a/arch/sparc/include/asm/pgtable.h
10789+++ b/arch/sparc/include/asm/pgtable.h
10790@@ -5,4 +5,8 @@
10791 #else
10792 #include <asm/pgtable_32.h>
10793 #endif
10794+
10795+#define ktla_ktva(addr) (addr)
10796+#define ktva_ktla(addr) (addr)
10797+
10798 #endif
10799diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10800index b9b91ae..950b91e 100644
10801--- a/arch/sparc/include/asm/pgtable_32.h
10802+++ b/arch/sparc/include/asm/pgtable_32.h
10803@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10804 #define PAGE_SHARED SRMMU_PAGE_SHARED
10805 #define PAGE_COPY SRMMU_PAGE_COPY
10806 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10807+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10808+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10809+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10810 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10811
10812 /* Top-level page directory - dummy used by init-mm.
10813@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10814
10815 /* xwr */
10816 #define __P000 PAGE_NONE
10817-#define __P001 PAGE_READONLY
10818-#define __P010 PAGE_COPY
10819-#define __P011 PAGE_COPY
10820+#define __P001 PAGE_READONLY_NOEXEC
10821+#define __P010 PAGE_COPY_NOEXEC
10822+#define __P011 PAGE_COPY_NOEXEC
10823 #define __P100 PAGE_READONLY
10824 #define __P101 PAGE_READONLY
10825 #define __P110 PAGE_COPY
10826 #define __P111 PAGE_COPY
10827
10828 #define __S000 PAGE_NONE
10829-#define __S001 PAGE_READONLY
10830-#define __S010 PAGE_SHARED
10831-#define __S011 PAGE_SHARED
10832+#define __S001 PAGE_READONLY_NOEXEC
10833+#define __S010 PAGE_SHARED_NOEXEC
10834+#define __S011 PAGE_SHARED_NOEXEC
10835 #define __S100 PAGE_READONLY
10836 #define __S101 PAGE_READONLY
10837 #define __S110 PAGE_SHARED
10838diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10839index 79da178..c2eede8 100644
10840--- a/arch/sparc/include/asm/pgtsrmmu.h
10841+++ b/arch/sparc/include/asm/pgtsrmmu.h
10842@@ -115,6 +115,11 @@
10843 SRMMU_EXEC | SRMMU_REF)
10844 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10845 SRMMU_EXEC | SRMMU_REF)
10846+
10847+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10848+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10849+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10850+
10851 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10852 SRMMU_DIRTY | SRMMU_REF)
10853
10854diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10855index 29d64b1..4272fe8 100644
10856--- a/arch/sparc/include/asm/setup.h
10857+++ b/arch/sparc/include/asm/setup.h
10858@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10859 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10860
10861 /* init_64.c */
10862-extern atomic_t dcpage_flushes;
10863-extern atomic_t dcpage_flushes_xcall;
10864+extern atomic_unchecked_t dcpage_flushes;
10865+extern atomic_unchecked_t dcpage_flushes_xcall;
10866
10867 extern int sysctl_tsb_ratio;
10868 #endif
10869diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10870index 9689176..63c18ea 100644
10871--- a/arch/sparc/include/asm/spinlock_64.h
10872+++ b/arch/sparc/include/asm/spinlock_64.h
10873@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10874
10875 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10876
10877-static void inline arch_read_lock(arch_rwlock_t *lock)
10878+static inline void arch_read_lock(arch_rwlock_t *lock)
10879 {
10880 unsigned long tmp1, tmp2;
10881
10882 __asm__ __volatile__ (
10883 "1: ldsw [%2], %0\n"
10884 " brlz,pn %0, 2f\n"
10885-"4: add %0, 1, %1\n"
10886+"4: addcc %0, 1, %1\n"
10887+
10888+#ifdef CONFIG_PAX_REFCOUNT
10889+" tvs %%icc, 6\n"
10890+#endif
10891+
10892 " cas [%2], %0, %1\n"
10893 " cmp %0, %1\n"
10894 " bne,pn %%icc, 1b\n"
10895@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10896 " .previous"
10897 : "=&r" (tmp1), "=&r" (tmp2)
10898 : "r" (lock)
10899- : "memory");
10900+ : "memory", "cc");
10901 }
10902
10903-static int inline arch_read_trylock(arch_rwlock_t *lock)
10904+static inline int arch_read_trylock(arch_rwlock_t *lock)
10905 {
10906 int tmp1, tmp2;
10907
10908@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10909 "1: ldsw [%2], %0\n"
10910 " brlz,a,pn %0, 2f\n"
10911 " mov 0, %0\n"
10912-" add %0, 1, %1\n"
10913+" addcc %0, 1, %1\n"
10914+
10915+#ifdef CONFIG_PAX_REFCOUNT
10916+" tvs %%icc, 6\n"
10917+#endif
10918+
10919 " cas [%2], %0, %1\n"
10920 " cmp %0, %1\n"
10921 " bne,pn %%icc, 1b\n"
10922@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10923 return tmp1;
10924 }
10925
10926-static void inline arch_read_unlock(arch_rwlock_t *lock)
10927+static inline void arch_read_unlock(arch_rwlock_t *lock)
10928 {
10929 unsigned long tmp1, tmp2;
10930
10931 __asm__ __volatile__(
10932 "1: lduw [%2], %0\n"
10933-" sub %0, 1, %1\n"
10934+" subcc %0, 1, %1\n"
10935+
10936+#ifdef CONFIG_PAX_REFCOUNT
10937+" tvs %%icc, 6\n"
10938+#endif
10939+
10940 " cas [%2], %0, %1\n"
10941 " cmp %0, %1\n"
10942 " bne,pn %%xcc, 1b\n"
10943@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10944 : "memory");
10945 }
10946
10947-static void inline arch_write_lock(arch_rwlock_t *lock)
10948+static inline void arch_write_lock(arch_rwlock_t *lock)
10949 {
10950 unsigned long mask, tmp1, tmp2;
10951
10952@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10953 : "memory");
10954 }
10955
10956-static void inline arch_write_unlock(arch_rwlock_t *lock)
10957+static inline void arch_write_unlock(arch_rwlock_t *lock)
10958 {
10959 __asm__ __volatile__(
10960 " stw %%g0, [%0]"
10961@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10962 : "memory");
10963 }
10964
10965-static int inline arch_write_trylock(arch_rwlock_t *lock)
10966+static inline int arch_write_trylock(arch_rwlock_t *lock)
10967 {
10968 unsigned long mask, tmp1, tmp2, result;
10969
10970diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10971index 96efa7a..16858bf 100644
10972--- a/arch/sparc/include/asm/thread_info_32.h
10973+++ b/arch/sparc/include/asm/thread_info_32.h
10974@@ -49,6 +49,8 @@ struct thread_info {
10975 unsigned long w_saved;
10976
10977 struct restart_block restart_block;
10978+
10979+ unsigned long lowest_stack;
10980 };
10981
10982 /*
10983diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10984index cc6275c..7eb8e21 100644
10985--- a/arch/sparc/include/asm/thread_info_64.h
10986+++ b/arch/sparc/include/asm/thread_info_64.h
10987@@ -63,6 +63,8 @@ struct thread_info {
10988 struct pt_regs *kern_una_regs;
10989 unsigned int kern_una_insn;
10990
10991+ unsigned long lowest_stack;
10992+
10993 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
10994 __attribute__ ((aligned(64)));
10995 };
10996@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
10997 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
10998 /* flag bit 4 is available */
10999 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
11000-/* flag bit 6 is available */
11001+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
11002 #define TIF_32BIT 7 /* 32-bit binary */
11003 #define TIF_NOHZ 8 /* in adaptive nohz mode */
11004 #define TIF_SECCOMP 9 /* secure computing */
11005 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
11006 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
11007+
11008 /* NOTE: Thread flags >= 12 should be ones we have no interest
11009 * in using in assembly, else we can't use the mask as
11010 * an immediate value in instructions such as andcc.
11011@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
11012 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
11013 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
11014 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
11015+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
11016
11017 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
11018 _TIF_DO_NOTIFY_RESUME_MASK | \
11019 _TIF_NEED_RESCHED)
11020 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
11021
11022+#define _TIF_WORK_SYSCALL \
11023+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
11024+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
11025+
11026+
11027 /*
11028 * Thread-synchronous status.
11029 *
11030diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
11031index bd56c28..4b63d83 100644
11032--- a/arch/sparc/include/asm/uaccess.h
11033+++ b/arch/sparc/include/asm/uaccess.h
11034@@ -1,5 +1,6 @@
11035 #ifndef ___ASM_SPARC_UACCESS_H
11036 #define ___ASM_SPARC_UACCESS_H
11037+
11038 #if defined(__sparc__) && defined(__arch64__)
11039 #include <asm/uaccess_64.h>
11040 #else
11041diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
11042index 9634d08..f55fe4f 100644
11043--- a/arch/sparc/include/asm/uaccess_32.h
11044+++ b/arch/sparc/include/asm/uaccess_32.h
11045@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
11046
11047 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
11048 {
11049- if (n && __access_ok((unsigned long) to, n))
11050+ if ((long)n < 0)
11051+ return n;
11052+
11053+ if (n && __access_ok((unsigned long) to, n)) {
11054+ if (!__builtin_constant_p(n))
11055+ check_object_size(from, n, true);
11056 return __copy_user(to, (__force void __user *) from, n);
11057- else
11058+ } else
11059 return n;
11060 }
11061
11062 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
11063 {
11064+ if ((long)n < 0)
11065+ return n;
11066+
11067+ if (!__builtin_constant_p(n))
11068+ check_object_size(from, n, true);
11069+
11070 return __copy_user(to, (__force void __user *) from, n);
11071 }
11072
11073 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
11074 {
11075- if (n && __access_ok((unsigned long) from, n))
11076+ if ((long)n < 0)
11077+ return n;
11078+
11079+ if (n && __access_ok((unsigned long) from, n)) {
11080+ if (!__builtin_constant_p(n))
11081+ check_object_size(to, n, false);
11082 return __copy_user((__force void __user *) to, from, n);
11083- else
11084+ } else
11085 return n;
11086 }
11087
11088 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
11089 {
11090+ if ((long)n < 0)
11091+ return n;
11092+
11093 return __copy_user((__force void __user *) to, from, n);
11094 }
11095
11096diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
11097index c990a5e..f17b9c1 100644
11098--- a/arch/sparc/include/asm/uaccess_64.h
11099+++ b/arch/sparc/include/asm/uaccess_64.h
11100@@ -10,6 +10,7 @@
11101 #include <linux/compiler.h>
11102 #include <linux/string.h>
11103 #include <linux/thread_info.h>
11104+#include <linux/kernel.h>
11105 #include <asm/asi.h>
11106 #include <asm/spitfire.h>
11107 #include <asm-generic/uaccess-unaligned.h>
11108@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
11109 static inline unsigned long __must_check
11110 copy_from_user(void *to, const void __user *from, unsigned long size)
11111 {
11112- unsigned long ret = ___copy_from_user(to, from, size);
11113+ unsigned long ret;
11114
11115+ if ((long)size < 0 || size > INT_MAX)
11116+ return size;
11117+
11118+ if (!__builtin_constant_p(size))
11119+ check_object_size(to, size, false);
11120+
11121+ ret = ___copy_from_user(to, from, size);
11122 if (unlikely(ret))
11123 ret = copy_from_user_fixup(to, from, size);
11124
11125@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
11126 static inline unsigned long __must_check
11127 copy_to_user(void __user *to, const void *from, unsigned long size)
11128 {
11129- unsigned long ret = ___copy_to_user(to, from, size);
11130+ unsigned long ret;
11131
11132+ if ((long)size < 0 || size > INT_MAX)
11133+ return size;
11134+
11135+ if (!__builtin_constant_p(size))
11136+ check_object_size(from, size, true);
11137+
11138+ ret = ___copy_to_user(to, from, size);
11139 if (unlikely(ret))
11140 ret = copy_to_user_fixup(to, from, size);
11141 return ret;
11142diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
11143index 7cf9c6e..6206648 100644
11144--- a/arch/sparc/kernel/Makefile
11145+++ b/arch/sparc/kernel/Makefile
11146@@ -4,7 +4,7 @@
11147 #
11148
11149 asflags-y := -ansi
11150-ccflags-y := -Werror
11151+#ccflags-y := -Werror
11152
11153 extra-y := head_$(BITS).o
11154
11155diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
11156index 50e7b62..79fae35 100644
11157--- a/arch/sparc/kernel/process_32.c
11158+++ b/arch/sparc/kernel/process_32.c
11159@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
11160
11161 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
11162 r->psr, r->pc, r->npc, r->y, print_tainted());
11163- printk("PC: <%pS>\n", (void *) r->pc);
11164+ printk("PC: <%pA>\n", (void *) r->pc);
11165 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11166 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
11167 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
11168 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11169 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
11170 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
11171- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
11172+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
11173
11174 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11175 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
11176@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11177 rw = (struct reg_window32 *) fp;
11178 pc = rw->ins[7];
11179 printk("[%08lx : ", pc);
11180- printk("%pS ] ", (void *) pc);
11181+ printk("%pA ] ", (void *) pc);
11182 fp = rw->ins[6];
11183 } while (++count < 16);
11184 printk("\n");
11185diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
11186index 0be7bf9..2b1cba8 100644
11187--- a/arch/sparc/kernel/process_64.c
11188+++ b/arch/sparc/kernel/process_64.c
11189@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
11190 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
11191 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
11192 if (regs->tstate & TSTATE_PRIV)
11193- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
11194+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
11195 }
11196
11197 void show_regs(struct pt_regs *regs)
11198@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
11199
11200 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
11201 regs->tpc, regs->tnpc, regs->y, print_tainted());
11202- printk("TPC: <%pS>\n", (void *) regs->tpc);
11203+ printk("TPC: <%pA>\n", (void *) regs->tpc);
11204 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
11205 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
11206 regs->u_regs[3]);
11207@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
11208 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
11209 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
11210 regs->u_regs[15]);
11211- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
11212+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
11213 show_regwindow(regs);
11214 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
11215 }
11216@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
11217 ((tp && tp->task) ? tp->task->pid : -1));
11218
11219 if (gp->tstate & TSTATE_PRIV) {
11220- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
11221+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
11222 (void *) gp->tpc,
11223 (void *) gp->o7,
11224 (void *) gp->i7,
11225diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
11226index 79cc0d1..ec62734 100644
11227--- a/arch/sparc/kernel/prom_common.c
11228+++ b/arch/sparc/kernel/prom_common.c
11229@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
11230
11231 unsigned int prom_early_allocated __initdata;
11232
11233-static struct of_pdt_ops prom_sparc_ops __initdata = {
11234+static struct of_pdt_ops prom_sparc_ops __initconst = {
11235 .nextprop = prom_common_nextprop,
11236 .getproplen = prom_getproplen,
11237 .getproperty = prom_getproperty,
11238diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
11239index c13c9f2..d572c34 100644
11240--- a/arch/sparc/kernel/ptrace_64.c
11241+++ b/arch/sparc/kernel/ptrace_64.c
11242@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
11243 return ret;
11244 }
11245
11246+#ifdef CONFIG_GRKERNSEC_SETXID
11247+extern void gr_delayed_cred_worker(void);
11248+#endif
11249+
11250 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
11251 {
11252 int ret = 0;
11253@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
11254 if (test_thread_flag(TIF_NOHZ))
11255 user_exit();
11256
11257+#ifdef CONFIG_GRKERNSEC_SETXID
11258+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
11259+ gr_delayed_cred_worker();
11260+#endif
11261+
11262 if (test_thread_flag(TIF_SYSCALL_TRACE))
11263 ret = tracehook_report_syscall_entry(regs);
11264
11265@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
11266 if (test_thread_flag(TIF_NOHZ))
11267 user_exit();
11268
11269+#ifdef CONFIG_GRKERNSEC_SETXID
11270+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
11271+ gr_delayed_cred_worker();
11272+#endif
11273+
11274 audit_syscall_exit(regs);
11275
11276 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
11277diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
11278index 81954ee..6cfaa98 100644
11279--- a/arch/sparc/kernel/smp_64.c
11280+++ b/arch/sparc/kernel/smp_64.c
11281@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
11282 return;
11283
11284 #ifdef CONFIG_DEBUG_DCFLUSH
11285- atomic_inc(&dcpage_flushes);
11286+ atomic_inc_unchecked(&dcpage_flushes);
11287 #endif
11288
11289 this_cpu = get_cpu();
11290@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
11291 xcall_deliver(data0, __pa(pg_addr),
11292 (u64) pg_addr, cpumask_of(cpu));
11293 #ifdef CONFIG_DEBUG_DCFLUSH
11294- atomic_inc(&dcpage_flushes_xcall);
11295+ atomic_inc_unchecked(&dcpage_flushes_xcall);
11296 #endif
11297 }
11298 }
11299@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
11300 preempt_disable();
11301
11302 #ifdef CONFIG_DEBUG_DCFLUSH
11303- atomic_inc(&dcpage_flushes);
11304+ atomic_inc_unchecked(&dcpage_flushes);
11305 #endif
11306 data0 = 0;
11307 pg_addr = page_address(page);
11308@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
11309 xcall_deliver(data0, __pa(pg_addr),
11310 (u64) pg_addr, cpu_online_mask);
11311 #ifdef CONFIG_DEBUG_DCFLUSH
11312- atomic_inc(&dcpage_flushes_xcall);
11313+ atomic_inc_unchecked(&dcpage_flushes_xcall);
11314 #endif
11315 }
11316 __local_flush_dcache_page(page);
11317diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
11318index 646988d..b88905f 100644
11319--- a/arch/sparc/kernel/sys_sparc_32.c
11320+++ b/arch/sparc/kernel/sys_sparc_32.c
11321@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11322 if (len > TASK_SIZE - PAGE_SIZE)
11323 return -ENOMEM;
11324 if (!addr)
11325- addr = TASK_UNMAPPED_BASE;
11326+ addr = current->mm->mmap_base;
11327
11328 info.flags = 0;
11329 info.length = len;
11330diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
11331index c85403d..6af95c9 100644
11332--- a/arch/sparc/kernel/sys_sparc_64.c
11333+++ b/arch/sparc/kernel/sys_sparc_64.c
11334@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11335 struct vm_area_struct * vma;
11336 unsigned long task_size = TASK_SIZE;
11337 int do_color_align;
11338+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
11339 struct vm_unmapped_area_info info;
11340
11341 if (flags & MAP_FIXED) {
11342 /* We do not accept a shared mapping if it would violate
11343 * cache aliasing constraints.
11344 */
11345- if ((flags & MAP_SHARED) &&
11346+ if ((filp || (flags & MAP_SHARED)) &&
11347 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
11348 return -EINVAL;
11349 return addr;
11350@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11351 if (filp || (flags & MAP_SHARED))
11352 do_color_align = 1;
11353
11354+#ifdef CONFIG_PAX_RANDMMAP
11355+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11356+#endif
11357+
11358 if (addr) {
11359 if (do_color_align)
11360 addr = COLOR_ALIGN(addr, pgoff);
11361@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11362 addr = PAGE_ALIGN(addr);
11363
11364 vma = find_vma(mm, addr);
11365- if (task_size - len >= addr &&
11366- (!vma || addr + len <= vma->vm_start))
11367+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11368 return addr;
11369 }
11370
11371 info.flags = 0;
11372 info.length = len;
11373- info.low_limit = TASK_UNMAPPED_BASE;
11374+ info.low_limit = mm->mmap_base;
11375 info.high_limit = min(task_size, VA_EXCLUDE_START);
11376 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
11377 info.align_offset = pgoff << PAGE_SHIFT;
11378+ info.threadstack_offset = offset;
11379 addr = vm_unmapped_area(&info);
11380
11381 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11382 VM_BUG_ON(addr != -ENOMEM);
11383 info.low_limit = VA_EXCLUDE_END;
11384+
11385+#ifdef CONFIG_PAX_RANDMMAP
11386+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11387+ info.low_limit += mm->delta_mmap;
11388+#endif
11389+
11390 info.high_limit = task_size;
11391 addr = vm_unmapped_area(&info);
11392 }
11393@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11394 unsigned long task_size = STACK_TOP32;
11395 unsigned long addr = addr0;
11396 int do_color_align;
11397+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
11398 struct vm_unmapped_area_info info;
11399
11400 /* This should only ever run for 32-bit processes. */
11401@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11402 /* We do not accept a shared mapping if it would violate
11403 * cache aliasing constraints.
11404 */
11405- if ((flags & MAP_SHARED) &&
11406+ if ((filp || (flags & MAP_SHARED)) &&
11407 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
11408 return -EINVAL;
11409 return addr;
11410@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11411 if (filp || (flags & MAP_SHARED))
11412 do_color_align = 1;
11413
11414+#ifdef CONFIG_PAX_RANDMMAP
11415+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11416+#endif
11417+
11418 /* requesting a specific address */
11419 if (addr) {
11420 if (do_color_align)
11421@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11422 addr = PAGE_ALIGN(addr);
11423
11424 vma = find_vma(mm, addr);
11425- if (task_size - len >= addr &&
11426- (!vma || addr + len <= vma->vm_start))
11427+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11428 return addr;
11429 }
11430
11431@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11432 info.high_limit = mm->mmap_base;
11433 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
11434 info.align_offset = pgoff << PAGE_SHIFT;
11435+ info.threadstack_offset = offset;
11436 addr = vm_unmapped_area(&info);
11437
11438 /*
11439@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11440 VM_BUG_ON(addr != -ENOMEM);
11441 info.flags = 0;
11442 info.low_limit = TASK_UNMAPPED_BASE;
11443+
11444+#ifdef CONFIG_PAX_RANDMMAP
11445+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11446+ info.low_limit += mm->delta_mmap;
11447+#endif
11448+
11449 info.high_limit = STACK_TOP32;
11450 addr = vm_unmapped_area(&info);
11451 }
11452@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
11453 EXPORT_SYMBOL(get_fb_unmapped_area);
11454
11455 /* Essentially the same as PowerPC. */
11456-static unsigned long mmap_rnd(void)
11457+static unsigned long mmap_rnd(struct mm_struct *mm)
11458 {
11459 unsigned long rnd = 0UL;
11460
11461+#ifdef CONFIG_PAX_RANDMMAP
11462+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11463+#endif
11464+
11465 if (current->flags & PF_RANDOMIZE) {
11466 unsigned long val = get_random_int();
11467 if (test_thread_flag(TIF_32BIT))
11468@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
11469
11470 void arch_pick_mmap_layout(struct mm_struct *mm)
11471 {
11472- unsigned long random_factor = mmap_rnd();
11473+ unsigned long random_factor = mmap_rnd(mm);
11474 unsigned long gap;
11475
11476 /*
11477@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11478 gap == RLIM_INFINITY ||
11479 sysctl_legacy_va_layout) {
11480 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
11481+
11482+#ifdef CONFIG_PAX_RANDMMAP
11483+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11484+ mm->mmap_base += mm->delta_mmap;
11485+#endif
11486+
11487 mm->get_unmapped_area = arch_get_unmapped_area;
11488 } else {
11489 /* We know it's 32-bit */
11490@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11491 gap = (task_size / 6 * 5);
11492
11493 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
11494+
11495+#ifdef CONFIG_PAX_RANDMMAP
11496+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11497+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
11498+#endif
11499+
11500 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
11501 }
11502 }
11503diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
11504index 33a17e7..d87fb1f 100644
11505--- a/arch/sparc/kernel/syscalls.S
11506+++ b/arch/sparc/kernel/syscalls.S
11507@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
11508 #endif
11509 .align 32
11510 1: ldx [%g6 + TI_FLAGS], %l5
11511- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11512+ andcc %l5, _TIF_WORK_SYSCALL, %g0
11513 be,pt %icc, rtrap
11514 nop
11515 call syscall_trace_leave
11516@@ -184,7 +184,7 @@ linux_sparc_syscall32:
11517
11518 srl %i3, 0, %o3 ! IEU0
11519 srl %i2, 0, %o2 ! IEU0 Group
11520- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11521+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11522 bne,pn %icc, linux_syscall_trace32 ! CTI
11523 mov %i0, %l5 ! IEU1
11524 5: call %l7 ! CTI Group brk forced
11525@@ -208,7 +208,7 @@ linux_sparc_syscall:
11526
11527 mov %i3, %o3 ! IEU1
11528 mov %i4, %o4 ! IEU0 Group
11529- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11530+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11531 bne,pn %icc, linux_syscall_trace ! CTI Group
11532 mov %i0, %l5 ! IEU0
11533 2: call %l7 ! CTI Group brk forced
11534@@ -223,7 +223,7 @@ ret_sys_call:
11535
11536 cmp %o0, -ERESTART_RESTARTBLOCK
11537 bgeu,pn %xcc, 1f
11538- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11539+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11540 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
11541
11542 2:
11543diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
11544index 6fd386c5..6907d81 100644
11545--- a/arch/sparc/kernel/traps_32.c
11546+++ b/arch/sparc/kernel/traps_32.c
11547@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
11548 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
11549 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
11550
11551+extern void gr_handle_kernel_exploit(void);
11552+
11553 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11554 {
11555 static int die_counter;
11556@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11557 count++ < 30 &&
11558 (((unsigned long) rw) >= PAGE_OFFSET) &&
11559 !(((unsigned long) rw) & 0x7)) {
11560- printk("Caller[%08lx]: %pS\n", rw->ins[7],
11561+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
11562 (void *) rw->ins[7]);
11563 rw = (struct reg_window32 *)rw->ins[6];
11564 }
11565 }
11566 printk("Instruction DUMP:");
11567 instruction_dump ((unsigned long *) regs->pc);
11568- if(regs->psr & PSR_PS)
11569+ if(regs->psr & PSR_PS) {
11570+ gr_handle_kernel_exploit();
11571 do_exit(SIGKILL);
11572+ }
11573 do_exit(SIGSEGV);
11574 }
11575
11576diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
11577index 981a769..d906eda 100644
11578--- a/arch/sparc/kernel/traps_64.c
11579+++ b/arch/sparc/kernel/traps_64.c
11580@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
11581 i + 1,
11582 p->trapstack[i].tstate, p->trapstack[i].tpc,
11583 p->trapstack[i].tnpc, p->trapstack[i].tt);
11584- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
11585+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
11586 }
11587 }
11588
11589@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
11590
11591 lvl -= 0x100;
11592 if (regs->tstate & TSTATE_PRIV) {
11593+
11594+#ifdef CONFIG_PAX_REFCOUNT
11595+ if (lvl == 6)
11596+ pax_report_refcount_overflow(regs);
11597+#endif
11598+
11599 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11600 die_if_kernel(buffer, regs);
11601 }
11602@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11603 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11604 {
11605 char buffer[32];
11606-
11607+
11608 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11609 0, lvl, SIGTRAP) == NOTIFY_STOP)
11610 return;
11611
11612+#ifdef CONFIG_PAX_REFCOUNT
11613+ if (lvl == 6)
11614+ pax_report_refcount_overflow(regs);
11615+#endif
11616+
11617 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11618
11619 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11620@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11621 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11622 printk("%s" "ERROR(%d): ",
11623 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11624- printk("TPC<%pS>\n", (void *) regs->tpc);
11625+ printk("TPC<%pA>\n", (void *) regs->tpc);
11626 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11627 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11628 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11629@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11630 smp_processor_id(),
11631 (type & 0x1) ? 'I' : 'D',
11632 regs->tpc);
11633- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11634+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11635 panic("Irrecoverable Cheetah+ parity error.");
11636 }
11637
11638@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11639 smp_processor_id(),
11640 (type & 0x1) ? 'I' : 'D',
11641 regs->tpc);
11642- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11643+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11644 }
11645
11646 struct sun4v_error_entry {
11647@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11648 /*0x38*/u64 reserved_5;
11649 };
11650
11651-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11652-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11653+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11654+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11655
11656 static const char *sun4v_err_type_to_str(u8 type)
11657 {
11658@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11659 }
11660
11661 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11662- int cpu, const char *pfx, atomic_t *ocnt)
11663+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11664 {
11665 u64 *raw_ptr = (u64 *) ent;
11666 u32 attrs;
11667@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11668
11669 show_regs(regs);
11670
11671- if ((cnt = atomic_read(ocnt)) != 0) {
11672- atomic_set(ocnt, 0);
11673+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11674+ atomic_set_unchecked(ocnt, 0);
11675 wmb();
11676 printk("%s: Queue overflowed %d times.\n",
11677 pfx, cnt);
11678@@ -2048,7 +2059,7 @@ out:
11679 */
11680 void sun4v_resum_overflow(struct pt_regs *regs)
11681 {
11682- atomic_inc(&sun4v_resum_oflow_cnt);
11683+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11684 }
11685
11686 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11687@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11688 /* XXX Actually even this can make not that much sense. Perhaps
11689 * XXX we should just pull the plug and panic directly from here?
11690 */
11691- atomic_inc(&sun4v_nonresum_oflow_cnt);
11692+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11693 }
11694
11695 static void sun4v_tlb_error(struct pt_regs *regs)
11696@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11697
11698 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11699 regs->tpc, tl);
11700- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11701+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11702 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11703- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11704+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11705 (void *) regs->u_regs[UREG_I7]);
11706 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11707 "pte[%lx] error[%lx]\n",
11708@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11709
11710 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11711 regs->tpc, tl);
11712- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11713+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11714 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11715- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11716+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11717 (void *) regs->u_regs[UREG_I7]);
11718 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11719 "pte[%lx] error[%lx]\n",
11720@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11721 fp = (unsigned long)sf->fp + STACK_BIAS;
11722 }
11723
11724- printk(" [%016lx] %pS\n", pc, (void *) pc);
11725+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11726 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11727 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11728 int index = tsk->curr_ret_stack;
11729 if (tsk->ret_stack && index >= graph) {
11730 pc = tsk->ret_stack[index - graph].ret;
11731- printk(" [%016lx] %pS\n", pc, (void *) pc);
11732+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11733 graph++;
11734 }
11735 }
11736@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11737 return (struct reg_window *) (fp + STACK_BIAS);
11738 }
11739
11740+extern void gr_handle_kernel_exploit(void);
11741+
11742 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11743 {
11744 static int die_counter;
11745@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11746 while (rw &&
11747 count++ < 30 &&
11748 kstack_valid(tp, (unsigned long) rw)) {
11749- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11750+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11751 (void *) rw->ins[7]);
11752
11753 rw = kernel_stack_up(rw);
11754@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11755 }
11756 user_instruction_dump ((unsigned int __user *) regs->tpc);
11757 }
11758- if (regs->tstate & TSTATE_PRIV)
11759+ if (regs->tstate & TSTATE_PRIV) {
11760+ gr_handle_kernel_exploit();
11761 do_exit(SIGKILL);
11762+ }
11763 do_exit(SIGSEGV);
11764 }
11765 EXPORT_SYMBOL(die_if_kernel);
11766diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11767index 62098a8..547ab2c 100644
11768--- a/arch/sparc/kernel/unaligned_64.c
11769+++ b/arch/sparc/kernel/unaligned_64.c
11770@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11771 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11772
11773 if (__ratelimit(&ratelimit)) {
11774- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11775+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11776 regs->tpc, (void *) regs->tpc);
11777 }
11778 }
11779diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11780index 3269b02..64f5231 100644
11781--- a/arch/sparc/lib/Makefile
11782+++ b/arch/sparc/lib/Makefile
11783@@ -2,7 +2,7 @@
11784 #
11785
11786 asflags-y := -ansi -DST_DIV0=0x02
11787-ccflags-y := -Werror
11788+#ccflags-y := -Werror
11789
11790 lib-$(CONFIG_SPARC32) += ashrdi3.o
11791 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11792diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11793index 85c233d..68500e0 100644
11794--- a/arch/sparc/lib/atomic_64.S
11795+++ b/arch/sparc/lib/atomic_64.S
11796@@ -17,7 +17,12 @@
11797 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11798 BACKOFF_SETUP(%o2)
11799 1: lduw [%o1], %g1
11800- add %g1, %o0, %g7
11801+ addcc %g1, %o0, %g7
11802+
11803+#ifdef CONFIG_PAX_REFCOUNT
11804+ tvs %icc, 6
11805+#endif
11806+
11807 cas [%o1], %g1, %g7
11808 cmp %g1, %g7
11809 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11810@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11811 2: BACKOFF_SPIN(%o2, %o3, 1b)
11812 ENDPROC(atomic_add)
11813
11814+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11815+ BACKOFF_SETUP(%o2)
11816+1: lduw [%o1], %g1
11817+ add %g1, %o0, %g7
11818+ cas [%o1], %g1, %g7
11819+ cmp %g1, %g7
11820+ bne,pn %icc, 2f
11821+ nop
11822+ retl
11823+ nop
11824+2: BACKOFF_SPIN(%o2, %o3, 1b)
11825+ENDPROC(atomic_add_unchecked)
11826+
11827 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11828 BACKOFF_SETUP(%o2)
11829 1: lduw [%o1], %g1
11830- sub %g1, %o0, %g7
11831+ subcc %g1, %o0, %g7
11832+
11833+#ifdef CONFIG_PAX_REFCOUNT
11834+ tvs %icc, 6
11835+#endif
11836+
11837 cas [%o1], %g1, %g7
11838 cmp %g1, %g7
11839 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11840@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11841 2: BACKOFF_SPIN(%o2, %o3, 1b)
11842 ENDPROC(atomic_sub)
11843
11844+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11845+ BACKOFF_SETUP(%o2)
11846+1: lduw [%o1], %g1
11847+ sub %g1, %o0, %g7
11848+ cas [%o1], %g1, %g7
11849+ cmp %g1, %g7
11850+ bne,pn %icc, 2f
11851+ nop
11852+ retl
11853+ nop
11854+2: BACKOFF_SPIN(%o2, %o3, 1b)
11855+ENDPROC(atomic_sub_unchecked)
11856+
11857 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11858 BACKOFF_SETUP(%o2)
11859 1: lduw [%o1], %g1
11860- add %g1, %o0, %g7
11861+ addcc %g1, %o0, %g7
11862+
11863+#ifdef CONFIG_PAX_REFCOUNT
11864+ tvs %icc, 6
11865+#endif
11866+
11867 cas [%o1], %g1, %g7
11868 cmp %g1, %g7
11869 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11870@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11871 2: BACKOFF_SPIN(%o2, %o3, 1b)
11872 ENDPROC(atomic_add_ret)
11873
11874+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11875+ BACKOFF_SETUP(%o2)
11876+1: lduw [%o1], %g1
11877+ addcc %g1, %o0, %g7
11878+ cas [%o1], %g1, %g7
11879+ cmp %g1, %g7
11880+ bne,pn %icc, 2f
11881+ add %g7, %o0, %g7
11882+ sra %g7, 0, %o0
11883+ retl
11884+ nop
11885+2: BACKOFF_SPIN(%o2, %o3, 1b)
11886+ENDPROC(atomic_add_ret_unchecked)
11887+
11888 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11889 BACKOFF_SETUP(%o2)
11890 1: lduw [%o1], %g1
11891- sub %g1, %o0, %g7
11892+ subcc %g1, %o0, %g7
11893+
11894+#ifdef CONFIG_PAX_REFCOUNT
11895+ tvs %icc, 6
11896+#endif
11897+
11898 cas [%o1], %g1, %g7
11899 cmp %g1, %g7
11900 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11901@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
11902 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11903 BACKOFF_SETUP(%o2)
11904 1: ldx [%o1], %g1
11905- add %g1, %o0, %g7
11906+ addcc %g1, %o0, %g7
11907+
11908+#ifdef CONFIG_PAX_REFCOUNT
11909+ tvs %xcc, 6
11910+#endif
11911+
11912 casx [%o1], %g1, %g7
11913 cmp %g1, %g7
11914 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11915@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11916 2: BACKOFF_SPIN(%o2, %o3, 1b)
11917 ENDPROC(atomic64_add)
11918
11919+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11920+ BACKOFF_SETUP(%o2)
11921+1: ldx [%o1], %g1
11922+ addcc %g1, %o0, %g7
11923+ casx [%o1], %g1, %g7
11924+ cmp %g1, %g7
11925+ bne,pn %xcc, 2f
11926+ nop
11927+ retl
11928+ nop
11929+2: BACKOFF_SPIN(%o2, %o3, 1b)
11930+ENDPROC(atomic64_add_unchecked)
11931+
11932 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11933 BACKOFF_SETUP(%o2)
11934 1: ldx [%o1], %g1
11935- sub %g1, %o0, %g7
11936+ subcc %g1, %o0, %g7
11937+
11938+#ifdef CONFIG_PAX_REFCOUNT
11939+ tvs %xcc, 6
11940+#endif
11941+
11942 casx [%o1], %g1, %g7
11943 cmp %g1, %g7
11944 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11945@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11946 2: BACKOFF_SPIN(%o2, %o3, 1b)
11947 ENDPROC(atomic64_sub)
11948
11949+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11950+ BACKOFF_SETUP(%o2)
11951+1: ldx [%o1], %g1
11952+ subcc %g1, %o0, %g7
11953+ casx [%o1], %g1, %g7
11954+ cmp %g1, %g7
11955+ bne,pn %xcc, 2f
11956+ nop
11957+ retl
11958+ nop
11959+2: BACKOFF_SPIN(%o2, %o3, 1b)
11960+ENDPROC(atomic64_sub_unchecked)
11961+
11962 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11963 BACKOFF_SETUP(%o2)
11964 1: ldx [%o1], %g1
11965- add %g1, %o0, %g7
11966+ addcc %g1, %o0, %g7
11967+
11968+#ifdef CONFIG_PAX_REFCOUNT
11969+ tvs %xcc, 6
11970+#endif
11971+
11972 casx [%o1], %g1, %g7
11973 cmp %g1, %g7
11974 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11975@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11976 2: BACKOFF_SPIN(%o2, %o3, 1b)
11977 ENDPROC(atomic64_add_ret)
11978
11979+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11980+ BACKOFF_SETUP(%o2)
11981+1: ldx [%o1], %g1
11982+ addcc %g1, %o0, %g7
11983+ casx [%o1], %g1, %g7
11984+ cmp %g1, %g7
11985+ bne,pn %xcc, 2f
11986+ add %g7, %o0, %g7
11987+ mov %g7, %o0
11988+ retl
11989+ nop
11990+2: BACKOFF_SPIN(%o2, %o3, 1b)
11991+ENDPROC(atomic64_add_ret_unchecked)
11992+
11993 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11994 BACKOFF_SETUP(%o2)
11995 1: ldx [%o1], %g1
11996- sub %g1, %o0, %g7
11997+ subcc %g1, %o0, %g7
11998+
11999+#ifdef CONFIG_PAX_REFCOUNT
12000+ tvs %xcc, 6
12001+#endif
12002+
12003 casx [%o1], %g1, %g7
12004 cmp %g1, %g7
12005 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
12006diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
12007index 323335b..ed85ea2 100644
12008--- a/arch/sparc/lib/ksyms.c
12009+++ b/arch/sparc/lib/ksyms.c
12010@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
12011
12012 /* Atomic counter implementation. */
12013 EXPORT_SYMBOL(atomic_add);
12014+EXPORT_SYMBOL(atomic_add_unchecked);
12015 EXPORT_SYMBOL(atomic_add_ret);
12016+EXPORT_SYMBOL(atomic_add_ret_unchecked);
12017 EXPORT_SYMBOL(atomic_sub);
12018+EXPORT_SYMBOL(atomic_sub_unchecked);
12019 EXPORT_SYMBOL(atomic_sub_ret);
12020 EXPORT_SYMBOL(atomic64_add);
12021+EXPORT_SYMBOL(atomic64_add_unchecked);
12022 EXPORT_SYMBOL(atomic64_add_ret);
12023+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
12024 EXPORT_SYMBOL(atomic64_sub);
12025+EXPORT_SYMBOL(atomic64_sub_unchecked);
12026 EXPORT_SYMBOL(atomic64_sub_ret);
12027 EXPORT_SYMBOL(atomic64_dec_if_positive);
12028
12029diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
12030index 30c3ecc..736f015 100644
12031--- a/arch/sparc/mm/Makefile
12032+++ b/arch/sparc/mm/Makefile
12033@@ -2,7 +2,7 @@
12034 #
12035
12036 asflags-y := -ansi
12037-ccflags-y := -Werror
12038+#ccflags-y := -Werror
12039
12040 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
12041 obj-y += fault_$(BITS).o
12042diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
12043index 908e8c1..1524793 100644
12044--- a/arch/sparc/mm/fault_32.c
12045+++ b/arch/sparc/mm/fault_32.c
12046@@ -21,6 +21,9 @@
12047 #include <linux/perf_event.h>
12048 #include <linux/interrupt.h>
12049 #include <linux/kdebug.h>
12050+#include <linux/slab.h>
12051+#include <linux/pagemap.h>
12052+#include <linux/compiler.h>
12053
12054 #include <asm/page.h>
12055 #include <asm/pgtable.h>
12056@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
12057 return safe_compute_effective_address(regs, insn);
12058 }
12059
12060+#ifdef CONFIG_PAX_PAGEEXEC
12061+#ifdef CONFIG_PAX_DLRESOLVE
12062+static void pax_emuplt_close(struct vm_area_struct *vma)
12063+{
12064+ vma->vm_mm->call_dl_resolve = 0UL;
12065+}
12066+
12067+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
12068+{
12069+ unsigned int *kaddr;
12070+
12071+ vmf->page = alloc_page(GFP_HIGHUSER);
12072+ if (!vmf->page)
12073+ return VM_FAULT_OOM;
12074+
12075+ kaddr = kmap(vmf->page);
12076+ memset(kaddr, 0, PAGE_SIZE);
12077+ kaddr[0] = 0x9DE3BFA8U; /* save */
12078+ flush_dcache_page(vmf->page);
12079+ kunmap(vmf->page);
12080+ return VM_FAULT_MAJOR;
12081+}
12082+
12083+static const struct vm_operations_struct pax_vm_ops = {
12084+ .close = pax_emuplt_close,
12085+ .fault = pax_emuplt_fault
12086+};
12087+
12088+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
12089+{
12090+ int ret;
12091+
12092+ INIT_LIST_HEAD(&vma->anon_vma_chain);
12093+ vma->vm_mm = current->mm;
12094+ vma->vm_start = addr;
12095+ vma->vm_end = addr + PAGE_SIZE;
12096+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
12097+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
12098+ vma->vm_ops = &pax_vm_ops;
12099+
12100+ ret = insert_vm_struct(current->mm, vma);
12101+ if (ret)
12102+ return ret;
12103+
12104+ ++current->mm->total_vm;
12105+ return 0;
12106+}
12107+#endif
12108+
12109+/*
12110+ * PaX: decide what to do with offenders (regs->pc = fault address)
12111+ *
12112+ * returns 1 when task should be killed
12113+ * 2 when patched PLT trampoline was detected
12114+ * 3 when unpatched PLT trampoline was detected
12115+ */
12116+static int pax_handle_fetch_fault(struct pt_regs *regs)
12117+{
12118+
12119+#ifdef CONFIG_PAX_EMUPLT
12120+ int err;
12121+
12122+ do { /* PaX: patched PLT emulation #1 */
12123+ unsigned int sethi1, sethi2, jmpl;
12124+
12125+ err = get_user(sethi1, (unsigned int *)regs->pc);
12126+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
12127+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
12128+
12129+ if (err)
12130+ break;
12131+
12132+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
12133+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
12134+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
12135+ {
12136+ unsigned int addr;
12137+
12138+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
12139+ addr = regs->u_regs[UREG_G1];
12140+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12141+ regs->pc = addr;
12142+ regs->npc = addr+4;
12143+ return 2;
12144+ }
12145+ } while (0);
12146+
12147+ do { /* PaX: patched PLT emulation #2 */
12148+ unsigned int ba;
12149+
12150+ err = get_user(ba, (unsigned int *)regs->pc);
12151+
12152+ if (err)
12153+ break;
12154+
12155+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12156+ unsigned int addr;
12157+
12158+ if ((ba & 0xFFC00000U) == 0x30800000U)
12159+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
12160+ else
12161+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12162+ regs->pc = addr;
12163+ regs->npc = addr+4;
12164+ return 2;
12165+ }
12166+ } while (0);
12167+
12168+ do { /* PaX: patched PLT emulation #3 */
12169+ unsigned int sethi, bajmpl, nop;
12170+
12171+ err = get_user(sethi, (unsigned int *)regs->pc);
12172+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
12173+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
12174+
12175+ if (err)
12176+ break;
12177+
12178+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12179+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12180+ nop == 0x01000000U)
12181+ {
12182+ unsigned int addr;
12183+
12184+ addr = (sethi & 0x003FFFFFU) << 10;
12185+ regs->u_regs[UREG_G1] = addr;
12186+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12187+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12188+ else
12189+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12190+ regs->pc = addr;
12191+ regs->npc = addr+4;
12192+ return 2;
12193+ }
12194+ } while (0);
12195+
12196+ do { /* PaX: unpatched PLT emulation step 1 */
12197+ unsigned int sethi, ba, nop;
12198+
12199+ err = get_user(sethi, (unsigned int *)regs->pc);
12200+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
12201+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
12202+
12203+ if (err)
12204+ break;
12205+
12206+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12207+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12208+ nop == 0x01000000U)
12209+ {
12210+ unsigned int addr, save, call;
12211+
12212+ if ((ba & 0xFFC00000U) == 0x30800000U)
12213+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
12214+ else
12215+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12216+
12217+ err = get_user(save, (unsigned int *)addr);
12218+ err |= get_user(call, (unsigned int *)(addr+4));
12219+ err |= get_user(nop, (unsigned int *)(addr+8));
12220+ if (err)
12221+ break;
12222+
12223+#ifdef CONFIG_PAX_DLRESOLVE
12224+ if (save == 0x9DE3BFA8U &&
12225+ (call & 0xC0000000U) == 0x40000000U &&
12226+ nop == 0x01000000U)
12227+ {
12228+ struct vm_area_struct *vma;
12229+ unsigned long call_dl_resolve;
12230+
12231+ down_read(&current->mm->mmap_sem);
12232+ call_dl_resolve = current->mm->call_dl_resolve;
12233+ up_read(&current->mm->mmap_sem);
12234+ if (likely(call_dl_resolve))
12235+ goto emulate;
12236+
12237+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12238+
12239+ down_write(&current->mm->mmap_sem);
12240+ if (current->mm->call_dl_resolve) {
12241+ call_dl_resolve = current->mm->call_dl_resolve;
12242+ up_write(&current->mm->mmap_sem);
12243+ if (vma)
12244+ kmem_cache_free(vm_area_cachep, vma);
12245+ goto emulate;
12246+ }
12247+
12248+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12249+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12250+ up_write(&current->mm->mmap_sem);
12251+ if (vma)
12252+ kmem_cache_free(vm_area_cachep, vma);
12253+ return 1;
12254+ }
12255+
12256+ if (pax_insert_vma(vma, call_dl_resolve)) {
12257+ up_write(&current->mm->mmap_sem);
12258+ kmem_cache_free(vm_area_cachep, vma);
12259+ return 1;
12260+ }
12261+
12262+ current->mm->call_dl_resolve = call_dl_resolve;
12263+ up_write(&current->mm->mmap_sem);
12264+
12265+emulate:
12266+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12267+ regs->pc = call_dl_resolve;
12268+ regs->npc = addr+4;
12269+ return 3;
12270+ }
12271+#endif
12272+
12273+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12274+ if ((save & 0xFFC00000U) == 0x05000000U &&
12275+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12276+ nop == 0x01000000U)
12277+ {
12278+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12279+ regs->u_regs[UREG_G2] = addr + 4;
12280+ addr = (save & 0x003FFFFFU) << 10;
12281+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12282+ regs->pc = addr;
12283+ regs->npc = addr+4;
12284+ return 3;
12285+ }
12286+ }
12287+ } while (0);
12288+
12289+ do { /* PaX: unpatched PLT emulation step 2 */
12290+ unsigned int save, call, nop;
12291+
12292+ err = get_user(save, (unsigned int *)(regs->pc-4));
12293+ err |= get_user(call, (unsigned int *)regs->pc);
12294+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
12295+ if (err)
12296+ break;
12297+
12298+ if (save == 0x9DE3BFA8U &&
12299+ (call & 0xC0000000U) == 0x40000000U &&
12300+ nop == 0x01000000U)
12301+ {
12302+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
12303+
12304+ regs->u_regs[UREG_RETPC] = regs->pc;
12305+ regs->pc = dl_resolve;
12306+ regs->npc = dl_resolve+4;
12307+ return 3;
12308+ }
12309+ } while (0);
12310+#endif
12311+
12312+ return 1;
12313+}
12314+
12315+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12316+{
12317+ unsigned long i;
12318+
12319+ printk(KERN_ERR "PAX: bytes at PC: ");
12320+ for (i = 0; i < 8; i++) {
12321+ unsigned int c;
12322+ if (get_user(c, (unsigned int *)pc+i))
12323+ printk(KERN_CONT "???????? ");
12324+ else
12325+ printk(KERN_CONT "%08x ", c);
12326+ }
12327+ printk("\n");
12328+}
12329+#endif
12330+
12331 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
12332 int text_fault)
12333 {
12334@@ -226,6 +500,24 @@ good_area:
12335 if (!(vma->vm_flags & VM_WRITE))
12336 goto bad_area;
12337 } else {
12338+
12339+#ifdef CONFIG_PAX_PAGEEXEC
12340+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
12341+ up_read(&mm->mmap_sem);
12342+ switch (pax_handle_fetch_fault(regs)) {
12343+
12344+#ifdef CONFIG_PAX_EMUPLT
12345+ case 2:
12346+ case 3:
12347+ return;
12348+#endif
12349+
12350+ }
12351+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
12352+ do_group_exit(SIGKILL);
12353+ }
12354+#endif
12355+
12356 /* Allow reads even for write-only mappings */
12357 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
12358 goto bad_area;
12359diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
12360index 18fcd71..e4fe821 100644
12361--- a/arch/sparc/mm/fault_64.c
12362+++ b/arch/sparc/mm/fault_64.c
12363@@ -22,6 +22,9 @@
12364 #include <linux/kdebug.h>
12365 #include <linux/percpu.h>
12366 #include <linux/context_tracking.h>
12367+#include <linux/slab.h>
12368+#include <linux/pagemap.h>
12369+#include <linux/compiler.h>
12370
12371 #include <asm/page.h>
12372 #include <asm/pgtable.h>
12373@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
12374 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
12375 regs->tpc);
12376 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
12377- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
12378+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
12379 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
12380 dump_stack();
12381 unhandled_fault(regs->tpc, current, regs);
12382@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
12383 show_regs(regs);
12384 }
12385
12386+#ifdef CONFIG_PAX_PAGEEXEC
12387+#ifdef CONFIG_PAX_DLRESOLVE
12388+static void pax_emuplt_close(struct vm_area_struct *vma)
12389+{
12390+ vma->vm_mm->call_dl_resolve = 0UL;
12391+}
12392+
12393+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
12394+{
12395+ unsigned int *kaddr;
12396+
12397+ vmf->page = alloc_page(GFP_HIGHUSER);
12398+ if (!vmf->page)
12399+ return VM_FAULT_OOM;
12400+
12401+ kaddr = kmap(vmf->page);
12402+ memset(kaddr, 0, PAGE_SIZE);
12403+ kaddr[0] = 0x9DE3BFA8U; /* save */
12404+ flush_dcache_page(vmf->page);
12405+ kunmap(vmf->page);
12406+ return VM_FAULT_MAJOR;
12407+}
12408+
12409+static const struct vm_operations_struct pax_vm_ops = {
12410+ .close = pax_emuplt_close,
12411+ .fault = pax_emuplt_fault
12412+};
12413+
12414+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
12415+{
12416+ int ret;
12417+
12418+ INIT_LIST_HEAD(&vma->anon_vma_chain);
12419+ vma->vm_mm = current->mm;
12420+ vma->vm_start = addr;
12421+ vma->vm_end = addr + PAGE_SIZE;
12422+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
12423+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
12424+ vma->vm_ops = &pax_vm_ops;
12425+
12426+ ret = insert_vm_struct(current->mm, vma);
12427+ if (ret)
12428+ return ret;
12429+
12430+ ++current->mm->total_vm;
12431+ return 0;
12432+}
12433+#endif
12434+
12435+/*
12436+ * PaX: decide what to do with offenders (regs->tpc = fault address)
12437+ *
12438+ * returns 1 when task should be killed
12439+ * 2 when patched PLT trampoline was detected
12440+ * 3 when unpatched PLT trampoline was detected
12441+ */
12442+static int pax_handle_fetch_fault(struct pt_regs *regs)
12443+{
12444+
12445+#ifdef CONFIG_PAX_EMUPLT
12446+ int err;
12447+
12448+ do { /* PaX: patched PLT emulation #1 */
12449+ unsigned int sethi1, sethi2, jmpl;
12450+
12451+ err = get_user(sethi1, (unsigned int *)regs->tpc);
12452+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
12453+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
12454+
12455+ if (err)
12456+ break;
12457+
12458+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
12459+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
12460+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
12461+ {
12462+ unsigned long addr;
12463+
12464+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
12465+ addr = regs->u_regs[UREG_G1];
12466+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12467+
12468+ if (test_thread_flag(TIF_32BIT))
12469+ addr &= 0xFFFFFFFFUL;
12470+
12471+ regs->tpc = addr;
12472+ regs->tnpc = addr+4;
12473+ return 2;
12474+ }
12475+ } while (0);
12476+
12477+ do { /* PaX: patched PLT emulation #2 */
12478+ unsigned int ba;
12479+
12480+ err = get_user(ba, (unsigned int *)regs->tpc);
12481+
12482+ if (err)
12483+ break;
12484+
12485+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12486+ unsigned long addr;
12487+
12488+ if ((ba & 0xFFC00000U) == 0x30800000U)
12489+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12490+ else
12491+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12492+
12493+ if (test_thread_flag(TIF_32BIT))
12494+ addr &= 0xFFFFFFFFUL;
12495+
12496+ regs->tpc = addr;
12497+ regs->tnpc = addr+4;
12498+ return 2;
12499+ }
12500+ } while (0);
12501+
12502+ do { /* PaX: patched PLT emulation #3 */
12503+ unsigned int sethi, bajmpl, nop;
12504+
12505+ err = get_user(sethi, (unsigned int *)regs->tpc);
12506+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
12507+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12508+
12509+ if (err)
12510+ break;
12511+
12512+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12513+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12514+ nop == 0x01000000U)
12515+ {
12516+ unsigned long addr;
12517+
12518+ addr = (sethi & 0x003FFFFFU) << 10;
12519+ regs->u_regs[UREG_G1] = addr;
12520+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12521+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12522+ else
12523+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12524+
12525+ if (test_thread_flag(TIF_32BIT))
12526+ addr &= 0xFFFFFFFFUL;
12527+
12528+ regs->tpc = addr;
12529+ regs->tnpc = addr+4;
12530+ return 2;
12531+ }
12532+ } while (0);
12533+
12534+ do { /* PaX: patched PLT emulation #4 */
12535+ unsigned int sethi, mov1, call, mov2;
12536+
12537+ err = get_user(sethi, (unsigned int *)regs->tpc);
12538+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
12539+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
12540+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
12541+
12542+ if (err)
12543+ break;
12544+
12545+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12546+ mov1 == 0x8210000FU &&
12547+ (call & 0xC0000000U) == 0x40000000U &&
12548+ mov2 == 0x9E100001U)
12549+ {
12550+ unsigned long addr;
12551+
12552+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
12553+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12554+
12555+ if (test_thread_flag(TIF_32BIT))
12556+ addr &= 0xFFFFFFFFUL;
12557+
12558+ regs->tpc = addr;
12559+ regs->tnpc = addr+4;
12560+ return 2;
12561+ }
12562+ } while (0);
12563+
12564+ do { /* PaX: patched PLT emulation #5 */
12565+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
12566+
12567+ err = get_user(sethi, (unsigned int *)regs->tpc);
12568+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12569+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12570+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
12571+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
12572+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
12573+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
12574+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
12575+
12576+ if (err)
12577+ break;
12578+
12579+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12580+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12581+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12582+ (or1 & 0xFFFFE000U) == 0x82106000U &&
12583+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12584+ sllx == 0x83287020U &&
12585+ jmpl == 0x81C04005U &&
12586+ nop == 0x01000000U)
12587+ {
12588+ unsigned long addr;
12589+
12590+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12591+ regs->u_regs[UREG_G1] <<= 32;
12592+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12593+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12594+ regs->tpc = addr;
12595+ regs->tnpc = addr+4;
12596+ return 2;
12597+ }
12598+ } while (0);
12599+
12600+ do { /* PaX: patched PLT emulation #6 */
12601+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
12602+
12603+ err = get_user(sethi, (unsigned int *)regs->tpc);
12604+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12605+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12606+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
12607+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
12608+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
12609+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
12610+
12611+ if (err)
12612+ break;
12613+
12614+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12615+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12616+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12617+ sllx == 0x83287020U &&
12618+ (or & 0xFFFFE000U) == 0x8A116000U &&
12619+ jmpl == 0x81C04005U &&
12620+ nop == 0x01000000U)
12621+ {
12622+ unsigned long addr;
12623+
12624+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
12625+ regs->u_regs[UREG_G1] <<= 32;
12626+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
12627+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12628+ regs->tpc = addr;
12629+ regs->tnpc = addr+4;
12630+ return 2;
12631+ }
12632+ } while (0);
12633+
12634+ do { /* PaX: unpatched PLT emulation step 1 */
12635+ unsigned int sethi, ba, nop;
12636+
12637+ err = get_user(sethi, (unsigned int *)regs->tpc);
12638+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12639+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12640+
12641+ if (err)
12642+ break;
12643+
12644+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12645+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12646+ nop == 0x01000000U)
12647+ {
12648+ unsigned long addr;
12649+ unsigned int save, call;
12650+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
12651+
12652+ if ((ba & 0xFFC00000U) == 0x30800000U)
12653+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12654+ else
12655+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12656+
12657+ if (test_thread_flag(TIF_32BIT))
12658+ addr &= 0xFFFFFFFFUL;
12659+
12660+ err = get_user(save, (unsigned int *)addr);
12661+ err |= get_user(call, (unsigned int *)(addr+4));
12662+ err |= get_user(nop, (unsigned int *)(addr+8));
12663+ if (err)
12664+ break;
12665+
12666+#ifdef CONFIG_PAX_DLRESOLVE
12667+ if (save == 0x9DE3BFA8U &&
12668+ (call & 0xC0000000U) == 0x40000000U &&
12669+ nop == 0x01000000U)
12670+ {
12671+ struct vm_area_struct *vma;
12672+ unsigned long call_dl_resolve;
12673+
12674+ down_read(&current->mm->mmap_sem);
12675+ call_dl_resolve = current->mm->call_dl_resolve;
12676+ up_read(&current->mm->mmap_sem);
12677+ if (likely(call_dl_resolve))
12678+ goto emulate;
12679+
12680+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12681+
12682+ down_write(&current->mm->mmap_sem);
12683+ if (current->mm->call_dl_resolve) {
12684+ call_dl_resolve = current->mm->call_dl_resolve;
12685+ up_write(&current->mm->mmap_sem);
12686+ if (vma)
12687+ kmem_cache_free(vm_area_cachep, vma);
12688+ goto emulate;
12689+ }
12690+
12691+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12692+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12693+ up_write(&current->mm->mmap_sem);
12694+ if (vma)
12695+ kmem_cache_free(vm_area_cachep, vma);
12696+ return 1;
12697+ }
12698+
12699+ if (pax_insert_vma(vma, call_dl_resolve)) {
12700+ up_write(&current->mm->mmap_sem);
12701+ kmem_cache_free(vm_area_cachep, vma);
12702+ return 1;
12703+ }
12704+
12705+ current->mm->call_dl_resolve = call_dl_resolve;
12706+ up_write(&current->mm->mmap_sem);
12707+
12708+emulate:
12709+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12710+ regs->tpc = call_dl_resolve;
12711+ regs->tnpc = addr+4;
12712+ return 3;
12713+ }
12714+#endif
12715+
12716+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12717+ if ((save & 0xFFC00000U) == 0x05000000U &&
12718+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12719+ nop == 0x01000000U)
12720+ {
12721+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12722+ regs->u_regs[UREG_G2] = addr + 4;
12723+ addr = (save & 0x003FFFFFU) << 10;
12724+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12725+
12726+ if (test_thread_flag(TIF_32BIT))
12727+ addr &= 0xFFFFFFFFUL;
12728+
12729+ regs->tpc = addr;
12730+ regs->tnpc = addr+4;
12731+ return 3;
12732+ }
12733+
12734+ /* PaX: 64-bit PLT stub */
12735+ err = get_user(sethi1, (unsigned int *)addr);
12736+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12737+ err |= get_user(or1, (unsigned int *)(addr+8));
12738+ err |= get_user(or2, (unsigned int *)(addr+12));
12739+ err |= get_user(sllx, (unsigned int *)(addr+16));
12740+ err |= get_user(add, (unsigned int *)(addr+20));
12741+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12742+ err |= get_user(nop, (unsigned int *)(addr+28));
12743+ if (err)
12744+ break;
12745+
12746+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12747+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12748+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12749+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12750+ sllx == 0x89293020U &&
12751+ add == 0x8A010005U &&
12752+ jmpl == 0x89C14000U &&
12753+ nop == 0x01000000U)
12754+ {
12755+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12756+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12757+ regs->u_regs[UREG_G4] <<= 32;
12758+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12759+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12760+ regs->u_regs[UREG_G4] = addr + 24;
12761+ addr = regs->u_regs[UREG_G5];
12762+ regs->tpc = addr;
12763+ regs->tnpc = addr+4;
12764+ return 3;
12765+ }
12766+ }
12767+ } while (0);
12768+
12769+#ifdef CONFIG_PAX_DLRESOLVE
12770+ do { /* PaX: unpatched PLT emulation step 2 */
12771+ unsigned int save, call, nop;
12772+
12773+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12774+ err |= get_user(call, (unsigned int *)regs->tpc);
12775+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12776+ if (err)
12777+ break;
12778+
12779+ if (save == 0x9DE3BFA8U &&
12780+ (call & 0xC0000000U) == 0x40000000U &&
12781+ nop == 0x01000000U)
12782+ {
12783+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12784+
12785+ if (test_thread_flag(TIF_32BIT))
12786+ dl_resolve &= 0xFFFFFFFFUL;
12787+
12788+ regs->u_regs[UREG_RETPC] = regs->tpc;
12789+ regs->tpc = dl_resolve;
12790+ regs->tnpc = dl_resolve+4;
12791+ return 3;
12792+ }
12793+ } while (0);
12794+#endif
12795+
12796+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12797+ unsigned int sethi, ba, nop;
12798+
12799+ err = get_user(sethi, (unsigned int *)regs->tpc);
12800+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12801+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12802+
12803+ if (err)
12804+ break;
12805+
12806+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12807+ (ba & 0xFFF00000U) == 0x30600000U &&
12808+ nop == 0x01000000U)
12809+ {
12810+ unsigned long addr;
12811+
12812+ addr = (sethi & 0x003FFFFFU) << 10;
12813+ regs->u_regs[UREG_G1] = addr;
12814+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12815+
12816+ if (test_thread_flag(TIF_32BIT))
12817+ addr &= 0xFFFFFFFFUL;
12818+
12819+ regs->tpc = addr;
12820+ regs->tnpc = addr+4;
12821+ return 2;
12822+ }
12823+ } while (0);
12824+
12825+#endif
12826+
12827+ return 1;
12828+}
12829+
12830+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12831+{
12832+ unsigned long i;
12833+
12834+ printk(KERN_ERR "PAX: bytes at PC: ");
12835+ for (i = 0; i < 8; i++) {
12836+ unsigned int c;
12837+ if (get_user(c, (unsigned int *)pc+i))
12838+ printk(KERN_CONT "???????? ");
12839+ else
12840+ printk(KERN_CONT "%08x ", c);
12841+ }
12842+ printk("\n");
12843+}
12844+#endif
12845+
12846 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12847 {
12848 enum ctx_state prev_state = exception_enter();
12849@@ -353,6 +816,29 @@ retry:
12850 if (!vma)
12851 goto bad_area;
12852
12853+#ifdef CONFIG_PAX_PAGEEXEC
12854+ /* PaX: detect ITLB misses on non-exec pages */
12855+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12856+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12857+ {
12858+ if (address != regs->tpc)
12859+ goto good_area;
12860+
12861+ up_read(&mm->mmap_sem);
12862+ switch (pax_handle_fetch_fault(regs)) {
12863+
12864+#ifdef CONFIG_PAX_EMUPLT
12865+ case 2:
12866+ case 3:
12867+ return;
12868+#endif
12869+
12870+ }
12871+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12872+ do_group_exit(SIGKILL);
12873+ }
12874+#endif
12875+
12876 /* Pure DTLB misses do not tell us whether the fault causing
12877 * load/store/atomic was a write or not, it only says that there
12878 * was no match. So in such a case we (carefully) read the
12879diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12880index d329537..2c3746a 100644
12881--- a/arch/sparc/mm/hugetlbpage.c
12882+++ b/arch/sparc/mm/hugetlbpage.c
12883@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12884 unsigned long addr,
12885 unsigned long len,
12886 unsigned long pgoff,
12887- unsigned long flags)
12888+ unsigned long flags,
12889+ unsigned long offset)
12890 {
12891+ struct mm_struct *mm = current->mm;
12892 unsigned long task_size = TASK_SIZE;
12893 struct vm_unmapped_area_info info;
12894
12895@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12896
12897 info.flags = 0;
12898 info.length = len;
12899- info.low_limit = TASK_UNMAPPED_BASE;
12900+ info.low_limit = mm->mmap_base;
12901 info.high_limit = min(task_size, VA_EXCLUDE_START);
12902 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12903 info.align_offset = 0;
12904+ info.threadstack_offset = offset;
12905 addr = vm_unmapped_area(&info);
12906
12907 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12908 VM_BUG_ON(addr != -ENOMEM);
12909 info.low_limit = VA_EXCLUDE_END;
12910+
12911+#ifdef CONFIG_PAX_RANDMMAP
12912+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12913+ info.low_limit += mm->delta_mmap;
12914+#endif
12915+
12916 info.high_limit = task_size;
12917 addr = vm_unmapped_area(&info);
12918 }
12919@@ -55,7 +64,8 @@ static unsigned long
12920 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12921 const unsigned long len,
12922 const unsigned long pgoff,
12923- const unsigned long flags)
12924+ const unsigned long flags,
12925+ const unsigned long offset)
12926 {
12927 struct mm_struct *mm = current->mm;
12928 unsigned long addr = addr0;
12929@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12930 info.high_limit = mm->mmap_base;
12931 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12932 info.align_offset = 0;
12933+ info.threadstack_offset = offset;
12934 addr = vm_unmapped_area(&info);
12935
12936 /*
12937@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12938 VM_BUG_ON(addr != -ENOMEM);
12939 info.flags = 0;
12940 info.low_limit = TASK_UNMAPPED_BASE;
12941+
12942+#ifdef CONFIG_PAX_RANDMMAP
12943+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12944+ info.low_limit += mm->delta_mmap;
12945+#endif
12946+
12947 info.high_limit = STACK_TOP32;
12948 addr = vm_unmapped_area(&info);
12949 }
12950@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12951 struct mm_struct *mm = current->mm;
12952 struct vm_area_struct *vma;
12953 unsigned long task_size = TASK_SIZE;
12954+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12955
12956 if (test_thread_flag(TIF_32BIT))
12957 task_size = STACK_TOP32;
12958@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12959 return addr;
12960 }
12961
12962+#ifdef CONFIG_PAX_RANDMMAP
12963+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12964+#endif
12965+
12966 if (addr) {
12967 addr = ALIGN(addr, HPAGE_SIZE);
12968 vma = find_vma(mm, addr);
12969- if (task_size - len >= addr &&
12970- (!vma || addr + len <= vma->vm_start))
12971+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12972 return addr;
12973 }
12974 if (mm->get_unmapped_area == arch_get_unmapped_area)
12975 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12976- pgoff, flags);
12977+ pgoff, flags, offset);
12978 else
12979 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12980- pgoff, flags);
12981+ pgoff, flags, offset);
12982 }
12983
12984 pte_t *huge_pte_alloc(struct mm_struct *mm,
12985diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12986index 04bc826..0fefab9 100644
12987--- a/arch/sparc/mm/init_64.c
12988+++ b/arch/sparc/mm/init_64.c
12989@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12990 int num_kernel_image_mappings;
12991
12992 #ifdef CONFIG_DEBUG_DCFLUSH
12993-atomic_t dcpage_flushes = ATOMIC_INIT(0);
12994+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
12995 #ifdef CONFIG_SMP
12996-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12997+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
12998 #endif
12999 #endif
13000
13001@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
13002 {
13003 BUG_ON(tlb_type == hypervisor);
13004 #ifdef CONFIG_DEBUG_DCFLUSH
13005- atomic_inc(&dcpage_flushes);
13006+ atomic_inc_unchecked(&dcpage_flushes);
13007 #endif
13008
13009 #ifdef DCACHE_ALIASING_POSSIBLE
13010@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
13011
13012 #ifdef CONFIG_DEBUG_DCFLUSH
13013 seq_printf(m, "DCPageFlushes\t: %d\n",
13014- atomic_read(&dcpage_flushes));
13015+ atomic_read_unchecked(&dcpage_flushes));
13016 #ifdef CONFIG_SMP
13017 seq_printf(m, "DCPageFlushesXC\t: %d\n",
13018- atomic_read(&dcpage_flushes_xcall));
13019+ atomic_read_unchecked(&dcpage_flushes_xcall));
13020 #endif /* CONFIG_SMP */
13021 #endif /* CONFIG_DEBUG_DCFLUSH */
13022 }
13023diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
13024index ece4af0..f04b862 100644
13025--- a/arch/sparc/net/bpf_jit_comp.c
13026+++ b/arch/sparc/net/bpf_jit_comp.c
13027@@ -823,5 +823,6 @@ void bpf_jit_free(struct bpf_prog *fp)
13028 {
13029 if (fp->jited)
13030 module_free(NULL, fp->bpf_func);
13031- kfree(fp);
13032+
13033+ bpf_prog_unlock_free(fp);
13034 }
13035diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
13036index 7fcd492..1311074 100644
13037--- a/arch/tile/Kconfig
13038+++ b/arch/tile/Kconfig
13039@@ -191,6 +191,7 @@ source "kernel/Kconfig.hz"
13040
13041 config KEXEC
13042 bool "kexec system call"
13043+ depends on !GRKERNSEC_KMEM
13044 ---help---
13045 kexec is a system call that implements the ability to shutdown your
13046 current kernel, and to start another kernel. It is like a reboot
13047diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
13048index 7b11c5f..755a026 100644
13049--- a/arch/tile/include/asm/atomic_64.h
13050+++ b/arch/tile/include/asm/atomic_64.h
13051@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
13052
13053 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
13054
13055+#define atomic64_read_unchecked(v) atomic64_read(v)
13056+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
13057+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
13058+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
13059+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
13060+#define atomic64_inc_unchecked(v) atomic64_inc(v)
13061+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
13062+#define atomic64_dec_unchecked(v) atomic64_dec(v)
13063+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
13064+
13065 /* Define this to indicate that cmpxchg is an efficient operation. */
13066 #define __HAVE_ARCH_CMPXCHG
13067
13068diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
13069index 6160761..00cac88 100644
13070--- a/arch/tile/include/asm/cache.h
13071+++ b/arch/tile/include/asm/cache.h
13072@@ -15,11 +15,12 @@
13073 #ifndef _ASM_TILE_CACHE_H
13074 #define _ASM_TILE_CACHE_H
13075
13076+#include <linux/const.h>
13077 #include <arch/chip.h>
13078
13079 /* bytes per L1 data cache line */
13080 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
13081-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13082+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13083
13084 /* bytes per L2 cache line */
13085 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
13086diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
13087index b6cde32..c0cb736 100644
13088--- a/arch/tile/include/asm/uaccess.h
13089+++ b/arch/tile/include/asm/uaccess.h
13090@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
13091 const void __user *from,
13092 unsigned long n)
13093 {
13094- int sz = __compiletime_object_size(to);
13095+ size_t sz = __compiletime_object_size(to);
13096
13097- if (likely(sz == -1 || sz >= n))
13098+ if (likely(sz == (size_t)-1 || sz >= n))
13099 n = _copy_from_user(to, from, n);
13100 else
13101 copy_from_user_overflow();
13102diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
13103index e514899..f8743c4 100644
13104--- a/arch/tile/mm/hugetlbpage.c
13105+++ b/arch/tile/mm/hugetlbpage.c
13106@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
13107 info.high_limit = TASK_SIZE;
13108 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
13109 info.align_offset = 0;
13110+ info.threadstack_offset = 0;
13111 return vm_unmapped_area(&info);
13112 }
13113
13114@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
13115 info.high_limit = current->mm->mmap_base;
13116 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
13117 info.align_offset = 0;
13118+ info.threadstack_offset = 0;
13119 addr = vm_unmapped_area(&info);
13120
13121 /*
13122diff --git a/arch/um/Makefile b/arch/um/Makefile
13123index e4b1a96..16162f8 100644
13124--- a/arch/um/Makefile
13125+++ b/arch/um/Makefile
13126@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
13127 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
13128 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
13129
13130+ifdef CONSTIFY_PLUGIN
13131+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13132+endif
13133+
13134 #This will adjust *FLAGS accordingly to the platform.
13135 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
13136
13137diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
13138index 19e1bdd..3665b77 100644
13139--- a/arch/um/include/asm/cache.h
13140+++ b/arch/um/include/asm/cache.h
13141@@ -1,6 +1,7 @@
13142 #ifndef __UM_CACHE_H
13143 #define __UM_CACHE_H
13144
13145+#include <linux/const.h>
13146
13147 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
13148 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
13149@@ -12,6 +13,6 @@
13150 # define L1_CACHE_SHIFT 5
13151 #endif
13152
13153-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13154+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13155
13156 #endif
13157diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
13158index 2e0a6b1..a64d0f5 100644
13159--- a/arch/um/include/asm/kmap_types.h
13160+++ b/arch/um/include/asm/kmap_types.h
13161@@ -8,6 +8,6 @@
13162
13163 /* No more #include "asm/arch/kmap_types.h" ! */
13164
13165-#define KM_TYPE_NR 14
13166+#define KM_TYPE_NR 15
13167
13168 #endif
13169diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
13170index 71c5d13..4c7b9f1 100644
13171--- a/arch/um/include/asm/page.h
13172+++ b/arch/um/include/asm/page.h
13173@@ -14,6 +14,9 @@
13174 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
13175 #define PAGE_MASK (~(PAGE_SIZE-1))
13176
13177+#define ktla_ktva(addr) (addr)
13178+#define ktva_ktla(addr) (addr)
13179+
13180 #ifndef __ASSEMBLY__
13181
13182 struct page;
13183diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
13184index 0032f92..cd151e0 100644
13185--- a/arch/um/include/asm/pgtable-3level.h
13186+++ b/arch/um/include/asm/pgtable-3level.h
13187@@ -58,6 +58,7 @@
13188 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
13189 #define pud_populate(mm, pud, pmd) \
13190 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
13191+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
13192
13193 #ifdef CONFIG_64BIT
13194 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
13195diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
13196index f17bca8..48adb87 100644
13197--- a/arch/um/kernel/process.c
13198+++ b/arch/um/kernel/process.c
13199@@ -356,22 +356,6 @@ int singlestepping(void * t)
13200 return 2;
13201 }
13202
13203-/*
13204- * Only x86 and x86_64 have an arch_align_stack().
13205- * All other arches have "#define arch_align_stack(x) (x)"
13206- * in their asm/exec.h
13207- * As this is included in UML from asm-um/system-generic.h,
13208- * we can use it to behave as the subarch does.
13209- */
13210-#ifndef arch_align_stack
13211-unsigned long arch_align_stack(unsigned long sp)
13212-{
13213- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
13214- sp -= get_random_int() % 8192;
13215- return sp & ~0xf;
13216-}
13217-#endif
13218-
13219 unsigned long get_wchan(struct task_struct *p)
13220 {
13221 unsigned long stack_page, sp, ip;
13222diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
13223index ad8f795..2c7eec6 100644
13224--- a/arch/unicore32/include/asm/cache.h
13225+++ b/arch/unicore32/include/asm/cache.h
13226@@ -12,8 +12,10 @@
13227 #ifndef __UNICORE_CACHE_H__
13228 #define __UNICORE_CACHE_H__
13229
13230-#define L1_CACHE_SHIFT (5)
13231-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13232+#include <linux/const.h>
13233+
13234+#define L1_CACHE_SHIFT 5
13235+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13236
13237 /*
13238 * Memory returned by kmalloc() may be used for DMA, so we must make
13239diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
13240index 3632743..630a8bb 100644
13241--- a/arch/x86/Kconfig
13242+++ b/arch/x86/Kconfig
13243@@ -130,7 +130,7 @@ config X86
13244 select RTC_LIB
13245 select HAVE_DEBUG_STACKOVERFLOW
13246 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
13247- select HAVE_CC_STACKPROTECTOR
13248+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
13249 select GENERIC_CPU_AUTOPROBE
13250 select HAVE_ARCH_AUDITSYSCALL
13251 select ARCH_SUPPORTS_ATOMIC_RMW
13252@@ -258,7 +258,7 @@ config X86_HT
13253
13254 config X86_32_LAZY_GS
13255 def_bool y
13256- depends on X86_32 && !CC_STACKPROTECTOR
13257+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
13258
13259 config ARCH_HWEIGHT_CFLAGS
13260 string
13261@@ -555,6 +555,7 @@ config SCHED_OMIT_FRAME_POINTER
13262
13263 menuconfig HYPERVISOR_GUEST
13264 bool "Linux guest support"
13265+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
13266 ---help---
13267 Say Y here to enable options for running Linux under various hyper-
13268 visors. This option enables basic hypervisor detection and platform
13269@@ -1083,6 +1084,7 @@ choice
13270
13271 config NOHIGHMEM
13272 bool "off"
13273+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
13274 ---help---
13275 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
13276 However, the address space of 32-bit x86 processors is only 4
13277@@ -1119,6 +1121,7 @@ config NOHIGHMEM
13278
13279 config HIGHMEM4G
13280 bool "4GB"
13281+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
13282 ---help---
13283 Select this if you have a 32-bit processor and between 1 and 4
13284 gigabytes of physical RAM.
13285@@ -1171,7 +1174,7 @@ config PAGE_OFFSET
13286 hex
13287 default 0xB0000000 if VMSPLIT_3G_OPT
13288 default 0x80000000 if VMSPLIT_2G
13289- default 0x78000000 if VMSPLIT_2G_OPT
13290+ default 0x70000000 if VMSPLIT_2G_OPT
13291 default 0x40000000 if VMSPLIT_1G
13292 default 0xC0000000
13293 depends on X86_32
13294@@ -1586,6 +1589,7 @@ source kernel/Kconfig.hz
13295
13296 config KEXEC
13297 bool "kexec system call"
13298+ depends on !GRKERNSEC_KMEM
13299 ---help---
13300 kexec is a system call that implements the ability to shutdown your
13301 current kernel, and to start another kernel. It is like a reboot
13302@@ -1771,7 +1775,9 @@ config X86_NEED_RELOCS
13303
13304 config PHYSICAL_ALIGN
13305 hex "Alignment value to which kernel should be aligned"
13306- default "0x200000"
13307+ default "0x1000000"
13308+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
13309+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
13310 range 0x2000 0x1000000 if X86_32
13311 range 0x200000 0x1000000 if X86_64
13312 ---help---
13313@@ -1854,6 +1860,7 @@ config COMPAT_VDSO
13314 def_bool n
13315 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
13316 depends on X86_32 || IA32_EMULATION
13317+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
13318 ---help---
13319 Certain buggy versions of glibc will crash if they are
13320 presented with a 32-bit vDSO that is not mapped at the address
13321diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
13322index 6983314..54ad7e8 100644
13323--- a/arch/x86/Kconfig.cpu
13324+++ b/arch/x86/Kconfig.cpu
13325@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
13326
13327 config X86_F00F_BUG
13328 def_bool y
13329- depends on M586MMX || M586TSC || M586 || M486
13330+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
13331
13332 config X86_INVD_BUG
13333 def_bool y
13334@@ -327,7 +327,7 @@ config X86_INVD_BUG
13335
13336 config X86_ALIGNMENT_16
13337 def_bool y
13338- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
13339+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
13340
13341 config X86_INTEL_USERCOPY
13342 def_bool y
13343@@ -369,7 +369,7 @@ config X86_CMPXCHG64
13344 # generates cmov.
13345 config X86_CMOV
13346 def_bool y
13347- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
13348+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
13349
13350 config X86_MINIMUM_CPU_FAMILY
13351 int
13352diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
13353index 61bd2ad..50b625d 100644
13354--- a/arch/x86/Kconfig.debug
13355+++ b/arch/x86/Kconfig.debug
13356@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
13357 config DEBUG_RODATA
13358 bool "Write protect kernel read-only data structures"
13359 default y
13360- depends on DEBUG_KERNEL
13361+ depends on DEBUG_KERNEL && BROKEN
13362 ---help---
13363 Mark the kernel read-only data as write-protected in the pagetables,
13364 in order to catch accidental (and incorrect) writes to such const
13365@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
13366
13367 config DEBUG_SET_MODULE_RONX
13368 bool "Set loadable kernel module data as NX and text as RO"
13369- depends on MODULES
13370+ depends on MODULES && BROKEN
13371 ---help---
13372 This option helps catch unintended modifications to loadable
13373 kernel module's text and read-only data. It also prevents execution
13374diff --git a/arch/x86/Makefile b/arch/x86/Makefile
13375index 60087ca..9d9500e 100644
13376--- a/arch/x86/Makefile
13377+++ b/arch/x86/Makefile
13378@@ -68,9 +68,6 @@ ifeq ($(CONFIG_X86_32),y)
13379 # CPU-specific tuning. Anything which can be shared with UML should go here.
13380 include $(srctree)/arch/x86/Makefile_32.cpu
13381 KBUILD_CFLAGS += $(cflags-y)
13382-
13383- # temporary until string.h is fixed
13384- KBUILD_CFLAGS += -ffreestanding
13385 else
13386 BITS := 64
13387 UTS_MACHINE := x86_64
13388@@ -111,6 +108,9 @@ else
13389 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
13390 endif
13391
13392+# temporary until string.h is fixed
13393+KBUILD_CFLAGS += -ffreestanding
13394+
13395 # Make sure compiler does not have buggy stack-protector support.
13396 ifdef CONFIG_CC_STACKPROTECTOR
13397 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
13398@@ -184,6 +184,7 @@ archheaders:
13399 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
13400
13401 archprepare:
13402+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
13403 ifeq ($(CONFIG_KEXEC_FILE),y)
13404 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
13405 endif
13406@@ -274,3 +275,9 @@ define archhelp
13407 echo ' FDINITRD=file initrd for the booted kernel'
13408 echo ' kvmconfig - Enable additional options for guest kernel support'
13409 endef
13410+
13411+define OLD_LD
13412+
13413+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
13414+*** Please upgrade your binutils to 2.18 or newer
13415+endef
13416diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
13417index dbe8dd2..2f0a98f 100644
13418--- a/arch/x86/boot/Makefile
13419+++ b/arch/x86/boot/Makefile
13420@@ -52,6 +52,9 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
13421 # ---------------------------------------------------------------------------
13422
13423 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
13424+ifdef CONSTIFY_PLUGIN
13425+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13426+endif
13427 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13428 GCOV_PROFILE := n
13429
13430diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
13431index 878e4b9..20537ab 100644
13432--- a/arch/x86/boot/bitops.h
13433+++ b/arch/x86/boot/bitops.h
13434@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
13435 u8 v;
13436 const u32 *p = (const u32 *)addr;
13437
13438- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
13439+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
13440 return v;
13441 }
13442
13443@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
13444
13445 static inline void set_bit(int nr, void *addr)
13446 {
13447- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
13448+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
13449 }
13450
13451 #endif /* BOOT_BITOPS_H */
13452diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
13453index bd49ec6..94c7f58 100644
13454--- a/arch/x86/boot/boot.h
13455+++ b/arch/x86/boot/boot.h
13456@@ -84,7 +84,7 @@ static inline void io_delay(void)
13457 static inline u16 ds(void)
13458 {
13459 u16 seg;
13460- asm("movw %%ds,%0" : "=rm" (seg));
13461+ asm volatile("movw %%ds,%0" : "=rm" (seg));
13462 return seg;
13463 }
13464
13465diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
13466index 14fe7cb..829b962 100644
13467--- a/arch/x86/boot/compressed/Makefile
13468+++ b/arch/x86/boot/compressed/Makefile
13469@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
13470 KBUILD_CFLAGS += -mno-mmx -mno-sse
13471 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
13472 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
13473+ifdef CONSTIFY_PLUGIN
13474+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13475+endif
13476
13477 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13478 GCOV_PROFILE := n
13479diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
13480index a53440e..c3dbf1e 100644
13481--- a/arch/x86/boot/compressed/efi_stub_32.S
13482+++ b/arch/x86/boot/compressed/efi_stub_32.S
13483@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
13484 * parameter 2, ..., param n. To make things easy, we save the return
13485 * address of efi_call_phys in a global variable.
13486 */
13487- popl %ecx
13488- movl %ecx, saved_return_addr(%edx)
13489- /* get the function pointer into ECX*/
13490- popl %ecx
13491- movl %ecx, efi_rt_function_ptr(%edx)
13492+ popl saved_return_addr(%edx)
13493+ popl efi_rt_function_ptr(%edx)
13494
13495 /*
13496 * 3. Call the physical function.
13497 */
13498- call *%ecx
13499+ call *efi_rt_function_ptr(%edx)
13500
13501 /*
13502 * 4. Balance the stack. And because EAX contain the return value,
13503@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
13504 1: popl %edx
13505 subl $1b, %edx
13506
13507- movl efi_rt_function_ptr(%edx), %ecx
13508- pushl %ecx
13509+ pushl efi_rt_function_ptr(%edx)
13510
13511 /*
13512 * 10. Push the saved return address onto the stack and return.
13513 */
13514- movl saved_return_addr(%edx), %ecx
13515- pushl %ecx
13516- ret
13517+ jmpl *saved_return_addr(%edx)
13518 ENDPROC(efi_call_phys)
13519 .previous
13520
13521diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
13522index 1d7fbbc..36ecd58 100644
13523--- a/arch/x86/boot/compressed/head_32.S
13524+++ b/arch/x86/boot/compressed/head_32.S
13525@@ -140,10 +140,10 @@ preferred_addr:
13526 addl %eax, %ebx
13527 notl %eax
13528 andl %eax, %ebx
13529- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13530+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13531 jge 1f
13532 #endif
13533- movl $LOAD_PHYSICAL_ADDR, %ebx
13534+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13535 1:
13536
13537 /* Target address to relocate to for decompression */
13538diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
13539index 6b1766c..ad465c9 100644
13540--- a/arch/x86/boot/compressed/head_64.S
13541+++ b/arch/x86/boot/compressed/head_64.S
13542@@ -94,10 +94,10 @@ ENTRY(startup_32)
13543 addl %eax, %ebx
13544 notl %eax
13545 andl %eax, %ebx
13546- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13547+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13548 jge 1f
13549 #endif
13550- movl $LOAD_PHYSICAL_ADDR, %ebx
13551+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13552 1:
13553
13554 /* Target address to relocate to for decompression */
13555@@ -322,10 +322,10 @@ preferred_addr:
13556 addq %rax, %rbp
13557 notq %rax
13558 andq %rax, %rbp
13559- cmpq $LOAD_PHYSICAL_ADDR, %rbp
13560+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
13561 jge 1f
13562 #endif
13563- movq $LOAD_PHYSICAL_ADDR, %rbp
13564+ movq $____LOAD_PHYSICAL_ADDR, %rbp
13565 1:
13566
13567 /* Target address to relocate to for decompression */
13568@@ -434,8 +434,8 @@ gdt:
13569 .long gdt
13570 .word 0
13571 .quad 0x0000000000000000 /* NULL descriptor */
13572- .quad 0x00af9a000000ffff /* __KERNEL_CS */
13573- .quad 0x00cf92000000ffff /* __KERNEL_DS */
13574+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13575+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13576 .quad 0x0080890000000000 /* TS descriptor */
13577 .quad 0x0000000000000000 /* TS continued */
13578 gdt_end:
13579diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
13580index 30dd59a..cd9edc3 100644
13581--- a/arch/x86/boot/compressed/misc.c
13582+++ b/arch/x86/boot/compressed/misc.c
13583@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
13584 * Calculate the delta between where vmlinux was linked to load
13585 * and where it was actually loaded.
13586 */
13587- delta = min_addr - LOAD_PHYSICAL_ADDR;
13588+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
13589 if (!delta) {
13590 debug_putstr("No relocation needed... ");
13591 return;
13592@@ -312,7 +312,7 @@ static void parse_elf(void *output)
13593 Elf32_Ehdr ehdr;
13594 Elf32_Phdr *phdrs, *phdr;
13595 #endif
13596- void *dest;
13597+ void *dest, *prev;
13598 int i;
13599
13600 memcpy(&ehdr, output, sizeof(ehdr));
13601@@ -339,13 +339,16 @@ static void parse_elf(void *output)
13602 case PT_LOAD:
13603 #ifdef CONFIG_RELOCATABLE
13604 dest = output;
13605- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
13606+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
13607 #else
13608 dest = (void *)(phdr->p_paddr);
13609 #endif
13610 memcpy(dest,
13611 output + phdr->p_offset,
13612 phdr->p_filesz);
13613+ if (i)
13614+ memset(prev, 0xff, dest - prev);
13615+ prev = dest + phdr->p_filesz;
13616 break;
13617 default: /* Ignore other PT_* */ break;
13618 }
13619@@ -402,7 +405,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
13620 error("Destination address too large");
13621 #endif
13622 #ifndef CONFIG_RELOCATABLE
13623- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
13624+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
13625 error("Wrong destination address");
13626 #endif
13627
13628diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13629index 1fd7d57..0f7d096 100644
13630--- a/arch/x86/boot/cpucheck.c
13631+++ b/arch/x86/boot/cpucheck.c
13632@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13633 u32 ecx = MSR_K7_HWCR;
13634 u32 eax, edx;
13635
13636- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13637+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13638 eax &= ~(1 << 15);
13639- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13640+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13641
13642 get_cpuflags(); /* Make sure it really did something */
13643 err = check_cpuflags();
13644@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13645 u32 ecx = MSR_VIA_FCR;
13646 u32 eax, edx;
13647
13648- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13649+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13650 eax |= (1<<1)|(1<<7);
13651- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13652+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13653
13654 set_bit(X86_FEATURE_CX8, cpu.flags);
13655 err = check_cpuflags();
13656@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13657 u32 eax, edx;
13658 u32 level = 1;
13659
13660- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13661- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13662- asm("cpuid"
13663+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13664+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13665+ asm volatile("cpuid"
13666 : "+a" (level), "=d" (cpu.flags[0])
13667 : : "ecx", "ebx");
13668- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13669+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13670
13671 err = check_cpuflags();
13672 } else if (err == 0x01 &&
13673diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13674index 16ef025..91e033b 100644
13675--- a/arch/x86/boot/header.S
13676+++ b/arch/x86/boot/header.S
13677@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13678 # single linked list of
13679 # struct setup_data
13680
13681-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13682+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13683
13684 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13685+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13686+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13687+#else
13688 #define VO_INIT_SIZE (VO__end - VO__text)
13689+#endif
13690 #if ZO_INIT_SIZE > VO_INIT_SIZE
13691 #define INIT_SIZE ZO_INIT_SIZE
13692 #else
13693diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13694index db75d07..8e6d0af 100644
13695--- a/arch/x86/boot/memory.c
13696+++ b/arch/x86/boot/memory.c
13697@@ -19,7 +19,7 @@
13698
13699 static int detect_memory_e820(void)
13700 {
13701- int count = 0;
13702+ unsigned int count = 0;
13703 struct biosregs ireg, oreg;
13704 struct e820entry *desc = boot_params.e820_map;
13705 static struct e820entry buf; /* static so it is zeroed */
13706diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13707index ba3e100..6501b8f 100644
13708--- a/arch/x86/boot/video-vesa.c
13709+++ b/arch/x86/boot/video-vesa.c
13710@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13711
13712 boot_params.screen_info.vesapm_seg = oreg.es;
13713 boot_params.screen_info.vesapm_off = oreg.di;
13714+ boot_params.screen_info.vesapm_size = oreg.cx;
13715 }
13716
13717 /*
13718diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13719index 43eda28..5ab5fdb 100644
13720--- a/arch/x86/boot/video.c
13721+++ b/arch/x86/boot/video.c
13722@@ -96,7 +96,7 @@ static void store_mode_params(void)
13723 static unsigned int get_entry(void)
13724 {
13725 char entry_buf[4];
13726- int i, len = 0;
13727+ unsigned int i, len = 0;
13728 int key;
13729 unsigned int v;
13730
13731diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13732index 9105655..41779c1 100644
13733--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13734+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13735@@ -8,6 +8,8 @@
13736 * including this sentence is retained in full.
13737 */
13738
13739+#include <asm/alternative-asm.h>
13740+
13741 .extern crypto_ft_tab
13742 .extern crypto_it_tab
13743 .extern crypto_fl_tab
13744@@ -70,6 +72,8 @@
13745 je B192; \
13746 leaq 32(r9),r9;
13747
13748+#define ret pax_force_retaddr; ret
13749+
13750 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13751 movq r1,r2; \
13752 movq r3,r4; \
13753diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13754index 477e9d7..c92c7d8 100644
13755--- a/arch/x86/crypto/aesni-intel_asm.S
13756+++ b/arch/x86/crypto/aesni-intel_asm.S
13757@@ -31,6 +31,7 @@
13758
13759 #include <linux/linkage.h>
13760 #include <asm/inst.h>
13761+#include <asm/alternative-asm.h>
13762
13763 #ifdef __x86_64__
13764 .data
13765@@ -205,7 +206,7 @@ enc: .octa 0x2
13766 * num_initial_blocks = b mod 4
13767 * encrypt the initial num_initial_blocks blocks and apply ghash on
13768 * the ciphertext
13769-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13770+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13771 * are clobbered
13772 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13773 */
13774@@ -214,8 +215,8 @@ enc: .octa 0x2
13775 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13776 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13777 mov arg7, %r10 # %r10 = AAD
13778- mov arg8, %r12 # %r12 = aadLen
13779- mov %r12, %r11
13780+ mov arg8, %r15 # %r15 = aadLen
13781+ mov %r15, %r11
13782 pxor %xmm\i, %xmm\i
13783 _get_AAD_loop\num_initial_blocks\operation:
13784 movd (%r10), \TMP1
13785@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13786 psrldq $4, %xmm\i
13787 pxor \TMP1, %xmm\i
13788 add $4, %r10
13789- sub $4, %r12
13790+ sub $4, %r15
13791 jne _get_AAD_loop\num_initial_blocks\operation
13792 cmp $16, %r11
13793 je _get_AAD_loop2_done\num_initial_blocks\operation
13794- mov $16, %r12
13795+ mov $16, %r15
13796 _get_AAD_loop2\num_initial_blocks\operation:
13797 psrldq $4, %xmm\i
13798- sub $4, %r12
13799- cmp %r11, %r12
13800+ sub $4, %r15
13801+ cmp %r11, %r15
13802 jne _get_AAD_loop2\num_initial_blocks\operation
13803 _get_AAD_loop2_done\num_initial_blocks\operation:
13804 movdqa SHUF_MASK(%rip), %xmm14
13805@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13806 * num_initial_blocks = b mod 4
13807 * encrypt the initial num_initial_blocks blocks and apply ghash on
13808 * the ciphertext
13809-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13810+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13811 * are clobbered
13812 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13813 */
13814@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13815 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13816 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13817 mov arg7, %r10 # %r10 = AAD
13818- mov arg8, %r12 # %r12 = aadLen
13819- mov %r12, %r11
13820+ mov arg8, %r15 # %r15 = aadLen
13821+ mov %r15, %r11
13822 pxor %xmm\i, %xmm\i
13823 _get_AAD_loop\num_initial_blocks\operation:
13824 movd (%r10), \TMP1
13825@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13826 psrldq $4, %xmm\i
13827 pxor \TMP1, %xmm\i
13828 add $4, %r10
13829- sub $4, %r12
13830+ sub $4, %r15
13831 jne _get_AAD_loop\num_initial_blocks\operation
13832 cmp $16, %r11
13833 je _get_AAD_loop2_done\num_initial_blocks\operation
13834- mov $16, %r12
13835+ mov $16, %r15
13836 _get_AAD_loop2\num_initial_blocks\operation:
13837 psrldq $4, %xmm\i
13838- sub $4, %r12
13839- cmp %r11, %r12
13840+ sub $4, %r15
13841+ cmp %r11, %r15
13842 jne _get_AAD_loop2\num_initial_blocks\operation
13843 _get_AAD_loop2_done\num_initial_blocks\operation:
13844 movdqa SHUF_MASK(%rip), %xmm14
13845@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13846 *
13847 *****************************************************************************/
13848 ENTRY(aesni_gcm_dec)
13849- push %r12
13850+ push %r15
13851 push %r13
13852 push %r14
13853 mov %rsp, %r14
13854@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13855 */
13856 sub $VARIABLE_OFFSET, %rsp
13857 and $~63, %rsp # align rsp to 64 bytes
13858- mov %arg6, %r12
13859- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13860+ mov %arg6, %r15
13861+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13862 movdqa SHUF_MASK(%rip), %xmm2
13863 PSHUFB_XMM %xmm2, %xmm13
13864
13865@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13866 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13867 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13868 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13869- mov %r13, %r12
13870- and $(3<<4), %r12
13871+ mov %r13, %r15
13872+ and $(3<<4), %r15
13873 jz _initial_num_blocks_is_0_decrypt
13874- cmp $(2<<4), %r12
13875+ cmp $(2<<4), %r15
13876 jb _initial_num_blocks_is_1_decrypt
13877 je _initial_num_blocks_is_2_decrypt
13878 _initial_num_blocks_is_3_decrypt:
13879@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13880 sub $16, %r11
13881 add %r13, %r11
13882 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13883- lea SHIFT_MASK+16(%rip), %r12
13884- sub %r13, %r12
13885+ lea SHIFT_MASK+16(%rip), %r15
13886+ sub %r13, %r15
13887 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13888 # (%r13 is the number of bytes in plaintext mod 16)
13889- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13890+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13891 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13892
13893 movdqa %xmm1, %xmm2
13894 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13895- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13896+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13897 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13898 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13899 pand %xmm1, %xmm2
13900@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13901 sub $1, %r13
13902 jne _less_than_8_bytes_left_decrypt
13903 _multiple_of_16_bytes_decrypt:
13904- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13905- shl $3, %r12 # convert into number of bits
13906- movd %r12d, %xmm15 # len(A) in %xmm15
13907+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
13908+ shl $3, %r15 # convert into number of bits
13909+ movd %r15d, %xmm15 # len(A) in %xmm15
13910 shl $3, %arg4 # len(C) in bits (*128)
13911 MOVQ_R64_XMM %arg4, %xmm1
13912 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13913@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13914 mov %r14, %rsp
13915 pop %r14
13916 pop %r13
13917- pop %r12
13918+ pop %r15
13919+ pax_force_retaddr
13920 ret
13921 ENDPROC(aesni_gcm_dec)
13922
13923@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13924 * poly = x^128 + x^127 + x^126 + x^121 + 1
13925 ***************************************************************************/
13926 ENTRY(aesni_gcm_enc)
13927- push %r12
13928+ push %r15
13929 push %r13
13930 push %r14
13931 mov %rsp, %r14
13932@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13933 #
13934 sub $VARIABLE_OFFSET, %rsp
13935 and $~63, %rsp
13936- mov %arg6, %r12
13937- movdqu (%r12), %xmm13
13938+ mov %arg6, %r15
13939+ movdqu (%r15), %xmm13
13940 movdqa SHUF_MASK(%rip), %xmm2
13941 PSHUFB_XMM %xmm2, %xmm13
13942
13943@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13944 movdqa %xmm13, HashKey(%rsp)
13945 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13946 and $-16, %r13
13947- mov %r13, %r12
13948+ mov %r13, %r15
13949
13950 # Encrypt first few blocks
13951
13952- and $(3<<4), %r12
13953+ and $(3<<4), %r15
13954 jz _initial_num_blocks_is_0_encrypt
13955- cmp $(2<<4), %r12
13956+ cmp $(2<<4), %r15
13957 jb _initial_num_blocks_is_1_encrypt
13958 je _initial_num_blocks_is_2_encrypt
13959 _initial_num_blocks_is_3_encrypt:
13960@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13961 sub $16, %r11
13962 add %r13, %r11
13963 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13964- lea SHIFT_MASK+16(%rip), %r12
13965- sub %r13, %r12
13966+ lea SHIFT_MASK+16(%rip), %r15
13967+ sub %r13, %r15
13968 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13969 # (%r13 is the number of bytes in plaintext mod 16)
13970- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13971+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13972 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13973 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13974- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13975+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13976 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13977 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13978 movdqa SHUF_MASK(%rip), %xmm10
13979@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13980 sub $1, %r13
13981 jne _less_than_8_bytes_left_encrypt
13982 _multiple_of_16_bytes_encrypt:
13983- mov arg8, %r12 # %r12 = addLen (number of bytes)
13984- shl $3, %r12
13985- movd %r12d, %xmm15 # len(A) in %xmm15
13986+ mov arg8, %r15 # %r15 = addLen (number of bytes)
13987+ shl $3, %r15
13988+ movd %r15d, %xmm15 # len(A) in %xmm15
13989 shl $3, %arg4 # len(C) in bits (*128)
13990 MOVQ_R64_XMM %arg4, %xmm1
13991 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13992@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
13993 mov %r14, %rsp
13994 pop %r14
13995 pop %r13
13996- pop %r12
13997+ pop %r15
13998+ pax_force_retaddr
13999 ret
14000 ENDPROC(aesni_gcm_enc)
14001
14002@@ -1722,6 +1725,7 @@ _key_expansion_256a:
14003 pxor %xmm1, %xmm0
14004 movaps %xmm0, (TKEYP)
14005 add $0x10, TKEYP
14006+ pax_force_retaddr
14007 ret
14008 ENDPROC(_key_expansion_128)
14009 ENDPROC(_key_expansion_256a)
14010@@ -1748,6 +1752,7 @@ _key_expansion_192a:
14011 shufps $0b01001110, %xmm2, %xmm1
14012 movaps %xmm1, 0x10(TKEYP)
14013 add $0x20, TKEYP
14014+ pax_force_retaddr
14015 ret
14016 ENDPROC(_key_expansion_192a)
14017
14018@@ -1768,6 +1773,7 @@ _key_expansion_192b:
14019
14020 movaps %xmm0, (TKEYP)
14021 add $0x10, TKEYP
14022+ pax_force_retaddr
14023 ret
14024 ENDPROC(_key_expansion_192b)
14025
14026@@ -1781,6 +1787,7 @@ _key_expansion_256b:
14027 pxor %xmm1, %xmm2
14028 movaps %xmm2, (TKEYP)
14029 add $0x10, TKEYP
14030+ pax_force_retaddr
14031 ret
14032 ENDPROC(_key_expansion_256b)
14033
14034@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
14035 #ifndef __x86_64__
14036 popl KEYP
14037 #endif
14038+ pax_force_retaddr
14039 ret
14040 ENDPROC(aesni_set_key)
14041
14042@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
14043 popl KLEN
14044 popl KEYP
14045 #endif
14046+ pax_force_retaddr
14047 ret
14048 ENDPROC(aesni_enc)
14049
14050@@ -1974,6 +1983,7 @@ _aesni_enc1:
14051 AESENC KEY STATE
14052 movaps 0x70(TKEYP), KEY
14053 AESENCLAST KEY STATE
14054+ pax_force_retaddr
14055 ret
14056 ENDPROC(_aesni_enc1)
14057
14058@@ -2083,6 +2093,7 @@ _aesni_enc4:
14059 AESENCLAST KEY STATE2
14060 AESENCLAST KEY STATE3
14061 AESENCLAST KEY STATE4
14062+ pax_force_retaddr
14063 ret
14064 ENDPROC(_aesni_enc4)
14065
14066@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
14067 popl KLEN
14068 popl KEYP
14069 #endif
14070+ pax_force_retaddr
14071 ret
14072 ENDPROC(aesni_dec)
14073
14074@@ -2164,6 +2176,7 @@ _aesni_dec1:
14075 AESDEC KEY STATE
14076 movaps 0x70(TKEYP), KEY
14077 AESDECLAST KEY STATE
14078+ pax_force_retaddr
14079 ret
14080 ENDPROC(_aesni_dec1)
14081
14082@@ -2273,6 +2286,7 @@ _aesni_dec4:
14083 AESDECLAST KEY STATE2
14084 AESDECLAST KEY STATE3
14085 AESDECLAST KEY STATE4
14086+ pax_force_retaddr
14087 ret
14088 ENDPROC(_aesni_dec4)
14089
14090@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
14091 popl KEYP
14092 popl LEN
14093 #endif
14094+ pax_force_retaddr
14095 ret
14096 ENDPROC(aesni_ecb_enc)
14097
14098@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
14099 popl KEYP
14100 popl LEN
14101 #endif
14102+ pax_force_retaddr
14103 ret
14104 ENDPROC(aesni_ecb_dec)
14105
14106@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
14107 popl LEN
14108 popl IVP
14109 #endif
14110+ pax_force_retaddr
14111 ret
14112 ENDPROC(aesni_cbc_enc)
14113
14114@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
14115 popl LEN
14116 popl IVP
14117 #endif
14118+ pax_force_retaddr
14119 ret
14120 ENDPROC(aesni_cbc_dec)
14121
14122@@ -2550,6 +2568,7 @@ _aesni_inc_init:
14123 mov $1, TCTR_LOW
14124 MOVQ_R64_XMM TCTR_LOW INC
14125 MOVQ_R64_XMM CTR TCTR_LOW
14126+ pax_force_retaddr
14127 ret
14128 ENDPROC(_aesni_inc_init)
14129
14130@@ -2579,6 +2598,7 @@ _aesni_inc:
14131 .Linc_low:
14132 movaps CTR, IV
14133 PSHUFB_XMM BSWAP_MASK IV
14134+ pax_force_retaddr
14135 ret
14136 ENDPROC(_aesni_inc)
14137
14138@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
14139 .Lctr_enc_ret:
14140 movups IV, (IVP)
14141 .Lctr_enc_just_ret:
14142+ pax_force_retaddr
14143 ret
14144 ENDPROC(aesni_ctr_enc)
14145
14146@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
14147 pxor INC, STATE4
14148 movdqu STATE4, 0x70(OUTP)
14149
14150+ pax_force_retaddr
14151 ret
14152 ENDPROC(aesni_xts_crypt8)
14153
14154diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
14155index 246c670..466e2d6 100644
14156--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
14157+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
14158@@ -21,6 +21,7 @@
14159 */
14160
14161 #include <linux/linkage.h>
14162+#include <asm/alternative-asm.h>
14163
14164 .file "blowfish-x86_64-asm.S"
14165 .text
14166@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
14167 jnz .L__enc_xor;
14168
14169 write_block();
14170+ pax_force_retaddr
14171 ret;
14172 .L__enc_xor:
14173 xor_block();
14174+ pax_force_retaddr
14175 ret;
14176 ENDPROC(__blowfish_enc_blk)
14177
14178@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
14179
14180 movq %r11, %rbp;
14181
14182+ pax_force_retaddr
14183 ret;
14184 ENDPROC(blowfish_dec_blk)
14185
14186@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
14187
14188 popq %rbx;
14189 popq %rbp;
14190+ pax_force_retaddr
14191 ret;
14192
14193 .L__enc_xor4:
14194@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
14195
14196 popq %rbx;
14197 popq %rbp;
14198+ pax_force_retaddr
14199 ret;
14200 ENDPROC(__blowfish_enc_blk_4way)
14201
14202@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
14203 popq %rbx;
14204 popq %rbp;
14205
14206+ pax_force_retaddr
14207 ret;
14208 ENDPROC(blowfish_dec_blk_4way)
14209diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14210index ce71f92..1dce7ec 100644
14211--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14212+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14213@@ -16,6 +16,7 @@
14214 */
14215
14216 #include <linux/linkage.h>
14217+#include <asm/alternative-asm.h>
14218
14219 #define CAMELLIA_TABLE_BYTE_LEN 272
14220
14221@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
14222 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
14223 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
14224 %rcx, (%r9));
14225+ pax_force_retaddr
14226 ret;
14227 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
14228
14229@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
14230 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
14231 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
14232 %rax, (%r9));
14233+ pax_force_retaddr
14234 ret;
14235 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
14236
14237@@ -780,6 +783,7 @@ __camellia_enc_blk16:
14238 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
14239 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
14240
14241+ pax_force_retaddr
14242 ret;
14243
14244 .align 8
14245@@ -865,6 +869,7 @@ __camellia_dec_blk16:
14246 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
14247 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
14248
14249+ pax_force_retaddr
14250 ret;
14251
14252 .align 8
14253@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
14254 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14255 %xmm8, %rsi);
14256
14257+ pax_force_retaddr
14258 ret;
14259 ENDPROC(camellia_ecb_enc_16way)
14260
14261@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
14262 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14263 %xmm8, %rsi);
14264
14265+ pax_force_retaddr
14266 ret;
14267 ENDPROC(camellia_ecb_dec_16way)
14268
14269@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
14270 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14271 %xmm8, %rsi);
14272
14273+ pax_force_retaddr
14274 ret;
14275 ENDPROC(camellia_cbc_dec_16way)
14276
14277@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
14278 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14279 %xmm8, %rsi);
14280
14281+ pax_force_retaddr
14282 ret;
14283 ENDPROC(camellia_ctr_16way)
14284
14285@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
14286 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14287 %xmm8, %rsi);
14288
14289+ pax_force_retaddr
14290 ret;
14291 ENDPROC(camellia_xts_crypt_16way)
14292
14293diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14294index 0e0b886..5a3123c 100644
14295--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14296+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14297@@ -11,6 +11,7 @@
14298 */
14299
14300 #include <linux/linkage.h>
14301+#include <asm/alternative-asm.h>
14302
14303 #define CAMELLIA_TABLE_BYTE_LEN 272
14304
14305@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
14306 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
14307 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
14308 %rcx, (%r9));
14309+ pax_force_retaddr
14310 ret;
14311 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
14312
14313@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
14314 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
14315 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
14316 %rax, (%r9));
14317+ pax_force_retaddr
14318 ret;
14319 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
14320
14321@@ -820,6 +823,7 @@ __camellia_enc_blk32:
14322 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
14323 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
14324
14325+ pax_force_retaddr
14326 ret;
14327
14328 .align 8
14329@@ -905,6 +909,7 @@ __camellia_dec_blk32:
14330 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
14331 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
14332
14333+ pax_force_retaddr
14334 ret;
14335
14336 .align 8
14337@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
14338
14339 vzeroupper;
14340
14341+ pax_force_retaddr
14342 ret;
14343 ENDPROC(camellia_ecb_enc_32way)
14344
14345@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
14346
14347 vzeroupper;
14348
14349+ pax_force_retaddr
14350 ret;
14351 ENDPROC(camellia_ecb_dec_32way)
14352
14353@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
14354
14355 vzeroupper;
14356
14357+ pax_force_retaddr
14358 ret;
14359 ENDPROC(camellia_cbc_dec_32way)
14360
14361@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
14362
14363 vzeroupper;
14364
14365+ pax_force_retaddr
14366 ret;
14367 ENDPROC(camellia_ctr_32way)
14368
14369@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
14370
14371 vzeroupper;
14372
14373+ pax_force_retaddr
14374 ret;
14375 ENDPROC(camellia_xts_crypt_32way)
14376
14377diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
14378index 310319c..db3d7b5 100644
14379--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
14380+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
14381@@ -21,6 +21,7 @@
14382 */
14383
14384 #include <linux/linkage.h>
14385+#include <asm/alternative-asm.h>
14386
14387 .file "camellia-x86_64-asm_64.S"
14388 .text
14389@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
14390 enc_outunpack(mov, RT1);
14391
14392 movq RRBP, %rbp;
14393+ pax_force_retaddr
14394 ret;
14395
14396 .L__enc_xor:
14397 enc_outunpack(xor, RT1);
14398
14399 movq RRBP, %rbp;
14400+ pax_force_retaddr
14401 ret;
14402 ENDPROC(__camellia_enc_blk)
14403
14404@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
14405 dec_outunpack();
14406
14407 movq RRBP, %rbp;
14408+ pax_force_retaddr
14409 ret;
14410 ENDPROC(camellia_dec_blk)
14411
14412@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
14413
14414 movq RRBP, %rbp;
14415 popq %rbx;
14416+ pax_force_retaddr
14417 ret;
14418
14419 .L__enc2_xor:
14420@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
14421
14422 movq RRBP, %rbp;
14423 popq %rbx;
14424+ pax_force_retaddr
14425 ret;
14426 ENDPROC(__camellia_enc_blk_2way)
14427
14428@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
14429
14430 movq RRBP, %rbp;
14431 movq RXOR, %rbx;
14432+ pax_force_retaddr
14433 ret;
14434 ENDPROC(camellia_dec_blk_2way)
14435diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14436index c35fd5d..2d8c7db 100644
14437--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14438+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14439@@ -24,6 +24,7 @@
14440 */
14441
14442 #include <linux/linkage.h>
14443+#include <asm/alternative-asm.h>
14444
14445 .file "cast5-avx-x86_64-asm_64.S"
14446
14447@@ -281,6 +282,7 @@ __cast5_enc_blk16:
14448 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
14449 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
14450
14451+ pax_force_retaddr
14452 ret;
14453 ENDPROC(__cast5_enc_blk16)
14454
14455@@ -352,6 +354,7 @@ __cast5_dec_blk16:
14456 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
14457 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
14458
14459+ pax_force_retaddr
14460 ret;
14461
14462 .L__skip_dec:
14463@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
14464 vmovdqu RR4, (6*4*4)(%r11);
14465 vmovdqu RL4, (7*4*4)(%r11);
14466
14467+ pax_force_retaddr
14468 ret;
14469 ENDPROC(cast5_ecb_enc_16way)
14470
14471@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
14472 vmovdqu RR4, (6*4*4)(%r11);
14473 vmovdqu RL4, (7*4*4)(%r11);
14474
14475+ pax_force_retaddr
14476 ret;
14477 ENDPROC(cast5_ecb_dec_16way)
14478
14479@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
14480 * %rdx: src
14481 */
14482
14483- pushq %r12;
14484+ pushq %r14;
14485
14486 movq %rsi, %r11;
14487- movq %rdx, %r12;
14488+ movq %rdx, %r14;
14489
14490 vmovdqu (0*16)(%rdx), RL1;
14491 vmovdqu (1*16)(%rdx), RR1;
14492@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
14493 call __cast5_dec_blk16;
14494
14495 /* xor with src */
14496- vmovq (%r12), RX;
14497+ vmovq (%r14), RX;
14498 vpshufd $0x4f, RX, RX;
14499 vpxor RX, RR1, RR1;
14500- vpxor 0*16+8(%r12), RL1, RL1;
14501- vpxor 1*16+8(%r12), RR2, RR2;
14502- vpxor 2*16+8(%r12), RL2, RL2;
14503- vpxor 3*16+8(%r12), RR3, RR3;
14504- vpxor 4*16+8(%r12), RL3, RL3;
14505- vpxor 5*16+8(%r12), RR4, RR4;
14506- vpxor 6*16+8(%r12), RL4, RL4;
14507+ vpxor 0*16+8(%r14), RL1, RL1;
14508+ vpxor 1*16+8(%r14), RR2, RR2;
14509+ vpxor 2*16+8(%r14), RL2, RL2;
14510+ vpxor 3*16+8(%r14), RR3, RR3;
14511+ vpxor 4*16+8(%r14), RL3, RL3;
14512+ vpxor 5*16+8(%r14), RR4, RR4;
14513+ vpxor 6*16+8(%r14), RL4, RL4;
14514
14515 vmovdqu RR1, (0*16)(%r11);
14516 vmovdqu RL1, (1*16)(%r11);
14517@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
14518 vmovdqu RR4, (6*16)(%r11);
14519 vmovdqu RL4, (7*16)(%r11);
14520
14521- popq %r12;
14522+ popq %r14;
14523
14524+ pax_force_retaddr
14525 ret;
14526 ENDPROC(cast5_cbc_dec_16way)
14527
14528@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
14529 * %rcx: iv (big endian, 64bit)
14530 */
14531
14532- pushq %r12;
14533+ pushq %r14;
14534
14535 movq %rsi, %r11;
14536- movq %rdx, %r12;
14537+ movq %rdx, %r14;
14538
14539 vpcmpeqd RTMP, RTMP, RTMP;
14540 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
14541@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
14542 call __cast5_enc_blk16;
14543
14544 /* dst = src ^ iv */
14545- vpxor (0*16)(%r12), RR1, RR1;
14546- vpxor (1*16)(%r12), RL1, RL1;
14547- vpxor (2*16)(%r12), RR2, RR2;
14548- vpxor (3*16)(%r12), RL2, RL2;
14549- vpxor (4*16)(%r12), RR3, RR3;
14550- vpxor (5*16)(%r12), RL3, RL3;
14551- vpxor (6*16)(%r12), RR4, RR4;
14552- vpxor (7*16)(%r12), RL4, RL4;
14553+ vpxor (0*16)(%r14), RR1, RR1;
14554+ vpxor (1*16)(%r14), RL1, RL1;
14555+ vpxor (2*16)(%r14), RR2, RR2;
14556+ vpxor (3*16)(%r14), RL2, RL2;
14557+ vpxor (4*16)(%r14), RR3, RR3;
14558+ vpxor (5*16)(%r14), RL3, RL3;
14559+ vpxor (6*16)(%r14), RR4, RR4;
14560+ vpxor (7*16)(%r14), RL4, RL4;
14561 vmovdqu RR1, (0*16)(%r11);
14562 vmovdqu RL1, (1*16)(%r11);
14563 vmovdqu RR2, (2*16)(%r11);
14564@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
14565 vmovdqu RR4, (6*16)(%r11);
14566 vmovdqu RL4, (7*16)(%r11);
14567
14568- popq %r12;
14569+ popq %r14;
14570
14571+ pax_force_retaddr
14572 ret;
14573 ENDPROC(cast5_ctr_16way)
14574diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14575index e3531f8..e123f35 100644
14576--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14577+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14578@@ -24,6 +24,7 @@
14579 */
14580
14581 #include <linux/linkage.h>
14582+#include <asm/alternative-asm.h>
14583 #include "glue_helper-asm-avx.S"
14584
14585 .file "cast6-avx-x86_64-asm_64.S"
14586@@ -295,6 +296,7 @@ __cast6_enc_blk8:
14587 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14588 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14589
14590+ pax_force_retaddr
14591 ret;
14592 ENDPROC(__cast6_enc_blk8)
14593
14594@@ -340,6 +342,7 @@ __cast6_dec_blk8:
14595 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14596 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14597
14598+ pax_force_retaddr
14599 ret;
14600 ENDPROC(__cast6_dec_blk8)
14601
14602@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
14603
14604 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14605
14606+ pax_force_retaddr
14607 ret;
14608 ENDPROC(cast6_ecb_enc_8way)
14609
14610@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
14611
14612 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14613
14614+ pax_force_retaddr
14615 ret;
14616 ENDPROC(cast6_ecb_dec_8way)
14617
14618@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
14619 * %rdx: src
14620 */
14621
14622- pushq %r12;
14623+ pushq %r14;
14624
14625 movq %rsi, %r11;
14626- movq %rdx, %r12;
14627+ movq %rdx, %r14;
14628
14629 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14630
14631 call __cast6_dec_blk8;
14632
14633- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14634+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14635
14636- popq %r12;
14637+ popq %r14;
14638
14639+ pax_force_retaddr
14640 ret;
14641 ENDPROC(cast6_cbc_dec_8way)
14642
14643@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14644 * %rcx: iv (little endian, 128bit)
14645 */
14646
14647- pushq %r12;
14648+ pushq %r14;
14649
14650 movq %rsi, %r11;
14651- movq %rdx, %r12;
14652+ movq %rdx, %r14;
14653
14654 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14655 RD2, RX, RKR, RKM);
14656
14657 call __cast6_enc_blk8;
14658
14659- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14660+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14661
14662- popq %r12;
14663+ popq %r14;
14664
14665+ pax_force_retaddr
14666 ret;
14667 ENDPROC(cast6_ctr_8way)
14668
14669@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14670 /* dst <= regs xor IVs(in dst) */
14671 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14672
14673+ pax_force_retaddr
14674 ret;
14675 ENDPROC(cast6_xts_enc_8way)
14676
14677@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14678 /* dst <= regs xor IVs(in dst) */
14679 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14680
14681+ pax_force_retaddr
14682 ret;
14683 ENDPROC(cast6_xts_dec_8way)
14684diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14685index 26d49eb..c0a8c84 100644
14686--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14687+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14688@@ -45,6 +45,7 @@
14689
14690 #include <asm/inst.h>
14691 #include <linux/linkage.h>
14692+#include <asm/alternative-asm.h>
14693
14694 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14695
14696@@ -309,6 +310,7 @@ do_return:
14697 popq %rsi
14698 popq %rdi
14699 popq %rbx
14700+ pax_force_retaddr
14701 ret
14702
14703 ################################################################
14704diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14705index 5d1e007..098cb4f 100644
14706--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14707+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14708@@ -18,6 +18,7 @@
14709
14710 #include <linux/linkage.h>
14711 #include <asm/inst.h>
14712+#include <asm/alternative-asm.h>
14713
14714 .data
14715
14716@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14717 psrlq $1, T2
14718 pxor T2, T1
14719 pxor T1, DATA
14720+ pax_force_retaddr
14721 ret
14722 ENDPROC(__clmul_gf128mul_ble)
14723
14724@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14725 call __clmul_gf128mul_ble
14726 PSHUFB_XMM BSWAP DATA
14727 movups DATA, (%rdi)
14728+ pax_force_retaddr
14729 ret
14730 ENDPROC(clmul_ghash_mul)
14731
14732@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14733 PSHUFB_XMM BSWAP DATA
14734 movups DATA, (%rdi)
14735 .Lupdate_just_ret:
14736+ pax_force_retaddr
14737 ret
14738 ENDPROC(clmul_ghash_update)
14739diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14740index 9279e0b..c4b3d2c 100644
14741--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14742+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14743@@ -1,4 +1,5 @@
14744 #include <linux/linkage.h>
14745+#include <asm/alternative-asm.h>
14746
14747 # enter salsa20_encrypt_bytes
14748 ENTRY(salsa20_encrypt_bytes)
14749@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14750 add %r11,%rsp
14751 mov %rdi,%rax
14752 mov %rsi,%rdx
14753+ pax_force_retaddr
14754 ret
14755 # bytesatleast65:
14756 ._bytesatleast65:
14757@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14758 add %r11,%rsp
14759 mov %rdi,%rax
14760 mov %rsi,%rdx
14761+ pax_force_retaddr
14762 ret
14763 ENDPROC(salsa20_keysetup)
14764
14765@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14766 add %r11,%rsp
14767 mov %rdi,%rax
14768 mov %rsi,%rdx
14769+ pax_force_retaddr
14770 ret
14771 ENDPROC(salsa20_ivsetup)
14772diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14773index 2f202f4..d9164d6 100644
14774--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14775+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14776@@ -24,6 +24,7 @@
14777 */
14778
14779 #include <linux/linkage.h>
14780+#include <asm/alternative-asm.h>
14781 #include "glue_helper-asm-avx.S"
14782
14783 .file "serpent-avx-x86_64-asm_64.S"
14784@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14785 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14786 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14787
14788+ pax_force_retaddr
14789 ret;
14790 ENDPROC(__serpent_enc_blk8_avx)
14791
14792@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14793 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14794 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14795
14796+ pax_force_retaddr
14797 ret;
14798 ENDPROC(__serpent_dec_blk8_avx)
14799
14800@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14801
14802 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14803
14804+ pax_force_retaddr
14805 ret;
14806 ENDPROC(serpent_ecb_enc_8way_avx)
14807
14808@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14809
14810 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14811
14812+ pax_force_retaddr
14813 ret;
14814 ENDPROC(serpent_ecb_dec_8way_avx)
14815
14816@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14817
14818 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14819
14820+ pax_force_retaddr
14821 ret;
14822 ENDPROC(serpent_cbc_dec_8way_avx)
14823
14824@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14825
14826 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14827
14828+ pax_force_retaddr
14829 ret;
14830 ENDPROC(serpent_ctr_8way_avx)
14831
14832@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14833 /* dst <= regs xor IVs(in dst) */
14834 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14835
14836+ pax_force_retaddr
14837 ret;
14838 ENDPROC(serpent_xts_enc_8way_avx)
14839
14840@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14841 /* dst <= regs xor IVs(in dst) */
14842 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14843
14844+ pax_force_retaddr
14845 ret;
14846 ENDPROC(serpent_xts_dec_8way_avx)
14847diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14848index b222085..abd483c 100644
14849--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14850+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14851@@ -15,6 +15,7 @@
14852 */
14853
14854 #include <linux/linkage.h>
14855+#include <asm/alternative-asm.h>
14856 #include "glue_helper-asm-avx2.S"
14857
14858 .file "serpent-avx2-asm_64.S"
14859@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14860 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14861 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14862
14863+ pax_force_retaddr
14864 ret;
14865 ENDPROC(__serpent_enc_blk16)
14866
14867@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14868 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14869 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14870
14871+ pax_force_retaddr
14872 ret;
14873 ENDPROC(__serpent_dec_blk16)
14874
14875@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14876
14877 vzeroupper;
14878
14879+ pax_force_retaddr
14880 ret;
14881 ENDPROC(serpent_ecb_enc_16way)
14882
14883@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14884
14885 vzeroupper;
14886
14887+ pax_force_retaddr
14888 ret;
14889 ENDPROC(serpent_ecb_dec_16way)
14890
14891@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14892
14893 vzeroupper;
14894
14895+ pax_force_retaddr
14896 ret;
14897 ENDPROC(serpent_cbc_dec_16way)
14898
14899@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14900
14901 vzeroupper;
14902
14903+ pax_force_retaddr
14904 ret;
14905 ENDPROC(serpent_ctr_16way)
14906
14907@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14908
14909 vzeroupper;
14910
14911+ pax_force_retaddr
14912 ret;
14913 ENDPROC(serpent_xts_enc_16way)
14914
14915@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14916
14917 vzeroupper;
14918
14919+ pax_force_retaddr
14920 ret;
14921 ENDPROC(serpent_xts_dec_16way)
14922diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14923index acc066c..1559cc4 100644
14924--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14925+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14926@@ -25,6 +25,7 @@
14927 */
14928
14929 #include <linux/linkage.h>
14930+#include <asm/alternative-asm.h>
14931
14932 .file "serpent-sse2-x86_64-asm_64.S"
14933 .text
14934@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14935 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14936 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14937
14938+ pax_force_retaddr
14939 ret;
14940
14941 .L__enc_xor8:
14942 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14943 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14944
14945+ pax_force_retaddr
14946 ret;
14947 ENDPROC(__serpent_enc_blk_8way)
14948
14949@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14950 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14951 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14952
14953+ pax_force_retaddr
14954 ret;
14955 ENDPROC(serpent_dec_blk_8way)
14956diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14957index a410950..9dfe7ad 100644
14958--- a/arch/x86/crypto/sha1_ssse3_asm.S
14959+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14960@@ -29,6 +29,7 @@
14961 */
14962
14963 #include <linux/linkage.h>
14964+#include <asm/alternative-asm.h>
14965
14966 #define CTX %rdi // arg1
14967 #define BUF %rsi // arg2
14968@@ -75,9 +76,9 @@
14969
14970 push %rbx
14971 push %rbp
14972- push %r12
14973+ push %r14
14974
14975- mov %rsp, %r12
14976+ mov %rsp, %r14
14977 sub $64, %rsp # allocate workspace
14978 and $~15, %rsp # align stack
14979
14980@@ -99,11 +100,12 @@
14981 xor %rax, %rax
14982 rep stosq
14983
14984- mov %r12, %rsp # deallocate workspace
14985+ mov %r14, %rsp # deallocate workspace
14986
14987- pop %r12
14988+ pop %r14
14989 pop %rbp
14990 pop %rbx
14991+ pax_force_retaddr
14992 ret
14993
14994 ENDPROC(\name)
14995diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
14996index 642f156..51a513c 100644
14997--- a/arch/x86/crypto/sha256-avx-asm.S
14998+++ b/arch/x86/crypto/sha256-avx-asm.S
14999@@ -49,6 +49,7 @@
15000
15001 #ifdef CONFIG_AS_AVX
15002 #include <linux/linkage.h>
15003+#include <asm/alternative-asm.h>
15004
15005 ## assume buffers not aligned
15006 #define VMOVDQ vmovdqu
15007@@ -460,6 +461,7 @@ done_hash:
15008 popq %r13
15009 popq %rbp
15010 popq %rbx
15011+ pax_force_retaddr
15012 ret
15013 ENDPROC(sha256_transform_avx)
15014
15015diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
15016index 9e86944..3795e6a 100644
15017--- a/arch/x86/crypto/sha256-avx2-asm.S
15018+++ b/arch/x86/crypto/sha256-avx2-asm.S
15019@@ -50,6 +50,7 @@
15020
15021 #ifdef CONFIG_AS_AVX2
15022 #include <linux/linkage.h>
15023+#include <asm/alternative-asm.h>
15024
15025 ## assume buffers not aligned
15026 #define VMOVDQ vmovdqu
15027@@ -720,6 +721,7 @@ done_hash:
15028 popq %r12
15029 popq %rbp
15030 popq %rbx
15031+ pax_force_retaddr
15032 ret
15033 ENDPROC(sha256_transform_rorx)
15034
15035diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
15036index f833b74..8c62a9e 100644
15037--- a/arch/x86/crypto/sha256-ssse3-asm.S
15038+++ b/arch/x86/crypto/sha256-ssse3-asm.S
15039@@ -47,6 +47,7 @@
15040 ########################################################################
15041
15042 #include <linux/linkage.h>
15043+#include <asm/alternative-asm.h>
15044
15045 ## assume buffers not aligned
15046 #define MOVDQ movdqu
15047@@ -471,6 +472,7 @@ done_hash:
15048 popq %rbp
15049 popq %rbx
15050
15051+ pax_force_retaddr
15052 ret
15053 ENDPROC(sha256_transform_ssse3)
15054
15055diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
15056index 974dde9..a823ff9 100644
15057--- a/arch/x86/crypto/sha512-avx-asm.S
15058+++ b/arch/x86/crypto/sha512-avx-asm.S
15059@@ -49,6 +49,7 @@
15060
15061 #ifdef CONFIG_AS_AVX
15062 #include <linux/linkage.h>
15063+#include <asm/alternative-asm.h>
15064
15065 .text
15066
15067@@ -364,6 +365,7 @@ updateblock:
15068 mov frame_RSPSAVE(%rsp), %rsp
15069
15070 nowork:
15071+ pax_force_retaddr
15072 ret
15073 ENDPROC(sha512_transform_avx)
15074
15075diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
15076index 568b961..ed20c37 100644
15077--- a/arch/x86/crypto/sha512-avx2-asm.S
15078+++ b/arch/x86/crypto/sha512-avx2-asm.S
15079@@ -51,6 +51,7 @@
15080
15081 #ifdef CONFIG_AS_AVX2
15082 #include <linux/linkage.h>
15083+#include <asm/alternative-asm.h>
15084
15085 .text
15086
15087@@ -678,6 +679,7 @@ done_hash:
15088
15089 # Restore Stack Pointer
15090 mov frame_RSPSAVE(%rsp), %rsp
15091+ pax_force_retaddr
15092 ret
15093 ENDPROC(sha512_transform_rorx)
15094
15095diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
15096index fb56855..6edd768 100644
15097--- a/arch/x86/crypto/sha512-ssse3-asm.S
15098+++ b/arch/x86/crypto/sha512-ssse3-asm.S
15099@@ -48,6 +48,7 @@
15100 ########################################################################
15101
15102 #include <linux/linkage.h>
15103+#include <asm/alternative-asm.h>
15104
15105 .text
15106
15107@@ -363,6 +364,7 @@ updateblock:
15108 mov frame_RSPSAVE(%rsp), %rsp
15109
15110 nowork:
15111+ pax_force_retaddr
15112 ret
15113 ENDPROC(sha512_transform_ssse3)
15114
15115diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15116index 0505813..b067311 100644
15117--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15118+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15119@@ -24,6 +24,7 @@
15120 */
15121
15122 #include <linux/linkage.h>
15123+#include <asm/alternative-asm.h>
15124 #include "glue_helper-asm-avx.S"
15125
15126 .file "twofish-avx-x86_64-asm_64.S"
15127@@ -284,6 +285,7 @@ __twofish_enc_blk8:
15128 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
15129 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
15130
15131+ pax_force_retaddr
15132 ret;
15133 ENDPROC(__twofish_enc_blk8)
15134
15135@@ -324,6 +326,7 @@ __twofish_dec_blk8:
15136 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
15137 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
15138
15139+ pax_force_retaddr
15140 ret;
15141 ENDPROC(__twofish_dec_blk8)
15142
15143@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
15144
15145 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15146
15147+ pax_force_retaddr
15148 ret;
15149 ENDPROC(twofish_ecb_enc_8way)
15150
15151@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
15152
15153 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15154
15155+ pax_force_retaddr
15156 ret;
15157 ENDPROC(twofish_ecb_dec_8way)
15158
15159@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
15160 * %rdx: src
15161 */
15162
15163- pushq %r12;
15164+ pushq %r14;
15165
15166 movq %rsi, %r11;
15167- movq %rdx, %r12;
15168+ movq %rdx, %r14;
15169
15170 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15171
15172 call __twofish_dec_blk8;
15173
15174- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15175+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15176
15177- popq %r12;
15178+ popq %r14;
15179
15180+ pax_force_retaddr
15181 ret;
15182 ENDPROC(twofish_cbc_dec_8way)
15183
15184@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
15185 * %rcx: iv (little endian, 128bit)
15186 */
15187
15188- pushq %r12;
15189+ pushq %r14;
15190
15191 movq %rsi, %r11;
15192- movq %rdx, %r12;
15193+ movq %rdx, %r14;
15194
15195 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
15196 RD2, RX0, RX1, RY0);
15197
15198 call __twofish_enc_blk8;
15199
15200- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15201+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15202
15203- popq %r12;
15204+ popq %r14;
15205
15206+ pax_force_retaddr
15207 ret;
15208 ENDPROC(twofish_ctr_8way)
15209
15210@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
15211 /* dst <= regs xor IVs(in dst) */
15212 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15213
15214+ pax_force_retaddr
15215 ret;
15216 ENDPROC(twofish_xts_enc_8way)
15217
15218@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
15219 /* dst <= regs xor IVs(in dst) */
15220 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15221
15222+ pax_force_retaddr
15223 ret;
15224 ENDPROC(twofish_xts_dec_8way)
15225diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15226index 1c3b7ce..02f578d 100644
15227--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15228+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15229@@ -21,6 +21,7 @@
15230 */
15231
15232 #include <linux/linkage.h>
15233+#include <asm/alternative-asm.h>
15234
15235 .file "twofish-x86_64-asm-3way.S"
15236 .text
15237@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
15238 popq %r13;
15239 popq %r14;
15240 popq %r15;
15241+ pax_force_retaddr
15242 ret;
15243
15244 .L__enc_xor3:
15245@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
15246 popq %r13;
15247 popq %r14;
15248 popq %r15;
15249+ pax_force_retaddr
15250 ret;
15251 ENDPROC(__twofish_enc_blk_3way)
15252
15253@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
15254 popq %r13;
15255 popq %r14;
15256 popq %r15;
15257+ pax_force_retaddr
15258 ret;
15259 ENDPROC(twofish_dec_blk_3way)
15260diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
15261index a039d21..524b8b2 100644
15262--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
15263+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
15264@@ -22,6 +22,7 @@
15265
15266 #include <linux/linkage.h>
15267 #include <asm/asm-offsets.h>
15268+#include <asm/alternative-asm.h>
15269
15270 #define a_offset 0
15271 #define b_offset 4
15272@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
15273
15274 popq R1
15275 movq $1,%rax
15276+ pax_force_retaddr
15277 ret
15278 ENDPROC(twofish_enc_blk)
15279
15280@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
15281
15282 popq R1
15283 movq $1,%rax
15284+ pax_force_retaddr
15285 ret
15286 ENDPROC(twofish_dec_blk)
15287diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
15288index d21ff89..6da8e6e 100644
15289--- a/arch/x86/ia32/ia32_aout.c
15290+++ b/arch/x86/ia32/ia32_aout.c
15291@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
15292 unsigned long dump_start, dump_size;
15293 struct user32 dump;
15294
15295+ memset(&dump, 0, sizeof(dump));
15296+
15297 fs = get_fs();
15298 set_fs(KERNEL_DS);
15299 has_dumped = 1;
15300diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
15301index f9e181a..300544c 100644
15302--- a/arch/x86/ia32/ia32_signal.c
15303+++ b/arch/x86/ia32/ia32_signal.c
15304@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
15305 if (__get_user(set.sig[0], &frame->sc.oldmask)
15306 || (_COMPAT_NSIG_WORDS > 1
15307 && __copy_from_user((((char *) &set.sig) + 4),
15308- &frame->extramask,
15309+ frame->extramask,
15310 sizeof(frame->extramask))))
15311 goto badframe;
15312
15313@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
15314 sp -= frame_size;
15315 /* Align the stack pointer according to the i386 ABI,
15316 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
15317- sp = ((sp + 4) & -16ul) - 4;
15318+ sp = ((sp - 12) & -16ul) - 4;
15319 return (void __user *) sp;
15320 }
15321
15322@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
15323 } else {
15324 /* Return stub is in 32bit vsyscall page */
15325 if (current->mm->context.vdso)
15326- restorer = current->mm->context.vdso +
15327- selected_vdso32->sym___kernel_sigreturn;
15328+ restorer = (void __force_user *)(current->mm->context.vdso +
15329+ selected_vdso32->sym___kernel_sigreturn);
15330 else
15331- restorer = &frame->retcode;
15332+ restorer = frame->retcode;
15333 }
15334
15335 put_user_try {
15336@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
15337 * These are actually not used anymore, but left because some
15338 * gdb versions depend on them as a marker.
15339 */
15340- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
15341+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
15342 } put_user_catch(err);
15343
15344 if (err)
15345@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
15346 0xb8,
15347 __NR_ia32_rt_sigreturn,
15348 0x80cd,
15349- 0,
15350+ 0
15351 };
15352
15353 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
15354@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
15355
15356 if (ksig->ka.sa.sa_flags & SA_RESTORER)
15357 restorer = ksig->ka.sa.sa_restorer;
15358+ else if (current->mm->context.vdso)
15359+ /* Return stub is in 32bit vsyscall page */
15360+ restorer = (void __force_user *)(current->mm->context.vdso +
15361+ selected_vdso32->sym___kernel_rt_sigreturn);
15362 else
15363- restorer = current->mm->context.vdso +
15364- selected_vdso32->sym___kernel_rt_sigreturn;
15365+ restorer = frame->retcode;
15366 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
15367
15368 /*
15369 * Not actually used anymore, but left because some gdb
15370 * versions need it.
15371 */
15372- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
15373+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
15374 } put_user_catch(err);
15375
15376 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
15377diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
15378index 92a2e93..cd4d95f 100644
15379--- a/arch/x86/ia32/ia32entry.S
15380+++ b/arch/x86/ia32/ia32entry.S
15381@@ -15,8 +15,10 @@
15382 #include <asm/irqflags.h>
15383 #include <asm/asm.h>
15384 #include <asm/smap.h>
15385+#include <asm/pgtable.h>
15386 #include <linux/linkage.h>
15387 #include <linux/err.h>
15388+#include <asm/alternative-asm.h>
15389
15390 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15391 #include <linux/elf-em.h>
15392@@ -62,12 +64,12 @@
15393 */
15394 .macro LOAD_ARGS32 offset, _r9=0
15395 .if \_r9
15396- movl \offset+16(%rsp),%r9d
15397+ movl \offset+R9(%rsp),%r9d
15398 .endif
15399- movl \offset+40(%rsp),%ecx
15400- movl \offset+48(%rsp),%edx
15401- movl \offset+56(%rsp),%esi
15402- movl \offset+64(%rsp),%edi
15403+ movl \offset+RCX(%rsp),%ecx
15404+ movl \offset+RDX(%rsp),%edx
15405+ movl \offset+RSI(%rsp),%esi
15406+ movl \offset+RDI(%rsp),%edi
15407 movl %eax,%eax /* zero extension */
15408 .endm
15409
15410@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
15411 ENDPROC(native_irq_enable_sysexit)
15412 #endif
15413
15414+ .macro pax_enter_kernel_user
15415+ pax_set_fptr_mask
15416+#ifdef CONFIG_PAX_MEMORY_UDEREF
15417+ call pax_enter_kernel_user
15418+#endif
15419+ .endm
15420+
15421+ .macro pax_exit_kernel_user
15422+#ifdef CONFIG_PAX_MEMORY_UDEREF
15423+ call pax_exit_kernel_user
15424+#endif
15425+#ifdef CONFIG_PAX_RANDKSTACK
15426+ pushq %rax
15427+ pushq %r11
15428+ call pax_randomize_kstack
15429+ popq %r11
15430+ popq %rax
15431+#endif
15432+ .endm
15433+
15434+ .macro pax_erase_kstack
15435+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15436+ call pax_erase_kstack
15437+#endif
15438+ .endm
15439+
15440 /*
15441 * 32bit SYSENTER instruction entry.
15442 *
15443@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
15444 CFI_REGISTER rsp,rbp
15445 SWAPGS_UNSAFE_STACK
15446 movq PER_CPU_VAR(kernel_stack), %rsp
15447- addq $(KERNEL_STACK_OFFSET),%rsp
15448- /*
15449- * No need to follow this irqs on/off section: the syscall
15450- * disabled irqs, here we enable it straight after entry:
15451- */
15452- ENABLE_INTERRUPTS(CLBR_NONE)
15453 movl %ebp,%ebp /* zero extension */
15454 pushq_cfi $__USER32_DS
15455 /*CFI_REL_OFFSET ss,0*/
15456@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
15457 CFI_REL_OFFSET rsp,0
15458 pushfq_cfi
15459 /*CFI_REL_OFFSET rflags,0*/
15460- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
15461- CFI_REGISTER rip,r10
15462+ orl $X86_EFLAGS_IF,(%rsp)
15463+ GET_THREAD_INFO(%r11)
15464+ movl TI_sysenter_return(%r11), %r11d
15465+ CFI_REGISTER rip,r11
15466 pushq_cfi $__USER32_CS
15467 /*CFI_REL_OFFSET cs,0*/
15468 movl %eax, %eax
15469- pushq_cfi %r10
15470+ pushq_cfi %r11
15471 CFI_REL_OFFSET rip,0
15472 pushq_cfi %rax
15473 cld
15474 SAVE_ARGS 0,1,0
15475+ pax_enter_kernel_user
15476+
15477+#ifdef CONFIG_PAX_RANDKSTACK
15478+ pax_erase_kstack
15479+#endif
15480+
15481+ /*
15482+ * No need to follow this irqs on/off section: the syscall
15483+ * disabled irqs, here we enable it straight after entry:
15484+ */
15485+ ENABLE_INTERRUPTS(CLBR_NONE)
15486 /* no need to do an access_ok check here because rbp has been
15487 32bit zero extended */
15488+
15489+#ifdef CONFIG_PAX_MEMORY_UDEREF
15490+ addq pax_user_shadow_base,%rbp
15491+ ASM_PAX_OPEN_USERLAND
15492+#endif
15493+
15494 ASM_STAC
15495 1: movl (%rbp),%ebp
15496 _ASM_EXTABLE(1b,ia32_badarg)
15497 ASM_CLAC
15498
15499+#ifdef CONFIG_PAX_MEMORY_UDEREF
15500+ ASM_PAX_CLOSE_USERLAND
15501+#endif
15502+
15503 /*
15504 * Sysenter doesn't filter flags, so we need to clear NT
15505 * ourselves. To save a few cycles, we can check whether
15506@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
15507 jnz sysenter_fix_flags
15508 sysenter_flags_fixed:
15509
15510- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15511- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15512+ GET_THREAD_INFO(%r11)
15513+ orl $TS_COMPAT,TI_status(%r11)
15514+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15515 CFI_REMEMBER_STATE
15516 jnz sysenter_tracesys
15517 cmpq $(IA32_NR_syscalls-1),%rax
15518@@ -172,15 +218,18 @@ sysenter_do_call:
15519 sysenter_dispatch:
15520 call *ia32_sys_call_table(,%rax,8)
15521 movq %rax,RAX-ARGOFFSET(%rsp)
15522+ GET_THREAD_INFO(%r11)
15523 DISABLE_INTERRUPTS(CLBR_NONE)
15524 TRACE_IRQS_OFF
15525- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15526+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15527 jnz sysexit_audit
15528 sysexit_from_sys_call:
15529- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15530+ pax_exit_kernel_user
15531+ pax_erase_kstack
15532+ andl $~TS_COMPAT,TI_status(%r11)
15533 /* clear IF, that popfq doesn't enable interrupts early */
15534- andl $~0x200,EFLAGS-R11(%rsp)
15535- movl RIP-R11(%rsp),%edx /* User %eip */
15536+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
15537+ movl RIP(%rsp),%edx /* User %eip */
15538 CFI_REGISTER rip,rdx
15539 RESTORE_ARGS 0,24,0,0,0,0
15540 xorq %r8,%r8
15541@@ -205,6 +254,9 @@ sysexit_from_sys_call:
15542 movl %eax,%esi /* 2nd arg: syscall number */
15543 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
15544 call __audit_syscall_entry
15545+
15546+ pax_erase_kstack
15547+
15548 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
15549 cmpq $(IA32_NR_syscalls-1),%rax
15550 ja ia32_badsys
15551@@ -216,7 +268,7 @@ sysexit_from_sys_call:
15552 .endm
15553
15554 .macro auditsys_exit exit
15555- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15556+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15557 jnz ia32_ret_from_sys_call
15558 TRACE_IRQS_ON
15559 ENABLE_INTERRUPTS(CLBR_NONE)
15560@@ -227,11 +279,12 @@ sysexit_from_sys_call:
15561 1: setbe %al /* 1 if error, 0 if not */
15562 movzbl %al,%edi /* zero-extend that into %edi */
15563 call __audit_syscall_exit
15564+ GET_THREAD_INFO(%r11)
15565 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
15566 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
15567 DISABLE_INTERRUPTS(CLBR_NONE)
15568 TRACE_IRQS_OFF
15569- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15570+ testl %edi,TI_flags(%r11)
15571 jz \exit
15572 CLEAR_RREGS -ARGOFFSET
15573 jmp int_with_check
15574@@ -253,7 +306,7 @@ sysenter_fix_flags:
15575
15576 sysenter_tracesys:
15577 #ifdef CONFIG_AUDITSYSCALL
15578- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15579+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15580 jz sysenter_auditsys
15581 #endif
15582 SAVE_REST
15583@@ -265,6 +318,9 @@ sysenter_tracesys:
15584 RESTORE_REST
15585 cmpq $(IA32_NR_syscalls-1),%rax
15586 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
15587+
15588+ pax_erase_kstack
15589+
15590 jmp sysenter_do_call
15591 CFI_ENDPROC
15592 ENDPROC(ia32_sysenter_target)
15593@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
15594 ENTRY(ia32_cstar_target)
15595 CFI_STARTPROC32 simple
15596 CFI_SIGNAL_FRAME
15597- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15598+ CFI_DEF_CFA rsp,0
15599 CFI_REGISTER rip,rcx
15600 /*CFI_REGISTER rflags,r11*/
15601 SWAPGS_UNSAFE_STACK
15602 movl %esp,%r8d
15603 CFI_REGISTER rsp,r8
15604 movq PER_CPU_VAR(kernel_stack),%rsp
15605+ SAVE_ARGS 8*6,0,0
15606+ pax_enter_kernel_user
15607+
15608+#ifdef CONFIG_PAX_RANDKSTACK
15609+ pax_erase_kstack
15610+#endif
15611+
15612 /*
15613 * No need to follow this irqs on/off section: the syscall
15614 * disabled irqs and here we enable it straight after entry:
15615 */
15616 ENABLE_INTERRUPTS(CLBR_NONE)
15617- SAVE_ARGS 8,0,0
15618 movl %eax,%eax /* zero extension */
15619 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15620 movq %rcx,RIP-ARGOFFSET(%rsp)
15621@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
15622 /* no need to do an access_ok check here because r8 has been
15623 32bit zero extended */
15624 /* hardware stack frame is complete now */
15625+
15626+#ifdef CONFIG_PAX_MEMORY_UDEREF
15627+ ASM_PAX_OPEN_USERLAND
15628+ movq pax_user_shadow_base,%r8
15629+ addq RSP-ARGOFFSET(%rsp),%r8
15630+#endif
15631+
15632 ASM_STAC
15633 1: movl (%r8),%r9d
15634 _ASM_EXTABLE(1b,ia32_badarg)
15635 ASM_CLAC
15636- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15637- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15638+
15639+#ifdef CONFIG_PAX_MEMORY_UDEREF
15640+ ASM_PAX_CLOSE_USERLAND
15641+#endif
15642+
15643+ GET_THREAD_INFO(%r11)
15644+ orl $TS_COMPAT,TI_status(%r11)
15645+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15646 CFI_REMEMBER_STATE
15647 jnz cstar_tracesys
15648 cmpq $IA32_NR_syscalls-1,%rax
15649@@ -335,13 +410,16 @@ cstar_do_call:
15650 cstar_dispatch:
15651 call *ia32_sys_call_table(,%rax,8)
15652 movq %rax,RAX-ARGOFFSET(%rsp)
15653+ GET_THREAD_INFO(%r11)
15654 DISABLE_INTERRUPTS(CLBR_NONE)
15655 TRACE_IRQS_OFF
15656- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15657+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15658 jnz sysretl_audit
15659 sysretl_from_sys_call:
15660- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15661- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15662+ pax_exit_kernel_user
15663+ pax_erase_kstack
15664+ andl $~TS_COMPAT,TI_status(%r11)
15665+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15666 movl RIP-ARGOFFSET(%rsp),%ecx
15667 CFI_REGISTER rip,rcx
15668 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15669@@ -368,7 +446,7 @@ sysretl_audit:
15670
15671 cstar_tracesys:
15672 #ifdef CONFIG_AUDITSYSCALL
15673- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15674+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15675 jz cstar_auditsys
15676 #endif
15677 xchgl %r9d,%ebp
15678@@ -382,11 +460,19 @@ cstar_tracesys:
15679 xchgl %ebp,%r9d
15680 cmpq $(IA32_NR_syscalls-1),%rax
15681 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15682+
15683+ pax_erase_kstack
15684+
15685 jmp cstar_do_call
15686 END(ia32_cstar_target)
15687
15688 ia32_badarg:
15689 ASM_CLAC
15690+
15691+#ifdef CONFIG_PAX_MEMORY_UDEREF
15692+ ASM_PAX_CLOSE_USERLAND
15693+#endif
15694+
15695 movq $-EFAULT,%rax
15696 jmp ia32_sysret
15697 CFI_ENDPROC
15698@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
15699 CFI_REL_OFFSET rip,RIP-RIP
15700 PARAVIRT_ADJUST_EXCEPTION_FRAME
15701 SWAPGS
15702- /*
15703- * No need to follow this irqs on/off section: the syscall
15704- * disabled irqs and here we enable it straight after entry:
15705- */
15706- ENABLE_INTERRUPTS(CLBR_NONE)
15707 movl %eax,%eax
15708 pushq_cfi %rax
15709 cld
15710 /* note the registers are not zero extended to the sf.
15711 this could be a problem. */
15712 SAVE_ARGS 0,1,0
15713- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15714- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15715+ pax_enter_kernel_user
15716+
15717+#ifdef CONFIG_PAX_RANDKSTACK
15718+ pax_erase_kstack
15719+#endif
15720+
15721+ /*
15722+ * No need to follow this irqs on/off section: the syscall
15723+ * disabled irqs and here we enable it straight after entry:
15724+ */
15725+ ENABLE_INTERRUPTS(CLBR_NONE)
15726+ GET_THREAD_INFO(%r11)
15727+ orl $TS_COMPAT,TI_status(%r11)
15728+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15729 jnz ia32_tracesys
15730 cmpq $(IA32_NR_syscalls-1),%rax
15731 ja ia32_badsys
15732@@ -458,6 +551,9 @@ ia32_tracesys:
15733 RESTORE_REST
15734 cmpq $(IA32_NR_syscalls-1),%rax
15735 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15736+
15737+ pax_erase_kstack
15738+
15739 jmp ia32_do_call
15740 END(ia32_syscall)
15741
15742diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15743index 8e0ceec..af13504 100644
15744--- a/arch/x86/ia32/sys_ia32.c
15745+++ b/arch/x86/ia32/sys_ia32.c
15746@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15747 */
15748 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15749 {
15750- typeof(ubuf->st_uid) uid = 0;
15751- typeof(ubuf->st_gid) gid = 0;
15752+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15753+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15754 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15755 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15756 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
15757diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15758index 372231c..51b537d 100644
15759--- a/arch/x86/include/asm/alternative-asm.h
15760+++ b/arch/x86/include/asm/alternative-asm.h
15761@@ -18,6 +18,45 @@
15762 .endm
15763 #endif
15764
15765+#ifdef KERNEXEC_PLUGIN
15766+ .macro pax_force_retaddr_bts rip=0
15767+ btsq $63,\rip(%rsp)
15768+ .endm
15769+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15770+ .macro pax_force_retaddr rip=0, reload=0
15771+ btsq $63,\rip(%rsp)
15772+ .endm
15773+ .macro pax_force_fptr ptr
15774+ btsq $63,\ptr
15775+ .endm
15776+ .macro pax_set_fptr_mask
15777+ .endm
15778+#endif
15779+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15780+ .macro pax_force_retaddr rip=0, reload=0
15781+ .if \reload
15782+ pax_set_fptr_mask
15783+ .endif
15784+ orq %r12,\rip(%rsp)
15785+ .endm
15786+ .macro pax_force_fptr ptr
15787+ orq %r12,\ptr
15788+ .endm
15789+ .macro pax_set_fptr_mask
15790+ movabs $0x8000000000000000,%r12
15791+ .endm
15792+#endif
15793+#else
15794+ .macro pax_force_retaddr rip=0, reload=0
15795+ .endm
15796+ .macro pax_force_fptr ptr
15797+ .endm
15798+ .macro pax_force_retaddr_bts rip=0
15799+ .endm
15800+ .macro pax_set_fptr_mask
15801+ .endm
15802+#endif
15803+
15804 .macro altinstruction_entry orig alt feature orig_len alt_len
15805 .long \orig - .
15806 .long \alt - .
15807diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15808index 473bdbe..b1e3377 100644
15809--- a/arch/x86/include/asm/alternative.h
15810+++ b/arch/x86/include/asm/alternative.h
15811@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15812 ".pushsection .discard,\"aw\",@progbits\n" \
15813 DISCARD_ENTRY(1) \
15814 ".popsection\n" \
15815- ".pushsection .altinstr_replacement, \"ax\"\n" \
15816+ ".pushsection .altinstr_replacement, \"a\"\n" \
15817 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15818 ".popsection"
15819
15820@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15821 DISCARD_ENTRY(1) \
15822 DISCARD_ENTRY(2) \
15823 ".popsection\n" \
15824- ".pushsection .altinstr_replacement, \"ax\"\n" \
15825+ ".pushsection .altinstr_replacement, \"a\"\n" \
15826 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15827 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15828 ".popsection"
15829diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15830index 465b309..ab7e51f 100644
15831--- a/arch/x86/include/asm/apic.h
15832+++ b/arch/x86/include/asm/apic.h
15833@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15834
15835 #ifdef CONFIG_X86_LOCAL_APIC
15836
15837-extern unsigned int apic_verbosity;
15838+extern int apic_verbosity;
15839 extern int local_apic_timer_c2_ok;
15840
15841 extern int disable_apic;
15842diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15843index 20370c6..a2eb9b0 100644
15844--- a/arch/x86/include/asm/apm.h
15845+++ b/arch/x86/include/asm/apm.h
15846@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15847 __asm__ __volatile__(APM_DO_ZERO_SEGS
15848 "pushl %%edi\n\t"
15849 "pushl %%ebp\n\t"
15850- "lcall *%%cs:apm_bios_entry\n\t"
15851+ "lcall *%%ss:apm_bios_entry\n\t"
15852 "setc %%al\n\t"
15853 "popl %%ebp\n\t"
15854 "popl %%edi\n\t"
15855@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15856 __asm__ __volatile__(APM_DO_ZERO_SEGS
15857 "pushl %%edi\n\t"
15858 "pushl %%ebp\n\t"
15859- "lcall *%%cs:apm_bios_entry\n\t"
15860+ "lcall *%%ss:apm_bios_entry\n\t"
15861 "setc %%bl\n\t"
15862 "popl %%ebp\n\t"
15863 "popl %%edi\n\t"
15864diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15865index 6dd1c7dd..5a85bf2 100644
15866--- a/arch/x86/include/asm/atomic.h
15867+++ b/arch/x86/include/asm/atomic.h
15868@@ -24,7 +24,18 @@
15869 */
15870 static inline int atomic_read(const atomic_t *v)
15871 {
15872- return (*(volatile int *)&(v)->counter);
15873+ return (*(volatile const int *)&(v)->counter);
15874+}
15875+
15876+/**
15877+ * atomic_read_unchecked - read atomic variable
15878+ * @v: pointer of type atomic_unchecked_t
15879+ *
15880+ * Atomically reads the value of @v.
15881+ */
15882+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15883+{
15884+ return (*(volatile const int *)&(v)->counter);
15885 }
15886
15887 /**
15888@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15889 }
15890
15891 /**
15892+ * atomic_set_unchecked - set atomic variable
15893+ * @v: pointer of type atomic_unchecked_t
15894+ * @i: required value
15895+ *
15896+ * Atomically sets the value of @v to @i.
15897+ */
15898+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15899+{
15900+ v->counter = i;
15901+}
15902+
15903+/**
15904 * atomic_add - add integer to atomic variable
15905 * @i: integer value to add
15906 * @v: pointer of type atomic_t
15907@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15908 */
15909 static inline void atomic_add(int i, atomic_t *v)
15910 {
15911- asm volatile(LOCK_PREFIX "addl %1,%0"
15912+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15913+
15914+#ifdef CONFIG_PAX_REFCOUNT
15915+ "jno 0f\n"
15916+ LOCK_PREFIX "subl %1,%0\n"
15917+ "int $4\n0:\n"
15918+ _ASM_EXTABLE(0b, 0b)
15919+#endif
15920+
15921+ : "+m" (v->counter)
15922+ : "ir" (i));
15923+}
15924+
15925+/**
15926+ * atomic_add_unchecked - add integer to atomic variable
15927+ * @i: integer value to add
15928+ * @v: pointer of type atomic_unchecked_t
15929+ *
15930+ * Atomically adds @i to @v.
15931+ */
15932+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15933+{
15934+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15935 : "+m" (v->counter)
15936 : "ir" (i));
15937 }
15938@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15939 */
15940 static inline void atomic_sub(int i, atomic_t *v)
15941 {
15942- asm volatile(LOCK_PREFIX "subl %1,%0"
15943+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15944+
15945+#ifdef CONFIG_PAX_REFCOUNT
15946+ "jno 0f\n"
15947+ LOCK_PREFIX "addl %1,%0\n"
15948+ "int $4\n0:\n"
15949+ _ASM_EXTABLE(0b, 0b)
15950+#endif
15951+
15952+ : "+m" (v->counter)
15953+ : "ir" (i));
15954+}
15955+
15956+/**
15957+ * atomic_sub_unchecked - subtract integer from atomic variable
15958+ * @i: integer value to subtract
15959+ * @v: pointer of type atomic_unchecked_t
15960+ *
15961+ * Atomically subtracts @i from @v.
15962+ */
15963+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15964+{
15965+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15966 : "+m" (v->counter)
15967 : "ir" (i));
15968 }
15969@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15970 */
15971 static inline int atomic_sub_and_test(int i, atomic_t *v)
15972 {
15973- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15974+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15975 }
15976
15977 /**
15978@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15979 */
15980 static inline void atomic_inc(atomic_t *v)
15981 {
15982- asm volatile(LOCK_PREFIX "incl %0"
15983+ asm volatile(LOCK_PREFIX "incl %0\n"
15984+
15985+#ifdef CONFIG_PAX_REFCOUNT
15986+ "jno 0f\n"
15987+ LOCK_PREFIX "decl %0\n"
15988+ "int $4\n0:\n"
15989+ _ASM_EXTABLE(0b, 0b)
15990+#endif
15991+
15992+ : "+m" (v->counter));
15993+}
15994+
15995+/**
15996+ * atomic_inc_unchecked - increment atomic variable
15997+ * @v: pointer of type atomic_unchecked_t
15998+ *
15999+ * Atomically increments @v by 1.
16000+ */
16001+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
16002+{
16003+ asm volatile(LOCK_PREFIX "incl %0\n"
16004 : "+m" (v->counter));
16005 }
16006
16007@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
16008 */
16009 static inline void atomic_dec(atomic_t *v)
16010 {
16011- asm volatile(LOCK_PREFIX "decl %0"
16012+ asm volatile(LOCK_PREFIX "decl %0\n"
16013+
16014+#ifdef CONFIG_PAX_REFCOUNT
16015+ "jno 0f\n"
16016+ LOCK_PREFIX "incl %0\n"
16017+ "int $4\n0:\n"
16018+ _ASM_EXTABLE(0b, 0b)
16019+#endif
16020+
16021+ : "+m" (v->counter));
16022+}
16023+
16024+/**
16025+ * atomic_dec_unchecked - decrement atomic variable
16026+ * @v: pointer of type atomic_unchecked_t
16027+ *
16028+ * Atomically decrements @v by 1.
16029+ */
16030+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
16031+{
16032+ asm volatile(LOCK_PREFIX "decl %0\n"
16033 : "+m" (v->counter));
16034 }
16035
16036@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
16037 */
16038 static inline int atomic_dec_and_test(atomic_t *v)
16039 {
16040- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
16041+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
16042 }
16043
16044 /**
16045@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
16046 */
16047 static inline int atomic_inc_and_test(atomic_t *v)
16048 {
16049- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
16050+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
16051+}
16052+
16053+/**
16054+ * atomic_inc_and_test_unchecked - increment and test
16055+ * @v: pointer of type atomic_unchecked_t
16056+ *
16057+ * Atomically increments @v by 1
16058+ * and returns true if the result is zero, or false for all
16059+ * other cases.
16060+ */
16061+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
16062+{
16063+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
16064 }
16065
16066 /**
16067@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
16068 */
16069 static inline int atomic_add_negative(int i, atomic_t *v)
16070 {
16071- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
16072+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
16073 }
16074
16075 /**
16076@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
16077 *
16078 * Atomically adds @i to @v and returns @i + @v
16079 */
16080-static inline int atomic_add_return(int i, atomic_t *v)
16081+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
16082+{
16083+ return i + xadd_check_overflow(&v->counter, i);
16084+}
16085+
16086+/**
16087+ * atomic_add_return_unchecked - add integer and return
16088+ * @i: integer value to add
16089+ * @v: pointer of type atomic_unchecked_t
16090+ *
16091+ * Atomically adds @i to @v and returns @i + @v
16092+ */
16093+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
16094 {
16095 return i + xadd(&v->counter, i);
16096 }
16097@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
16098 *
16099 * Atomically subtracts @i from @v and returns @v - @i
16100 */
16101-static inline int atomic_sub_return(int i, atomic_t *v)
16102+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
16103 {
16104 return atomic_add_return(-i, v);
16105 }
16106
16107 #define atomic_inc_return(v) (atomic_add_return(1, v))
16108+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
16109+{
16110+ return atomic_add_return_unchecked(1, v);
16111+}
16112 #define atomic_dec_return(v) (atomic_sub_return(1, v))
16113
16114-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
16115+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
16116+{
16117+ return cmpxchg(&v->counter, old, new);
16118+}
16119+
16120+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
16121 {
16122 return cmpxchg(&v->counter, old, new);
16123 }
16124@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
16125 return xchg(&v->counter, new);
16126 }
16127
16128+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
16129+{
16130+ return xchg(&v->counter, new);
16131+}
16132+
16133 /**
16134 * __atomic_add_unless - add unless the number is already a given value
16135 * @v: pointer of type atomic_t
16136@@ -191,14 +337,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
16137 * Atomically adds @a to @v, so long as @v was not already @u.
16138 * Returns the old value of @v.
16139 */
16140-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
16141+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
16142 {
16143- int c, old;
16144+ int c, old, new;
16145 c = atomic_read(v);
16146 for (;;) {
16147- if (unlikely(c == (u)))
16148+ if (unlikely(c == u))
16149 break;
16150- old = atomic_cmpxchg((v), c, c + (a));
16151+
16152+ asm volatile("addl %2,%0\n"
16153+
16154+#ifdef CONFIG_PAX_REFCOUNT
16155+ "jno 0f\n"
16156+ "subl %2,%0\n"
16157+ "int $4\n0:\n"
16158+ _ASM_EXTABLE(0b, 0b)
16159+#endif
16160+
16161+ : "=r" (new)
16162+ : "0" (c), "ir" (a));
16163+
16164+ old = atomic_cmpxchg(v, c, new);
16165 if (likely(old == c))
16166 break;
16167 c = old;
16168@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
16169 }
16170
16171 /**
16172+ * atomic_inc_not_zero_hint - increment if not null
16173+ * @v: pointer of type atomic_t
16174+ * @hint: probable value of the atomic before the increment
16175+ *
16176+ * This version of atomic_inc_not_zero() gives a hint of probable
16177+ * value of the atomic. This helps processor to not read the memory
16178+ * before doing the atomic read/modify/write cycle, lowering
16179+ * number of bus transactions on some arches.
16180+ *
16181+ * Returns: 0 if increment was not done, 1 otherwise.
16182+ */
16183+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
16184+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
16185+{
16186+ int val, c = hint, new;
16187+
16188+ /* sanity test, should be removed by compiler if hint is a constant */
16189+ if (!hint)
16190+ return __atomic_add_unless(v, 1, 0);
16191+
16192+ do {
16193+ asm volatile("incl %0\n"
16194+
16195+#ifdef CONFIG_PAX_REFCOUNT
16196+ "jno 0f\n"
16197+ "decl %0\n"
16198+ "int $4\n0:\n"
16199+ _ASM_EXTABLE(0b, 0b)
16200+#endif
16201+
16202+ : "=r" (new)
16203+ : "0" (c));
16204+
16205+ val = atomic_cmpxchg(v, c, new);
16206+ if (val == c)
16207+ return 1;
16208+ c = val;
16209+ } while (c);
16210+
16211+ return 0;
16212+}
16213+
16214+/**
16215 * atomic_inc_short - increment of a short integer
16216 * @v: pointer to type int
16217 *
16218@@ -235,14 +437,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
16219 #endif
16220
16221 /* These are x86-specific, used by some header files */
16222-#define atomic_clear_mask(mask, addr) \
16223- asm volatile(LOCK_PREFIX "andl %0,%1" \
16224- : : "r" (~(mask)), "m" (*(addr)) : "memory")
16225+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
16226+{
16227+ asm volatile(LOCK_PREFIX "andl %1,%0"
16228+ : "+m" (v->counter)
16229+ : "r" (~(mask))
16230+ : "memory");
16231+}
16232
16233-#define atomic_set_mask(mask, addr) \
16234- asm volatile(LOCK_PREFIX "orl %0,%1" \
16235- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
16236- : "memory")
16237+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
16238+{
16239+ asm volatile(LOCK_PREFIX "andl %1,%0"
16240+ : "+m" (v->counter)
16241+ : "r" (~(mask))
16242+ : "memory");
16243+}
16244+
16245+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
16246+{
16247+ asm volatile(LOCK_PREFIX "orl %1,%0"
16248+ : "+m" (v->counter)
16249+ : "r" (mask)
16250+ : "memory");
16251+}
16252+
16253+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
16254+{
16255+ asm volatile(LOCK_PREFIX "orl %1,%0"
16256+ : "+m" (v->counter)
16257+ : "r" (mask)
16258+ : "memory");
16259+}
16260
16261 #ifdef CONFIG_X86_32
16262 # include <asm/atomic64_32.h>
16263diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
16264index b154de7..bf18a5a 100644
16265--- a/arch/x86/include/asm/atomic64_32.h
16266+++ b/arch/x86/include/asm/atomic64_32.h
16267@@ -12,6 +12,14 @@ typedef struct {
16268 u64 __aligned(8) counter;
16269 } atomic64_t;
16270
16271+#ifdef CONFIG_PAX_REFCOUNT
16272+typedef struct {
16273+ u64 __aligned(8) counter;
16274+} atomic64_unchecked_t;
16275+#else
16276+typedef atomic64_t atomic64_unchecked_t;
16277+#endif
16278+
16279 #define ATOMIC64_INIT(val) { (val) }
16280
16281 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
16282@@ -37,21 +45,31 @@ typedef struct {
16283 ATOMIC64_DECL_ONE(sym##_386)
16284
16285 ATOMIC64_DECL_ONE(add_386);
16286+ATOMIC64_DECL_ONE(add_unchecked_386);
16287 ATOMIC64_DECL_ONE(sub_386);
16288+ATOMIC64_DECL_ONE(sub_unchecked_386);
16289 ATOMIC64_DECL_ONE(inc_386);
16290+ATOMIC64_DECL_ONE(inc_unchecked_386);
16291 ATOMIC64_DECL_ONE(dec_386);
16292+ATOMIC64_DECL_ONE(dec_unchecked_386);
16293 #endif
16294
16295 #define alternative_atomic64(f, out, in...) \
16296 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
16297
16298 ATOMIC64_DECL(read);
16299+ATOMIC64_DECL(read_unchecked);
16300 ATOMIC64_DECL(set);
16301+ATOMIC64_DECL(set_unchecked);
16302 ATOMIC64_DECL(xchg);
16303 ATOMIC64_DECL(add_return);
16304+ATOMIC64_DECL(add_return_unchecked);
16305 ATOMIC64_DECL(sub_return);
16306+ATOMIC64_DECL(sub_return_unchecked);
16307 ATOMIC64_DECL(inc_return);
16308+ATOMIC64_DECL(inc_return_unchecked);
16309 ATOMIC64_DECL(dec_return);
16310+ATOMIC64_DECL(dec_return_unchecked);
16311 ATOMIC64_DECL(dec_if_positive);
16312 ATOMIC64_DECL(inc_not_zero);
16313 ATOMIC64_DECL(add_unless);
16314@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
16315 }
16316
16317 /**
16318+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
16319+ * @p: pointer to type atomic64_unchecked_t
16320+ * @o: expected value
16321+ * @n: new value
16322+ *
16323+ * Atomically sets @v to @n if it was equal to @o and returns
16324+ * the old value.
16325+ */
16326+
16327+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
16328+{
16329+ return cmpxchg64(&v->counter, o, n);
16330+}
16331+
16332+/**
16333 * atomic64_xchg - xchg atomic64 variable
16334 * @v: pointer to type atomic64_t
16335 * @n: value to assign
16336@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
16337 }
16338
16339 /**
16340+ * atomic64_set_unchecked - set atomic64 variable
16341+ * @v: pointer to type atomic64_unchecked_t
16342+ * @n: value to assign
16343+ *
16344+ * Atomically sets the value of @v to @n.
16345+ */
16346+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
16347+{
16348+ unsigned high = (unsigned)(i >> 32);
16349+ unsigned low = (unsigned)i;
16350+ alternative_atomic64(set, /* no output */,
16351+ "S" (v), "b" (low), "c" (high)
16352+ : "eax", "edx", "memory");
16353+}
16354+
16355+/**
16356 * atomic64_read - read atomic64 variable
16357 * @v: pointer to type atomic64_t
16358 *
16359@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
16360 }
16361
16362 /**
16363+ * atomic64_read_unchecked - read atomic64 variable
16364+ * @v: pointer to type atomic64_unchecked_t
16365+ *
16366+ * Atomically reads the value of @v and returns it.
16367+ */
16368+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
16369+{
16370+ long long r;
16371+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
16372+ return r;
16373+ }
16374+
16375+/**
16376 * atomic64_add_return - add and return
16377 * @i: integer value to add
16378 * @v: pointer to type atomic64_t
16379@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
16380 return i;
16381 }
16382
16383+/**
16384+ * atomic64_add_return_unchecked - add and return
16385+ * @i: integer value to add
16386+ * @v: pointer to type atomic64_unchecked_t
16387+ *
16388+ * Atomically adds @i to @v and returns @i + *@v
16389+ */
16390+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
16391+{
16392+ alternative_atomic64(add_return_unchecked,
16393+ ASM_OUTPUT2("+A" (i), "+c" (v)),
16394+ ASM_NO_INPUT_CLOBBER("memory"));
16395+ return i;
16396+}
16397+
16398 /*
16399 * Other variants with different arithmetic operators:
16400 */
16401@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
16402 return a;
16403 }
16404
16405+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16406+{
16407+ long long a;
16408+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
16409+ "S" (v) : "memory", "ecx");
16410+ return a;
16411+}
16412+
16413 static inline long long atomic64_dec_return(atomic64_t *v)
16414 {
16415 long long a;
16416@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
16417 }
16418
16419 /**
16420+ * atomic64_add_unchecked - add integer to atomic64 variable
16421+ * @i: integer value to add
16422+ * @v: pointer to type atomic64_unchecked_t
16423+ *
16424+ * Atomically adds @i to @v.
16425+ */
16426+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
16427+{
16428+ __alternative_atomic64(add_unchecked, add_return_unchecked,
16429+ ASM_OUTPUT2("+A" (i), "+c" (v)),
16430+ ASM_NO_INPUT_CLOBBER("memory"));
16431+ return i;
16432+}
16433+
16434+/**
16435 * atomic64_sub - subtract the atomic64 variable
16436 * @i: integer value to subtract
16437 * @v: pointer to type atomic64_t
16438diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
16439index 46e9052..ae45136 100644
16440--- a/arch/x86/include/asm/atomic64_64.h
16441+++ b/arch/x86/include/asm/atomic64_64.h
16442@@ -18,7 +18,19 @@
16443 */
16444 static inline long atomic64_read(const atomic64_t *v)
16445 {
16446- return (*(volatile long *)&(v)->counter);
16447+ return (*(volatile const long *)&(v)->counter);
16448+}
16449+
16450+/**
16451+ * atomic64_read_unchecked - read atomic64 variable
16452+ * @v: pointer of type atomic64_unchecked_t
16453+ *
16454+ * Atomically reads the value of @v.
16455+ * Doesn't imply a read memory barrier.
16456+ */
16457+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
16458+{
16459+ return (*(volatile const long *)&(v)->counter);
16460 }
16461
16462 /**
16463@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
16464 }
16465
16466 /**
16467+ * atomic64_set_unchecked - set atomic64 variable
16468+ * @v: pointer to type atomic64_unchecked_t
16469+ * @i: required value
16470+ *
16471+ * Atomically sets the value of @v to @i.
16472+ */
16473+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
16474+{
16475+ v->counter = i;
16476+}
16477+
16478+/**
16479 * atomic64_add - add integer to atomic64 variable
16480 * @i: integer value to add
16481 * @v: pointer to type atomic64_t
16482@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
16483 */
16484 static inline void atomic64_add(long i, atomic64_t *v)
16485 {
16486+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
16487+
16488+#ifdef CONFIG_PAX_REFCOUNT
16489+ "jno 0f\n"
16490+ LOCK_PREFIX "subq %1,%0\n"
16491+ "int $4\n0:\n"
16492+ _ASM_EXTABLE(0b, 0b)
16493+#endif
16494+
16495+ : "=m" (v->counter)
16496+ : "er" (i), "m" (v->counter));
16497+}
16498+
16499+/**
16500+ * atomic64_add_unchecked - add integer to atomic64 variable
16501+ * @i: integer value to add
16502+ * @v: pointer to type atomic64_unchecked_t
16503+ *
16504+ * Atomically adds @i to @v.
16505+ */
16506+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
16507+{
16508 asm volatile(LOCK_PREFIX "addq %1,%0"
16509 : "=m" (v->counter)
16510 : "er" (i), "m" (v->counter));
16511@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
16512 */
16513 static inline void atomic64_sub(long i, atomic64_t *v)
16514 {
16515- asm volatile(LOCK_PREFIX "subq %1,%0"
16516+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16517+
16518+#ifdef CONFIG_PAX_REFCOUNT
16519+ "jno 0f\n"
16520+ LOCK_PREFIX "addq %1,%0\n"
16521+ "int $4\n0:\n"
16522+ _ASM_EXTABLE(0b, 0b)
16523+#endif
16524+
16525+ : "=m" (v->counter)
16526+ : "er" (i), "m" (v->counter));
16527+}
16528+
16529+/**
16530+ * atomic64_sub_unchecked - subtract the atomic64 variable
16531+ * @i: integer value to subtract
16532+ * @v: pointer to type atomic64_unchecked_t
16533+ *
16534+ * Atomically subtracts @i from @v.
16535+ */
16536+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
16537+{
16538+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16539 : "=m" (v->counter)
16540 : "er" (i), "m" (v->counter));
16541 }
16542@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
16543 */
16544 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16545 {
16546- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
16547+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
16548 }
16549
16550 /**
16551@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16552 */
16553 static inline void atomic64_inc(atomic64_t *v)
16554 {
16555+ asm volatile(LOCK_PREFIX "incq %0\n"
16556+
16557+#ifdef CONFIG_PAX_REFCOUNT
16558+ "jno 0f\n"
16559+ LOCK_PREFIX "decq %0\n"
16560+ "int $4\n0:\n"
16561+ _ASM_EXTABLE(0b, 0b)
16562+#endif
16563+
16564+ : "=m" (v->counter)
16565+ : "m" (v->counter));
16566+}
16567+
16568+/**
16569+ * atomic64_inc_unchecked - increment atomic64 variable
16570+ * @v: pointer to type atomic64_unchecked_t
16571+ *
16572+ * Atomically increments @v by 1.
16573+ */
16574+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
16575+{
16576 asm volatile(LOCK_PREFIX "incq %0"
16577 : "=m" (v->counter)
16578 : "m" (v->counter));
16579@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
16580 */
16581 static inline void atomic64_dec(atomic64_t *v)
16582 {
16583- asm volatile(LOCK_PREFIX "decq %0"
16584+ asm volatile(LOCK_PREFIX "decq %0\n"
16585+
16586+#ifdef CONFIG_PAX_REFCOUNT
16587+ "jno 0f\n"
16588+ LOCK_PREFIX "incq %0\n"
16589+ "int $4\n0:\n"
16590+ _ASM_EXTABLE(0b, 0b)
16591+#endif
16592+
16593+ : "=m" (v->counter)
16594+ : "m" (v->counter));
16595+}
16596+
16597+/**
16598+ * atomic64_dec_unchecked - decrement atomic64 variable
16599+ * @v: pointer to type atomic64_t
16600+ *
16601+ * Atomically decrements @v by 1.
16602+ */
16603+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
16604+{
16605+ asm volatile(LOCK_PREFIX "decq %0\n"
16606 : "=m" (v->counter)
16607 : "m" (v->counter));
16608 }
16609@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
16610 */
16611 static inline int atomic64_dec_and_test(atomic64_t *v)
16612 {
16613- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
16614+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
16615 }
16616
16617 /**
16618@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
16619 */
16620 static inline int atomic64_inc_and_test(atomic64_t *v)
16621 {
16622- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
16623+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
16624 }
16625
16626 /**
16627@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
16628 */
16629 static inline int atomic64_add_negative(long i, atomic64_t *v)
16630 {
16631- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
16632+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
16633 }
16634
16635 /**
16636@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16637 */
16638 static inline long atomic64_add_return(long i, atomic64_t *v)
16639 {
16640+ return i + xadd_check_overflow(&v->counter, i);
16641+}
16642+
16643+/**
16644+ * atomic64_add_return_unchecked - add and return
16645+ * @i: integer value to add
16646+ * @v: pointer to type atomic64_unchecked_t
16647+ *
16648+ * Atomically adds @i to @v and returns @i + @v
16649+ */
16650+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16651+{
16652 return i + xadd(&v->counter, i);
16653 }
16654
16655@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16656 }
16657
16658 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16659+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16660+{
16661+ return atomic64_add_return_unchecked(1, v);
16662+}
16663 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16664
16665 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16666@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16667 return cmpxchg(&v->counter, old, new);
16668 }
16669
16670+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16671+{
16672+ return cmpxchg(&v->counter, old, new);
16673+}
16674+
16675 static inline long atomic64_xchg(atomic64_t *v, long new)
16676 {
16677 return xchg(&v->counter, new);
16678@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16679 */
16680 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16681 {
16682- long c, old;
16683+ long c, old, new;
16684 c = atomic64_read(v);
16685 for (;;) {
16686- if (unlikely(c == (u)))
16687+ if (unlikely(c == u))
16688 break;
16689- old = atomic64_cmpxchg((v), c, c + (a));
16690+
16691+ asm volatile("add %2,%0\n"
16692+
16693+#ifdef CONFIG_PAX_REFCOUNT
16694+ "jno 0f\n"
16695+ "sub %2,%0\n"
16696+ "int $4\n0:\n"
16697+ _ASM_EXTABLE(0b, 0b)
16698+#endif
16699+
16700+ : "=r" (new)
16701+ : "0" (c), "ir" (a));
16702+
16703+ old = atomic64_cmpxchg(v, c, new);
16704 if (likely(old == c))
16705 break;
16706 c = old;
16707 }
16708- return c != (u);
16709+ return c != u;
16710 }
16711
16712 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16713diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16714index 0f4460b..fa1ee19 100644
16715--- a/arch/x86/include/asm/barrier.h
16716+++ b/arch/x86/include/asm/barrier.h
16717@@ -107,7 +107,7 @@
16718 do { \
16719 compiletime_assert_atomic_type(*p); \
16720 smp_mb(); \
16721- ACCESS_ONCE(*p) = (v); \
16722+ ACCESS_ONCE_RW(*p) = (v); \
16723 } while (0)
16724
16725 #define smp_load_acquire(p) \
16726@@ -124,7 +124,7 @@ do { \
16727 do { \
16728 compiletime_assert_atomic_type(*p); \
16729 barrier(); \
16730- ACCESS_ONCE(*p) = (v); \
16731+ ACCESS_ONCE_RW(*p) = (v); \
16732 } while (0)
16733
16734 #define smp_load_acquire(p) \
16735diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16736index cfe3b95..d01b118 100644
16737--- a/arch/x86/include/asm/bitops.h
16738+++ b/arch/x86/include/asm/bitops.h
16739@@ -50,7 +50,7 @@
16740 * a mask operation on a byte.
16741 */
16742 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16743-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16744+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16745 #define CONST_MASK(nr) (1 << ((nr) & 7))
16746
16747 /**
16748@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16749 */
16750 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16751 {
16752- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16753+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16754 }
16755
16756 /**
16757@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16758 */
16759 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16760 {
16761- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16762+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16763 }
16764
16765 /**
16766@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16767 */
16768 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16769 {
16770- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16771+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16772 }
16773
16774 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16775@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16776 *
16777 * Undefined if no bit exists, so code should check against 0 first.
16778 */
16779-static inline unsigned long __ffs(unsigned long word)
16780+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16781 {
16782 asm("rep; bsf %1,%0"
16783 : "=r" (word)
16784@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16785 *
16786 * Undefined if no zero exists, so code should check against ~0UL first.
16787 */
16788-static inline unsigned long ffz(unsigned long word)
16789+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16790 {
16791 asm("rep; bsf %1,%0"
16792 : "=r" (word)
16793@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16794 *
16795 * Undefined if no set bit exists, so code should check against 0 first.
16796 */
16797-static inline unsigned long __fls(unsigned long word)
16798+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16799 {
16800 asm("bsr %1,%0"
16801 : "=r" (word)
16802@@ -434,7 +434,7 @@ static inline int ffs(int x)
16803 * set bit if value is nonzero. The last (most significant) bit is
16804 * at position 32.
16805 */
16806-static inline int fls(int x)
16807+static inline int __intentional_overflow(-1) fls(int x)
16808 {
16809 int r;
16810
16811@@ -476,7 +476,7 @@ static inline int fls(int x)
16812 * at position 64.
16813 */
16814 #ifdef CONFIG_X86_64
16815-static __always_inline int fls64(__u64 x)
16816+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16817 {
16818 int bitpos = -1;
16819 /*
16820diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16821index 4fa687a..60f2d39 100644
16822--- a/arch/x86/include/asm/boot.h
16823+++ b/arch/x86/include/asm/boot.h
16824@@ -6,10 +6,15 @@
16825 #include <uapi/asm/boot.h>
16826
16827 /* Physical address where kernel should be loaded. */
16828-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16829+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16830 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16831 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16832
16833+#ifndef __ASSEMBLY__
16834+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16835+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16836+#endif
16837+
16838 /* Minimum kernel alignment, as a power of two */
16839 #ifdef CONFIG_X86_64
16840 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16841diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16842index 48f99f1..d78ebf9 100644
16843--- a/arch/x86/include/asm/cache.h
16844+++ b/arch/x86/include/asm/cache.h
16845@@ -5,12 +5,13 @@
16846
16847 /* L1 cache line size */
16848 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16849-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16850+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16851
16852 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16853+#define __read_only __attribute__((__section__(".data..read_only")))
16854
16855 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16856-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16857+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16858
16859 #ifdef CONFIG_X86_VSMP
16860 #ifdef CONFIG_SMP
16861diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
16862index 9863ee3..4a1f8e1 100644
16863--- a/arch/x86/include/asm/cacheflush.h
16864+++ b/arch/x86/include/asm/cacheflush.h
16865@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
16866 unsigned long pg_flags = pg->flags & _PGMT_MASK;
16867
16868 if (pg_flags == _PGMT_DEFAULT)
16869- return -1;
16870+ return ~0UL;
16871 else if (pg_flags == _PGMT_WC)
16872 return _PAGE_CACHE_WC;
16873 else if (pg_flags == _PGMT_UC_MINUS)
16874diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16875index cb4c73b..c473c29 100644
16876--- a/arch/x86/include/asm/calling.h
16877+++ b/arch/x86/include/asm/calling.h
16878@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
16879 #define RSP 152
16880 #define SS 160
16881
16882-#define ARGOFFSET R11
16883-#define SWFRAME ORIG_RAX
16884+#define ARGOFFSET R15
16885
16886 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
16887- subq $9*8+\addskip, %rsp
16888- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16889- movq_cfi rdi, 8*8
16890- movq_cfi rsi, 7*8
16891- movq_cfi rdx, 6*8
16892+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16893+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16894+ movq_cfi rdi, RDI
16895+ movq_cfi rsi, RSI
16896+ movq_cfi rdx, RDX
16897
16898 .if \save_rcx
16899- movq_cfi rcx, 5*8
16900+ movq_cfi rcx, RCX
16901 .endif
16902
16903- movq_cfi rax, 4*8
16904+ movq_cfi rax, RAX
16905
16906 .if \save_r891011
16907- movq_cfi r8, 3*8
16908- movq_cfi r9, 2*8
16909- movq_cfi r10, 1*8
16910- movq_cfi r11, 0*8
16911+ movq_cfi r8, R8
16912+ movq_cfi r9, R9
16913+ movq_cfi r10, R10
16914+ movq_cfi r11, R11
16915 .endif
16916
16917+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16918+ movq_cfi r12, R12
16919+#endif
16920+
16921 .endm
16922
16923-#define ARG_SKIP (9*8)
16924+#define ARG_SKIP ORIG_RAX
16925
16926 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16927 rstor_r8910=1, rstor_rdx=1
16928+
16929+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16930+ movq_cfi_restore R12, r12
16931+#endif
16932+
16933 .if \rstor_r11
16934- movq_cfi_restore 0*8, r11
16935+ movq_cfi_restore R11, r11
16936 .endif
16937
16938 .if \rstor_r8910
16939- movq_cfi_restore 1*8, r10
16940- movq_cfi_restore 2*8, r9
16941- movq_cfi_restore 3*8, r8
16942+ movq_cfi_restore R10, r10
16943+ movq_cfi_restore R9, r9
16944+ movq_cfi_restore R8, r8
16945 .endif
16946
16947 .if \rstor_rax
16948- movq_cfi_restore 4*8, rax
16949+ movq_cfi_restore RAX, rax
16950 .endif
16951
16952 .if \rstor_rcx
16953- movq_cfi_restore 5*8, rcx
16954+ movq_cfi_restore RCX, rcx
16955 .endif
16956
16957 .if \rstor_rdx
16958- movq_cfi_restore 6*8, rdx
16959+ movq_cfi_restore RDX, rdx
16960 .endif
16961
16962- movq_cfi_restore 7*8, rsi
16963- movq_cfi_restore 8*8, rdi
16964+ movq_cfi_restore RSI, rsi
16965+ movq_cfi_restore RDI, rdi
16966
16967- .if ARG_SKIP+\addskip > 0
16968- addq $ARG_SKIP+\addskip, %rsp
16969- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16970+ .if ORIG_RAX+\addskip > 0
16971+ addq $ORIG_RAX+\addskip, %rsp
16972+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16973 .endif
16974 .endm
16975
16976- .macro LOAD_ARGS offset, skiprax=0
16977- movq \offset(%rsp), %r11
16978- movq \offset+8(%rsp), %r10
16979- movq \offset+16(%rsp), %r9
16980- movq \offset+24(%rsp), %r8
16981- movq \offset+40(%rsp), %rcx
16982- movq \offset+48(%rsp), %rdx
16983- movq \offset+56(%rsp), %rsi
16984- movq \offset+64(%rsp), %rdi
16985+ .macro LOAD_ARGS skiprax=0
16986+ movq R11(%rsp), %r11
16987+ movq R10(%rsp), %r10
16988+ movq R9(%rsp), %r9
16989+ movq R8(%rsp), %r8
16990+ movq RCX(%rsp), %rcx
16991+ movq RDX(%rsp), %rdx
16992+ movq RSI(%rsp), %rsi
16993+ movq RDI(%rsp), %rdi
16994 .if \skiprax
16995 .else
16996- movq \offset+72(%rsp), %rax
16997+ movq RAX(%rsp), %rax
16998 .endif
16999 .endm
17000
17001-#define REST_SKIP (6*8)
17002-
17003 .macro SAVE_REST
17004- subq $REST_SKIP, %rsp
17005- CFI_ADJUST_CFA_OFFSET REST_SKIP
17006- movq_cfi rbx, 5*8
17007- movq_cfi rbp, 4*8
17008- movq_cfi r12, 3*8
17009- movq_cfi r13, 2*8
17010- movq_cfi r14, 1*8
17011- movq_cfi r15, 0*8
17012+ movq_cfi rbx, RBX
17013+ movq_cfi rbp, RBP
17014+
17015+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
17016+ movq_cfi r12, R12
17017+#endif
17018+
17019+ movq_cfi r13, R13
17020+ movq_cfi r14, R14
17021+ movq_cfi r15, R15
17022 .endm
17023
17024 .macro RESTORE_REST
17025- movq_cfi_restore 0*8, r15
17026- movq_cfi_restore 1*8, r14
17027- movq_cfi_restore 2*8, r13
17028- movq_cfi_restore 3*8, r12
17029- movq_cfi_restore 4*8, rbp
17030- movq_cfi_restore 5*8, rbx
17031- addq $REST_SKIP, %rsp
17032- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
17033+ movq_cfi_restore R15, r15
17034+ movq_cfi_restore R14, r14
17035+ movq_cfi_restore R13, r13
17036+
17037+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
17038+ movq_cfi_restore R12, r12
17039+#endif
17040+
17041+ movq_cfi_restore RBP, rbp
17042+ movq_cfi_restore RBX, rbx
17043 .endm
17044
17045 .macro SAVE_ALL
17046diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
17047index f50de69..2b0a458 100644
17048--- a/arch/x86/include/asm/checksum_32.h
17049+++ b/arch/x86/include/asm/checksum_32.h
17050@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
17051 int len, __wsum sum,
17052 int *src_err_ptr, int *dst_err_ptr);
17053
17054+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
17055+ int len, __wsum sum,
17056+ int *src_err_ptr, int *dst_err_ptr);
17057+
17058+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
17059+ int len, __wsum sum,
17060+ int *src_err_ptr, int *dst_err_ptr);
17061+
17062 /*
17063 * Note: when you get a NULL pointer exception here this means someone
17064 * passed in an incorrect kernel address to one of these functions.
17065@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
17066
17067 might_sleep();
17068 stac();
17069- ret = csum_partial_copy_generic((__force void *)src, dst,
17070+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
17071 len, sum, err_ptr, NULL);
17072 clac();
17073
17074@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
17075 might_sleep();
17076 if (access_ok(VERIFY_WRITE, dst, len)) {
17077 stac();
17078- ret = csum_partial_copy_generic(src, (__force void *)dst,
17079+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
17080 len, sum, NULL, err_ptr);
17081 clac();
17082 return ret;
17083diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
17084index 99c105d7..2f667ac 100644
17085--- a/arch/x86/include/asm/cmpxchg.h
17086+++ b/arch/x86/include/asm/cmpxchg.h
17087@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
17088 __compiletime_error("Bad argument size for cmpxchg");
17089 extern void __xadd_wrong_size(void)
17090 __compiletime_error("Bad argument size for xadd");
17091+extern void __xadd_check_overflow_wrong_size(void)
17092+ __compiletime_error("Bad argument size for xadd_check_overflow");
17093 extern void __add_wrong_size(void)
17094 __compiletime_error("Bad argument size for add");
17095+extern void __add_check_overflow_wrong_size(void)
17096+ __compiletime_error("Bad argument size for add_check_overflow");
17097
17098 /*
17099 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
17100@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
17101 __ret; \
17102 })
17103
17104+#ifdef CONFIG_PAX_REFCOUNT
17105+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
17106+ ({ \
17107+ __typeof__ (*(ptr)) __ret = (arg); \
17108+ switch (sizeof(*(ptr))) { \
17109+ case __X86_CASE_L: \
17110+ asm volatile (lock #op "l %0, %1\n" \
17111+ "jno 0f\n" \
17112+ "mov %0,%1\n" \
17113+ "int $4\n0:\n" \
17114+ _ASM_EXTABLE(0b, 0b) \
17115+ : "+r" (__ret), "+m" (*(ptr)) \
17116+ : : "memory", "cc"); \
17117+ break; \
17118+ case __X86_CASE_Q: \
17119+ asm volatile (lock #op "q %q0, %1\n" \
17120+ "jno 0f\n" \
17121+ "mov %0,%1\n" \
17122+ "int $4\n0:\n" \
17123+ _ASM_EXTABLE(0b, 0b) \
17124+ : "+r" (__ret), "+m" (*(ptr)) \
17125+ : : "memory", "cc"); \
17126+ break; \
17127+ default: \
17128+ __ ## op ## _check_overflow_wrong_size(); \
17129+ } \
17130+ __ret; \
17131+ })
17132+#else
17133+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
17134+#endif
17135+
17136 /*
17137 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
17138 * Since this is generally used to protect other memory information, we
17139@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
17140 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
17141 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
17142
17143+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
17144+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
17145+
17146 #define __add(ptr, inc, lock) \
17147 ({ \
17148 __typeof__ (*(ptr)) __ret = (inc); \
17149diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
17150index 59c6c40..5e0b22c 100644
17151--- a/arch/x86/include/asm/compat.h
17152+++ b/arch/x86/include/asm/compat.h
17153@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
17154 typedef u32 compat_uint_t;
17155 typedef u32 compat_ulong_t;
17156 typedef u64 __attribute__((aligned(4))) compat_u64;
17157-typedef u32 compat_uptr_t;
17158+typedef u32 __user compat_uptr_t;
17159
17160 struct compat_timespec {
17161 compat_time_t tv_sec;
17162diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
17163index 2075e6c..d65aa96 100644
17164--- a/arch/x86/include/asm/cpufeature.h
17165+++ b/arch/x86/include/asm/cpufeature.h
17166@@ -204,14 +204,14 @@
17167 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
17168 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
17169
17170-
17171+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
17172 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
17173 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
17174 #define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
17175 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
17176 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
17177 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
17178-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
17179+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
17180 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
17181 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
17182 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
17183@@ -371,6 +371,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
17184 #undef cpu_has_centaur_mcr
17185 #define cpu_has_centaur_mcr 0
17186
17187+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
17188 #endif /* CONFIG_X86_64 */
17189
17190 #if __GNUC__ >= 4
17191@@ -423,7 +424,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
17192
17193 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
17194 t_warn:
17195- warn_pre_alternatives();
17196+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
17197+ warn_pre_alternatives();
17198 return false;
17199 #endif
17200
17201@@ -443,7 +445,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
17202 ".section .discard,\"aw\",@progbits\n"
17203 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
17204 ".previous\n"
17205- ".section .altinstr_replacement,\"ax\"\n"
17206+ ".section .altinstr_replacement,\"a\"\n"
17207 "3: movb $1,%0\n"
17208 "4:\n"
17209 ".previous\n"
17210@@ -480,7 +482,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17211 " .byte 2b - 1b\n" /* src len */
17212 " .byte 4f - 3f\n" /* repl len */
17213 ".previous\n"
17214- ".section .altinstr_replacement,\"ax\"\n"
17215+ ".section .altinstr_replacement,\"a\"\n"
17216 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
17217 "4:\n"
17218 ".previous\n"
17219@@ -513,7 +515,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17220 ".section .discard,\"aw\",@progbits\n"
17221 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
17222 ".previous\n"
17223- ".section .altinstr_replacement,\"ax\"\n"
17224+ ".section .altinstr_replacement,\"a\"\n"
17225 "3: movb $0,%0\n"
17226 "4:\n"
17227 ".previous\n"
17228@@ -527,7 +529,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17229 ".section .discard,\"aw\",@progbits\n"
17230 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
17231 ".previous\n"
17232- ".section .altinstr_replacement,\"ax\"\n"
17233+ ".section .altinstr_replacement,\"a\"\n"
17234 "5: movb $1,%0\n"
17235 "6:\n"
17236 ".previous\n"
17237diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
17238index 50d033a..37deb26 100644
17239--- a/arch/x86/include/asm/desc.h
17240+++ b/arch/x86/include/asm/desc.h
17241@@ -4,6 +4,7 @@
17242 #include <asm/desc_defs.h>
17243 #include <asm/ldt.h>
17244 #include <asm/mmu.h>
17245+#include <asm/pgtable.h>
17246
17247 #include <linux/smp.h>
17248 #include <linux/percpu.h>
17249@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
17250
17251 desc->type = (info->read_exec_only ^ 1) << 1;
17252 desc->type |= info->contents << 2;
17253+ desc->type |= info->seg_not_present ^ 1;
17254
17255 desc->s = 1;
17256 desc->dpl = 0x3;
17257@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
17258 }
17259
17260 extern struct desc_ptr idt_descr;
17261-extern gate_desc idt_table[];
17262-extern struct desc_ptr debug_idt_descr;
17263-extern gate_desc debug_idt_table[];
17264-
17265-struct gdt_page {
17266- struct desc_struct gdt[GDT_ENTRIES];
17267-} __attribute__((aligned(PAGE_SIZE)));
17268-
17269-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
17270+extern gate_desc idt_table[IDT_ENTRIES];
17271+extern const struct desc_ptr debug_idt_descr;
17272+extern gate_desc debug_idt_table[IDT_ENTRIES];
17273
17274+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
17275 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
17276 {
17277- return per_cpu(gdt_page, cpu).gdt;
17278+ return cpu_gdt_table[cpu];
17279 }
17280
17281 #ifdef CONFIG_X86_64
17282@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
17283 unsigned long base, unsigned dpl, unsigned flags,
17284 unsigned short seg)
17285 {
17286- gate->a = (seg << 16) | (base & 0xffff);
17287- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
17288+ gate->gate.offset_low = base;
17289+ gate->gate.seg = seg;
17290+ gate->gate.reserved = 0;
17291+ gate->gate.type = type;
17292+ gate->gate.s = 0;
17293+ gate->gate.dpl = dpl;
17294+ gate->gate.p = 1;
17295+ gate->gate.offset_high = base >> 16;
17296 }
17297
17298 #endif
17299@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
17300
17301 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
17302 {
17303+ pax_open_kernel();
17304 memcpy(&idt[entry], gate, sizeof(*gate));
17305+ pax_close_kernel();
17306 }
17307
17308 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
17309 {
17310+ pax_open_kernel();
17311 memcpy(&ldt[entry], desc, 8);
17312+ pax_close_kernel();
17313 }
17314
17315 static inline void
17316@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
17317 default: size = sizeof(*gdt); break;
17318 }
17319
17320+ pax_open_kernel();
17321 memcpy(&gdt[entry], desc, size);
17322+ pax_close_kernel();
17323 }
17324
17325 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
17326@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
17327
17328 static inline void native_load_tr_desc(void)
17329 {
17330+ pax_open_kernel();
17331 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
17332+ pax_close_kernel();
17333 }
17334
17335 static inline void native_load_gdt(const struct desc_ptr *dtr)
17336@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
17337 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
17338 unsigned int i;
17339
17340+ pax_open_kernel();
17341 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
17342 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
17343+ pax_close_kernel();
17344 }
17345
17346 #define _LDT_empty(info) \
17347@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
17348 preempt_enable();
17349 }
17350
17351-static inline unsigned long get_desc_base(const struct desc_struct *desc)
17352+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
17353 {
17354 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
17355 }
17356@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
17357 }
17358
17359 #ifdef CONFIG_X86_64
17360-static inline void set_nmi_gate(int gate, void *addr)
17361+static inline void set_nmi_gate(int gate, const void *addr)
17362 {
17363 gate_desc s;
17364
17365@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
17366 #endif
17367
17368 #ifdef CONFIG_TRACING
17369-extern struct desc_ptr trace_idt_descr;
17370-extern gate_desc trace_idt_table[];
17371+extern const struct desc_ptr trace_idt_descr;
17372+extern gate_desc trace_idt_table[IDT_ENTRIES];
17373 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
17374 {
17375 write_idt_entry(trace_idt_table, entry, gate);
17376 }
17377
17378-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
17379+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
17380 unsigned dpl, unsigned ist, unsigned seg)
17381 {
17382 gate_desc s;
17383@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
17384 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
17385 #endif
17386
17387-static inline void _set_gate(int gate, unsigned type, void *addr,
17388+static inline void _set_gate(int gate, unsigned type, const void *addr,
17389 unsigned dpl, unsigned ist, unsigned seg)
17390 {
17391 gate_desc s;
17392@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
17393 #define set_intr_gate(n, addr) \
17394 do { \
17395 BUG_ON((unsigned)n > 0xFF); \
17396- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
17397+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
17398 __KERNEL_CS); \
17399- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
17400+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
17401 0, 0, __KERNEL_CS); \
17402 } while (0)
17403
17404@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
17405 /*
17406 * This routine sets up an interrupt gate at directory privilege level 3.
17407 */
17408-static inline void set_system_intr_gate(unsigned int n, void *addr)
17409+static inline void set_system_intr_gate(unsigned int n, const void *addr)
17410 {
17411 BUG_ON((unsigned)n > 0xFF);
17412 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
17413 }
17414
17415-static inline void set_system_trap_gate(unsigned int n, void *addr)
17416+static inline void set_system_trap_gate(unsigned int n, const void *addr)
17417 {
17418 BUG_ON((unsigned)n > 0xFF);
17419 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
17420 }
17421
17422-static inline void set_trap_gate(unsigned int n, void *addr)
17423+static inline void set_trap_gate(unsigned int n, const void *addr)
17424 {
17425 BUG_ON((unsigned)n > 0xFF);
17426 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
17427@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
17428 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
17429 {
17430 BUG_ON((unsigned)n > 0xFF);
17431- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
17432+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
17433 }
17434
17435-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
17436+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
17437 {
17438 BUG_ON((unsigned)n > 0xFF);
17439 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
17440 }
17441
17442-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
17443+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
17444 {
17445 BUG_ON((unsigned)n > 0xFF);
17446 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
17447@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
17448 else
17449 load_idt((const struct desc_ptr *)&idt_descr);
17450 }
17451+
17452+#ifdef CONFIG_X86_32
17453+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
17454+{
17455+ struct desc_struct d;
17456+
17457+ if (likely(limit))
17458+ limit = (limit - 1UL) >> PAGE_SHIFT;
17459+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
17460+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
17461+}
17462+#endif
17463+
17464 #endif /* _ASM_X86_DESC_H */
17465diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
17466index 278441f..b95a174 100644
17467--- a/arch/x86/include/asm/desc_defs.h
17468+++ b/arch/x86/include/asm/desc_defs.h
17469@@ -31,6 +31,12 @@ struct desc_struct {
17470 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
17471 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
17472 };
17473+ struct {
17474+ u16 offset_low;
17475+ u16 seg;
17476+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
17477+ unsigned offset_high: 16;
17478+ } gate;
17479 };
17480 } __attribute__((packed));
17481
17482diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
17483index ced283a..ffe04cc 100644
17484--- a/arch/x86/include/asm/div64.h
17485+++ b/arch/x86/include/asm/div64.h
17486@@ -39,7 +39,7 @@
17487 __mod; \
17488 })
17489
17490-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
17491+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
17492 {
17493 union {
17494 u64 v64;
17495diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
17496index ca3347a..1a5082a 100644
17497--- a/arch/x86/include/asm/elf.h
17498+++ b/arch/x86/include/asm/elf.h
17499@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
17500
17501 #include <asm/vdso.h>
17502
17503-#ifdef CONFIG_X86_64
17504-extern unsigned int vdso64_enabled;
17505-#endif
17506 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
17507 extern unsigned int vdso32_enabled;
17508 #endif
17509@@ -249,7 +246,25 @@ extern int force_personality32;
17510 the loader. We need to make sure that it is out of the way of the program
17511 that it will "exec", and that there is sufficient room for the brk. */
17512
17513+#ifdef CONFIG_PAX_SEGMEXEC
17514+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
17515+#else
17516 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
17517+#endif
17518+
17519+#ifdef CONFIG_PAX_ASLR
17520+#ifdef CONFIG_X86_32
17521+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
17522+
17523+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17524+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17525+#else
17526+#define PAX_ELF_ET_DYN_BASE 0x400000UL
17527+
17528+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17529+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17530+#endif
17531+#endif
17532
17533 /* This yields a mask that user programs can use to figure out what
17534 instruction set this CPU supports. This could be done in user space,
17535@@ -298,17 +313,13 @@ do { \
17536
17537 #define ARCH_DLINFO \
17538 do { \
17539- if (vdso64_enabled) \
17540- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17541- (unsigned long __force)current->mm->context.vdso); \
17542+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17543 } while (0)
17544
17545 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
17546 #define ARCH_DLINFO_X32 \
17547 do { \
17548- if (vdso64_enabled) \
17549- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17550- (unsigned long __force)current->mm->context.vdso); \
17551+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17552 } while (0)
17553
17554 #define AT_SYSINFO 32
17555@@ -323,10 +334,10 @@ else \
17556
17557 #endif /* !CONFIG_X86_32 */
17558
17559-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
17560+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
17561
17562 #define VDSO_ENTRY \
17563- ((unsigned long)current->mm->context.vdso + \
17564+ (current->mm->context.vdso + \
17565 selected_vdso32->sym___kernel_vsyscall)
17566
17567 struct linux_binprm;
17568@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
17569 int uses_interp);
17570 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
17571
17572-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
17573-#define arch_randomize_brk arch_randomize_brk
17574-
17575 /*
17576 * True on X86_32 or when emulating IA32 on X86_64
17577 */
17578diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
17579index 77a99ac..39ff7f5 100644
17580--- a/arch/x86/include/asm/emergency-restart.h
17581+++ b/arch/x86/include/asm/emergency-restart.h
17582@@ -1,6 +1,6 @@
17583 #ifndef _ASM_X86_EMERGENCY_RESTART_H
17584 #define _ASM_X86_EMERGENCY_RESTART_H
17585
17586-extern void machine_emergency_restart(void);
17587+extern void machine_emergency_restart(void) __noreturn;
17588
17589 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
17590diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
17591index 1c7eefe..d0e4702 100644
17592--- a/arch/x86/include/asm/floppy.h
17593+++ b/arch/x86/include/asm/floppy.h
17594@@ -229,18 +229,18 @@ static struct fd_routine_l {
17595 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
17596 } fd_routine[] = {
17597 {
17598- request_dma,
17599- free_dma,
17600- get_dma_residue,
17601- dma_mem_alloc,
17602- hard_dma_setup
17603+ ._request_dma = request_dma,
17604+ ._free_dma = free_dma,
17605+ ._get_dma_residue = get_dma_residue,
17606+ ._dma_mem_alloc = dma_mem_alloc,
17607+ ._dma_setup = hard_dma_setup
17608 },
17609 {
17610- vdma_request_dma,
17611- vdma_nop,
17612- vdma_get_dma_residue,
17613- vdma_mem_alloc,
17614- vdma_dma_setup
17615+ ._request_dma = vdma_request_dma,
17616+ ._free_dma = vdma_nop,
17617+ ._get_dma_residue = vdma_get_dma_residue,
17618+ ._dma_mem_alloc = vdma_mem_alloc,
17619+ ._dma_setup = vdma_dma_setup
17620 }
17621 };
17622
17623diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
17624index 412ecec..c1ea43a 100644
17625--- a/arch/x86/include/asm/fpu-internal.h
17626+++ b/arch/x86/include/asm/fpu-internal.h
17627@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17628 #define user_insn(insn, output, input...) \
17629 ({ \
17630 int err; \
17631+ pax_open_userland(); \
17632 asm volatile(ASM_STAC "\n" \
17633- "1:" #insn "\n\t" \
17634+ "1:" \
17635+ __copyuser_seg \
17636+ #insn "\n\t" \
17637 "2: " ASM_CLAC "\n" \
17638 ".section .fixup,\"ax\"\n" \
17639 "3: movl $-1,%[err]\n" \
17640@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17641 _ASM_EXTABLE(1b, 3b) \
17642 : [err] "=r" (err), output \
17643 : "0"(0), input); \
17644+ pax_close_userland(); \
17645 err; \
17646 })
17647
17648@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17649 "fnclex\n\t"
17650 "emms\n\t"
17651 "fildl %P[addr]" /* set F?P to defined value */
17652- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17653+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17654 }
17655
17656 return fpu_restore_checking(&tsk->thread.fpu);
17657diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17658index b4c1f54..e290c08 100644
17659--- a/arch/x86/include/asm/futex.h
17660+++ b/arch/x86/include/asm/futex.h
17661@@ -12,6 +12,7 @@
17662 #include <asm/smap.h>
17663
17664 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17665+ typecheck(u32 __user *, uaddr); \
17666 asm volatile("\t" ASM_STAC "\n" \
17667 "1:\t" insn "\n" \
17668 "2:\t" ASM_CLAC "\n" \
17669@@ -20,15 +21,16 @@
17670 "\tjmp\t2b\n" \
17671 "\t.previous\n" \
17672 _ASM_EXTABLE(1b, 3b) \
17673- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17674+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17675 : "i" (-EFAULT), "0" (oparg), "1" (0))
17676
17677 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17678+ typecheck(u32 __user *, uaddr); \
17679 asm volatile("\t" ASM_STAC "\n" \
17680 "1:\tmovl %2, %0\n" \
17681 "\tmovl\t%0, %3\n" \
17682 "\t" insn "\n" \
17683- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17684+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17685 "\tjnz\t1b\n" \
17686 "3:\t" ASM_CLAC "\n" \
17687 "\t.section .fixup,\"ax\"\n" \
17688@@ -38,7 +40,7 @@
17689 _ASM_EXTABLE(1b, 4b) \
17690 _ASM_EXTABLE(2b, 4b) \
17691 : "=&a" (oldval), "=&r" (ret), \
17692- "+m" (*uaddr), "=&r" (tem) \
17693+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17694 : "r" (oparg), "i" (-EFAULT), "1" (0))
17695
17696 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17697@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17698
17699 pagefault_disable();
17700
17701+ pax_open_userland();
17702 switch (op) {
17703 case FUTEX_OP_SET:
17704- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17705+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17706 break;
17707 case FUTEX_OP_ADD:
17708- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17709+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17710 uaddr, oparg);
17711 break;
17712 case FUTEX_OP_OR:
17713@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17714 default:
17715 ret = -ENOSYS;
17716 }
17717+ pax_close_userland();
17718
17719 pagefault_enable();
17720
17721diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17722index 4615906..788c817 100644
17723--- a/arch/x86/include/asm/hw_irq.h
17724+++ b/arch/x86/include/asm/hw_irq.h
17725@@ -164,8 +164,8 @@ extern void setup_ioapic_dest(void);
17726 extern void enable_IO_APIC(void);
17727
17728 /* Statistics */
17729-extern atomic_t irq_err_count;
17730-extern atomic_t irq_mis_count;
17731+extern atomic_unchecked_t irq_err_count;
17732+extern atomic_unchecked_t irq_mis_count;
17733
17734 /* EISA */
17735 extern void eisa_set_level_irq(unsigned int irq);
17736diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17737index ccffa53..3c90c87 100644
17738--- a/arch/x86/include/asm/i8259.h
17739+++ b/arch/x86/include/asm/i8259.h
17740@@ -62,7 +62,7 @@ struct legacy_pic {
17741 void (*init)(int auto_eoi);
17742 int (*irq_pending)(unsigned int irq);
17743 void (*make_irq)(unsigned int irq);
17744-};
17745+} __do_const;
17746
17747 extern struct legacy_pic *legacy_pic;
17748 extern struct legacy_pic null_legacy_pic;
17749diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17750index b8237d8..3e8864e 100644
17751--- a/arch/x86/include/asm/io.h
17752+++ b/arch/x86/include/asm/io.h
17753@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17754 "m" (*(volatile type __force *)addr) barrier); }
17755
17756 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17757-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17758-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17759+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17760+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17761
17762 build_mmio_read(__readb, "b", unsigned char, "=q", )
17763-build_mmio_read(__readw, "w", unsigned short, "=r", )
17764-build_mmio_read(__readl, "l", unsigned int, "=r", )
17765+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17766+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17767
17768 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17769 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17770@@ -109,7 +109,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17771 * this function
17772 */
17773
17774-static inline phys_addr_t virt_to_phys(volatile void *address)
17775+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17776 {
17777 return __pa(address);
17778 }
17779@@ -185,7 +185,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17780 return ioremap_nocache(offset, size);
17781 }
17782
17783-extern void iounmap(volatile void __iomem *addr);
17784+extern void iounmap(const volatile void __iomem *addr);
17785
17786 extern void set_iounmap_nonlazy(void);
17787
17788@@ -195,6 +195,17 @@ extern void set_iounmap_nonlazy(void);
17789
17790 #include <linux/vmalloc.h>
17791
17792+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17793+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17794+{
17795+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17796+}
17797+
17798+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17799+{
17800+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17801+}
17802+
17803 /*
17804 * Convert a virtual cached pointer to an uncached pointer
17805 */
17806diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17807index 0a8b519..80e7d5b 100644
17808--- a/arch/x86/include/asm/irqflags.h
17809+++ b/arch/x86/include/asm/irqflags.h
17810@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17811 sti; \
17812 sysexit
17813
17814+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17815+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17816+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17817+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17818+
17819 #else
17820 #define INTERRUPT_RETURN iret
17821 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17822diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17823index 53cdfb2..d1369e6 100644
17824--- a/arch/x86/include/asm/kprobes.h
17825+++ b/arch/x86/include/asm/kprobes.h
17826@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
17827 #define RELATIVEJUMP_SIZE 5
17828 #define RELATIVECALL_OPCODE 0xe8
17829 #define RELATIVE_ADDR_SIZE 4
17830-#define MAX_STACK_SIZE 64
17831-#define MIN_STACK_SIZE(ADDR) \
17832- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17833- THREAD_SIZE - (unsigned long)(ADDR))) \
17834- ? (MAX_STACK_SIZE) \
17835- : (((unsigned long)current_thread_info()) + \
17836- THREAD_SIZE - (unsigned long)(ADDR)))
17837+#define MAX_STACK_SIZE 64UL
17838+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17839
17840 #define flush_insn_slot(p) do { } while (0)
17841
17842diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17843index 4ad6560..75c7bdd 100644
17844--- a/arch/x86/include/asm/local.h
17845+++ b/arch/x86/include/asm/local.h
17846@@ -10,33 +10,97 @@ typedef struct {
17847 atomic_long_t a;
17848 } local_t;
17849
17850+typedef struct {
17851+ atomic_long_unchecked_t a;
17852+} local_unchecked_t;
17853+
17854 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17855
17856 #define local_read(l) atomic_long_read(&(l)->a)
17857+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17858 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17859+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17860
17861 static inline void local_inc(local_t *l)
17862 {
17863- asm volatile(_ASM_INC "%0"
17864+ asm volatile(_ASM_INC "%0\n"
17865+
17866+#ifdef CONFIG_PAX_REFCOUNT
17867+ "jno 0f\n"
17868+ _ASM_DEC "%0\n"
17869+ "int $4\n0:\n"
17870+ _ASM_EXTABLE(0b, 0b)
17871+#endif
17872+
17873+ : "+m" (l->a.counter));
17874+}
17875+
17876+static inline void local_inc_unchecked(local_unchecked_t *l)
17877+{
17878+ asm volatile(_ASM_INC "%0\n"
17879 : "+m" (l->a.counter));
17880 }
17881
17882 static inline void local_dec(local_t *l)
17883 {
17884- asm volatile(_ASM_DEC "%0"
17885+ asm volatile(_ASM_DEC "%0\n"
17886+
17887+#ifdef CONFIG_PAX_REFCOUNT
17888+ "jno 0f\n"
17889+ _ASM_INC "%0\n"
17890+ "int $4\n0:\n"
17891+ _ASM_EXTABLE(0b, 0b)
17892+#endif
17893+
17894+ : "+m" (l->a.counter));
17895+}
17896+
17897+static inline void local_dec_unchecked(local_unchecked_t *l)
17898+{
17899+ asm volatile(_ASM_DEC "%0\n"
17900 : "+m" (l->a.counter));
17901 }
17902
17903 static inline void local_add(long i, local_t *l)
17904 {
17905- asm volatile(_ASM_ADD "%1,%0"
17906+ asm volatile(_ASM_ADD "%1,%0\n"
17907+
17908+#ifdef CONFIG_PAX_REFCOUNT
17909+ "jno 0f\n"
17910+ _ASM_SUB "%1,%0\n"
17911+ "int $4\n0:\n"
17912+ _ASM_EXTABLE(0b, 0b)
17913+#endif
17914+
17915+ : "+m" (l->a.counter)
17916+ : "ir" (i));
17917+}
17918+
17919+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17920+{
17921+ asm volatile(_ASM_ADD "%1,%0\n"
17922 : "+m" (l->a.counter)
17923 : "ir" (i));
17924 }
17925
17926 static inline void local_sub(long i, local_t *l)
17927 {
17928- asm volatile(_ASM_SUB "%1,%0"
17929+ asm volatile(_ASM_SUB "%1,%0\n"
17930+
17931+#ifdef CONFIG_PAX_REFCOUNT
17932+ "jno 0f\n"
17933+ _ASM_ADD "%1,%0\n"
17934+ "int $4\n0:\n"
17935+ _ASM_EXTABLE(0b, 0b)
17936+#endif
17937+
17938+ : "+m" (l->a.counter)
17939+ : "ir" (i));
17940+}
17941+
17942+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17943+{
17944+ asm volatile(_ASM_SUB "%1,%0\n"
17945 : "+m" (l->a.counter)
17946 : "ir" (i));
17947 }
17948@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17949 */
17950 static inline int local_sub_and_test(long i, local_t *l)
17951 {
17952- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17953+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17954 }
17955
17956 /**
17957@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17958 */
17959 static inline int local_dec_and_test(local_t *l)
17960 {
17961- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17962+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17963 }
17964
17965 /**
17966@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17967 */
17968 static inline int local_inc_and_test(local_t *l)
17969 {
17970- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17971+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17972 }
17973
17974 /**
17975@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17976 */
17977 static inline int local_add_negative(long i, local_t *l)
17978 {
17979- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17980+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17981 }
17982
17983 /**
17984@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17985 static inline long local_add_return(long i, local_t *l)
17986 {
17987 long __i = i;
17988+ asm volatile(_ASM_XADD "%0, %1\n"
17989+
17990+#ifdef CONFIG_PAX_REFCOUNT
17991+ "jno 0f\n"
17992+ _ASM_MOV "%0,%1\n"
17993+ "int $4\n0:\n"
17994+ _ASM_EXTABLE(0b, 0b)
17995+#endif
17996+
17997+ : "+r" (i), "+m" (l->a.counter)
17998+ : : "memory");
17999+ return i + __i;
18000+}
18001+
18002+/**
18003+ * local_add_return_unchecked - add and return
18004+ * @i: integer value to add
18005+ * @l: pointer to type local_unchecked_t
18006+ *
18007+ * Atomically adds @i to @l and returns @i + @l
18008+ */
18009+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
18010+{
18011+ long __i = i;
18012 asm volatile(_ASM_XADD "%0, %1;"
18013 : "+r" (i), "+m" (l->a.counter)
18014 : : "memory");
18015@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
18016
18017 #define local_cmpxchg(l, o, n) \
18018 (cmpxchg_local(&((l)->a.counter), (o), (n)))
18019+#define local_cmpxchg_unchecked(l, o, n) \
18020+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
18021 /* Always has a lock prefix */
18022 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
18023
18024diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
18025new file mode 100644
18026index 0000000..2bfd3ba
18027--- /dev/null
18028+++ b/arch/x86/include/asm/mman.h
18029@@ -0,0 +1,15 @@
18030+#ifndef _X86_MMAN_H
18031+#define _X86_MMAN_H
18032+
18033+#include <uapi/asm/mman.h>
18034+
18035+#ifdef __KERNEL__
18036+#ifndef __ASSEMBLY__
18037+#ifdef CONFIG_X86_32
18038+#define arch_mmap_check i386_mmap_check
18039+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
18040+#endif
18041+#endif
18042+#endif
18043+
18044+#endif /* X86_MMAN_H */
18045diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
18046index 876e74e..e20bfb1 100644
18047--- a/arch/x86/include/asm/mmu.h
18048+++ b/arch/x86/include/asm/mmu.h
18049@@ -9,7 +9,7 @@
18050 * we put the segment information here.
18051 */
18052 typedef struct {
18053- void *ldt;
18054+ struct desc_struct *ldt;
18055 int size;
18056
18057 #ifdef CONFIG_X86_64
18058@@ -18,7 +18,19 @@ typedef struct {
18059 #endif
18060
18061 struct mutex lock;
18062- void __user *vdso;
18063+ unsigned long vdso;
18064+
18065+#ifdef CONFIG_X86_32
18066+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18067+ unsigned long user_cs_base;
18068+ unsigned long user_cs_limit;
18069+
18070+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18071+ cpumask_t cpu_user_cs_mask;
18072+#endif
18073+
18074+#endif
18075+#endif
18076 } mm_context_t;
18077
18078 #ifdef CONFIG_SMP
18079diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
18080index 166af2a..648c200 100644
18081--- a/arch/x86/include/asm/mmu_context.h
18082+++ b/arch/x86/include/asm/mmu_context.h
18083@@ -28,6 +28,20 @@ void destroy_context(struct mm_struct *mm);
18084
18085 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
18086 {
18087+
18088+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18089+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
18090+ unsigned int i;
18091+ pgd_t *pgd;
18092+
18093+ pax_open_kernel();
18094+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
18095+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
18096+ set_pgd_batched(pgd+i, native_make_pgd(0));
18097+ pax_close_kernel();
18098+ }
18099+#endif
18100+
18101 #ifdef CONFIG_SMP
18102 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
18103 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
18104@@ -38,16 +52,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18105 struct task_struct *tsk)
18106 {
18107 unsigned cpu = smp_processor_id();
18108+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18109+ int tlbstate = TLBSTATE_OK;
18110+#endif
18111
18112 if (likely(prev != next)) {
18113 #ifdef CONFIG_SMP
18114+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18115+ tlbstate = this_cpu_read(cpu_tlbstate.state);
18116+#endif
18117 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
18118 this_cpu_write(cpu_tlbstate.active_mm, next);
18119 #endif
18120 cpumask_set_cpu(cpu, mm_cpumask(next));
18121
18122 /* Re-load page tables */
18123+#ifdef CONFIG_PAX_PER_CPU_PGD
18124+ pax_open_kernel();
18125+
18126+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18127+ if (static_cpu_has(X86_FEATURE_PCID))
18128+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
18129+ else
18130+#endif
18131+
18132+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
18133+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
18134+ pax_close_kernel();
18135+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
18136+
18137+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18138+ if (static_cpu_has(X86_FEATURE_PCID)) {
18139+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18140+ u64 descriptor[2];
18141+ descriptor[0] = PCID_USER;
18142+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18143+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
18144+ descriptor[0] = PCID_KERNEL;
18145+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18146+ }
18147+ } else {
18148+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18149+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
18150+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18151+ else
18152+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18153+ }
18154+ } else
18155+#endif
18156+
18157+ load_cr3(get_cpu_pgd(cpu, kernel));
18158+#else
18159 load_cr3(next->pgd);
18160+#endif
18161 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
18162
18163 /* Stop flush ipis for the previous mm */
18164@@ -56,9 +113,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18165 /* Load the LDT, if the LDT is different: */
18166 if (unlikely(prev->context.ldt != next->context.ldt))
18167 load_LDT_nolock(&next->context);
18168+
18169+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18170+ if (!(__supported_pte_mask & _PAGE_NX)) {
18171+ smp_mb__before_atomic();
18172+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
18173+ smp_mb__after_atomic();
18174+ cpu_set(cpu, next->context.cpu_user_cs_mask);
18175+ }
18176+#endif
18177+
18178+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18179+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
18180+ prev->context.user_cs_limit != next->context.user_cs_limit))
18181+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18182+#ifdef CONFIG_SMP
18183+ else if (unlikely(tlbstate != TLBSTATE_OK))
18184+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18185+#endif
18186+#endif
18187+
18188 }
18189+ else {
18190+
18191+#ifdef CONFIG_PAX_PER_CPU_PGD
18192+ pax_open_kernel();
18193+
18194+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18195+ if (static_cpu_has(X86_FEATURE_PCID))
18196+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
18197+ else
18198+#endif
18199+
18200+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
18201+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
18202+ pax_close_kernel();
18203+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
18204+
18205+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18206+ if (static_cpu_has(X86_FEATURE_PCID)) {
18207+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18208+ u64 descriptor[2];
18209+ descriptor[0] = PCID_USER;
18210+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18211+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
18212+ descriptor[0] = PCID_KERNEL;
18213+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18214+ }
18215+ } else {
18216+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18217+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
18218+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18219+ else
18220+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18221+ }
18222+ } else
18223+#endif
18224+
18225+ load_cr3(get_cpu_pgd(cpu, kernel));
18226+#endif
18227+
18228 #ifdef CONFIG_SMP
18229- else {
18230 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
18231 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
18232
18233@@ -75,12 +190,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18234 * tlb flush IPI delivery. We must reload CR3
18235 * to make sure to use no freed page tables.
18236 */
18237+
18238+#ifndef CONFIG_PAX_PER_CPU_PGD
18239 load_cr3(next->pgd);
18240 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
18241+#endif
18242+
18243 load_LDT_nolock(&next->context);
18244+
18245+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18246+ if (!(__supported_pte_mask & _PAGE_NX))
18247+ cpu_set(cpu, next->context.cpu_user_cs_mask);
18248+#endif
18249+
18250+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18251+#ifdef CONFIG_PAX_PAGEEXEC
18252+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
18253+#endif
18254+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18255+#endif
18256+
18257 }
18258+#endif
18259 }
18260-#endif
18261 }
18262
18263 #define activate_mm(prev, next) \
18264diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
18265index e3b7819..b257c64 100644
18266--- a/arch/x86/include/asm/module.h
18267+++ b/arch/x86/include/asm/module.h
18268@@ -5,6 +5,7 @@
18269
18270 #ifdef CONFIG_X86_64
18271 /* X86_64 does not define MODULE_PROC_FAMILY */
18272+#define MODULE_PROC_FAMILY ""
18273 #elif defined CONFIG_M486
18274 #define MODULE_PROC_FAMILY "486 "
18275 #elif defined CONFIG_M586
18276@@ -57,8 +58,20 @@
18277 #error unknown processor family
18278 #endif
18279
18280-#ifdef CONFIG_X86_32
18281-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
18282+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
18283+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
18284+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
18285+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
18286+#else
18287+#define MODULE_PAX_KERNEXEC ""
18288 #endif
18289
18290+#ifdef CONFIG_PAX_MEMORY_UDEREF
18291+#define MODULE_PAX_UDEREF "UDEREF "
18292+#else
18293+#define MODULE_PAX_UDEREF ""
18294+#endif
18295+
18296+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
18297+
18298 #endif /* _ASM_X86_MODULE_H */
18299diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
18300index 5f2fc44..106caa6 100644
18301--- a/arch/x86/include/asm/nmi.h
18302+++ b/arch/x86/include/asm/nmi.h
18303@@ -36,26 +36,35 @@ enum {
18304
18305 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
18306
18307+struct nmiaction;
18308+
18309+struct nmiwork {
18310+ const struct nmiaction *action;
18311+ u64 max_duration;
18312+ struct irq_work irq_work;
18313+};
18314+
18315 struct nmiaction {
18316 struct list_head list;
18317 nmi_handler_t handler;
18318- u64 max_duration;
18319- struct irq_work irq_work;
18320 unsigned long flags;
18321 const char *name;
18322-};
18323+ struct nmiwork *work;
18324+} __do_const;
18325
18326 #define register_nmi_handler(t, fn, fg, n, init...) \
18327 ({ \
18328- static struct nmiaction init fn##_na = { \
18329+ static struct nmiwork fn##_nw; \
18330+ static const struct nmiaction init fn##_na = { \
18331 .handler = (fn), \
18332 .name = (n), \
18333 .flags = (fg), \
18334+ .work = &fn##_nw, \
18335 }; \
18336 __register_nmi_handler((t), &fn##_na); \
18337 })
18338
18339-int __register_nmi_handler(unsigned int, struct nmiaction *);
18340+int __register_nmi_handler(unsigned int, const struct nmiaction *);
18341
18342 void unregister_nmi_handler(unsigned int, const char *);
18343
18344diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
18345index 802dde3..9183e68 100644
18346--- a/arch/x86/include/asm/page.h
18347+++ b/arch/x86/include/asm/page.h
18348@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
18349 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
18350
18351 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
18352+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
18353
18354 #define __boot_va(x) __va(x)
18355 #define __boot_pa(x) __pa(x)
18356@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
18357 * virt_to_page(kaddr) returns a valid pointer if and only if
18358 * virt_addr_valid(kaddr) returns true.
18359 */
18360-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
18361 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
18362 extern bool __virt_addr_valid(unsigned long kaddr);
18363 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
18364
18365+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
18366+#define virt_to_page(kaddr) \
18367+ ({ \
18368+ const void *__kaddr = (const void *)(kaddr); \
18369+ BUG_ON(!virt_addr_valid(__kaddr)); \
18370+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
18371+ })
18372+#else
18373+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
18374+#endif
18375+
18376 #endif /* __ASSEMBLY__ */
18377
18378 #include <asm-generic/memory_model.h>
18379diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
18380index f408caf..4a0455e 100644
18381--- a/arch/x86/include/asm/page_64.h
18382+++ b/arch/x86/include/asm/page_64.h
18383@@ -7,9 +7,9 @@
18384
18385 /* duplicated to the one in bootmem.h */
18386 extern unsigned long max_pfn;
18387-extern unsigned long phys_base;
18388+extern const unsigned long phys_base;
18389
18390-static inline unsigned long __phys_addr_nodebug(unsigned long x)
18391+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
18392 {
18393 unsigned long y = x - __START_KERNEL_map;
18394
18395diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
18396index cd6e1610..70f4418 100644
18397--- a/arch/x86/include/asm/paravirt.h
18398+++ b/arch/x86/include/asm/paravirt.h
18399@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
18400 return (pmd_t) { ret };
18401 }
18402
18403-static inline pmdval_t pmd_val(pmd_t pmd)
18404+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
18405 {
18406 pmdval_t ret;
18407
18408@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
18409 val);
18410 }
18411
18412+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18413+{
18414+ pgdval_t val = native_pgd_val(pgd);
18415+
18416+ if (sizeof(pgdval_t) > sizeof(long))
18417+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
18418+ val, (u64)val >> 32);
18419+ else
18420+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
18421+ val);
18422+}
18423+
18424 static inline void pgd_clear(pgd_t *pgdp)
18425 {
18426 set_pgd(pgdp, __pgd(0));
18427@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
18428 pv_mmu_ops.set_fixmap(idx, phys, flags);
18429 }
18430
18431+#ifdef CONFIG_PAX_KERNEXEC
18432+static inline unsigned long pax_open_kernel(void)
18433+{
18434+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
18435+}
18436+
18437+static inline unsigned long pax_close_kernel(void)
18438+{
18439+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
18440+}
18441+#else
18442+static inline unsigned long pax_open_kernel(void) { return 0; }
18443+static inline unsigned long pax_close_kernel(void) { return 0; }
18444+#endif
18445+
18446 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
18447
18448 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
18449@@ -906,7 +933,7 @@ extern void default_banner(void);
18450
18451 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
18452 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
18453-#define PARA_INDIRECT(addr) *%cs:addr
18454+#define PARA_INDIRECT(addr) *%ss:addr
18455 #endif
18456
18457 #define INTERRUPT_RETURN \
18458@@ -981,6 +1008,21 @@ extern void default_banner(void);
18459 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
18460 CLBR_NONE, \
18461 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
18462+
18463+#define GET_CR0_INTO_RDI \
18464+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
18465+ mov %rax,%rdi
18466+
18467+#define SET_RDI_INTO_CR0 \
18468+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18469+
18470+#define GET_CR3_INTO_RDI \
18471+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
18472+ mov %rax,%rdi
18473+
18474+#define SET_RDI_INTO_CR3 \
18475+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
18476+
18477 #endif /* CONFIG_X86_32 */
18478
18479 #endif /* __ASSEMBLY__ */
18480diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
18481index 7549b8b..f0edfda 100644
18482--- a/arch/x86/include/asm/paravirt_types.h
18483+++ b/arch/x86/include/asm/paravirt_types.h
18484@@ -84,7 +84,7 @@ struct pv_init_ops {
18485 */
18486 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
18487 unsigned long addr, unsigned len);
18488-};
18489+} __no_const __no_randomize_layout;
18490
18491
18492 struct pv_lazy_ops {
18493@@ -92,13 +92,13 @@ struct pv_lazy_ops {
18494 void (*enter)(void);
18495 void (*leave)(void);
18496 void (*flush)(void);
18497-};
18498+} __no_randomize_layout;
18499
18500 struct pv_time_ops {
18501 unsigned long long (*sched_clock)(void);
18502 unsigned long long (*steal_clock)(int cpu);
18503 unsigned long (*get_tsc_khz)(void);
18504-};
18505+} __no_const __no_randomize_layout;
18506
18507 struct pv_cpu_ops {
18508 /* hooks for various privileged instructions */
18509@@ -192,7 +192,7 @@ struct pv_cpu_ops {
18510
18511 void (*start_context_switch)(struct task_struct *prev);
18512 void (*end_context_switch)(struct task_struct *next);
18513-};
18514+} __no_const __no_randomize_layout;
18515
18516 struct pv_irq_ops {
18517 /*
18518@@ -215,7 +215,7 @@ struct pv_irq_ops {
18519 #ifdef CONFIG_X86_64
18520 void (*adjust_exception_frame)(void);
18521 #endif
18522-};
18523+} __no_randomize_layout;
18524
18525 struct pv_apic_ops {
18526 #ifdef CONFIG_X86_LOCAL_APIC
18527@@ -223,7 +223,7 @@ struct pv_apic_ops {
18528 unsigned long start_eip,
18529 unsigned long start_esp);
18530 #endif
18531-};
18532+} __no_const __no_randomize_layout;
18533
18534 struct pv_mmu_ops {
18535 unsigned long (*read_cr2)(void);
18536@@ -313,6 +313,7 @@ struct pv_mmu_ops {
18537 struct paravirt_callee_save make_pud;
18538
18539 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
18540+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
18541 #endif /* PAGETABLE_LEVELS == 4 */
18542 #endif /* PAGETABLE_LEVELS >= 3 */
18543
18544@@ -324,7 +325,13 @@ struct pv_mmu_ops {
18545 an mfn. We can tell which is which from the index. */
18546 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
18547 phys_addr_t phys, pgprot_t flags);
18548-};
18549+
18550+#ifdef CONFIG_PAX_KERNEXEC
18551+ unsigned long (*pax_open_kernel)(void);
18552+ unsigned long (*pax_close_kernel)(void);
18553+#endif
18554+
18555+} __no_randomize_layout;
18556
18557 struct arch_spinlock;
18558 #ifdef CONFIG_SMP
18559@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
18560 struct pv_lock_ops {
18561 struct paravirt_callee_save lock_spinning;
18562 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
18563-};
18564+} __no_randomize_layout;
18565
18566 /* This contains all the paravirt structures: we get a convenient
18567 * number for each function using the offset which we use to indicate
18568- * what to patch. */
18569+ * what to patch.
18570+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
18571+ */
18572+
18573 struct paravirt_patch_template {
18574 struct pv_init_ops pv_init_ops;
18575 struct pv_time_ops pv_time_ops;
18576@@ -349,7 +359,7 @@ struct paravirt_patch_template {
18577 struct pv_apic_ops pv_apic_ops;
18578 struct pv_mmu_ops pv_mmu_ops;
18579 struct pv_lock_ops pv_lock_ops;
18580-};
18581+} __no_randomize_layout;
18582
18583 extern struct pv_info pv_info;
18584 extern struct pv_init_ops pv_init_ops;
18585diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
18586index c4412e9..90e88c5 100644
18587--- a/arch/x86/include/asm/pgalloc.h
18588+++ b/arch/x86/include/asm/pgalloc.h
18589@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
18590 pmd_t *pmd, pte_t *pte)
18591 {
18592 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18593+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
18594+}
18595+
18596+static inline void pmd_populate_user(struct mm_struct *mm,
18597+ pmd_t *pmd, pte_t *pte)
18598+{
18599+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18600 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
18601 }
18602
18603@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
18604
18605 #ifdef CONFIG_X86_PAE
18606 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
18607+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
18608+{
18609+ pud_populate(mm, pudp, pmd);
18610+}
18611 #else /* !CONFIG_X86_PAE */
18612 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18613 {
18614 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18615 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
18616 }
18617+
18618+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18619+{
18620+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18621+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
18622+}
18623 #endif /* CONFIG_X86_PAE */
18624
18625 #if PAGETABLE_LEVELS > 3
18626@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18627 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
18628 }
18629
18630+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18631+{
18632+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
18633+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
18634+}
18635+
18636 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18637 {
18638 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
18639diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
18640index 206a87f..1623b06 100644
18641--- a/arch/x86/include/asm/pgtable-2level.h
18642+++ b/arch/x86/include/asm/pgtable-2level.h
18643@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
18644
18645 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18646 {
18647+ pax_open_kernel();
18648 *pmdp = pmd;
18649+ pax_close_kernel();
18650 }
18651
18652 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18653diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18654index 81bb91b..9392125 100644
18655--- a/arch/x86/include/asm/pgtable-3level.h
18656+++ b/arch/x86/include/asm/pgtable-3level.h
18657@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18658
18659 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18660 {
18661+ pax_open_kernel();
18662 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18663+ pax_close_kernel();
18664 }
18665
18666 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18667 {
18668+ pax_open_kernel();
18669 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18670+ pax_close_kernel();
18671 }
18672
18673 /*
18674diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18675index aa97a07..5c53c32 100644
18676--- a/arch/x86/include/asm/pgtable.h
18677+++ b/arch/x86/include/asm/pgtable.h
18678@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18679
18680 #ifndef __PAGETABLE_PUD_FOLDED
18681 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18682+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18683 #define pgd_clear(pgd) native_pgd_clear(pgd)
18684 #endif
18685
18686@@ -83,12 +84,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18687
18688 #define arch_end_context_switch(prev) do {} while(0)
18689
18690+#define pax_open_kernel() native_pax_open_kernel()
18691+#define pax_close_kernel() native_pax_close_kernel()
18692 #endif /* CONFIG_PARAVIRT */
18693
18694+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18695+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18696+
18697+#ifdef CONFIG_PAX_KERNEXEC
18698+static inline unsigned long native_pax_open_kernel(void)
18699+{
18700+ unsigned long cr0;
18701+
18702+ preempt_disable();
18703+ barrier();
18704+ cr0 = read_cr0() ^ X86_CR0_WP;
18705+ BUG_ON(cr0 & X86_CR0_WP);
18706+ write_cr0(cr0);
18707+ barrier();
18708+ return cr0 ^ X86_CR0_WP;
18709+}
18710+
18711+static inline unsigned long native_pax_close_kernel(void)
18712+{
18713+ unsigned long cr0;
18714+
18715+ barrier();
18716+ cr0 = read_cr0() ^ X86_CR0_WP;
18717+ BUG_ON(!(cr0 & X86_CR0_WP));
18718+ write_cr0(cr0);
18719+ barrier();
18720+ preempt_enable_no_resched();
18721+ return cr0 ^ X86_CR0_WP;
18722+}
18723+#else
18724+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18725+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18726+#endif
18727+
18728 /*
18729 * The following only work if pte_present() is true.
18730 * Undefined behaviour if not..
18731 */
18732+static inline int pte_user(pte_t pte)
18733+{
18734+ return pte_val(pte) & _PAGE_USER;
18735+}
18736+
18737 static inline int pte_dirty(pte_t pte)
18738 {
18739 return pte_flags(pte) & _PAGE_DIRTY;
18740@@ -155,6 +197,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18741 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18742 }
18743
18744+static inline unsigned long pgd_pfn(pgd_t pgd)
18745+{
18746+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18747+}
18748+
18749 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18750
18751 static inline int pmd_large(pmd_t pte)
18752@@ -208,9 +255,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18753 return pte_clear_flags(pte, _PAGE_RW);
18754 }
18755
18756+static inline pte_t pte_mkread(pte_t pte)
18757+{
18758+ return __pte(pte_val(pte) | _PAGE_USER);
18759+}
18760+
18761 static inline pte_t pte_mkexec(pte_t pte)
18762 {
18763- return pte_clear_flags(pte, _PAGE_NX);
18764+#ifdef CONFIG_X86_PAE
18765+ if (__supported_pte_mask & _PAGE_NX)
18766+ return pte_clear_flags(pte, _PAGE_NX);
18767+ else
18768+#endif
18769+ return pte_set_flags(pte, _PAGE_USER);
18770+}
18771+
18772+static inline pte_t pte_exprotect(pte_t pte)
18773+{
18774+#ifdef CONFIG_X86_PAE
18775+ if (__supported_pte_mask & _PAGE_NX)
18776+ return pte_set_flags(pte, _PAGE_NX);
18777+ else
18778+#endif
18779+ return pte_clear_flags(pte, _PAGE_USER);
18780 }
18781
18782 static inline pte_t pte_mkdirty(pte_t pte)
18783@@ -440,6 +507,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18784 #endif
18785
18786 #ifndef __ASSEMBLY__
18787+
18788+#ifdef CONFIG_PAX_PER_CPU_PGD
18789+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18790+enum cpu_pgd_type {kernel = 0, user = 1};
18791+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18792+{
18793+ return cpu_pgd[cpu][type];
18794+}
18795+#endif
18796+
18797 #include <linux/mm_types.h>
18798 #include <linux/mmdebug.h>
18799 #include <linux/log2.h>
18800@@ -586,7 +663,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18801 * Currently stuck as a macro due to indirect forward reference to
18802 * linux/mmzone.h's __section_mem_map_addr() definition:
18803 */
18804-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18805+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18806
18807 /* Find an entry in the second-level page table.. */
18808 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18809@@ -626,7 +703,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18810 * Currently stuck as a macro due to indirect forward reference to
18811 * linux/mmzone.h's __section_mem_map_addr() definition:
18812 */
18813-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18814+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18815
18816 /* to find an entry in a page-table-directory. */
18817 static inline unsigned long pud_index(unsigned long address)
18818@@ -641,7 +718,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18819
18820 static inline int pgd_bad(pgd_t pgd)
18821 {
18822- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18823+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18824 }
18825
18826 static inline int pgd_none(pgd_t pgd)
18827@@ -664,7 +741,12 @@ static inline int pgd_none(pgd_t pgd)
18828 * pgd_offset() returns a (pgd_t *)
18829 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18830 */
18831-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18832+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18833+
18834+#ifdef CONFIG_PAX_PER_CPU_PGD
18835+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18836+#endif
18837+
18838 /*
18839 * a shortcut which implies the use of the kernel's pgd, instead
18840 * of a process's
18841@@ -675,6 +757,23 @@ static inline int pgd_none(pgd_t pgd)
18842 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18843 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18844
18845+#ifdef CONFIG_X86_32
18846+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18847+#else
18848+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18849+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18850+
18851+#ifdef CONFIG_PAX_MEMORY_UDEREF
18852+#ifdef __ASSEMBLY__
18853+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18854+#else
18855+extern unsigned long pax_user_shadow_base;
18856+extern pgdval_t clone_pgd_mask;
18857+#endif
18858+#endif
18859+
18860+#endif
18861+
18862 #ifndef __ASSEMBLY__
18863
18864 extern int direct_gbpages;
18865@@ -841,11 +940,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18866 * dst and src can be on the same page, but the range must not overlap,
18867 * and must not cross a page boundary.
18868 */
18869-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18870+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18871 {
18872- memcpy(dst, src, count * sizeof(pgd_t));
18873+ pax_open_kernel();
18874+ while (count--)
18875+ *dst++ = *src++;
18876+ pax_close_kernel();
18877 }
18878
18879+#ifdef CONFIG_PAX_PER_CPU_PGD
18880+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18881+#endif
18882+
18883+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18884+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18885+#else
18886+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18887+#endif
18888+
18889 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18890 static inline int page_level_shift(enum pg_level level)
18891 {
18892diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18893index 9ee3221..b979c6b 100644
18894--- a/arch/x86/include/asm/pgtable_32.h
18895+++ b/arch/x86/include/asm/pgtable_32.h
18896@@ -25,9 +25,6 @@
18897 struct mm_struct;
18898 struct vm_area_struct;
18899
18900-extern pgd_t swapper_pg_dir[1024];
18901-extern pgd_t initial_page_table[1024];
18902-
18903 static inline void pgtable_cache_init(void) { }
18904 static inline void check_pgt_cache(void) { }
18905 void paging_init(void);
18906@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18907 # include <asm/pgtable-2level.h>
18908 #endif
18909
18910+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18911+extern pgd_t initial_page_table[PTRS_PER_PGD];
18912+#ifdef CONFIG_X86_PAE
18913+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18914+#endif
18915+
18916 #if defined(CONFIG_HIGHPTE)
18917 #define pte_offset_map(dir, address) \
18918 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18919@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18920 /* Clear a kernel PTE and flush it from the TLB */
18921 #define kpte_clear_flush(ptep, vaddr) \
18922 do { \
18923+ pax_open_kernel(); \
18924 pte_clear(&init_mm, (vaddr), (ptep)); \
18925+ pax_close_kernel(); \
18926 __flush_tlb_one((vaddr)); \
18927 } while (0)
18928
18929 #endif /* !__ASSEMBLY__ */
18930
18931+#define HAVE_ARCH_UNMAPPED_AREA
18932+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18933+
18934 /*
18935 * kern_addr_valid() is (1) for FLATMEM and (0) for
18936 * SPARSEMEM and DISCONTIGMEM
18937diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18938index ed5903b..c7fe163 100644
18939--- a/arch/x86/include/asm/pgtable_32_types.h
18940+++ b/arch/x86/include/asm/pgtable_32_types.h
18941@@ -8,7 +8,7 @@
18942 */
18943 #ifdef CONFIG_X86_PAE
18944 # include <asm/pgtable-3level_types.h>
18945-# define PMD_SIZE (1UL << PMD_SHIFT)
18946+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18947 # define PMD_MASK (~(PMD_SIZE - 1))
18948 #else
18949 # include <asm/pgtable-2level_types.h>
18950@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18951 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18952 #endif
18953
18954+#ifdef CONFIG_PAX_KERNEXEC
18955+#ifndef __ASSEMBLY__
18956+extern unsigned char MODULES_EXEC_VADDR[];
18957+extern unsigned char MODULES_EXEC_END[];
18958+#endif
18959+#include <asm/boot.h>
18960+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18961+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18962+#else
18963+#define ktla_ktva(addr) (addr)
18964+#define ktva_ktla(addr) (addr)
18965+#endif
18966+
18967 #define MODULES_VADDR VMALLOC_START
18968 #define MODULES_END VMALLOC_END
18969 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18970diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18971index 3874693..d7906ac 100644
18972--- a/arch/x86/include/asm/pgtable_64.h
18973+++ b/arch/x86/include/asm/pgtable_64.h
18974@@ -16,11 +16,16 @@
18975
18976 extern pud_t level3_kernel_pgt[512];
18977 extern pud_t level3_ident_pgt[512];
18978+extern pud_t level3_vmalloc_start_pgt[512];
18979+extern pud_t level3_vmalloc_end_pgt[512];
18980+extern pud_t level3_vmemmap_pgt[512];
18981+extern pud_t level2_vmemmap_pgt[512];
18982 extern pmd_t level2_kernel_pgt[512];
18983 extern pmd_t level2_fixmap_pgt[512];
18984-extern pmd_t level2_ident_pgt[512];
18985+extern pmd_t level2_ident_pgt[512*2];
18986 extern pte_t level1_fixmap_pgt[512];
18987-extern pgd_t init_level4_pgt[];
18988+extern pte_t level1_vsyscall_pgt[512];
18989+extern pgd_t init_level4_pgt[512];
18990
18991 #define swapper_pg_dir init_level4_pgt
18992
18993@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18994
18995 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18996 {
18997+ pax_open_kernel();
18998 *pmdp = pmd;
18999+ pax_close_kernel();
19000 }
19001
19002 static inline void native_pmd_clear(pmd_t *pmd)
19003@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
19004
19005 static inline void native_set_pud(pud_t *pudp, pud_t pud)
19006 {
19007+ pax_open_kernel();
19008 *pudp = pud;
19009+ pax_close_kernel();
19010 }
19011
19012 static inline void native_pud_clear(pud_t *pud)
19013@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
19014
19015 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
19016 {
19017+ pax_open_kernel();
19018+ *pgdp = pgd;
19019+ pax_close_kernel();
19020+}
19021+
19022+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
19023+{
19024 *pgdp = pgd;
19025 }
19026
19027diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
19028index 7166e25..baaa6fe 100644
19029--- a/arch/x86/include/asm/pgtable_64_types.h
19030+++ b/arch/x86/include/asm/pgtable_64_types.h
19031@@ -61,9 +61,14 @@ typedef struct { pteval_t pte; } pte_t;
19032 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
19033 #define MODULES_END _AC(0xffffffffff000000, UL)
19034 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
19035+#define MODULES_EXEC_VADDR MODULES_VADDR
19036+#define MODULES_EXEC_END MODULES_END
19037 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
19038 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
19039
19040+#define ktla_ktva(addr) (addr)
19041+#define ktva_ktla(addr) (addr)
19042+
19043 #define EARLY_DYNAMIC_PAGE_TABLES 64
19044
19045 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
19046diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
19047index f216963..6bd7c21 100644
19048--- a/arch/x86/include/asm/pgtable_types.h
19049+++ b/arch/x86/include/asm/pgtable_types.h
19050@@ -111,8 +111,10 @@
19051
19052 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
19053 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
19054-#else
19055+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
19056 #define _PAGE_NX (_AT(pteval_t, 0))
19057+#else
19058+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
19059 #endif
19060
19061 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
19062@@ -151,6 +153,9 @@
19063 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
19064 _PAGE_ACCESSED)
19065
19066+#define PAGE_READONLY_NOEXEC PAGE_READONLY
19067+#define PAGE_SHARED_NOEXEC PAGE_SHARED
19068+
19069 #define __PAGE_KERNEL_EXEC \
19070 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
19071 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
19072@@ -161,7 +166,7 @@
19073 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
19074 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
19075 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
19076-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
19077+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
19078 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
19079 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
19080 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
19081@@ -218,7 +223,7 @@
19082 #ifdef CONFIG_X86_64
19083 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
19084 #else
19085-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
19086+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
19087 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
19088 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
19089 #endif
19090@@ -257,7 +262,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
19091 {
19092 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
19093 }
19094+#endif
19095
19096+#if PAGETABLE_LEVELS == 3
19097+#include <asm-generic/pgtable-nopud.h>
19098+#endif
19099+
19100+#if PAGETABLE_LEVELS == 2
19101+#include <asm-generic/pgtable-nopmd.h>
19102+#endif
19103+
19104+#ifndef __ASSEMBLY__
19105 #if PAGETABLE_LEVELS > 3
19106 typedef struct { pudval_t pud; } pud_t;
19107
19108@@ -271,8 +286,6 @@ static inline pudval_t native_pud_val(pud_t pud)
19109 return pud.pud;
19110 }
19111 #else
19112-#include <asm-generic/pgtable-nopud.h>
19113-
19114 static inline pudval_t native_pud_val(pud_t pud)
19115 {
19116 return native_pgd_val(pud.pgd);
19117@@ -292,8 +305,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
19118 return pmd.pmd;
19119 }
19120 #else
19121-#include <asm-generic/pgtable-nopmd.h>
19122-
19123 static inline pmdval_t native_pmd_val(pmd_t pmd)
19124 {
19125 return native_pgd_val(pmd.pud.pgd);
19126@@ -333,7 +344,6 @@ typedef struct page *pgtable_t;
19127
19128 extern pteval_t __supported_pte_mask;
19129 extern void set_nx(void);
19130-extern int nx_enabled;
19131
19132 #define pgprot_writecombine pgprot_writecombine
19133 extern pgprot_t pgprot_writecombine(pgprot_t prot);
19134diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
19135index 7024c12..71c46b9 100644
19136--- a/arch/x86/include/asm/preempt.h
19137+++ b/arch/x86/include/asm/preempt.h
19138@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
19139 */
19140 static __always_inline bool __preempt_count_dec_and_test(void)
19141 {
19142- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
19143+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
19144 }
19145
19146 /*
19147diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
19148index eb71ec7..f06532a 100644
19149--- a/arch/x86/include/asm/processor.h
19150+++ b/arch/x86/include/asm/processor.h
19151@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
19152 /* Index into per_cpu list: */
19153 u16 cpu_index;
19154 u32 microcode;
19155-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
19156+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
19157
19158 #define X86_VENDOR_INTEL 0
19159 #define X86_VENDOR_CYRIX 1
19160@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
19161 : "memory");
19162 }
19163
19164+/* invpcid (%rdx),%rax */
19165+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
19166+
19167+#define INVPCID_SINGLE_ADDRESS 0UL
19168+#define INVPCID_SINGLE_CONTEXT 1UL
19169+#define INVPCID_ALL_GLOBAL 2UL
19170+#define INVPCID_ALL_NONGLOBAL 3UL
19171+
19172+#define PCID_KERNEL 0UL
19173+#define PCID_USER 1UL
19174+#define PCID_NOFLUSH (1UL << 63)
19175+
19176 static inline void load_cr3(pgd_t *pgdir)
19177 {
19178- write_cr3(__pa(pgdir));
19179+ write_cr3(__pa(pgdir) | PCID_KERNEL);
19180 }
19181
19182 #ifdef CONFIG_X86_32
19183@@ -282,7 +294,7 @@ struct tss_struct {
19184
19185 } ____cacheline_aligned;
19186
19187-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
19188+extern struct tss_struct init_tss[NR_CPUS];
19189
19190 /*
19191 * Save the original ist values for checking stack pointers during debugging
19192@@ -478,6 +490,7 @@ struct thread_struct {
19193 unsigned short ds;
19194 unsigned short fsindex;
19195 unsigned short gsindex;
19196+ unsigned short ss;
19197 #endif
19198 #ifdef CONFIG_X86_32
19199 unsigned long ip;
19200@@ -587,29 +600,8 @@ static inline void load_sp0(struct tss_struct *tss,
19201 extern unsigned long mmu_cr4_features;
19202 extern u32 *trampoline_cr4_features;
19203
19204-static inline void set_in_cr4(unsigned long mask)
19205-{
19206- unsigned long cr4;
19207-
19208- mmu_cr4_features |= mask;
19209- if (trampoline_cr4_features)
19210- *trampoline_cr4_features = mmu_cr4_features;
19211- cr4 = read_cr4();
19212- cr4 |= mask;
19213- write_cr4(cr4);
19214-}
19215-
19216-static inline void clear_in_cr4(unsigned long mask)
19217-{
19218- unsigned long cr4;
19219-
19220- mmu_cr4_features &= ~mask;
19221- if (trampoline_cr4_features)
19222- *trampoline_cr4_features = mmu_cr4_features;
19223- cr4 = read_cr4();
19224- cr4 &= ~mask;
19225- write_cr4(cr4);
19226-}
19227+extern void set_in_cr4(unsigned long mask);
19228+extern void clear_in_cr4(unsigned long mask);
19229
19230 typedef struct {
19231 unsigned long seg;
19232@@ -837,11 +829,18 @@ static inline void spin_lock_prefetch(const void *x)
19233 */
19234 #define TASK_SIZE PAGE_OFFSET
19235 #define TASK_SIZE_MAX TASK_SIZE
19236+
19237+#ifdef CONFIG_PAX_SEGMEXEC
19238+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
19239+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
19240+#else
19241 #define STACK_TOP TASK_SIZE
19242-#define STACK_TOP_MAX STACK_TOP
19243+#endif
19244+
19245+#define STACK_TOP_MAX TASK_SIZE
19246
19247 #define INIT_THREAD { \
19248- .sp0 = sizeof(init_stack) + (long)&init_stack, \
19249+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
19250 .vm86_info = NULL, \
19251 .sysenter_cs = __KERNEL_CS, \
19252 .io_bitmap_ptr = NULL, \
19253@@ -855,7 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
19254 */
19255 #define INIT_TSS { \
19256 .x86_tss = { \
19257- .sp0 = sizeof(init_stack) + (long)&init_stack, \
19258+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
19259 .ss0 = __KERNEL_DS, \
19260 .ss1 = __KERNEL_CS, \
19261 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
19262@@ -866,11 +865,7 @@ static inline void spin_lock_prefetch(const void *x)
19263 extern unsigned long thread_saved_pc(struct task_struct *tsk);
19264
19265 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
19266-#define KSTK_TOP(info) \
19267-({ \
19268- unsigned long *__ptr = (unsigned long *)(info); \
19269- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
19270-})
19271+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
19272
19273 /*
19274 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
19275@@ -885,7 +880,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19276 #define task_pt_regs(task) \
19277 ({ \
19278 struct pt_regs *__regs__; \
19279- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
19280+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
19281 __regs__ - 1; \
19282 })
19283
19284@@ -895,13 +890,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19285 /*
19286 * User space process size. 47bits minus one guard page.
19287 */
19288-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
19289+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
19290
19291 /* This decides where the kernel will search for a free chunk of vm
19292 * space during mmap's.
19293 */
19294 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
19295- 0xc0000000 : 0xFFFFe000)
19296+ 0xc0000000 : 0xFFFFf000)
19297
19298 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
19299 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
19300@@ -912,11 +907,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19301 #define STACK_TOP_MAX TASK_SIZE_MAX
19302
19303 #define INIT_THREAD { \
19304- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
19305+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
19306 }
19307
19308 #define INIT_TSS { \
19309- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
19310+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
19311 }
19312
19313 /*
19314@@ -944,6 +939,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
19315 */
19316 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
19317
19318+#ifdef CONFIG_PAX_SEGMEXEC
19319+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
19320+#endif
19321+
19322 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
19323
19324 /* Get/set a process' ability to use the timestamp counter instruction */
19325@@ -970,7 +969,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
19326 return 0;
19327 }
19328
19329-extern unsigned long arch_align_stack(unsigned long sp);
19330+#define arch_align_stack(x) ((x) & ~0xfUL)
19331 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
19332
19333 void default_idle(void);
19334@@ -980,6 +979,6 @@ bool xen_set_default_idle(void);
19335 #define xen_set_default_idle 0
19336 #endif
19337
19338-void stop_this_cpu(void *dummy);
19339+void stop_this_cpu(void *dummy) __noreturn;
19340 void df_debug(struct pt_regs *regs, long error_code);
19341 #endif /* _ASM_X86_PROCESSOR_H */
19342diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
19343index 6205f0c..688a3a9 100644
19344--- a/arch/x86/include/asm/ptrace.h
19345+++ b/arch/x86/include/asm/ptrace.h
19346@@ -84,28 +84,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
19347 }
19348
19349 /*
19350- * user_mode_vm(regs) determines whether a register set came from user mode.
19351+ * user_mode(regs) determines whether a register set came from user mode.
19352 * This is true if V8086 mode was enabled OR if the register set was from
19353 * protected mode with RPL-3 CS value. This tricky test checks that with
19354 * one comparison. Many places in the kernel can bypass this full check
19355- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
19356+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
19357+ * be used.
19358 */
19359-static inline int user_mode(struct pt_regs *regs)
19360+static inline int user_mode_novm(struct pt_regs *regs)
19361 {
19362 #ifdef CONFIG_X86_32
19363 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
19364 #else
19365- return !!(regs->cs & 3);
19366+ return !!(regs->cs & SEGMENT_RPL_MASK);
19367 #endif
19368 }
19369
19370-static inline int user_mode_vm(struct pt_regs *regs)
19371+static inline int user_mode(struct pt_regs *regs)
19372 {
19373 #ifdef CONFIG_X86_32
19374 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
19375 USER_RPL;
19376 #else
19377- return user_mode(regs);
19378+ return user_mode_novm(regs);
19379 #endif
19380 }
19381
19382@@ -121,15 +122,16 @@ static inline int v8086_mode(struct pt_regs *regs)
19383 #ifdef CONFIG_X86_64
19384 static inline bool user_64bit_mode(struct pt_regs *regs)
19385 {
19386+ unsigned long cs = regs->cs & 0xffff;
19387 #ifndef CONFIG_PARAVIRT
19388 /*
19389 * On non-paravirt systems, this is the only long mode CPL 3
19390 * selector. We do not allow long mode selectors in the LDT.
19391 */
19392- return regs->cs == __USER_CS;
19393+ return cs == __USER_CS;
19394 #else
19395 /* Headers are too twisted for this to go in paravirt.h. */
19396- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
19397+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
19398 #endif
19399 }
19400
19401@@ -180,9 +182,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
19402 * Traps from the kernel do not save sp and ss.
19403 * Use the helper function to retrieve sp.
19404 */
19405- if (offset == offsetof(struct pt_regs, sp) &&
19406- regs->cs == __KERNEL_CS)
19407- return kernel_stack_pointer(regs);
19408+ if (offset == offsetof(struct pt_regs, sp)) {
19409+ unsigned long cs = regs->cs & 0xffff;
19410+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
19411+ return kernel_stack_pointer(regs);
19412+ }
19413 #endif
19414 return *(unsigned long *)((unsigned long)regs + offset);
19415 }
19416diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
19417index ae0e241..e80b10b 100644
19418--- a/arch/x86/include/asm/qrwlock.h
19419+++ b/arch/x86/include/asm/qrwlock.h
19420@@ -7,8 +7,8 @@
19421 #define queue_write_unlock queue_write_unlock
19422 static inline void queue_write_unlock(struct qrwlock *lock)
19423 {
19424- barrier();
19425- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
19426+ barrier();
19427+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
19428 }
19429 #endif
19430
19431diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
19432index 9c6b890..5305f53 100644
19433--- a/arch/x86/include/asm/realmode.h
19434+++ b/arch/x86/include/asm/realmode.h
19435@@ -22,16 +22,14 @@ struct real_mode_header {
19436 #endif
19437 /* APM/BIOS reboot */
19438 u32 machine_real_restart_asm;
19439-#ifdef CONFIG_X86_64
19440 u32 machine_real_restart_seg;
19441-#endif
19442 };
19443
19444 /* This must match data at trampoline_32/64.S */
19445 struct trampoline_header {
19446 #ifdef CONFIG_X86_32
19447 u32 start;
19448- u16 gdt_pad;
19449+ u16 boot_cs;
19450 u16 gdt_limit;
19451 u32 gdt_base;
19452 #else
19453diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
19454index a82c4f1..ac45053 100644
19455--- a/arch/x86/include/asm/reboot.h
19456+++ b/arch/x86/include/asm/reboot.h
19457@@ -6,13 +6,13 @@
19458 struct pt_regs;
19459
19460 struct machine_ops {
19461- void (*restart)(char *cmd);
19462- void (*halt)(void);
19463- void (*power_off)(void);
19464+ void (* __noreturn restart)(char *cmd);
19465+ void (* __noreturn halt)(void);
19466+ void (* __noreturn power_off)(void);
19467 void (*shutdown)(void);
19468 void (*crash_shutdown)(struct pt_regs *);
19469- void (*emergency_restart)(void);
19470-};
19471+ void (* __noreturn emergency_restart)(void);
19472+} __no_const;
19473
19474 extern struct machine_ops machine_ops;
19475
19476diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
19477index 8f7866a..e442f20 100644
19478--- a/arch/x86/include/asm/rmwcc.h
19479+++ b/arch/x86/include/asm/rmwcc.h
19480@@ -3,7 +3,34 @@
19481
19482 #ifdef CC_HAVE_ASM_GOTO
19483
19484-#define __GEN_RMWcc(fullop, var, cc, ...) \
19485+#ifdef CONFIG_PAX_REFCOUNT
19486+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19487+do { \
19488+ asm_volatile_goto (fullop \
19489+ ";jno 0f\n" \
19490+ fullantiop \
19491+ ";int $4\n0:\n" \
19492+ _ASM_EXTABLE(0b, 0b) \
19493+ ";j" cc " %l[cc_label]" \
19494+ : : "m" (var), ## __VA_ARGS__ \
19495+ : "memory" : cc_label); \
19496+ return 0; \
19497+cc_label: \
19498+ return 1; \
19499+} while (0)
19500+#else
19501+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19502+do { \
19503+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
19504+ : : "m" (var), ## __VA_ARGS__ \
19505+ : "memory" : cc_label); \
19506+ return 0; \
19507+cc_label: \
19508+ return 1; \
19509+} while (0)
19510+#endif
19511+
19512+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19513 do { \
19514 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
19515 : : "m" (var), ## __VA_ARGS__ \
19516@@ -13,15 +40,46 @@ cc_label: \
19517 return 1; \
19518 } while (0)
19519
19520-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19521- __GEN_RMWcc(op " " arg0, var, cc)
19522+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19523+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19524
19525-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19526- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
19527+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19528+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19529+
19530+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19531+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
19532+
19533+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19534+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
19535
19536 #else /* !CC_HAVE_ASM_GOTO */
19537
19538-#define __GEN_RMWcc(fullop, var, cc, ...) \
19539+#ifdef CONFIG_PAX_REFCOUNT
19540+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19541+do { \
19542+ char c; \
19543+ asm volatile (fullop \
19544+ ";jno 0f\n" \
19545+ fullantiop \
19546+ ";int $4\n0:\n" \
19547+ _ASM_EXTABLE(0b, 0b) \
19548+ "; set" cc " %1" \
19549+ : "+m" (var), "=qm" (c) \
19550+ : __VA_ARGS__ : "memory"); \
19551+ return c != 0; \
19552+} while (0)
19553+#else
19554+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19555+do { \
19556+ char c; \
19557+ asm volatile (fullop "; set" cc " %1" \
19558+ : "+m" (var), "=qm" (c) \
19559+ : __VA_ARGS__ : "memory"); \
19560+ return c != 0; \
19561+} while (0)
19562+#endif
19563+
19564+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19565 do { \
19566 char c; \
19567 asm volatile (fullop "; set" cc " %1" \
19568@@ -30,11 +88,17 @@ do { \
19569 return c != 0; \
19570 } while (0)
19571
19572-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19573- __GEN_RMWcc(op " " arg0, var, cc)
19574+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19575+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19576+
19577+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19578+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19579+
19580+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19581+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
19582
19583-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19584- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
19585+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19586+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
19587
19588 #endif /* CC_HAVE_ASM_GOTO */
19589
19590diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
19591index cad82c9..2e5c5c1 100644
19592--- a/arch/x86/include/asm/rwsem.h
19593+++ b/arch/x86/include/asm/rwsem.h
19594@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
19595 {
19596 asm volatile("# beginning down_read\n\t"
19597 LOCK_PREFIX _ASM_INC "(%1)\n\t"
19598+
19599+#ifdef CONFIG_PAX_REFCOUNT
19600+ "jno 0f\n"
19601+ LOCK_PREFIX _ASM_DEC "(%1)\n"
19602+ "int $4\n0:\n"
19603+ _ASM_EXTABLE(0b, 0b)
19604+#endif
19605+
19606 /* adds 0x00000001 */
19607 " jns 1f\n"
19608 " call call_rwsem_down_read_failed\n"
19609@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
19610 "1:\n\t"
19611 " mov %1,%2\n\t"
19612 " add %3,%2\n\t"
19613+
19614+#ifdef CONFIG_PAX_REFCOUNT
19615+ "jno 0f\n"
19616+ "sub %3,%2\n"
19617+ "int $4\n0:\n"
19618+ _ASM_EXTABLE(0b, 0b)
19619+#endif
19620+
19621 " jle 2f\n\t"
19622 LOCK_PREFIX " cmpxchg %2,%0\n\t"
19623 " jnz 1b\n\t"
19624@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
19625 long tmp;
19626 asm volatile("# beginning down_write\n\t"
19627 LOCK_PREFIX " xadd %1,(%2)\n\t"
19628+
19629+#ifdef CONFIG_PAX_REFCOUNT
19630+ "jno 0f\n"
19631+ "mov %1,(%2)\n"
19632+ "int $4\n0:\n"
19633+ _ASM_EXTABLE(0b, 0b)
19634+#endif
19635+
19636 /* adds 0xffff0001, returns the old value */
19637 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
19638 /* was the active mask 0 before? */
19639@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
19640 long tmp;
19641 asm volatile("# beginning __up_read\n\t"
19642 LOCK_PREFIX " xadd %1,(%2)\n\t"
19643+
19644+#ifdef CONFIG_PAX_REFCOUNT
19645+ "jno 0f\n"
19646+ "mov %1,(%2)\n"
19647+ "int $4\n0:\n"
19648+ _ASM_EXTABLE(0b, 0b)
19649+#endif
19650+
19651 /* subtracts 1, returns the old value */
19652 " jns 1f\n\t"
19653 " call call_rwsem_wake\n" /* expects old value in %edx */
19654@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
19655 long tmp;
19656 asm volatile("# beginning __up_write\n\t"
19657 LOCK_PREFIX " xadd %1,(%2)\n\t"
19658+
19659+#ifdef CONFIG_PAX_REFCOUNT
19660+ "jno 0f\n"
19661+ "mov %1,(%2)\n"
19662+ "int $4\n0:\n"
19663+ _ASM_EXTABLE(0b, 0b)
19664+#endif
19665+
19666 /* subtracts 0xffff0001, returns the old value */
19667 " jns 1f\n\t"
19668 " call call_rwsem_wake\n" /* expects old value in %edx */
19669@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19670 {
19671 asm volatile("# beginning __downgrade_write\n\t"
19672 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19673+
19674+#ifdef CONFIG_PAX_REFCOUNT
19675+ "jno 0f\n"
19676+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19677+ "int $4\n0:\n"
19678+ _ASM_EXTABLE(0b, 0b)
19679+#endif
19680+
19681 /*
19682 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19683 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19684@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19685 */
19686 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19687 {
19688- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19689+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19690+
19691+#ifdef CONFIG_PAX_REFCOUNT
19692+ "jno 0f\n"
19693+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19694+ "int $4\n0:\n"
19695+ _ASM_EXTABLE(0b, 0b)
19696+#endif
19697+
19698 : "+m" (sem->count)
19699 : "er" (delta));
19700 }
19701@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19702 */
19703 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19704 {
19705- return delta + xadd(&sem->count, delta);
19706+ return delta + xadd_check_overflow(&sem->count, delta);
19707 }
19708
19709 #endif /* __KERNEL__ */
19710diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19711index 6f1c3a8..7744f19 100644
19712--- a/arch/x86/include/asm/segment.h
19713+++ b/arch/x86/include/asm/segment.h
19714@@ -64,10 +64,15 @@
19715 * 26 - ESPFIX small SS
19716 * 27 - per-cpu [ offset to per-cpu data area ]
19717 * 28 - stack_canary-20 [ for stack protector ]
19718- * 29 - unused
19719- * 30 - unused
19720+ * 29 - PCI BIOS CS
19721+ * 30 - PCI BIOS DS
19722 * 31 - TSS for double fault handler
19723 */
19724+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19725+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19726+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19727+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19728+
19729 #define GDT_ENTRY_TLS_MIN 6
19730 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19731
19732@@ -79,6 +84,8 @@
19733
19734 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19735
19736+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19737+
19738 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19739
19740 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19741@@ -104,6 +111,12 @@
19742 #define __KERNEL_STACK_CANARY 0
19743 #endif
19744
19745+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19746+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19747+
19748+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19749+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19750+
19751 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19752
19753 /*
19754@@ -141,7 +154,7 @@
19755 */
19756
19757 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19758-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19759+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19760
19761
19762 #else
19763@@ -165,6 +178,8 @@
19764 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19765 #define __USER32_DS __USER_DS
19766
19767+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19768+
19769 #define GDT_ENTRY_TSS 8 /* needs two entries */
19770 #define GDT_ENTRY_LDT 10 /* needs two entries */
19771 #define GDT_ENTRY_TLS_MIN 12
19772@@ -173,6 +188,8 @@
19773 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19774 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19775
19776+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19777+
19778 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19779 #define FS_TLS 0
19780 #define GS_TLS 1
19781@@ -180,12 +197,14 @@
19782 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19783 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19784
19785-#define GDT_ENTRIES 16
19786+#define GDT_ENTRIES 17
19787
19788 #endif
19789
19790 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19791+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19792 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19793+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19794 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19795 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19796 #ifndef CONFIG_PARAVIRT
19797@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
19798 {
19799 unsigned long __limit;
19800 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19801- return __limit + 1;
19802+ return __limit;
19803 }
19804
19805 #endif /* !__ASSEMBLY__ */
19806diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19807index 8d3120f..352b440 100644
19808--- a/arch/x86/include/asm/smap.h
19809+++ b/arch/x86/include/asm/smap.h
19810@@ -25,11 +25,40 @@
19811
19812 #include <asm/alternative-asm.h>
19813
19814+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19815+#define ASM_PAX_OPEN_USERLAND \
19816+ 661: jmp 663f; \
19817+ .pushsection .altinstr_replacement, "a" ; \
19818+ 662: pushq %rax; nop; \
19819+ .popsection ; \
19820+ .pushsection .altinstructions, "a" ; \
19821+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19822+ .popsection ; \
19823+ call __pax_open_userland; \
19824+ popq %rax; \
19825+ 663:
19826+
19827+#define ASM_PAX_CLOSE_USERLAND \
19828+ 661: jmp 663f; \
19829+ .pushsection .altinstr_replacement, "a" ; \
19830+ 662: pushq %rax; nop; \
19831+ .popsection; \
19832+ .pushsection .altinstructions, "a" ; \
19833+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19834+ .popsection; \
19835+ call __pax_close_userland; \
19836+ popq %rax; \
19837+ 663:
19838+#else
19839+#define ASM_PAX_OPEN_USERLAND
19840+#define ASM_PAX_CLOSE_USERLAND
19841+#endif
19842+
19843 #ifdef CONFIG_X86_SMAP
19844
19845 #define ASM_CLAC \
19846 661: ASM_NOP3 ; \
19847- .pushsection .altinstr_replacement, "ax" ; \
19848+ .pushsection .altinstr_replacement, "a" ; \
19849 662: __ASM_CLAC ; \
19850 .popsection ; \
19851 .pushsection .altinstructions, "a" ; \
19852@@ -38,7 +67,7 @@
19853
19854 #define ASM_STAC \
19855 661: ASM_NOP3 ; \
19856- .pushsection .altinstr_replacement, "ax" ; \
19857+ .pushsection .altinstr_replacement, "a" ; \
19858 662: __ASM_STAC ; \
19859 .popsection ; \
19860 .pushsection .altinstructions, "a" ; \
19861@@ -56,6 +85,37 @@
19862
19863 #include <asm/alternative.h>
19864
19865+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19866+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19867+
19868+extern void __pax_open_userland(void);
19869+static __always_inline unsigned long pax_open_userland(void)
19870+{
19871+
19872+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19873+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19874+ :
19875+ : [open] "i" (__pax_open_userland)
19876+ : "memory", "rax");
19877+#endif
19878+
19879+ return 0;
19880+}
19881+
19882+extern void __pax_close_userland(void);
19883+static __always_inline unsigned long pax_close_userland(void)
19884+{
19885+
19886+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19887+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19888+ :
19889+ : [close] "i" (__pax_close_userland)
19890+ : "memory", "rax");
19891+#endif
19892+
19893+ return 0;
19894+}
19895+
19896 #ifdef CONFIG_X86_SMAP
19897
19898 static __always_inline void clac(void)
19899diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19900index 8cd27e0..7f05ec8 100644
19901--- a/arch/x86/include/asm/smp.h
19902+++ b/arch/x86/include/asm/smp.h
19903@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19904 /* cpus sharing the last level cache: */
19905 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19906 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19907-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19908+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19909
19910 static inline struct cpumask *cpu_sibling_mask(int cpu)
19911 {
19912@@ -78,7 +78,7 @@ struct smp_ops {
19913
19914 void (*send_call_func_ipi)(const struct cpumask *mask);
19915 void (*send_call_func_single_ipi)(int cpu);
19916-};
19917+} __no_const;
19918
19919 /* Globals due to paravirt */
19920 extern void set_cpu_sibling_map(int cpu);
19921@@ -190,14 +190,8 @@ extern unsigned disabled_cpus;
19922 extern int safe_smp_processor_id(void);
19923
19924 #elif defined(CONFIG_X86_64_SMP)
19925-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19926-
19927-#define stack_smp_processor_id() \
19928-({ \
19929- struct thread_info *ti; \
19930- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19931- ti->cpu; \
19932-})
19933+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19934+#define stack_smp_processor_id() raw_smp_processor_id()
19935 #define safe_smp_processor_id() smp_processor_id()
19936
19937 #endif
19938diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
19939index 54f1c80..39362a5 100644
19940--- a/arch/x86/include/asm/spinlock.h
19941+++ b/arch/x86/include/asm/spinlock.h
19942@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
19943 static inline void arch_read_lock(arch_rwlock_t *rw)
19944 {
19945 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
19946+
19947+#ifdef CONFIG_PAX_REFCOUNT
19948+ "jno 0f\n"
19949+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
19950+ "int $4\n0:\n"
19951+ _ASM_EXTABLE(0b, 0b)
19952+#endif
19953+
19954 "jns 1f\n"
19955 "call __read_lock_failed\n\t"
19956 "1:\n"
19957@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
19958 static inline void arch_write_lock(arch_rwlock_t *rw)
19959 {
19960 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
19961+
19962+#ifdef CONFIG_PAX_REFCOUNT
19963+ "jno 0f\n"
19964+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
19965+ "int $4\n0:\n"
19966+ _ASM_EXTABLE(0b, 0b)
19967+#endif
19968+
19969 "jz 1f\n"
19970 "call __write_lock_failed\n\t"
19971 "1:\n"
19972@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
19973
19974 static inline void arch_read_unlock(arch_rwlock_t *rw)
19975 {
19976- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
19977+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
19978+
19979+#ifdef CONFIG_PAX_REFCOUNT
19980+ "jno 0f\n"
19981+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
19982+ "int $4\n0:\n"
19983+ _ASM_EXTABLE(0b, 0b)
19984+#endif
19985+
19986 :"+m" (rw->lock) : : "memory");
19987 }
19988
19989 static inline void arch_write_unlock(arch_rwlock_t *rw)
19990 {
19991- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
19992+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
19993+
19994+#ifdef CONFIG_PAX_REFCOUNT
19995+ "jno 0f\n"
19996+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
19997+ "int $4\n0:\n"
19998+ _ASM_EXTABLE(0b, 0b)
19999+#endif
20000+
20001 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
20002 }
20003 #else
20004diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
20005index 6a99859..03cb807 100644
20006--- a/arch/x86/include/asm/stackprotector.h
20007+++ b/arch/x86/include/asm/stackprotector.h
20008@@ -47,7 +47,7 @@
20009 * head_32 for boot CPU and setup_per_cpu_areas() for others.
20010 */
20011 #define GDT_STACK_CANARY_INIT \
20012- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
20013+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
20014
20015 /*
20016 * Initialize the stackprotector canary value.
20017@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
20018
20019 static inline void load_stack_canary_segment(void)
20020 {
20021-#ifdef CONFIG_X86_32
20022+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
20023 asm volatile ("mov %0, %%gs" : : "r" (0));
20024 #endif
20025 }
20026diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
20027index 70bbe39..4ae2bd4 100644
20028--- a/arch/x86/include/asm/stacktrace.h
20029+++ b/arch/x86/include/asm/stacktrace.h
20030@@ -11,28 +11,20 @@
20031
20032 extern int kstack_depth_to_print;
20033
20034-struct thread_info;
20035+struct task_struct;
20036 struct stacktrace_ops;
20037
20038-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
20039- unsigned long *stack,
20040- unsigned long bp,
20041- const struct stacktrace_ops *ops,
20042- void *data,
20043- unsigned long *end,
20044- int *graph);
20045+typedef unsigned long walk_stack_t(struct task_struct *task,
20046+ void *stack_start,
20047+ unsigned long *stack,
20048+ unsigned long bp,
20049+ const struct stacktrace_ops *ops,
20050+ void *data,
20051+ unsigned long *end,
20052+ int *graph);
20053
20054-extern unsigned long
20055-print_context_stack(struct thread_info *tinfo,
20056- unsigned long *stack, unsigned long bp,
20057- const struct stacktrace_ops *ops, void *data,
20058- unsigned long *end, int *graph);
20059-
20060-extern unsigned long
20061-print_context_stack_bp(struct thread_info *tinfo,
20062- unsigned long *stack, unsigned long bp,
20063- const struct stacktrace_ops *ops, void *data,
20064- unsigned long *end, int *graph);
20065+extern walk_stack_t print_context_stack;
20066+extern walk_stack_t print_context_stack_bp;
20067
20068 /* Generic stack tracer with callbacks */
20069
20070@@ -40,7 +32,7 @@ struct stacktrace_ops {
20071 void (*address)(void *data, unsigned long address, int reliable);
20072 /* On negative return stop dumping */
20073 int (*stack)(void *data, char *name);
20074- walk_stack_t walk_stack;
20075+ walk_stack_t *walk_stack;
20076 };
20077
20078 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
20079diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
20080index d7f3b3b..3cc39f1 100644
20081--- a/arch/x86/include/asm/switch_to.h
20082+++ b/arch/x86/include/asm/switch_to.h
20083@@ -108,7 +108,7 @@ do { \
20084 "call __switch_to\n\t" \
20085 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
20086 __switch_canary \
20087- "movq %P[thread_info](%%rsi),%%r8\n\t" \
20088+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
20089 "movq %%rax,%%rdi\n\t" \
20090 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
20091 "jnz ret_from_fork\n\t" \
20092@@ -119,7 +119,7 @@ do { \
20093 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
20094 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
20095 [_tif_fork] "i" (_TIF_FORK), \
20096- [thread_info] "i" (offsetof(struct task_struct, stack)), \
20097+ [thread_info] "m" (current_tinfo), \
20098 [current_task] "m" (current_task) \
20099 __switch_canary_iparam \
20100 : "memory", "cc" __EXTRA_CLOBBER)
20101diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
20102index 547e344..6be1175 100644
20103--- a/arch/x86/include/asm/thread_info.h
20104+++ b/arch/x86/include/asm/thread_info.h
20105@@ -24,7 +24,6 @@ struct exec_domain;
20106 #include <linux/atomic.h>
20107
20108 struct thread_info {
20109- struct task_struct *task; /* main task structure */
20110 struct exec_domain *exec_domain; /* execution domain */
20111 __u32 flags; /* low level flags */
20112 __u32 status; /* thread synchronous flags */
20113@@ -33,13 +32,13 @@ struct thread_info {
20114 mm_segment_t addr_limit;
20115 struct restart_block restart_block;
20116 void __user *sysenter_return;
20117+ unsigned long lowest_stack;
20118 unsigned int sig_on_uaccess_error:1;
20119 unsigned int uaccess_err:1; /* uaccess failed */
20120 };
20121
20122-#define INIT_THREAD_INFO(tsk) \
20123+#define INIT_THREAD_INFO \
20124 { \
20125- .task = &tsk, \
20126 .exec_domain = &default_exec_domain, \
20127 .flags = 0, \
20128 .cpu = 0, \
20129@@ -50,7 +49,7 @@ struct thread_info {
20130 }, \
20131 }
20132
20133-#define init_thread_info (init_thread_union.thread_info)
20134+#define init_thread_info (init_thread_union.stack)
20135 #define init_stack (init_thread_union.stack)
20136
20137 #else /* !__ASSEMBLY__ */
20138@@ -91,6 +90,7 @@ struct thread_info {
20139 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
20140 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
20141 #define TIF_X32 30 /* 32-bit native x86-64 binary */
20142+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
20143
20144 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
20145 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
20146@@ -115,17 +115,18 @@ struct thread_info {
20147 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
20148 #define _TIF_ADDR32 (1 << TIF_ADDR32)
20149 #define _TIF_X32 (1 << TIF_X32)
20150+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
20151
20152 /* work to do in syscall_trace_enter() */
20153 #define _TIF_WORK_SYSCALL_ENTRY \
20154 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
20155 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
20156- _TIF_NOHZ)
20157+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
20158
20159 /* work to do in syscall_trace_leave() */
20160 #define _TIF_WORK_SYSCALL_EXIT \
20161 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
20162- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
20163+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
20164
20165 /* work to do on interrupt/exception return */
20166 #define _TIF_WORK_MASK \
20167@@ -136,7 +137,7 @@ struct thread_info {
20168 /* work to do on any return to user space */
20169 #define _TIF_ALLWORK_MASK \
20170 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
20171- _TIF_NOHZ)
20172+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
20173
20174 /* Only used for 64 bit */
20175 #define _TIF_DO_NOTIFY_MASK \
20176@@ -151,7 +152,6 @@ struct thread_info {
20177 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
20178
20179 #define STACK_WARN (THREAD_SIZE/8)
20180-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
20181
20182 /*
20183 * macros/functions for gaining access to the thread information structure
20184@@ -162,26 +162,18 @@ struct thread_info {
20185
20186 DECLARE_PER_CPU(unsigned long, kernel_stack);
20187
20188+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
20189+
20190 static inline struct thread_info *current_thread_info(void)
20191 {
20192- struct thread_info *ti;
20193- ti = (void *)(this_cpu_read_stable(kernel_stack) +
20194- KERNEL_STACK_OFFSET - THREAD_SIZE);
20195- return ti;
20196+ return this_cpu_read_stable(current_tinfo);
20197 }
20198
20199 #else /* !__ASSEMBLY__ */
20200
20201 /* how to get the thread information struct from ASM */
20202 #define GET_THREAD_INFO(reg) \
20203- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
20204- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
20205-
20206-/*
20207- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
20208- * a certain register (to be used in assembler memory operands).
20209- */
20210-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
20211+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
20212
20213 #endif
20214
20215@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
20216 extern void arch_task_cache_init(void);
20217 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
20218 extern void arch_release_task_struct(struct task_struct *tsk);
20219+
20220+#define __HAVE_THREAD_FUNCTIONS
20221+#define task_thread_info(task) (&(task)->tinfo)
20222+#define task_stack_page(task) ((task)->stack)
20223+#define setup_thread_stack(p, org) do {} while (0)
20224+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
20225+
20226 #endif
20227 #endif /* _ASM_X86_THREAD_INFO_H */
20228diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
20229index 04905bf..1178cdf 100644
20230--- a/arch/x86/include/asm/tlbflush.h
20231+++ b/arch/x86/include/asm/tlbflush.h
20232@@ -17,18 +17,44 @@
20233
20234 static inline void __native_flush_tlb(void)
20235 {
20236+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20237+ u64 descriptor[2];
20238+
20239+ descriptor[0] = PCID_KERNEL;
20240+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
20241+ return;
20242+ }
20243+
20244+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20245+ if (static_cpu_has(X86_FEATURE_PCID)) {
20246+ unsigned int cpu = raw_get_cpu();
20247+
20248+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
20249+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
20250+ raw_put_cpu_no_resched();
20251+ return;
20252+ }
20253+#endif
20254+
20255 native_write_cr3(native_read_cr3());
20256 }
20257
20258 static inline void __native_flush_tlb_global_irq_disabled(void)
20259 {
20260- unsigned long cr4;
20261+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20262+ u64 descriptor[2];
20263
20264- cr4 = native_read_cr4();
20265- /* clear PGE */
20266- native_write_cr4(cr4 & ~X86_CR4_PGE);
20267- /* write old PGE again and flush TLBs */
20268- native_write_cr4(cr4);
20269+ descriptor[0] = PCID_KERNEL;
20270+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
20271+ } else {
20272+ unsigned long cr4;
20273+
20274+ cr4 = native_read_cr4();
20275+ /* clear PGE */
20276+ native_write_cr4(cr4 & ~X86_CR4_PGE);
20277+ /* write old PGE again and flush TLBs */
20278+ native_write_cr4(cr4);
20279+ }
20280 }
20281
20282 static inline void __native_flush_tlb_global(void)
20283@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
20284
20285 static inline void __native_flush_tlb_single(unsigned long addr)
20286 {
20287+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20288+ u64 descriptor[2];
20289+
20290+ descriptor[0] = PCID_KERNEL;
20291+ descriptor[1] = addr;
20292+
20293+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20294+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
20295+ if (addr < TASK_SIZE_MAX)
20296+ descriptor[1] += pax_user_shadow_base;
20297+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
20298+ }
20299+
20300+ descriptor[0] = PCID_USER;
20301+ descriptor[1] = addr;
20302+#endif
20303+
20304+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
20305+ return;
20306+ }
20307+
20308+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20309+ if (static_cpu_has(X86_FEATURE_PCID)) {
20310+ unsigned int cpu = raw_get_cpu();
20311+
20312+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
20313+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
20314+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
20315+ raw_put_cpu_no_resched();
20316+
20317+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
20318+ addr += pax_user_shadow_base;
20319+ }
20320+#endif
20321+
20322 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
20323 }
20324
20325diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
20326index 0d592e0..526f797 100644
20327--- a/arch/x86/include/asm/uaccess.h
20328+++ b/arch/x86/include/asm/uaccess.h
20329@@ -7,6 +7,7 @@
20330 #include <linux/compiler.h>
20331 #include <linux/thread_info.h>
20332 #include <linux/string.h>
20333+#include <linux/spinlock.h>
20334 #include <asm/asm.h>
20335 #include <asm/page.h>
20336 #include <asm/smap.h>
20337@@ -29,7 +30,12 @@
20338
20339 #define get_ds() (KERNEL_DS)
20340 #define get_fs() (current_thread_info()->addr_limit)
20341+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20342+void __set_fs(mm_segment_t x);
20343+void set_fs(mm_segment_t x);
20344+#else
20345 #define set_fs(x) (current_thread_info()->addr_limit = (x))
20346+#endif
20347
20348 #define segment_eq(a, b) ((a).seg == (b).seg)
20349
20350@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
20351 * checks that the pointer is in the user space range - after calling
20352 * this function, memory access functions may still return -EFAULT.
20353 */
20354-#define access_ok(type, addr, size) \
20355- likely(!__range_not_ok(addr, size, user_addr_max()))
20356+extern int _cond_resched(void);
20357+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
20358+#define access_ok(type, addr, size) \
20359+({ \
20360+ unsigned long __size = size; \
20361+ unsigned long __addr = (unsigned long)addr; \
20362+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
20363+ if (__ret_ao && __size) { \
20364+ unsigned long __addr_ao = __addr & PAGE_MASK; \
20365+ unsigned long __end_ao = __addr + __size - 1; \
20366+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
20367+ while (__addr_ao <= __end_ao) { \
20368+ char __c_ao; \
20369+ __addr_ao += PAGE_SIZE; \
20370+ if (__size > PAGE_SIZE) \
20371+ _cond_resched(); \
20372+ if (__get_user(__c_ao, (char __user *)__addr)) \
20373+ break; \
20374+ if (type != VERIFY_WRITE) { \
20375+ __addr = __addr_ao; \
20376+ continue; \
20377+ } \
20378+ if (__put_user(__c_ao, (char __user *)__addr)) \
20379+ break; \
20380+ __addr = __addr_ao; \
20381+ } \
20382+ } \
20383+ } \
20384+ __ret_ao; \
20385+})
20386
20387 /*
20388 * The exception table consists of pairs of addresses relative to the
20389@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20390 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
20391 __chk_user_ptr(ptr); \
20392 might_fault(); \
20393+ pax_open_userland(); \
20394 asm volatile("call __get_user_%P3" \
20395 : "=a" (__ret_gu), "=r" (__val_gu) \
20396 : "0" (ptr), "i" (sizeof(*(ptr)))); \
20397 (x) = (__typeof__(*(ptr))) __val_gu; \
20398+ pax_close_userland(); \
20399 __ret_gu; \
20400 })
20401
20402@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20403 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
20404 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
20405
20406-
20407+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20408+#define __copyuser_seg "gs;"
20409+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
20410+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
20411+#else
20412+#define __copyuser_seg
20413+#define __COPYUSER_SET_ES
20414+#define __COPYUSER_RESTORE_ES
20415+#endif
20416
20417 #ifdef CONFIG_X86_32
20418 #define __put_user_asm_u64(x, addr, err, errret) \
20419 asm volatile(ASM_STAC "\n" \
20420- "1: movl %%eax,0(%2)\n" \
20421- "2: movl %%edx,4(%2)\n" \
20422+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
20423+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
20424 "3: " ASM_CLAC "\n" \
20425 ".section .fixup,\"ax\"\n" \
20426 "4: movl %3,%0\n" \
20427@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20428
20429 #define __put_user_asm_ex_u64(x, addr) \
20430 asm volatile(ASM_STAC "\n" \
20431- "1: movl %%eax,0(%1)\n" \
20432- "2: movl %%edx,4(%1)\n" \
20433+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
20434+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
20435 "3: " ASM_CLAC "\n" \
20436 _ASM_EXTABLE_EX(1b, 2b) \
20437 _ASM_EXTABLE_EX(2b, 3b) \
20438@@ -257,7 +301,8 @@ extern void __put_user_8(void);
20439 __typeof__(*(ptr)) __pu_val; \
20440 __chk_user_ptr(ptr); \
20441 might_fault(); \
20442- __pu_val = x; \
20443+ __pu_val = (x); \
20444+ pax_open_userland(); \
20445 switch (sizeof(*(ptr))) { \
20446 case 1: \
20447 __put_user_x(1, __pu_val, ptr, __ret_pu); \
20448@@ -275,6 +320,7 @@ extern void __put_user_8(void);
20449 __put_user_x(X, __pu_val, ptr, __ret_pu); \
20450 break; \
20451 } \
20452+ pax_close_userland(); \
20453 __ret_pu; \
20454 })
20455
20456@@ -355,8 +401,10 @@ do { \
20457 } while (0)
20458
20459 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
20460+do { \
20461+ pax_open_userland(); \
20462 asm volatile(ASM_STAC "\n" \
20463- "1: mov"itype" %2,%"rtype"1\n" \
20464+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
20465 "2: " ASM_CLAC "\n" \
20466 ".section .fixup,\"ax\"\n" \
20467 "3: mov %3,%0\n" \
20468@@ -364,8 +412,10 @@ do { \
20469 " jmp 2b\n" \
20470 ".previous\n" \
20471 _ASM_EXTABLE(1b, 3b) \
20472- : "=r" (err), ltype(x) \
20473- : "m" (__m(addr)), "i" (errret), "0" (err))
20474+ : "=r" (err), ltype (x) \
20475+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
20476+ pax_close_userland(); \
20477+} while (0)
20478
20479 #define __get_user_size_ex(x, ptr, size) \
20480 do { \
20481@@ -389,7 +439,7 @@ do { \
20482 } while (0)
20483
20484 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
20485- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
20486+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
20487 "2:\n" \
20488 _ASM_EXTABLE_EX(1b, 2b) \
20489 : ltype(x) : "m" (__m(addr)))
20490@@ -406,13 +456,24 @@ do { \
20491 int __gu_err; \
20492 unsigned long __gu_val; \
20493 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
20494- (x) = (__force __typeof__(*(ptr)))__gu_val; \
20495+ (x) = (__typeof__(*(ptr)))__gu_val; \
20496 __gu_err; \
20497 })
20498
20499 /* FIXME: this hack is definitely wrong -AK */
20500 struct __large_struct { unsigned long buf[100]; };
20501-#define __m(x) (*(struct __large_struct __user *)(x))
20502+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20503+#define ____m(x) \
20504+({ \
20505+ unsigned long ____x = (unsigned long)(x); \
20506+ if (____x < pax_user_shadow_base) \
20507+ ____x += pax_user_shadow_base; \
20508+ (typeof(x))____x; \
20509+})
20510+#else
20511+#define ____m(x) (x)
20512+#endif
20513+#define __m(x) (*(struct __large_struct __user *)____m(x))
20514
20515 /*
20516 * Tell gcc we read from memory instead of writing: this is because
20517@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
20518 * aliasing issues.
20519 */
20520 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
20521+do { \
20522+ pax_open_userland(); \
20523 asm volatile(ASM_STAC "\n" \
20524- "1: mov"itype" %"rtype"1,%2\n" \
20525+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
20526 "2: " ASM_CLAC "\n" \
20527 ".section .fixup,\"ax\"\n" \
20528 "3: mov %3,%0\n" \
20529@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
20530 ".previous\n" \
20531 _ASM_EXTABLE(1b, 3b) \
20532 : "=r"(err) \
20533- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
20534+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
20535+ pax_close_userland(); \
20536+} while (0)
20537
20538 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
20539- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
20540+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
20541 "2:\n" \
20542 _ASM_EXTABLE_EX(1b, 2b) \
20543 : : ltype(x), "m" (__m(addr)))
20544@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
20545 */
20546 #define uaccess_try do { \
20547 current_thread_info()->uaccess_err = 0; \
20548+ pax_open_userland(); \
20549 stac(); \
20550 barrier();
20551
20552 #define uaccess_catch(err) \
20553 clac(); \
20554+ pax_close_userland(); \
20555 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
20556 } while (0)
20557
20558@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
20559 * On error, the variable @x is set to zero.
20560 */
20561
20562+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20563+#define __get_user(x, ptr) get_user((x), (ptr))
20564+#else
20565 #define __get_user(x, ptr) \
20566 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
20567+#endif
20568
20569 /**
20570 * __put_user: - Write a simple value into user space, with less checking.
20571@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
20572 * Returns zero on success, or -EFAULT on error.
20573 */
20574
20575+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20576+#define __put_user(x, ptr) put_user((x), (ptr))
20577+#else
20578 #define __put_user(x, ptr) \
20579 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
20580+#endif
20581
20582 #define __get_user_unaligned __get_user
20583 #define __put_user_unaligned __put_user
20584@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
20585 #define get_user_ex(x, ptr) do { \
20586 unsigned long __gue_val; \
20587 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
20588- (x) = (__force __typeof__(*(ptr)))__gue_val; \
20589+ (x) = (__typeof__(*(ptr)))__gue_val; \
20590 } while (0)
20591
20592 #define put_user_try uaccess_try
20593@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
20594 __typeof__(ptr) __uval = (uval); \
20595 __typeof__(*(ptr)) __old = (old); \
20596 __typeof__(*(ptr)) __new = (new); \
20597+ pax_open_userland(); \
20598 switch (size) { \
20599 case 1: \
20600 { \
20601 asm volatile("\t" ASM_STAC "\n" \
20602- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
20603+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
20604 "2:\t" ASM_CLAC "\n" \
20605 "\t.section .fixup, \"ax\"\n" \
20606 "3:\tmov %3, %0\n" \
20607 "\tjmp 2b\n" \
20608 "\t.previous\n" \
20609 _ASM_EXTABLE(1b, 3b) \
20610- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20611+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20612 : "i" (-EFAULT), "q" (__new), "1" (__old) \
20613 : "memory" \
20614 ); \
20615@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
20616 case 2: \
20617 { \
20618 asm volatile("\t" ASM_STAC "\n" \
20619- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
20620+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
20621 "2:\t" ASM_CLAC "\n" \
20622 "\t.section .fixup, \"ax\"\n" \
20623 "3:\tmov %3, %0\n" \
20624 "\tjmp 2b\n" \
20625 "\t.previous\n" \
20626 _ASM_EXTABLE(1b, 3b) \
20627- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20628+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20629 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20630 : "memory" \
20631 ); \
20632@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
20633 case 4: \
20634 { \
20635 asm volatile("\t" ASM_STAC "\n" \
20636- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
20637+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
20638 "2:\t" ASM_CLAC "\n" \
20639 "\t.section .fixup, \"ax\"\n" \
20640 "3:\tmov %3, %0\n" \
20641 "\tjmp 2b\n" \
20642 "\t.previous\n" \
20643 _ASM_EXTABLE(1b, 3b) \
20644- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20645+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20646 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20647 : "memory" \
20648 ); \
20649@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
20650 __cmpxchg_wrong_size(); \
20651 \
20652 asm volatile("\t" ASM_STAC "\n" \
20653- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
20654+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
20655 "2:\t" ASM_CLAC "\n" \
20656 "\t.section .fixup, \"ax\"\n" \
20657 "3:\tmov %3, %0\n" \
20658 "\tjmp 2b\n" \
20659 "\t.previous\n" \
20660 _ASM_EXTABLE(1b, 3b) \
20661- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20662+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20663 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20664 : "memory" \
20665 ); \
20666@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
20667 default: \
20668 __cmpxchg_wrong_size(); \
20669 } \
20670+ pax_close_userland(); \
20671 *__uval = __old; \
20672 __ret; \
20673 })
20674@@ -636,17 +713,6 @@ extern struct movsl_mask {
20675
20676 #define ARCH_HAS_NOCACHE_UACCESS 1
20677
20678-#ifdef CONFIG_X86_32
20679-# include <asm/uaccess_32.h>
20680-#else
20681-# include <asm/uaccess_64.h>
20682-#endif
20683-
20684-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20685- unsigned n);
20686-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20687- unsigned n);
20688-
20689 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20690 # define copy_user_diag __compiletime_error
20691 #else
20692@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20693 extern void copy_user_diag("copy_from_user() buffer size is too small")
20694 copy_from_user_overflow(void);
20695 extern void copy_user_diag("copy_to_user() buffer size is too small")
20696-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20697+copy_to_user_overflow(void);
20698
20699 #undef copy_user_diag
20700
20701@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20702
20703 extern void
20704 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20705-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20706+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20707 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20708
20709 #else
20710@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20711
20712 #endif
20713
20714+#ifdef CONFIG_X86_32
20715+# include <asm/uaccess_32.h>
20716+#else
20717+# include <asm/uaccess_64.h>
20718+#endif
20719+
20720 static inline unsigned long __must_check
20721 copy_from_user(void *to, const void __user *from, unsigned long n)
20722 {
20723- int sz = __compiletime_object_size(to);
20724+ size_t sz = __compiletime_object_size(to);
20725
20726 might_fault();
20727
20728@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20729 * case, and do only runtime checking for non-constant sizes.
20730 */
20731
20732- if (likely(sz < 0 || sz >= n))
20733- n = _copy_from_user(to, from, n);
20734- else if(__builtin_constant_p(n))
20735- copy_from_user_overflow();
20736- else
20737- __copy_from_user_overflow(sz, n);
20738+ if (likely(sz != (size_t)-1 && sz < n)) {
20739+ if(__builtin_constant_p(n))
20740+ copy_from_user_overflow();
20741+ else
20742+ __copy_from_user_overflow(sz, n);
20743+ } else if (access_ok(VERIFY_READ, from, n))
20744+ n = __copy_from_user(to, from, n);
20745+ else if ((long)n > 0)
20746+ memset(to, 0, n);
20747
20748 return n;
20749 }
20750@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20751 static inline unsigned long __must_check
20752 copy_to_user(void __user *to, const void *from, unsigned long n)
20753 {
20754- int sz = __compiletime_object_size(from);
20755+ size_t sz = __compiletime_object_size(from);
20756
20757 might_fault();
20758
20759 /* See the comment in copy_from_user() above. */
20760- if (likely(sz < 0 || sz >= n))
20761- n = _copy_to_user(to, from, n);
20762- else if(__builtin_constant_p(n))
20763- copy_to_user_overflow();
20764- else
20765- __copy_to_user_overflow(sz, n);
20766+ if (likely(sz != (size_t)-1 && sz < n)) {
20767+ if(__builtin_constant_p(n))
20768+ copy_to_user_overflow();
20769+ else
20770+ __copy_to_user_overflow(sz, n);
20771+ } else if (access_ok(VERIFY_WRITE, to, n))
20772+ n = __copy_to_user(to, from, n);
20773
20774 return n;
20775 }
20776diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20777index 3c03a5d..1071638 100644
20778--- a/arch/x86/include/asm/uaccess_32.h
20779+++ b/arch/x86/include/asm/uaccess_32.h
20780@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20781 static __always_inline unsigned long __must_check
20782 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20783 {
20784+ if ((long)n < 0)
20785+ return n;
20786+
20787+ check_object_size(from, n, true);
20788+
20789 if (__builtin_constant_p(n)) {
20790 unsigned long ret;
20791
20792@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20793 __copy_to_user(void __user *to, const void *from, unsigned long n)
20794 {
20795 might_fault();
20796+
20797 return __copy_to_user_inatomic(to, from, n);
20798 }
20799
20800 static __always_inline unsigned long
20801 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20802 {
20803+ if ((long)n < 0)
20804+ return n;
20805+
20806 /* Avoid zeroing the tail if the copy fails..
20807 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20808 * but as the zeroing behaviour is only significant when n is not
20809@@ -137,6 +146,12 @@ static __always_inline unsigned long
20810 __copy_from_user(void *to, const void __user *from, unsigned long n)
20811 {
20812 might_fault();
20813+
20814+ if ((long)n < 0)
20815+ return n;
20816+
20817+ check_object_size(to, n, false);
20818+
20819 if (__builtin_constant_p(n)) {
20820 unsigned long ret;
20821
20822@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20823 const void __user *from, unsigned long n)
20824 {
20825 might_fault();
20826+
20827+ if ((long)n < 0)
20828+ return n;
20829+
20830 if (__builtin_constant_p(n)) {
20831 unsigned long ret;
20832
20833@@ -181,7 +200,10 @@ static __always_inline unsigned long
20834 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20835 unsigned long n)
20836 {
20837- return __copy_from_user_ll_nocache_nozero(to, from, n);
20838+ if ((long)n < 0)
20839+ return n;
20840+
20841+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20842 }
20843
20844 #endif /* _ASM_X86_UACCESS_32_H */
20845diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20846index 12a26b9..206c200 100644
20847--- a/arch/x86/include/asm/uaccess_64.h
20848+++ b/arch/x86/include/asm/uaccess_64.h
20849@@ -10,6 +10,9 @@
20850 #include <asm/alternative.h>
20851 #include <asm/cpufeature.h>
20852 #include <asm/page.h>
20853+#include <asm/pgtable.h>
20854+
20855+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20856
20857 /*
20858 * Copy To/From Userspace
20859@@ -17,14 +20,14 @@
20860
20861 /* Handles exceptions in both to and from, but doesn't do access_ok */
20862 __must_check unsigned long
20863-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
20864+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
20865 __must_check unsigned long
20866-copy_user_generic_string(void *to, const void *from, unsigned len);
20867+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
20868 __must_check unsigned long
20869-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20870+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
20871
20872 static __always_inline __must_check unsigned long
20873-copy_user_generic(void *to, const void *from, unsigned len)
20874+copy_user_generic(void *to, const void *from, unsigned long len)
20875 {
20876 unsigned ret;
20877
20878@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20879 }
20880
20881 __must_check unsigned long
20882-copy_in_user(void __user *to, const void __user *from, unsigned len);
20883+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20884
20885 static __always_inline __must_check
20886-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20887+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20888 {
20889- int ret = 0;
20890+ size_t sz = __compiletime_object_size(dst);
20891+ unsigned ret = 0;
20892+
20893+ if (size > INT_MAX)
20894+ return size;
20895+
20896+ check_object_size(dst, size, false);
20897+
20898+#ifdef CONFIG_PAX_MEMORY_UDEREF
20899+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20900+ return size;
20901+#endif
20902+
20903+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20904+ if(__builtin_constant_p(size))
20905+ copy_from_user_overflow();
20906+ else
20907+ __copy_from_user_overflow(sz, size);
20908+ return size;
20909+ }
20910
20911 if (!__builtin_constant_p(size))
20912- return copy_user_generic(dst, (__force void *)src, size);
20913+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20914 switch (size) {
20915- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20916+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20917 ret, "b", "b", "=q", 1);
20918 return ret;
20919- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20920+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20921 ret, "w", "w", "=r", 2);
20922 return ret;
20923- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20924+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20925 ret, "l", "k", "=r", 4);
20926 return ret;
20927- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20928+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20929 ret, "q", "", "=r", 8);
20930 return ret;
20931 case 10:
20932- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20933+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20934 ret, "q", "", "=r", 10);
20935 if (unlikely(ret))
20936 return ret;
20937 __get_user_asm(*(u16 *)(8 + (char *)dst),
20938- (u16 __user *)(8 + (char __user *)src),
20939+ (const u16 __user *)(8 + (const char __user *)src),
20940 ret, "w", "w", "=r", 2);
20941 return ret;
20942 case 16:
20943- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20944+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20945 ret, "q", "", "=r", 16);
20946 if (unlikely(ret))
20947 return ret;
20948 __get_user_asm(*(u64 *)(8 + (char *)dst),
20949- (u64 __user *)(8 + (char __user *)src),
20950+ (const u64 __user *)(8 + (const char __user *)src),
20951 ret, "q", "", "=r", 8);
20952 return ret;
20953 default:
20954- return copy_user_generic(dst, (__force void *)src, size);
20955+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20956 }
20957 }
20958
20959 static __always_inline __must_check
20960-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20961+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20962 {
20963 might_fault();
20964 return __copy_from_user_nocheck(dst, src, size);
20965 }
20966
20967 static __always_inline __must_check
20968-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20969+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20970 {
20971- int ret = 0;
20972+ size_t sz = __compiletime_object_size(src);
20973+ unsigned ret = 0;
20974+
20975+ if (size > INT_MAX)
20976+ return size;
20977+
20978+ check_object_size(src, size, true);
20979+
20980+#ifdef CONFIG_PAX_MEMORY_UDEREF
20981+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20982+ return size;
20983+#endif
20984+
20985+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20986+ if(__builtin_constant_p(size))
20987+ copy_to_user_overflow();
20988+ else
20989+ __copy_to_user_overflow(sz, size);
20990+ return size;
20991+ }
20992
20993 if (!__builtin_constant_p(size))
20994- return copy_user_generic((__force void *)dst, src, size);
20995+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
20996 switch (size) {
20997- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
20998+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
20999 ret, "b", "b", "iq", 1);
21000 return ret;
21001- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
21002+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
21003 ret, "w", "w", "ir", 2);
21004 return ret;
21005- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
21006+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
21007 ret, "l", "k", "ir", 4);
21008 return ret;
21009- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
21010+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21011 ret, "q", "", "er", 8);
21012 return ret;
21013 case 10:
21014- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
21015+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21016 ret, "q", "", "er", 10);
21017 if (unlikely(ret))
21018 return ret;
21019 asm("":::"memory");
21020- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
21021+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
21022 ret, "w", "w", "ir", 2);
21023 return ret;
21024 case 16:
21025- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
21026+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21027 ret, "q", "", "er", 16);
21028 if (unlikely(ret))
21029 return ret;
21030 asm("":::"memory");
21031- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
21032+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
21033 ret, "q", "", "er", 8);
21034 return ret;
21035 default:
21036- return copy_user_generic((__force void *)dst, src, size);
21037+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
21038 }
21039 }
21040
21041 static __always_inline __must_check
21042-int __copy_to_user(void __user *dst, const void *src, unsigned size)
21043+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
21044 {
21045 might_fault();
21046 return __copy_to_user_nocheck(dst, src, size);
21047 }
21048
21049 static __always_inline __must_check
21050-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21051+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21052 {
21053- int ret = 0;
21054+ unsigned ret = 0;
21055
21056 might_fault();
21057+
21058+ if (size > INT_MAX)
21059+ return size;
21060+
21061+#ifdef CONFIG_PAX_MEMORY_UDEREF
21062+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21063+ return size;
21064+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
21065+ return size;
21066+#endif
21067+
21068 if (!__builtin_constant_p(size))
21069- return copy_user_generic((__force void *)dst,
21070- (__force void *)src, size);
21071+ return copy_user_generic((__force_kernel void *)____m(dst),
21072+ (__force_kernel const void *)____m(src), size);
21073 switch (size) {
21074 case 1: {
21075 u8 tmp;
21076- __get_user_asm(tmp, (u8 __user *)src,
21077+ __get_user_asm(tmp, (const u8 __user *)src,
21078 ret, "b", "b", "=q", 1);
21079 if (likely(!ret))
21080 __put_user_asm(tmp, (u8 __user *)dst,
21081@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21082 }
21083 case 2: {
21084 u16 tmp;
21085- __get_user_asm(tmp, (u16 __user *)src,
21086+ __get_user_asm(tmp, (const u16 __user *)src,
21087 ret, "w", "w", "=r", 2);
21088 if (likely(!ret))
21089 __put_user_asm(tmp, (u16 __user *)dst,
21090@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21091
21092 case 4: {
21093 u32 tmp;
21094- __get_user_asm(tmp, (u32 __user *)src,
21095+ __get_user_asm(tmp, (const u32 __user *)src,
21096 ret, "l", "k", "=r", 4);
21097 if (likely(!ret))
21098 __put_user_asm(tmp, (u32 __user *)dst,
21099@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21100 }
21101 case 8: {
21102 u64 tmp;
21103- __get_user_asm(tmp, (u64 __user *)src,
21104+ __get_user_asm(tmp, (const u64 __user *)src,
21105 ret, "q", "", "=r", 8);
21106 if (likely(!ret))
21107 __put_user_asm(tmp, (u64 __user *)dst,
21108@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21109 return ret;
21110 }
21111 default:
21112- return copy_user_generic((__force void *)dst,
21113- (__force void *)src, size);
21114+ return copy_user_generic((__force_kernel void *)____m(dst),
21115+ (__force_kernel const void *)____m(src), size);
21116 }
21117 }
21118
21119-static __must_check __always_inline int
21120-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
21121+static __must_check __always_inline unsigned long
21122+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
21123 {
21124 return __copy_from_user_nocheck(dst, src, size);
21125 }
21126
21127-static __must_check __always_inline int
21128-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
21129+static __must_check __always_inline unsigned long
21130+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
21131 {
21132 return __copy_to_user_nocheck(dst, src, size);
21133 }
21134
21135-extern long __copy_user_nocache(void *dst, const void __user *src,
21136- unsigned size, int zerorest);
21137+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
21138+ unsigned long size, int zerorest);
21139
21140-static inline int
21141-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
21142+static inline unsigned long
21143+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
21144 {
21145 might_fault();
21146+
21147+ if (size > INT_MAX)
21148+ return size;
21149+
21150+#ifdef CONFIG_PAX_MEMORY_UDEREF
21151+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21152+ return size;
21153+#endif
21154+
21155 return __copy_user_nocache(dst, src, size, 1);
21156 }
21157
21158-static inline int
21159+static inline unsigned long
21160 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
21161- unsigned size)
21162+ unsigned long size)
21163 {
21164+ if (size > INT_MAX)
21165+ return size;
21166+
21167+#ifdef CONFIG_PAX_MEMORY_UDEREF
21168+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21169+ return size;
21170+#endif
21171+
21172 return __copy_user_nocache(dst, src, size, 0);
21173 }
21174
21175 unsigned long
21176-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
21177+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
21178
21179 #endif /* _ASM_X86_UACCESS_64_H */
21180diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
21181index 5b238981..77fdd78 100644
21182--- a/arch/x86/include/asm/word-at-a-time.h
21183+++ b/arch/x86/include/asm/word-at-a-time.h
21184@@ -11,7 +11,7 @@
21185 * and shift, for example.
21186 */
21187 struct word_at_a_time {
21188- const unsigned long one_bits, high_bits;
21189+ unsigned long one_bits, high_bits;
21190 };
21191
21192 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
21193diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
21194index e45e4da..44e8572 100644
21195--- a/arch/x86/include/asm/x86_init.h
21196+++ b/arch/x86/include/asm/x86_init.h
21197@@ -129,7 +129,7 @@ struct x86_init_ops {
21198 struct x86_init_timers timers;
21199 struct x86_init_iommu iommu;
21200 struct x86_init_pci pci;
21201-};
21202+} __no_const;
21203
21204 /**
21205 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
21206@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
21207 void (*setup_percpu_clockev)(void);
21208 void (*early_percpu_clock_init)(void);
21209 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
21210-};
21211+} __no_const;
21212
21213 struct timespec;
21214
21215@@ -168,7 +168,7 @@ struct x86_platform_ops {
21216 void (*save_sched_clock_state)(void);
21217 void (*restore_sched_clock_state)(void);
21218 void (*apic_post_init)(void);
21219-};
21220+} __no_const;
21221
21222 struct pci_dev;
21223 struct msi_msg;
21224@@ -185,7 +185,7 @@ struct x86_msi_ops {
21225 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
21226 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
21227 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
21228-};
21229+} __no_const;
21230
21231 struct IO_APIC_route_entry;
21232 struct io_apic_irq_attr;
21233@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
21234 unsigned int destination, int vector,
21235 struct io_apic_irq_attr *attr);
21236 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
21237-};
21238+} __no_const;
21239
21240 extern struct x86_init_ops x86_init;
21241 extern struct x86_cpuinit_ops x86_cpuinit;
21242diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
21243index c949923..c22bfa4 100644
21244--- a/arch/x86/include/asm/xen/page.h
21245+++ b/arch/x86/include/asm/xen/page.h
21246@@ -63,7 +63,7 @@ extern int m2p_remove_override(struct page *page,
21247 extern struct page *m2p_find_override(unsigned long mfn);
21248 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
21249
21250-static inline unsigned long pfn_to_mfn(unsigned long pfn)
21251+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
21252 {
21253 unsigned long mfn;
21254
21255diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
21256index 7e7a79a..0824666 100644
21257--- a/arch/x86/include/asm/xsave.h
21258+++ b/arch/x86/include/asm/xsave.h
21259@@ -228,12 +228,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
21260 if (unlikely(err))
21261 return -EFAULT;
21262
21263+ pax_open_userland();
21264 __asm__ __volatile__(ASM_STAC "\n"
21265- "1:"XSAVE"\n"
21266+ "1:"
21267+ __copyuser_seg
21268+ XSAVE"\n"
21269 "2: " ASM_CLAC "\n"
21270 xstate_fault
21271 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
21272 : "memory");
21273+ pax_close_userland();
21274 return err;
21275 }
21276
21277@@ -243,16 +247,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
21278 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
21279 {
21280 int err = 0;
21281- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
21282+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
21283 u32 lmask = mask;
21284 u32 hmask = mask >> 32;
21285
21286+ pax_open_userland();
21287 __asm__ __volatile__(ASM_STAC "\n"
21288- "1:"XRSTOR"\n"
21289+ "1:"
21290+ __copyuser_seg
21291+ XRSTOR"\n"
21292 "2: " ASM_CLAC "\n"
21293 xstate_fault
21294 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
21295 : "memory"); /* memory required? */
21296+ pax_close_userland();
21297 return err;
21298 }
21299
21300diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
21301index bbae024..e1528f9 100644
21302--- a/arch/x86/include/uapi/asm/e820.h
21303+++ b/arch/x86/include/uapi/asm/e820.h
21304@@ -63,7 +63,7 @@ struct e820map {
21305 #define ISA_START_ADDRESS 0xa0000
21306 #define ISA_END_ADDRESS 0x100000
21307
21308-#define BIOS_BEGIN 0x000a0000
21309+#define BIOS_BEGIN 0x000c0000
21310 #define BIOS_END 0x00100000
21311
21312 #define BIOS_ROM_BASE 0xffe00000
21313diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
21314index 7b0a55a..ad115bf 100644
21315--- a/arch/x86/include/uapi/asm/ptrace-abi.h
21316+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
21317@@ -49,7 +49,6 @@
21318 #define EFLAGS 144
21319 #define RSP 152
21320 #define SS 160
21321-#define ARGOFFSET R11
21322 #endif /* __ASSEMBLY__ */
21323
21324 /* top of stack page */
21325diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
21326index ada2e2d..ca69e16 100644
21327--- a/arch/x86/kernel/Makefile
21328+++ b/arch/x86/kernel/Makefile
21329@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
21330 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
21331 obj-$(CONFIG_IRQ_WORK) += irq_work.o
21332 obj-y += probe_roms.o
21333-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
21334+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
21335 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
21336 obj-$(CONFIG_X86_64) += mcount_64.o
21337 obj-y += syscall_$(BITS).o vsyscall_gtod.o
21338diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
21339index a142e77..6222cdd 100644
21340--- a/arch/x86/kernel/acpi/boot.c
21341+++ b/arch/x86/kernel/acpi/boot.c
21342@@ -1276,7 +1276,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
21343 * If your system is blacklisted here, but you find that acpi=force
21344 * works for you, please contact linux-acpi@vger.kernel.org
21345 */
21346-static struct dmi_system_id __initdata acpi_dmi_table[] = {
21347+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
21348 /*
21349 * Boxes that need ACPI disabled
21350 */
21351@@ -1351,7 +1351,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
21352 };
21353
21354 /* second table for DMI checks that should run after early-quirks */
21355-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
21356+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
21357 /*
21358 * HP laptops which use a DSDT reporting as HP/SB400/10000,
21359 * which includes some code which overrides all temperature
21360diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
21361index 3136820..e2c6577 100644
21362--- a/arch/x86/kernel/acpi/sleep.c
21363+++ b/arch/x86/kernel/acpi/sleep.c
21364@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
21365 #else /* CONFIG_64BIT */
21366 #ifdef CONFIG_SMP
21367 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
21368+
21369+ pax_open_kernel();
21370 early_gdt_descr.address =
21371 (unsigned long)get_cpu_gdt_table(smp_processor_id());
21372+ pax_close_kernel();
21373+
21374 initial_gs = per_cpu_offset(smp_processor_id());
21375 #endif
21376 initial_code = (unsigned long)wakeup_long64;
21377diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
21378index 665c6b7..eae4d56 100644
21379--- a/arch/x86/kernel/acpi/wakeup_32.S
21380+++ b/arch/x86/kernel/acpi/wakeup_32.S
21381@@ -29,13 +29,11 @@ wakeup_pmode_return:
21382 # and restore the stack ... but you need gdt for this to work
21383 movl saved_context_esp, %esp
21384
21385- movl %cs:saved_magic, %eax
21386- cmpl $0x12345678, %eax
21387+ cmpl $0x12345678, saved_magic
21388 jne bogus_magic
21389
21390 # jump to place where we left off
21391- movl saved_eip, %eax
21392- jmp *%eax
21393+ jmp *(saved_eip)
21394
21395 bogus_magic:
21396 jmp bogus_magic
21397diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
21398index 703130f..27a155d 100644
21399--- a/arch/x86/kernel/alternative.c
21400+++ b/arch/x86/kernel/alternative.c
21401@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
21402 */
21403 for (a = start; a < end; a++) {
21404 instr = (u8 *)&a->instr_offset + a->instr_offset;
21405+
21406+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21407+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21408+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
21409+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21410+#endif
21411+
21412 replacement = (u8 *)&a->repl_offset + a->repl_offset;
21413 BUG_ON(a->replacementlen > a->instrlen);
21414 BUG_ON(a->instrlen > sizeof(insnbuf));
21415@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
21416 add_nops(insnbuf + a->replacementlen,
21417 a->instrlen - a->replacementlen);
21418
21419+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21420+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
21421+ instr = ktva_ktla(instr);
21422+#endif
21423+
21424 text_poke_early(instr, insnbuf, a->instrlen);
21425 }
21426 }
21427@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
21428 for (poff = start; poff < end; poff++) {
21429 u8 *ptr = (u8 *)poff + *poff;
21430
21431+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21432+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21433+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
21434+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21435+#endif
21436+
21437 if (!*poff || ptr < text || ptr >= text_end)
21438 continue;
21439 /* turn DS segment override prefix into lock prefix */
21440- if (*ptr == 0x3e)
21441+ if (*ktla_ktva(ptr) == 0x3e)
21442 text_poke(ptr, ((unsigned char []){0xf0}), 1);
21443 }
21444 mutex_unlock(&text_mutex);
21445@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
21446 for (poff = start; poff < end; poff++) {
21447 u8 *ptr = (u8 *)poff + *poff;
21448
21449+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21450+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21451+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
21452+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21453+#endif
21454+
21455 if (!*poff || ptr < text || ptr >= text_end)
21456 continue;
21457 /* turn lock prefix into DS segment override prefix */
21458- if (*ptr == 0xf0)
21459+ if (*ktla_ktva(ptr) == 0xf0)
21460 text_poke(ptr, ((unsigned char []){0x3E}), 1);
21461 }
21462 mutex_unlock(&text_mutex);
21463@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
21464
21465 BUG_ON(p->len > MAX_PATCH_LEN);
21466 /* prep the buffer with the original instructions */
21467- memcpy(insnbuf, p->instr, p->len);
21468+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
21469 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
21470 (unsigned long)p->instr, p->len);
21471
21472@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
21473 if (!uniproc_patched || num_possible_cpus() == 1)
21474 free_init_pages("SMP alternatives",
21475 (unsigned long)__smp_locks,
21476- (unsigned long)__smp_locks_end);
21477+ PAGE_ALIGN((unsigned long)__smp_locks_end));
21478 #endif
21479
21480 apply_paravirt(__parainstructions, __parainstructions_end);
21481@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
21482 * instructions. And on the local CPU you need to be protected again NMI or MCE
21483 * handlers seeing an inconsistent instruction while you patch.
21484 */
21485-void *__init_or_module text_poke_early(void *addr, const void *opcode,
21486+void *__kprobes text_poke_early(void *addr, const void *opcode,
21487 size_t len)
21488 {
21489 unsigned long flags;
21490 local_irq_save(flags);
21491- memcpy(addr, opcode, len);
21492+
21493+ pax_open_kernel();
21494+ memcpy(ktla_ktva(addr), opcode, len);
21495 sync_core();
21496+ pax_close_kernel();
21497+
21498 local_irq_restore(flags);
21499 /* Could also do a CLFLUSH here to speed up CPU recovery; but
21500 that causes hangs on some VIA CPUs. */
21501@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
21502 */
21503 void *text_poke(void *addr, const void *opcode, size_t len)
21504 {
21505- unsigned long flags;
21506- char *vaddr;
21507+ unsigned char *vaddr = ktla_ktva(addr);
21508 struct page *pages[2];
21509- int i;
21510+ size_t i;
21511
21512 if (!core_kernel_text((unsigned long)addr)) {
21513- pages[0] = vmalloc_to_page(addr);
21514- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
21515+ pages[0] = vmalloc_to_page(vaddr);
21516+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
21517 } else {
21518- pages[0] = virt_to_page(addr);
21519+ pages[0] = virt_to_page(vaddr);
21520 WARN_ON(!PageReserved(pages[0]));
21521- pages[1] = virt_to_page(addr + PAGE_SIZE);
21522+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
21523 }
21524 BUG_ON(!pages[0]);
21525- local_irq_save(flags);
21526- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
21527- if (pages[1])
21528- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
21529- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
21530- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
21531- clear_fixmap(FIX_TEXT_POKE0);
21532- if (pages[1])
21533- clear_fixmap(FIX_TEXT_POKE1);
21534- local_flush_tlb();
21535- sync_core();
21536- /* Could also do a CLFLUSH here to speed up CPU recovery; but
21537- that causes hangs on some VIA CPUs. */
21538+ text_poke_early(addr, opcode, len);
21539 for (i = 0; i < len; i++)
21540- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
21541- local_irq_restore(flags);
21542+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
21543 return addr;
21544 }
21545
21546@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
21547 if (likely(!bp_patching_in_progress))
21548 return 0;
21549
21550- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
21551+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
21552 return 0;
21553
21554 /* set up the specified breakpoint handler */
21555@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
21556 */
21557 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
21558 {
21559- unsigned char int3 = 0xcc;
21560+ const unsigned char int3 = 0xcc;
21561
21562 bp_int3_handler = handler;
21563 bp_int3_addr = (u8 *)addr + sizeof(int3);
21564diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
21565index 24b5894..6d9701b 100644
21566--- a/arch/x86/kernel/apic/apic.c
21567+++ b/arch/x86/kernel/apic/apic.c
21568@@ -201,7 +201,7 @@ int first_system_vector = 0xfe;
21569 /*
21570 * Debug level, exported for io_apic.c
21571 */
21572-unsigned int apic_verbosity;
21573+int apic_verbosity;
21574
21575 int pic_mode;
21576
21577@@ -1989,7 +1989,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
21578 apic_write(APIC_ESR, 0);
21579 v = apic_read(APIC_ESR);
21580 ack_APIC_irq();
21581- atomic_inc(&irq_err_count);
21582+ atomic_inc_unchecked(&irq_err_count);
21583
21584 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
21585 smp_processor_id(), v);
21586diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
21587index de918c4..32eed23 100644
21588--- a/arch/x86/kernel/apic/apic_flat_64.c
21589+++ b/arch/x86/kernel/apic/apic_flat_64.c
21590@@ -154,7 +154,7 @@ static int flat_probe(void)
21591 return 1;
21592 }
21593
21594-static struct apic apic_flat = {
21595+static struct apic apic_flat __read_only = {
21596 .name = "flat",
21597 .probe = flat_probe,
21598 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
21599@@ -260,7 +260,7 @@ static int physflat_probe(void)
21600 return 0;
21601 }
21602
21603-static struct apic apic_physflat = {
21604+static struct apic apic_physflat __read_only = {
21605
21606 .name = "physical flat",
21607 .probe = physflat_probe,
21608diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
21609index b205cdb..d8503ff 100644
21610--- a/arch/x86/kernel/apic/apic_noop.c
21611+++ b/arch/x86/kernel/apic/apic_noop.c
21612@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
21613 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
21614 }
21615
21616-struct apic apic_noop = {
21617+struct apic apic_noop __read_only = {
21618 .name = "noop",
21619 .probe = noop_probe,
21620 .acpi_madt_oem_check = NULL,
21621diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
21622index c4a8d63..fe893ac 100644
21623--- a/arch/x86/kernel/apic/bigsmp_32.c
21624+++ b/arch/x86/kernel/apic/bigsmp_32.c
21625@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
21626 return dmi_bigsmp;
21627 }
21628
21629-static struct apic apic_bigsmp = {
21630+static struct apic apic_bigsmp __read_only = {
21631
21632 .name = "bigsmp",
21633 .probe = probe_bigsmp,
21634diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
21635index 337ce5a..c8d98b4 100644
21636--- a/arch/x86/kernel/apic/io_apic.c
21637+++ b/arch/x86/kernel/apic/io_apic.c
21638@@ -1230,7 +1230,7 @@ out:
21639 }
21640 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
21641
21642-void lock_vector_lock(void)
21643+void lock_vector_lock(void) __acquires(vector_lock)
21644 {
21645 /* Used to the online set of cpus does not change
21646 * during assign_irq_vector.
21647@@ -1238,7 +1238,7 @@ void lock_vector_lock(void)
21648 raw_spin_lock(&vector_lock);
21649 }
21650
21651-void unlock_vector_lock(void)
21652+void unlock_vector_lock(void) __releases(vector_lock)
21653 {
21654 raw_spin_unlock(&vector_lock);
21655 }
21656@@ -2465,7 +2465,7 @@ static void ack_apic_edge(struct irq_data *data)
21657 ack_APIC_irq();
21658 }
21659
21660-atomic_t irq_mis_count;
21661+atomic_unchecked_t irq_mis_count;
21662
21663 #ifdef CONFIG_GENERIC_PENDING_IRQ
21664 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
21665@@ -2606,7 +2606,7 @@ static void ack_apic_level(struct irq_data *data)
21666 * at the cpu.
21667 */
21668 if (!(v & (1 << (i & 0x1f)))) {
21669- atomic_inc(&irq_mis_count);
21670+ atomic_inc_unchecked(&irq_mis_count);
21671
21672 eoi_ioapic_irq(irq, cfg);
21673 }
21674diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
21675index bda4886..f9c7195 100644
21676--- a/arch/x86/kernel/apic/probe_32.c
21677+++ b/arch/x86/kernel/apic/probe_32.c
21678@@ -72,7 +72,7 @@ static int probe_default(void)
21679 return 1;
21680 }
21681
21682-static struct apic apic_default = {
21683+static struct apic apic_default __read_only = {
21684
21685 .name = "default",
21686 .probe = probe_default,
21687diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21688index 6ce600f..cb44af8 100644
21689--- a/arch/x86/kernel/apic/x2apic_cluster.c
21690+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21691@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21692 return notifier_from_errno(err);
21693 }
21694
21695-static struct notifier_block __refdata x2apic_cpu_notifier = {
21696+static struct notifier_block x2apic_cpu_notifier = {
21697 .notifier_call = update_clusterinfo,
21698 };
21699
21700@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21701 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21702 }
21703
21704-static struct apic apic_x2apic_cluster = {
21705+static struct apic apic_x2apic_cluster __read_only = {
21706
21707 .name = "cluster x2apic",
21708 .probe = x2apic_cluster_probe,
21709diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21710index 6fae733..5ca17af 100644
21711--- a/arch/x86/kernel/apic/x2apic_phys.c
21712+++ b/arch/x86/kernel/apic/x2apic_phys.c
21713@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21714 return apic == &apic_x2apic_phys;
21715 }
21716
21717-static struct apic apic_x2apic_phys = {
21718+static struct apic apic_x2apic_phys __read_only = {
21719
21720 .name = "physical x2apic",
21721 .probe = x2apic_phys_probe,
21722diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21723index 004f017..8fbc8b5 100644
21724--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21725+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21726@@ -350,7 +350,7 @@ static int uv_probe(void)
21727 return apic == &apic_x2apic_uv_x;
21728 }
21729
21730-static struct apic __refdata apic_x2apic_uv_x = {
21731+static struct apic apic_x2apic_uv_x __read_only = {
21732
21733 .name = "UV large system",
21734 .probe = uv_probe,
21735diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21736index 5848744..56cb598 100644
21737--- a/arch/x86/kernel/apm_32.c
21738+++ b/arch/x86/kernel/apm_32.c
21739@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
21740 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21741 * even though they are called in protected mode.
21742 */
21743-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21744+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21745 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21746
21747 static const char driver_version[] = "1.16ac"; /* no spaces */
21748@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
21749 BUG_ON(cpu != 0);
21750 gdt = get_cpu_gdt_table(cpu);
21751 save_desc_40 = gdt[0x40 / 8];
21752+
21753+ pax_open_kernel();
21754 gdt[0x40 / 8] = bad_bios_desc;
21755+ pax_close_kernel();
21756
21757 apm_irq_save(flags);
21758 APM_DO_SAVE_SEGS;
21759@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
21760 &call->esi);
21761 APM_DO_RESTORE_SEGS;
21762 apm_irq_restore(flags);
21763+
21764+ pax_open_kernel();
21765 gdt[0x40 / 8] = save_desc_40;
21766+ pax_close_kernel();
21767+
21768 put_cpu();
21769
21770 return call->eax & 0xff;
21771@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
21772 BUG_ON(cpu != 0);
21773 gdt = get_cpu_gdt_table(cpu);
21774 save_desc_40 = gdt[0x40 / 8];
21775+
21776+ pax_open_kernel();
21777 gdt[0x40 / 8] = bad_bios_desc;
21778+ pax_close_kernel();
21779
21780 apm_irq_save(flags);
21781 APM_DO_SAVE_SEGS;
21782@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
21783 &call->eax);
21784 APM_DO_RESTORE_SEGS;
21785 apm_irq_restore(flags);
21786+
21787+ pax_open_kernel();
21788 gdt[0x40 / 8] = save_desc_40;
21789+ pax_close_kernel();
21790+
21791 put_cpu();
21792 return error;
21793 }
21794@@ -2350,12 +2364,15 @@ static int __init apm_init(void)
21795 * code to that CPU.
21796 */
21797 gdt = get_cpu_gdt_table(0);
21798+
21799+ pax_open_kernel();
21800 set_desc_base(&gdt[APM_CS >> 3],
21801 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21802 set_desc_base(&gdt[APM_CS_16 >> 3],
21803 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21804 set_desc_base(&gdt[APM_DS >> 3],
21805 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21806+ pax_close_kernel();
21807
21808 proc_create("apm", 0, NULL, &apm_file_ops);
21809
21810diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21811index 9f6b934..cf5ffb3 100644
21812--- a/arch/x86/kernel/asm-offsets.c
21813+++ b/arch/x86/kernel/asm-offsets.c
21814@@ -32,6 +32,8 @@ void common(void) {
21815 OFFSET(TI_flags, thread_info, flags);
21816 OFFSET(TI_status, thread_info, status);
21817 OFFSET(TI_addr_limit, thread_info, addr_limit);
21818+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21819+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21820
21821 BLANK();
21822 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21823@@ -52,8 +54,26 @@ void common(void) {
21824 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21825 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21826 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21827+
21828+#ifdef CONFIG_PAX_KERNEXEC
21829+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21830 #endif
21831
21832+#ifdef CONFIG_PAX_MEMORY_UDEREF
21833+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21834+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21835+#ifdef CONFIG_X86_64
21836+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21837+#endif
21838+#endif
21839+
21840+#endif
21841+
21842+ BLANK();
21843+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21844+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21845+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21846+
21847 #ifdef CONFIG_XEN
21848 BLANK();
21849 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21850diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21851index e7c798b..2b2019b 100644
21852--- a/arch/x86/kernel/asm-offsets_64.c
21853+++ b/arch/x86/kernel/asm-offsets_64.c
21854@@ -77,6 +77,7 @@ int main(void)
21855 BLANK();
21856 #undef ENTRY
21857
21858+ DEFINE(TSS_size, sizeof(struct tss_struct));
21859 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21860 BLANK();
21861
21862diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21863index 7fd54f0..0691410 100644
21864--- a/arch/x86/kernel/cpu/Makefile
21865+++ b/arch/x86/kernel/cpu/Makefile
21866@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21867 CFLAGS_REMOVE_perf_event.o = -pg
21868 endif
21869
21870-# Make sure load_percpu_segment has no stackprotector
21871-nostackp := $(call cc-option, -fno-stack-protector)
21872-CFLAGS_common.o := $(nostackp)
21873-
21874 obj-y := intel_cacheinfo.o scattered.o topology.o
21875 obj-y += proc.o capflags.o powerflags.o common.o
21876 obj-y += rdrand.o
21877diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21878index 813d29d..6e542d4 100644
21879--- a/arch/x86/kernel/cpu/amd.c
21880+++ b/arch/x86/kernel/cpu/amd.c
21881@@ -718,7 +718,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21882 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21883 {
21884 /* AMD errata T13 (order #21922) */
21885- if ((c->x86 == 6)) {
21886+ if (c->x86 == 6) {
21887 /* Duron Rev A0 */
21888 if (c->x86_model == 3 && c->x86_mask == 0)
21889 size = 64;
21890diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21891index 35db56b..256e87c 100644
21892--- a/arch/x86/kernel/cpu/common.c
21893+++ b/arch/x86/kernel/cpu/common.c
21894@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21895
21896 static const struct cpu_dev *this_cpu = &default_cpu;
21897
21898-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21899-#ifdef CONFIG_X86_64
21900- /*
21901- * We need valid kernel segments for data and code in long mode too
21902- * IRET will check the segment types kkeil 2000/10/28
21903- * Also sysret mandates a special GDT layout
21904- *
21905- * TLS descriptors are currently at a different place compared to i386.
21906- * Hopefully nobody expects them at a fixed place (Wine?)
21907- */
21908- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21909- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21910- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21911- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21912- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21913- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21914-#else
21915- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21916- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21917- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21918- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21919- /*
21920- * Segments used for calling PnP BIOS have byte granularity.
21921- * They code segments and data segments have fixed 64k limits,
21922- * the transfer segment sizes are set at run time.
21923- */
21924- /* 32-bit code */
21925- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21926- /* 16-bit code */
21927- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21928- /* 16-bit data */
21929- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21930- /* 16-bit data */
21931- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21932- /* 16-bit data */
21933- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21934- /*
21935- * The APM segments have byte granularity and their bases
21936- * are set at run time. All have 64k limits.
21937- */
21938- /* 32-bit code */
21939- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21940- /* 16-bit code */
21941- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21942- /* data */
21943- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21944-
21945- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21946- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21947- GDT_STACK_CANARY_INIT
21948-#endif
21949-} };
21950-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21951-
21952 static int __init x86_xsave_setup(char *s)
21953 {
21954 if (strlen(s))
21955@@ -305,6 +251,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21956 }
21957 }
21958
21959+#ifdef CONFIG_X86_64
21960+static __init int setup_disable_pcid(char *arg)
21961+{
21962+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21963+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21964+
21965+#ifdef CONFIG_PAX_MEMORY_UDEREF
21966+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21967+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21968+#endif
21969+
21970+ return 1;
21971+}
21972+__setup("nopcid", setup_disable_pcid);
21973+
21974+static void setup_pcid(struct cpuinfo_x86 *c)
21975+{
21976+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21977+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21978+
21979+#ifdef CONFIG_PAX_MEMORY_UDEREF
21980+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21981+ pax_open_kernel();
21982+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21983+ pax_close_kernel();
21984+ printk("PAX: slow and weak UDEREF enabled\n");
21985+ } else
21986+ printk("PAX: UDEREF disabled\n");
21987+#endif
21988+
21989+ return;
21990+ }
21991+
21992+ printk("PAX: PCID detected\n");
21993+ set_in_cr4(X86_CR4_PCIDE);
21994+
21995+#ifdef CONFIG_PAX_MEMORY_UDEREF
21996+ pax_open_kernel();
21997+ clone_pgd_mask = ~(pgdval_t)0UL;
21998+ pax_close_kernel();
21999+ if (pax_user_shadow_base)
22000+ printk("PAX: weak UDEREF enabled\n");
22001+ else {
22002+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
22003+ printk("PAX: strong UDEREF enabled\n");
22004+ }
22005+#endif
22006+
22007+ if (cpu_has(c, X86_FEATURE_INVPCID))
22008+ printk("PAX: INVPCID detected\n");
22009+}
22010+#endif
22011+
22012 /*
22013 * Some CPU features depend on higher CPUID levels, which may not always
22014 * be available due to CPUID level capping or broken virtualization
22015@@ -405,7 +404,7 @@ void switch_to_new_gdt(int cpu)
22016 {
22017 struct desc_ptr gdt_descr;
22018
22019- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
22020+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22021 gdt_descr.size = GDT_SIZE - 1;
22022 load_gdt(&gdt_descr);
22023 /* Reload the per-cpu base */
22024@@ -895,6 +894,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
22025 setup_smep(c);
22026 setup_smap(c);
22027
22028+#ifdef CONFIG_X86_64
22029+ setup_pcid(c);
22030+#endif
22031+
22032 /*
22033 * The vendor-specific functions might have changed features.
22034 * Now we do "generic changes."
22035@@ -903,6 +906,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
22036 /* Filter out anything that depends on CPUID levels we don't have */
22037 filter_cpuid_features(c, true);
22038
22039+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22040+ setup_clear_cpu_cap(X86_FEATURE_SEP);
22041+#endif
22042+
22043 /* If the model name is still unset, do table lookup. */
22044 if (!c->x86_model_id[0]) {
22045 const char *p;
22046@@ -983,7 +990,7 @@ static void syscall32_cpu_init(void)
22047 void enable_sep_cpu(void)
22048 {
22049 int cpu = get_cpu();
22050- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22051+ struct tss_struct *tss = init_tss + cpu;
22052
22053 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22054 put_cpu();
22055@@ -1123,14 +1130,16 @@ static __init int setup_disablecpuid(char *arg)
22056 }
22057 __setup("clearcpuid=", setup_disablecpuid);
22058
22059+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
22060+EXPORT_PER_CPU_SYMBOL(current_tinfo);
22061+
22062 DEFINE_PER_CPU(unsigned long, kernel_stack) =
22063- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
22064+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
22065 EXPORT_PER_CPU_SYMBOL(kernel_stack);
22066
22067 #ifdef CONFIG_X86_64
22068-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
22069-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
22070- (unsigned long) debug_idt_table };
22071+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
22072+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
22073
22074 DEFINE_PER_CPU_FIRST(union irq_stack_union,
22075 irq_stack_union) __aligned(PAGE_SIZE) __visible;
22076@@ -1293,7 +1302,7 @@ void cpu_init(void)
22077 load_ucode_ap();
22078
22079 cpu = stack_smp_processor_id();
22080- t = &per_cpu(init_tss, cpu);
22081+ t = init_tss + cpu;
22082 oist = &per_cpu(orig_ist, cpu);
22083
22084 #ifdef CONFIG_NUMA
22085@@ -1328,7 +1337,6 @@ void cpu_init(void)
22086 wrmsrl(MSR_KERNEL_GS_BASE, 0);
22087 barrier();
22088
22089- x86_configure_nx();
22090 enable_x2apic();
22091
22092 /*
22093@@ -1380,7 +1388,7 @@ void cpu_init(void)
22094 {
22095 int cpu = smp_processor_id();
22096 struct task_struct *curr = current;
22097- struct tss_struct *t = &per_cpu(init_tss, cpu);
22098+ struct tss_struct *t = init_tss + cpu;
22099 struct thread_struct *thread = &curr->thread;
22100
22101 show_ucode_info_early();
22102diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
22103index c703507..28535e3 100644
22104--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
22105+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
22106@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
22107 };
22108
22109 #ifdef CONFIG_AMD_NB
22110+static struct attribute *default_attrs_amd_nb[] = {
22111+ &type.attr,
22112+ &level.attr,
22113+ &coherency_line_size.attr,
22114+ &physical_line_partition.attr,
22115+ &ways_of_associativity.attr,
22116+ &number_of_sets.attr,
22117+ &size.attr,
22118+ &shared_cpu_map.attr,
22119+ &shared_cpu_list.attr,
22120+ NULL,
22121+ NULL,
22122+ NULL,
22123+ NULL
22124+};
22125+
22126 static struct attribute **amd_l3_attrs(void)
22127 {
22128 static struct attribute **attrs;
22129@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
22130
22131 n = ARRAY_SIZE(default_attrs);
22132
22133- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
22134- n += 2;
22135-
22136- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
22137- n += 1;
22138-
22139- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
22140- if (attrs == NULL)
22141- return attrs = default_attrs;
22142-
22143- for (n = 0; default_attrs[n]; n++)
22144- attrs[n] = default_attrs[n];
22145+ attrs = default_attrs_amd_nb;
22146
22147 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
22148 attrs[n++] = &cache_disable_0.attr;
22149@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
22150 .default_attrs = default_attrs,
22151 };
22152
22153+#ifdef CONFIG_AMD_NB
22154+static struct kobj_type ktype_cache_amd_nb = {
22155+ .sysfs_ops = &sysfs_ops,
22156+ .default_attrs = default_attrs_amd_nb,
22157+};
22158+#endif
22159+
22160 static struct kobj_type ktype_percpu_entry = {
22161 .sysfs_ops = &sysfs_ops,
22162 };
22163@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
22164 return retval;
22165 }
22166
22167+#ifdef CONFIG_AMD_NB
22168+ amd_l3_attrs();
22169+#endif
22170+
22171 for (i = 0; i < num_cache_leaves; i++) {
22172+ struct kobj_type *ktype;
22173+
22174 this_object = INDEX_KOBJECT_PTR(cpu, i);
22175 this_object->cpu = cpu;
22176 this_object->index = i;
22177
22178 this_leaf = CPUID4_INFO_IDX(cpu, i);
22179
22180- ktype_cache.default_attrs = default_attrs;
22181+ ktype = &ktype_cache;
22182 #ifdef CONFIG_AMD_NB
22183 if (this_leaf->base.nb)
22184- ktype_cache.default_attrs = amd_l3_attrs();
22185+ ktype = &ktype_cache_amd_nb;
22186 #endif
22187 retval = kobject_init_and_add(&(this_object->kobj),
22188- &ktype_cache,
22189+ ktype,
22190 per_cpu(ici_cache_kobject, cpu),
22191 "index%1lu", i);
22192 if (unlikely(retval)) {
22193diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
22194index bd9ccda..38314e7 100644
22195--- a/arch/x86/kernel/cpu/mcheck/mce.c
22196+++ b/arch/x86/kernel/cpu/mcheck/mce.c
22197@@ -45,6 +45,7 @@
22198 #include <asm/processor.h>
22199 #include <asm/mce.h>
22200 #include <asm/msr.h>
22201+#include <asm/local.h>
22202
22203 #include "mce-internal.h"
22204
22205@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
22206 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
22207 m->cs, m->ip);
22208
22209- if (m->cs == __KERNEL_CS)
22210+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
22211 print_symbol("{%s}", m->ip);
22212 pr_cont("\n");
22213 }
22214@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
22215
22216 #define PANIC_TIMEOUT 5 /* 5 seconds */
22217
22218-static atomic_t mce_paniced;
22219+static atomic_unchecked_t mce_paniced;
22220
22221 static int fake_panic;
22222-static atomic_t mce_fake_paniced;
22223+static atomic_unchecked_t mce_fake_paniced;
22224
22225 /* Panic in progress. Enable interrupts and wait for final IPI */
22226 static void wait_for_panic(void)
22227@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22228 /*
22229 * Make sure only one CPU runs in machine check panic
22230 */
22231- if (atomic_inc_return(&mce_paniced) > 1)
22232+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
22233 wait_for_panic();
22234 barrier();
22235
22236@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22237 console_verbose();
22238 } else {
22239 /* Don't log too much for fake panic */
22240- if (atomic_inc_return(&mce_fake_paniced) > 1)
22241+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
22242 return;
22243 }
22244 /* First print corrected ones that are still unlogged */
22245@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22246 if (!fake_panic) {
22247 if (panic_timeout == 0)
22248 panic_timeout = mca_cfg.panic_timeout;
22249- panic(msg);
22250+ panic("%s", msg);
22251 } else
22252 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
22253 }
22254@@ -697,7 +698,7 @@ static int mce_timed_out(u64 *t)
22255 * might have been modified by someone else.
22256 */
22257 rmb();
22258- if (atomic_read(&mce_paniced))
22259+ if (atomic_read_unchecked(&mce_paniced))
22260 wait_for_panic();
22261 if (!mca_cfg.monarch_timeout)
22262 goto out;
22263@@ -1674,7 +1675,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
22264 }
22265
22266 /* Call the installed machine check handler for this CPU setup. */
22267-void (*machine_check_vector)(struct pt_regs *, long error_code) =
22268+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
22269 unexpected_machine_check;
22270
22271 /*
22272@@ -1697,7 +1698,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
22273 return;
22274 }
22275
22276+ pax_open_kernel();
22277 machine_check_vector = do_machine_check;
22278+ pax_close_kernel();
22279
22280 __mcheck_cpu_init_generic();
22281 __mcheck_cpu_init_vendor(c);
22282@@ -1711,7 +1714,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
22283 */
22284
22285 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
22286-static int mce_chrdev_open_count; /* #times opened */
22287+static local_t mce_chrdev_open_count; /* #times opened */
22288 static int mce_chrdev_open_exclu; /* already open exclusive? */
22289
22290 static int mce_chrdev_open(struct inode *inode, struct file *file)
22291@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
22292 spin_lock(&mce_chrdev_state_lock);
22293
22294 if (mce_chrdev_open_exclu ||
22295- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
22296+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
22297 spin_unlock(&mce_chrdev_state_lock);
22298
22299 return -EBUSY;
22300@@ -1727,7 +1730,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
22301
22302 if (file->f_flags & O_EXCL)
22303 mce_chrdev_open_exclu = 1;
22304- mce_chrdev_open_count++;
22305+ local_inc(&mce_chrdev_open_count);
22306
22307 spin_unlock(&mce_chrdev_state_lock);
22308
22309@@ -1738,7 +1741,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
22310 {
22311 spin_lock(&mce_chrdev_state_lock);
22312
22313- mce_chrdev_open_count--;
22314+ local_dec(&mce_chrdev_open_count);
22315 mce_chrdev_open_exclu = 0;
22316
22317 spin_unlock(&mce_chrdev_state_lock);
22318@@ -2413,7 +2416,7 @@ static __init void mce_init_banks(void)
22319
22320 for (i = 0; i < mca_cfg.banks; i++) {
22321 struct mce_bank *b = &mce_banks[i];
22322- struct device_attribute *a = &b->attr;
22323+ device_attribute_no_const *a = &b->attr;
22324
22325 sysfs_attr_init(&a->attr);
22326 a->attr.name = b->attrname;
22327@@ -2520,7 +2523,7 @@ struct dentry *mce_get_debugfs_dir(void)
22328 static void mce_reset(void)
22329 {
22330 cpu_missing = 0;
22331- atomic_set(&mce_fake_paniced, 0);
22332+ atomic_set_unchecked(&mce_fake_paniced, 0);
22333 atomic_set(&mce_executing, 0);
22334 atomic_set(&mce_callin, 0);
22335 atomic_set(&global_nwo, 0);
22336diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
22337index a304298..49b6d06 100644
22338--- a/arch/x86/kernel/cpu/mcheck/p5.c
22339+++ b/arch/x86/kernel/cpu/mcheck/p5.c
22340@@ -10,6 +10,7 @@
22341 #include <asm/processor.h>
22342 #include <asm/mce.h>
22343 #include <asm/msr.h>
22344+#include <asm/pgtable.h>
22345
22346 /* By default disabled */
22347 int mce_p5_enabled __read_mostly;
22348@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
22349 if (!cpu_has(c, X86_FEATURE_MCE))
22350 return;
22351
22352+ pax_open_kernel();
22353 machine_check_vector = pentium_machine_check;
22354+ pax_close_kernel();
22355 /* Make sure the vector pointer is visible before we enable MCEs: */
22356 wmb();
22357
22358diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
22359index 7dc5564..1273569 100644
22360--- a/arch/x86/kernel/cpu/mcheck/winchip.c
22361+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
22362@@ -9,6 +9,7 @@
22363 #include <asm/processor.h>
22364 #include <asm/mce.h>
22365 #include <asm/msr.h>
22366+#include <asm/pgtable.h>
22367
22368 /* Machine check handler for WinChip C6: */
22369 static void winchip_machine_check(struct pt_regs *regs, long error_code)
22370@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
22371 {
22372 u32 lo, hi;
22373
22374+ pax_open_kernel();
22375 machine_check_vector = winchip_machine_check;
22376+ pax_close_kernel();
22377 /* Make sure the vector pointer is visible before we enable MCEs: */
22378 wmb();
22379
22380diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
22381index dd9d619..86e1d81 100644
22382--- a/arch/x86/kernel/cpu/microcode/core.c
22383+++ b/arch/x86/kernel/cpu/microcode/core.c
22384@@ -516,7 +516,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
22385 return NOTIFY_OK;
22386 }
22387
22388-static struct notifier_block __refdata mc_cpu_notifier = {
22389+static struct notifier_block mc_cpu_notifier = {
22390 .notifier_call = mc_cpu_callback,
22391 };
22392
22393diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
22394index a276fa7..e66810f 100644
22395--- a/arch/x86/kernel/cpu/microcode/intel.c
22396+++ b/arch/x86/kernel/cpu/microcode/intel.c
22397@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
22398
22399 static int get_ucode_user(void *to, const void *from, size_t n)
22400 {
22401- return copy_from_user(to, from, n);
22402+ return copy_from_user(to, (const void __force_user *)from, n);
22403 }
22404
22405 static enum ucode_state
22406 request_microcode_user(int cpu, const void __user *buf, size_t size)
22407 {
22408- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
22409+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
22410 }
22411
22412 static void microcode_fini_cpu(int cpu)
22413diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
22414index f961de9..8a9d332 100644
22415--- a/arch/x86/kernel/cpu/mtrr/main.c
22416+++ b/arch/x86/kernel/cpu/mtrr/main.c
22417@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
22418 u64 size_or_mask, size_and_mask;
22419 static bool mtrr_aps_delayed_init;
22420
22421-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
22422+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
22423
22424 const struct mtrr_ops *mtrr_if;
22425
22426diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
22427index df5e41f..816c719 100644
22428--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
22429+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
22430@@ -25,7 +25,7 @@ struct mtrr_ops {
22431 int (*validate_add_page)(unsigned long base, unsigned long size,
22432 unsigned int type);
22433 int (*have_wrcomb)(void);
22434-};
22435+} __do_const;
22436
22437 extern int generic_get_free_region(unsigned long base, unsigned long size,
22438 int replace_reg);
22439diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
22440index 2879ecd..bb8c80b 100644
22441--- a/arch/x86/kernel/cpu/perf_event.c
22442+++ b/arch/x86/kernel/cpu/perf_event.c
22443@@ -1372,7 +1372,7 @@ static void __init pmu_check_apic(void)
22444
22445 }
22446
22447-static struct attribute_group x86_pmu_format_group = {
22448+static attribute_group_no_const x86_pmu_format_group = {
22449 .name = "format",
22450 .attrs = NULL,
22451 };
22452@@ -1471,7 +1471,7 @@ static struct attribute *events_attr[] = {
22453 NULL,
22454 };
22455
22456-static struct attribute_group x86_pmu_events_group = {
22457+static attribute_group_no_const x86_pmu_events_group = {
22458 .name = "events",
22459 .attrs = events_attr,
22460 };
22461@@ -1995,7 +1995,7 @@ static unsigned long get_segment_base(unsigned int segment)
22462 if (idx > GDT_ENTRIES)
22463 return 0;
22464
22465- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
22466+ desc = get_cpu_gdt_table(smp_processor_id());
22467 }
22468
22469 return get_desc_base(desc + idx);
22470@@ -2085,7 +2085,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
22471 break;
22472
22473 perf_callchain_store(entry, frame.return_address);
22474- fp = frame.next_frame;
22475+ fp = (const void __force_user *)frame.next_frame;
22476 }
22477 }
22478
22479diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22480index 639d128..e92d7e5 100644
22481--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22482+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22483@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
22484 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
22485 {
22486 struct attribute **attrs;
22487- struct attribute_group *attr_group;
22488+ attribute_group_no_const *attr_group;
22489 int i = 0, j;
22490
22491 while (amd_iommu_v2_event_descs[i].attr.attr.name)
22492diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
22493index 2502d0d..e5cc05c 100644
22494--- a/arch/x86/kernel/cpu/perf_event_intel.c
22495+++ b/arch/x86/kernel/cpu/perf_event_intel.c
22496@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
22497 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
22498
22499 if (boot_cpu_has(X86_FEATURE_PDCM)) {
22500- u64 capabilities;
22501+ u64 capabilities = x86_pmu.intel_cap.capabilities;
22502
22503- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
22504- x86_pmu.intel_cap.capabilities = capabilities;
22505+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
22506+ x86_pmu.intel_cap.capabilities = capabilities;
22507 }
22508
22509 intel_ds_init();
22510diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22511index 619f769..d510008 100644
22512--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22513+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22514@@ -449,7 +449,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
22515 NULL,
22516 };
22517
22518-static struct attribute_group rapl_pmu_events_group = {
22519+static attribute_group_no_const rapl_pmu_events_group __read_only = {
22520 .name = "events",
22521 .attrs = NULL, /* patched at runtime */
22522 };
22523diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22524index 0939f86..69730af 100644
22525--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22526+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22527@@ -3691,7 +3691,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
22528 static int __init uncore_type_init(struct intel_uncore_type *type)
22529 {
22530 struct intel_uncore_pmu *pmus;
22531- struct attribute_group *attr_group;
22532+ attribute_group_no_const *attr_group;
22533 struct attribute **attrs;
22534 int i, j;
22535
22536diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22537index 90236f0..54cb20d 100644
22538--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22539+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22540@@ -503,7 +503,7 @@ struct intel_uncore_box {
22541 struct uncore_event_desc {
22542 struct kobj_attribute attr;
22543 const char *config;
22544-};
22545+} __do_const;
22546
22547 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
22548 { \
22549diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
22550index 3225ae6c..ee3c6db 100644
22551--- a/arch/x86/kernel/cpuid.c
22552+++ b/arch/x86/kernel/cpuid.c
22553@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
22554 return notifier_from_errno(err);
22555 }
22556
22557-static struct notifier_block __refdata cpuid_class_cpu_notifier =
22558+static struct notifier_block cpuid_class_cpu_notifier =
22559 {
22560 .notifier_call = cpuid_class_cpu_callback,
22561 };
22562diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
22563index a618fcd..200e95b 100644
22564--- a/arch/x86/kernel/crash.c
22565+++ b/arch/x86/kernel/crash.c
22566@@ -104,7 +104,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
22567 #ifdef CONFIG_X86_32
22568 struct pt_regs fixed_regs;
22569
22570- if (!user_mode_vm(regs)) {
22571+ if (!user_mode(regs)) {
22572 crash_fixup_ss_esp(&fixed_regs, regs);
22573 regs = &fixed_regs;
22574 }
22575diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
22576index afa64ad..dce67dd 100644
22577--- a/arch/x86/kernel/crash_dump_64.c
22578+++ b/arch/x86/kernel/crash_dump_64.c
22579@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
22580 return -ENOMEM;
22581
22582 if (userbuf) {
22583- if (copy_to_user(buf, vaddr + offset, csize)) {
22584+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
22585 iounmap(vaddr);
22586 return -EFAULT;
22587 }
22588diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
22589index f6dfd93..892ade4 100644
22590--- a/arch/x86/kernel/doublefault.c
22591+++ b/arch/x86/kernel/doublefault.c
22592@@ -12,7 +12,7 @@
22593
22594 #define DOUBLEFAULT_STACKSIZE (1024)
22595 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
22596-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
22597+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
22598
22599 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
22600
22601@@ -22,7 +22,7 @@ static void doublefault_fn(void)
22602 unsigned long gdt, tss;
22603
22604 native_store_gdt(&gdt_desc);
22605- gdt = gdt_desc.address;
22606+ gdt = (unsigned long)gdt_desc.address;
22607
22608 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
22609
22610@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
22611 /* 0x2 bit is always set */
22612 .flags = X86_EFLAGS_SF | 0x2,
22613 .sp = STACK_START,
22614- .es = __USER_DS,
22615+ .es = __KERNEL_DS,
22616 .cs = __KERNEL_CS,
22617 .ss = __KERNEL_DS,
22618- .ds = __USER_DS,
22619+ .ds = __KERNEL_DS,
22620 .fs = __KERNEL_PERCPU,
22621
22622 .__cr3 = __pa_nodebug(swapper_pg_dir),
22623diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
22624index b74ebc7..2c95874 100644
22625--- a/arch/x86/kernel/dumpstack.c
22626+++ b/arch/x86/kernel/dumpstack.c
22627@@ -2,6 +2,9 @@
22628 * Copyright (C) 1991, 1992 Linus Torvalds
22629 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
22630 */
22631+#ifdef CONFIG_GRKERNSEC_HIDESYM
22632+#define __INCLUDED_BY_HIDESYM 1
22633+#endif
22634 #include <linux/kallsyms.h>
22635 #include <linux/kprobes.h>
22636 #include <linux/uaccess.h>
22637@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
22638
22639 void printk_address(unsigned long address)
22640 {
22641- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
22642+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
22643 }
22644
22645 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
22646 static void
22647 print_ftrace_graph_addr(unsigned long addr, void *data,
22648 const struct stacktrace_ops *ops,
22649- struct thread_info *tinfo, int *graph)
22650+ struct task_struct *task, int *graph)
22651 {
22652- struct task_struct *task;
22653 unsigned long ret_addr;
22654 int index;
22655
22656 if (addr != (unsigned long)return_to_handler)
22657 return;
22658
22659- task = tinfo->task;
22660 index = task->curr_ret_stack;
22661
22662 if (!task->ret_stack || index < *graph)
22663@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22664 static inline void
22665 print_ftrace_graph_addr(unsigned long addr, void *data,
22666 const struct stacktrace_ops *ops,
22667- struct thread_info *tinfo, int *graph)
22668+ struct task_struct *task, int *graph)
22669 { }
22670 #endif
22671
22672@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22673 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22674 */
22675
22676-static inline int valid_stack_ptr(struct thread_info *tinfo,
22677- void *p, unsigned int size, void *end)
22678+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22679 {
22680- void *t = tinfo;
22681 if (end) {
22682 if (p < end && p >= (end-THREAD_SIZE))
22683 return 1;
22684@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22685 }
22686
22687 unsigned long
22688-print_context_stack(struct thread_info *tinfo,
22689+print_context_stack(struct task_struct *task, void *stack_start,
22690 unsigned long *stack, unsigned long bp,
22691 const struct stacktrace_ops *ops, void *data,
22692 unsigned long *end, int *graph)
22693 {
22694 struct stack_frame *frame = (struct stack_frame *)bp;
22695
22696- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22697+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22698 unsigned long addr;
22699
22700 addr = *stack;
22701@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22702 } else {
22703 ops->address(data, addr, 0);
22704 }
22705- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22706+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22707 }
22708 stack++;
22709 }
22710@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22711 EXPORT_SYMBOL_GPL(print_context_stack);
22712
22713 unsigned long
22714-print_context_stack_bp(struct thread_info *tinfo,
22715+print_context_stack_bp(struct task_struct *task, void *stack_start,
22716 unsigned long *stack, unsigned long bp,
22717 const struct stacktrace_ops *ops, void *data,
22718 unsigned long *end, int *graph)
22719@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22720 struct stack_frame *frame = (struct stack_frame *)bp;
22721 unsigned long *ret_addr = &frame->return_address;
22722
22723- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22724+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22725 unsigned long addr = *ret_addr;
22726
22727 if (!__kernel_text_address(addr))
22728@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22729 ops->address(data, addr, 1);
22730 frame = frame->next_frame;
22731 ret_addr = &frame->return_address;
22732- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22733+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22734 }
22735
22736 return (unsigned long)frame;
22737@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22738 static void print_trace_address(void *data, unsigned long addr, int reliable)
22739 {
22740 touch_nmi_watchdog();
22741- printk(data);
22742+ printk("%s", (char *)data);
22743 printk_stack_address(addr, reliable);
22744 }
22745
22746@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22747 EXPORT_SYMBOL_GPL(oops_begin);
22748 NOKPROBE_SYMBOL(oops_begin);
22749
22750+extern void gr_handle_kernel_exploit(void);
22751+
22752 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22753 {
22754 if (regs && kexec_should_crash(current))
22755@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22756 panic("Fatal exception in interrupt");
22757 if (panic_on_oops)
22758 panic("Fatal exception");
22759- do_exit(signr);
22760+
22761+ gr_handle_kernel_exploit();
22762+
22763+ do_group_exit(signr);
22764 }
22765 NOKPROBE_SYMBOL(oops_end);
22766
22767@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22768 print_modules();
22769 show_regs(regs);
22770 #ifdef CONFIG_X86_32
22771- if (user_mode_vm(regs)) {
22772+ if (user_mode(regs)) {
22773 sp = regs->sp;
22774 ss = regs->ss & 0xffff;
22775 } else {
22776@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22777 unsigned long flags = oops_begin();
22778 int sig = SIGSEGV;
22779
22780- if (!user_mode_vm(regs))
22781+ if (!user_mode(regs))
22782 report_bug(regs->ip, regs);
22783
22784 if (__die(str, regs, err))
22785diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22786index 5abd4cd..c65733b 100644
22787--- a/arch/x86/kernel/dumpstack_32.c
22788+++ b/arch/x86/kernel/dumpstack_32.c
22789@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22790 bp = stack_frame(task, regs);
22791
22792 for (;;) {
22793- struct thread_info *context;
22794+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22795 void *end_stack;
22796
22797 end_stack = is_hardirq_stack(stack, cpu);
22798 if (!end_stack)
22799 end_stack = is_softirq_stack(stack, cpu);
22800
22801- context = task_thread_info(task);
22802- bp = ops->walk_stack(context, stack, bp, ops, data,
22803+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22804 end_stack, &graph);
22805
22806 /* Stop if not on irq stack */
22807@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22808 int i;
22809
22810 show_regs_print_info(KERN_EMERG);
22811- __show_regs(regs, !user_mode_vm(regs));
22812+ __show_regs(regs, !user_mode(regs));
22813
22814 /*
22815 * When in-kernel, we also print out the stack and code at the
22816 * time of the fault..
22817 */
22818- if (!user_mode_vm(regs)) {
22819+ if (!user_mode(regs)) {
22820 unsigned int code_prologue = code_bytes * 43 / 64;
22821 unsigned int code_len = code_bytes;
22822 unsigned char c;
22823 u8 *ip;
22824+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22825
22826 pr_emerg("Stack:\n");
22827 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22828
22829 pr_emerg("Code:");
22830
22831- ip = (u8 *)regs->ip - code_prologue;
22832+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22833 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22834 /* try starting at IP */
22835- ip = (u8 *)regs->ip;
22836+ ip = (u8 *)regs->ip + cs_base;
22837 code_len = code_len - code_prologue + 1;
22838 }
22839 for (i = 0; i < code_len; i++, ip++) {
22840@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22841 pr_cont(" Bad EIP value.");
22842 break;
22843 }
22844- if (ip == (u8 *)regs->ip)
22845+ if (ip == (u8 *)regs->ip + cs_base)
22846 pr_cont(" <%02x>", c);
22847 else
22848 pr_cont(" %02x", c);
22849@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22850 {
22851 unsigned short ud2;
22852
22853+ ip = ktla_ktva(ip);
22854 if (ip < PAGE_OFFSET)
22855 return 0;
22856 if (probe_kernel_address((unsigned short *)ip, ud2))
22857@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22858
22859 return ud2 == 0x0b0f;
22860 }
22861+
22862+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22863+void pax_check_alloca(unsigned long size)
22864+{
22865+ unsigned long sp = (unsigned long)&sp, stack_left;
22866+
22867+ /* all kernel stacks are of the same size */
22868+ stack_left = sp & (THREAD_SIZE - 1);
22869+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22870+}
22871+EXPORT_SYMBOL(pax_check_alloca);
22872+#endif
22873diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22874index ff86f19..a20c62c 100644
22875--- a/arch/x86/kernel/dumpstack_64.c
22876+++ b/arch/x86/kernel/dumpstack_64.c
22877@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22878 const struct stacktrace_ops *ops, void *data)
22879 {
22880 const unsigned cpu = get_cpu();
22881- struct thread_info *tinfo;
22882 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22883 unsigned long dummy;
22884 unsigned used = 0;
22885 int graph = 0;
22886 int done = 0;
22887+ void *stack_start;
22888
22889 if (!task)
22890 task = current;
22891@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22892 * current stack address. If the stacks consist of nested
22893 * exceptions
22894 */
22895- tinfo = task_thread_info(task);
22896 while (!done) {
22897 unsigned long *stack_end;
22898 enum stack_type stype;
22899@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22900 if (ops->stack(data, id) < 0)
22901 break;
22902
22903- bp = ops->walk_stack(tinfo, stack, bp, ops,
22904+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22905 data, stack_end, &graph);
22906 ops->stack(data, "<EOE>");
22907 /*
22908@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22909 * second-to-last pointer (index -2 to end) in the
22910 * exception stack:
22911 */
22912+ if ((u16)stack_end[-1] != __KERNEL_DS)
22913+ goto out;
22914 stack = (unsigned long *) stack_end[-2];
22915 done = 0;
22916 break;
22917@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22918
22919 if (ops->stack(data, "IRQ") < 0)
22920 break;
22921- bp = ops->walk_stack(tinfo, stack, bp,
22922+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22923 ops, data, stack_end, &graph);
22924 /*
22925 * We link to the next stack (which would be
22926@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22927 /*
22928 * This handles the process stack:
22929 */
22930- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22931+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22932+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22933+out:
22934 put_cpu();
22935 }
22936 EXPORT_SYMBOL(dump_trace);
22937@@ -349,3 +352,50 @@ int is_valid_bugaddr(unsigned long ip)
22938
22939 return ud2 == 0x0b0f;
22940 }
22941+
22942+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22943+void pax_check_alloca(unsigned long size)
22944+{
22945+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22946+ unsigned cpu, used;
22947+ char *id;
22948+
22949+ /* check the process stack first */
22950+ stack_start = (unsigned long)task_stack_page(current);
22951+ stack_end = stack_start + THREAD_SIZE;
22952+ if (likely(stack_start <= sp && sp < stack_end)) {
22953+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22954+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22955+ return;
22956+ }
22957+
22958+ cpu = get_cpu();
22959+
22960+ /* check the irq stacks */
22961+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22962+ stack_start = stack_end - IRQ_STACK_SIZE;
22963+ if (stack_start <= sp && sp < stack_end) {
22964+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22965+ put_cpu();
22966+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22967+ return;
22968+ }
22969+
22970+ /* check the exception stacks */
22971+ used = 0;
22972+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22973+ stack_start = stack_end - EXCEPTION_STKSZ;
22974+ if (stack_end && stack_start <= sp && sp < stack_end) {
22975+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22976+ put_cpu();
22977+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22978+ return;
22979+ }
22980+
22981+ put_cpu();
22982+
22983+ /* unknown stack */
22984+ BUG();
22985+}
22986+EXPORT_SYMBOL(pax_check_alloca);
22987+#endif
22988diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22989index 988c00a..4f673b6 100644
22990--- a/arch/x86/kernel/e820.c
22991+++ b/arch/x86/kernel/e820.c
22992@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
22993
22994 static void early_panic(char *msg)
22995 {
22996- early_printk(msg);
22997- panic(msg);
22998+ early_printk("%s", msg);
22999+ panic("%s", msg);
23000 }
23001
23002 static int userdef __initdata;
23003diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
23004index 01d1c18..8073693 100644
23005--- a/arch/x86/kernel/early_printk.c
23006+++ b/arch/x86/kernel/early_printk.c
23007@@ -7,6 +7,7 @@
23008 #include <linux/pci_regs.h>
23009 #include <linux/pci_ids.h>
23010 #include <linux/errno.h>
23011+#include <linux/sched.h>
23012 #include <asm/io.h>
23013 #include <asm/processor.h>
23014 #include <asm/fcntl.h>
23015diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
23016index 4b0e1df..884b67e 100644
23017--- a/arch/x86/kernel/entry_32.S
23018+++ b/arch/x86/kernel/entry_32.S
23019@@ -177,13 +177,153 @@
23020 /*CFI_REL_OFFSET gs, PT_GS*/
23021 .endm
23022 .macro SET_KERNEL_GS reg
23023+
23024+#ifdef CONFIG_CC_STACKPROTECTOR
23025 movl $(__KERNEL_STACK_CANARY), \reg
23026+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
23027+ movl $(__USER_DS), \reg
23028+#else
23029+ xorl \reg, \reg
23030+#endif
23031+
23032 movl \reg, %gs
23033 .endm
23034
23035 #endif /* CONFIG_X86_32_LAZY_GS */
23036
23037-.macro SAVE_ALL
23038+.macro pax_enter_kernel
23039+#ifdef CONFIG_PAX_KERNEXEC
23040+ call pax_enter_kernel
23041+#endif
23042+.endm
23043+
23044+.macro pax_exit_kernel
23045+#ifdef CONFIG_PAX_KERNEXEC
23046+ call pax_exit_kernel
23047+#endif
23048+.endm
23049+
23050+#ifdef CONFIG_PAX_KERNEXEC
23051+ENTRY(pax_enter_kernel)
23052+#ifdef CONFIG_PARAVIRT
23053+ pushl %eax
23054+ pushl %ecx
23055+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
23056+ mov %eax, %esi
23057+#else
23058+ mov %cr0, %esi
23059+#endif
23060+ bts $16, %esi
23061+ jnc 1f
23062+ mov %cs, %esi
23063+ cmp $__KERNEL_CS, %esi
23064+ jz 3f
23065+ ljmp $__KERNEL_CS, $3f
23066+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
23067+2:
23068+#ifdef CONFIG_PARAVIRT
23069+ mov %esi, %eax
23070+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
23071+#else
23072+ mov %esi, %cr0
23073+#endif
23074+3:
23075+#ifdef CONFIG_PARAVIRT
23076+ popl %ecx
23077+ popl %eax
23078+#endif
23079+ ret
23080+ENDPROC(pax_enter_kernel)
23081+
23082+ENTRY(pax_exit_kernel)
23083+#ifdef CONFIG_PARAVIRT
23084+ pushl %eax
23085+ pushl %ecx
23086+#endif
23087+ mov %cs, %esi
23088+ cmp $__KERNEXEC_KERNEL_CS, %esi
23089+ jnz 2f
23090+#ifdef CONFIG_PARAVIRT
23091+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
23092+ mov %eax, %esi
23093+#else
23094+ mov %cr0, %esi
23095+#endif
23096+ btr $16, %esi
23097+ ljmp $__KERNEL_CS, $1f
23098+1:
23099+#ifdef CONFIG_PARAVIRT
23100+ mov %esi, %eax
23101+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
23102+#else
23103+ mov %esi, %cr0
23104+#endif
23105+2:
23106+#ifdef CONFIG_PARAVIRT
23107+ popl %ecx
23108+ popl %eax
23109+#endif
23110+ ret
23111+ENDPROC(pax_exit_kernel)
23112+#endif
23113+
23114+ .macro pax_erase_kstack
23115+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23116+ call pax_erase_kstack
23117+#endif
23118+ .endm
23119+
23120+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23121+/*
23122+ * ebp: thread_info
23123+ */
23124+ENTRY(pax_erase_kstack)
23125+ pushl %edi
23126+ pushl %ecx
23127+ pushl %eax
23128+
23129+ mov TI_lowest_stack(%ebp), %edi
23130+ mov $-0xBEEF, %eax
23131+ std
23132+
23133+1: mov %edi, %ecx
23134+ and $THREAD_SIZE_asm - 1, %ecx
23135+ shr $2, %ecx
23136+ repne scasl
23137+ jecxz 2f
23138+
23139+ cmp $2*16, %ecx
23140+ jc 2f
23141+
23142+ mov $2*16, %ecx
23143+ repe scasl
23144+ jecxz 2f
23145+ jne 1b
23146+
23147+2: cld
23148+ mov %esp, %ecx
23149+ sub %edi, %ecx
23150+
23151+ cmp $THREAD_SIZE_asm, %ecx
23152+ jb 3f
23153+ ud2
23154+3:
23155+
23156+ shr $2, %ecx
23157+ rep stosl
23158+
23159+ mov TI_task_thread_sp0(%ebp), %edi
23160+ sub $128, %edi
23161+ mov %edi, TI_lowest_stack(%ebp)
23162+
23163+ popl %eax
23164+ popl %ecx
23165+ popl %edi
23166+ ret
23167+ENDPROC(pax_erase_kstack)
23168+#endif
23169+
23170+.macro __SAVE_ALL _DS
23171 cld
23172 PUSH_GS
23173 pushl_cfi %fs
23174@@ -206,7 +346,7 @@
23175 CFI_REL_OFFSET ecx, 0
23176 pushl_cfi %ebx
23177 CFI_REL_OFFSET ebx, 0
23178- movl $(__USER_DS), %edx
23179+ movl $\_DS, %edx
23180 movl %edx, %ds
23181 movl %edx, %es
23182 movl $(__KERNEL_PERCPU), %edx
23183@@ -214,6 +354,15 @@
23184 SET_KERNEL_GS %edx
23185 .endm
23186
23187+.macro SAVE_ALL
23188+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23189+ __SAVE_ALL __KERNEL_DS
23190+ pax_enter_kernel
23191+#else
23192+ __SAVE_ALL __USER_DS
23193+#endif
23194+.endm
23195+
23196 .macro RESTORE_INT_REGS
23197 popl_cfi %ebx
23198 CFI_RESTORE ebx
23199@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
23200 popfl_cfi
23201 jmp syscall_exit
23202 CFI_ENDPROC
23203-END(ret_from_fork)
23204+ENDPROC(ret_from_fork)
23205
23206 ENTRY(ret_from_kernel_thread)
23207 CFI_STARTPROC
23208@@ -340,7 +489,15 @@ ret_from_intr:
23209 andl $SEGMENT_RPL_MASK, %eax
23210 #endif
23211 cmpl $USER_RPL, %eax
23212+
23213+#ifdef CONFIG_PAX_KERNEXEC
23214+ jae resume_userspace
23215+
23216+ pax_exit_kernel
23217+ jmp resume_kernel
23218+#else
23219 jb resume_kernel # not returning to v8086 or userspace
23220+#endif
23221
23222 ENTRY(resume_userspace)
23223 LOCKDEP_SYS_EXIT
23224@@ -352,8 +509,8 @@ ENTRY(resume_userspace)
23225 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
23226 # int/exception return?
23227 jne work_pending
23228- jmp restore_all
23229-END(ret_from_exception)
23230+ jmp restore_all_pax
23231+ENDPROC(ret_from_exception)
23232
23233 #ifdef CONFIG_PREEMPT
23234 ENTRY(resume_kernel)
23235@@ -365,7 +522,7 @@ need_resched:
23236 jz restore_all
23237 call preempt_schedule_irq
23238 jmp need_resched
23239-END(resume_kernel)
23240+ENDPROC(resume_kernel)
23241 #endif
23242 CFI_ENDPROC
23243
23244@@ -395,30 +552,45 @@ sysenter_past_esp:
23245 /*CFI_REL_OFFSET cs, 0*/
23246 /*
23247 * Push current_thread_info()->sysenter_return to the stack.
23248- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
23249- * pushed above; +8 corresponds to copy_thread's esp0 setting.
23250 */
23251- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
23252+ pushl_cfi $0
23253 CFI_REL_OFFSET eip, 0
23254
23255 pushl_cfi %eax
23256 SAVE_ALL
23257+ GET_THREAD_INFO(%ebp)
23258+ movl TI_sysenter_return(%ebp),%ebp
23259+ movl %ebp,PT_EIP(%esp)
23260 ENABLE_INTERRUPTS(CLBR_NONE)
23261
23262 /*
23263 * Load the potential sixth argument from user stack.
23264 * Careful about security.
23265 */
23266+ movl PT_OLDESP(%esp),%ebp
23267+
23268+#ifdef CONFIG_PAX_MEMORY_UDEREF
23269+ mov PT_OLDSS(%esp),%ds
23270+1: movl %ds:(%ebp),%ebp
23271+ push %ss
23272+ pop %ds
23273+#else
23274 cmpl $__PAGE_OFFSET-3,%ebp
23275 jae syscall_fault
23276 ASM_STAC
23277 1: movl (%ebp),%ebp
23278 ASM_CLAC
23279+#endif
23280+
23281 movl %ebp,PT_EBP(%esp)
23282 _ASM_EXTABLE(1b,syscall_fault)
23283
23284 GET_THREAD_INFO(%ebp)
23285
23286+#ifdef CONFIG_PAX_RANDKSTACK
23287+ pax_erase_kstack
23288+#endif
23289+
23290 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
23291 jnz sysenter_audit
23292 sysenter_do_call:
23293@@ -434,12 +606,24 @@ sysenter_after_call:
23294 testl $_TIF_ALLWORK_MASK, %ecx
23295 jne sysexit_audit
23296 sysenter_exit:
23297+
23298+#ifdef CONFIG_PAX_RANDKSTACK
23299+ pushl_cfi %eax
23300+ movl %esp, %eax
23301+ call pax_randomize_kstack
23302+ popl_cfi %eax
23303+#endif
23304+
23305+ pax_erase_kstack
23306+
23307 /* if something modifies registers it must also disable sysexit */
23308 movl PT_EIP(%esp), %edx
23309 movl PT_OLDESP(%esp), %ecx
23310 xorl %ebp,%ebp
23311 TRACE_IRQS_ON
23312 1: mov PT_FS(%esp), %fs
23313+2: mov PT_DS(%esp), %ds
23314+3: mov PT_ES(%esp), %es
23315 PTGS_TO_GS
23316 ENABLE_INTERRUPTS_SYSEXIT
23317
23318@@ -456,6 +640,9 @@ sysenter_audit:
23319 movl %eax,%edx /* 2nd arg: syscall number */
23320 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
23321 call __audit_syscall_entry
23322+
23323+ pax_erase_kstack
23324+
23325 pushl_cfi %ebx
23326 movl PT_EAX(%esp),%eax /* reload syscall number */
23327 jmp sysenter_do_call
23328@@ -481,10 +668,16 @@ sysexit_audit:
23329
23330 CFI_ENDPROC
23331 .pushsection .fixup,"ax"
23332-2: movl $0,PT_FS(%esp)
23333+4: movl $0,PT_FS(%esp)
23334+ jmp 1b
23335+5: movl $0,PT_DS(%esp)
23336+ jmp 1b
23337+6: movl $0,PT_ES(%esp)
23338 jmp 1b
23339 .popsection
23340- _ASM_EXTABLE(1b,2b)
23341+ _ASM_EXTABLE(1b,4b)
23342+ _ASM_EXTABLE(2b,5b)
23343+ _ASM_EXTABLE(3b,6b)
23344 PTGS_TO_GS_EX
23345 ENDPROC(ia32_sysenter_target)
23346
23347@@ -495,6 +688,11 @@ ENTRY(system_call)
23348 pushl_cfi %eax # save orig_eax
23349 SAVE_ALL
23350 GET_THREAD_INFO(%ebp)
23351+
23352+#ifdef CONFIG_PAX_RANDKSTACK
23353+ pax_erase_kstack
23354+#endif
23355+
23356 # system call tracing in operation / emulation
23357 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
23358 jnz syscall_trace_entry
23359@@ -514,6 +712,15 @@ syscall_exit:
23360 testl $_TIF_ALLWORK_MASK, %ecx # current->work
23361 jne syscall_exit_work
23362
23363+restore_all_pax:
23364+
23365+#ifdef CONFIG_PAX_RANDKSTACK
23366+ movl %esp, %eax
23367+ call pax_randomize_kstack
23368+#endif
23369+
23370+ pax_erase_kstack
23371+
23372 restore_all:
23373 TRACE_IRQS_IRET
23374 restore_all_notrace:
23375@@ -568,14 +775,34 @@ ldt_ss:
23376 * compensating for the offset by changing to the ESPFIX segment with
23377 * a base address that matches for the difference.
23378 */
23379-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
23380+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
23381 mov %esp, %edx /* load kernel esp */
23382 mov PT_OLDESP(%esp), %eax /* load userspace esp */
23383 mov %dx, %ax /* eax: new kernel esp */
23384 sub %eax, %edx /* offset (low word is 0) */
23385+#ifdef CONFIG_SMP
23386+ movl PER_CPU_VAR(cpu_number), %ebx
23387+ shll $PAGE_SHIFT_asm, %ebx
23388+ addl $cpu_gdt_table, %ebx
23389+#else
23390+ movl $cpu_gdt_table, %ebx
23391+#endif
23392 shr $16, %edx
23393- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
23394- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
23395+
23396+#ifdef CONFIG_PAX_KERNEXEC
23397+ mov %cr0, %esi
23398+ btr $16, %esi
23399+ mov %esi, %cr0
23400+#endif
23401+
23402+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
23403+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
23404+
23405+#ifdef CONFIG_PAX_KERNEXEC
23406+ bts $16, %esi
23407+ mov %esi, %cr0
23408+#endif
23409+
23410 pushl_cfi $__ESPFIX_SS
23411 pushl_cfi %eax /* new kernel esp */
23412 /* Disable interrupts, but do not irqtrace this section: we
23413@@ -605,20 +832,18 @@ work_resched:
23414 movl TI_flags(%ebp), %ecx
23415 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
23416 # than syscall tracing?
23417- jz restore_all
23418+ jz restore_all_pax
23419 testb $_TIF_NEED_RESCHED, %cl
23420 jnz work_resched
23421
23422 work_notifysig: # deal with pending signals and
23423 # notify-resume requests
23424+ movl %esp, %eax
23425 #ifdef CONFIG_VM86
23426 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
23427- movl %esp, %eax
23428 jne work_notifysig_v86 # returning to kernel-space or
23429 # vm86-space
23430 1:
23431-#else
23432- movl %esp, %eax
23433 #endif
23434 TRACE_IRQS_ON
23435 ENABLE_INTERRUPTS(CLBR_NONE)
23436@@ -639,7 +864,7 @@ work_notifysig_v86:
23437 movl %eax, %esp
23438 jmp 1b
23439 #endif
23440-END(work_pending)
23441+ENDPROC(work_pending)
23442
23443 # perform syscall exit tracing
23444 ALIGN
23445@@ -647,11 +872,14 @@ syscall_trace_entry:
23446 movl $-ENOSYS,PT_EAX(%esp)
23447 movl %esp, %eax
23448 call syscall_trace_enter
23449+
23450+ pax_erase_kstack
23451+
23452 /* What it returned is what we'll actually use. */
23453 cmpl $(NR_syscalls), %eax
23454 jnae syscall_call
23455 jmp syscall_exit
23456-END(syscall_trace_entry)
23457+ENDPROC(syscall_trace_entry)
23458
23459 # perform syscall exit tracing
23460 ALIGN
23461@@ -664,26 +892,30 @@ syscall_exit_work:
23462 movl %esp, %eax
23463 call syscall_trace_leave
23464 jmp resume_userspace
23465-END(syscall_exit_work)
23466+ENDPROC(syscall_exit_work)
23467 CFI_ENDPROC
23468
23469 RING0_INT_FRAME # can't unwind into user space anyway
23470 syscall_fault:
23471+#ifdef CONFIG_PAX_MEMORY_UDEREF
23472+ push %ss
23473+ pop %ds
23474+#endif
23475 ASM_CLAC
23476 GET_THREAD_INFO(%ebp)
23477 movl $-EFAULT,PT_EAX(%esp)
23478 jmp resume_userspace
23479-END(syscall_fault)
23480+ENDPROC(syscall_fault)
23481
23482 syscall_badsys:
23483 movl $-ENOSYS,%eax
23484 jmp syscall_after_call
23485-END(syscall_badsys)
23486+ENDPROC(syscall_badsys)
23487
23488 sysenter_badsys:
23489 movl $-ENOSYS,%eax
23490 jmp sysenter_after_call
23491-END(sysenter_badsys)
23492+ENDPROC(sysenter_badsys)
23493 CFI_ENDPROC
23494
23495 .macro FIXUP_ESPFIX_STACK
23496@@ -696,8 +928,15 @@ END(sysenter_badsys)
23497 */
23498 #ifdef CONFIG_X86_ESPFIX32
23499 /* fixup the stack */
23500- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
23501- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
23502+#ifdef CONFIG_SMP
23503+ movl PER_CPU_VAR(cpu_number), %ebx
23504+ shll $PAGE_SHIFT_asm, %ebx
23505+ addl $cpu_gdt_table, %ebx
23506+#else
23507+ movl $cpu_gdt_table, %ebx
23508+#endif
23509+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
23510+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
23511 shl $16, %eax
23512 addl %esp, %eax /* the adjusted stack pointer */
23513 pushl_cfi $__KERNEL_DS
23514@@ -753,7 +992,7 @@ vector=vector+1
23515 .endr
23516 2: jmp common_interrupt
23517 .endr
23518-END(irq_entries_start)
23519+ENDPROC(irq_entries_start)
23520
23521 .previous
23522 END(interrupt)
23523@@ -810,7 +1049,7 @@ ENTRY(coprocessor_error)
23524 pushl_cfi $do_coprocessor_error
23525 jmp error_code
23526 CFI_ENDPROC
23527-END(coprocessor_error)
23528+ENDPROC(coprocessor_error)
23529
23530 ENTRY(simd_coprocessor_error)
23531 RING0_INT_FRAME
23532@@ -823,7 +1062,7 @@ ENTRY(simd_coprocessor_error)
23533 .section .altinstructions,"a"
23534 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
23535 .previous
23536-.section .altinstr_replacement,"ax"
23537+.section .altinstr_replacement,"a"
23538 663: pushl $do_simd_coprocessor_error
23539 664:
23540 .previous
23541@@ -832,7 +1071,7 @@ ENTRY(simd_coprocessor_error)
23542 #endif
23543 jmp error_code
23544 CFI_ENDPROC
23545-END(simd_coprocessor_error)
23546+ENDPROC(simd_coprocessor_error)
23547
23548 ENTRY(device_not_available)
23549 RING0_INT_FRAME
23550@@ -841,18 +1080,18 @@ ENTRY(device_not_available)
23551 pushl_cfi $do_device_not_available
23552 jmp error_code
23553 CFI_ENDPROC
23554-END(device_not_available)
23555+ENDPROC(device_not_available)
23556
23557 #ifdef CONFIG_PARAVIRT
23558 ENTRY(native_iret)
23559 iret
23560 _ASM_EXTABLE(native_iret, iret_exc)
23561-END(native_iret)
23562+ENDPROC(native_iret)
23563
23564 ENTRY(native_irq_enable_sysexit)
23565 sti
23566 sysexit
23567-END(native_irq_enable_sysexit)
23568+ENDPROC(native_irq_enable_sysexit)
23569 #endif
23570
23571 ENTRY(overflow)
23572@@ -862,7 +1101,7 @@ ENTRY(overflow)
23573 pushl_cfi $do_overflow
23574 jmp error_code
23575 CFI_ENDPROC
23576-END(overflow)
23577+ENDPROC(overflow)
23578
23579 ENTRY(bounds)
23580 RING0_INT_FRAME
23581@@ -871,7 +1110,7 @@ ENTRY(bounds)
23582 pushl_cfi $do_bounds
23583 jmp error_code
23584 CFI_ENDPROC
23585-END(bounds)
23586+ENDPROC(bounds)
23587
23588 ENTRY(invalid_op)
23589 RING0_INT_FRAME
23590@@ -880,7 +1119,7 @@ ENTRY(invalid_op)
23591 pushl_cfi $do_invalid_op
23592 jmp error_code
23593 CFI_ENDPROC
23594-END(invalid_op)
23595+ENDPROC(invalid_op)
23596
23597 ENTRY(coprocessor_segment_overrun)
23598 RING0_INT_FRAME
23599@@ -889,7 +1128,7 @@ ENTRY(coprocessor_segment_overrun)
23600 pushl_cfi $do_coprocessor_segment_overrun
23601 jmp error_code
23602 CFI_ENDPROC
23603-END(coprocessor_segment_overrun)
23604+ENDPROC(coprocessor_segment_overrun)
23605
23606 ENTRY(invalid_TSS)
23607 RING0_EC_FRAME
23608@@ -897,7 +1136,7 @@ ENTRY(invalid_TSS)
23609 pushl_cfi $do_invalid_TSS
23610 jmp error_code
23611 CFI_ENDPROC
23612-END(invalid_TSS)
23613+ENDPROC(invalid_TSS)
23614
23615 ENTRY(segment_not_present)
23616 RING0_EC_FRAME
23617@@ -905,7 +1144,7 @@ ENTRY(segment_not_present)
23618 pushl_cfi $do_segment_not_present
23619 jmp error_code
23620 CFI_ENDPROC
23621-END(segment_not_present)
23622+ENDPROC(segment_not_present)
23623
23624 ENTRY(stack_segment)
23625 RING0_EC_FRAME
23626@@ -913,7 +1152,7 @@ ENTRY(stack_segment)
23627 pushl_cfi $do_stack_segment
23628 jmp error_code
23629 CFI_ENDPROC
23630-END(stack_segment)
23631+ENDPROC(stack_segment)
23632
23633 ENTRY(alignment_check)
23634 RING0_EC_FRAME
23635@@ -921,7 +1160,7 @@ ENTRY(alignment_check)
23636 pushl_cfi $do_alignment_check
23637 jmp error_code
23638 CFI_ENDPROC
23639-END(alignment_check)
23640+ENDPROC(alignment_check)
23641
23642 ENTRY(divide_error)
23643 RING0_INT_FRAME
23644@@ -930,7 +1169,7 @@ ENTRY(divide_error)
23645 pushl_cfi $do_divide_error
23646 jmp error_code
23647 CFI_ENDPROC
23648-END(divide_error)
23649+ENDPROC(divide_error)
23650
23651 #ifdef CONFIG_X86_MCE
23652 ENTRY(machine_check)
23653@@ -940,7 +1179,7 @@ ENTRY(machine_check)
23654 pushl_cfi machine_check_vector
23655 jmp error_code
23656 CFI_ENDPROC
23657-END(machine_check)
23658+ENDPROC(machine_check)
23659 #endif
23660
23661 ENTRY(spurious_interrupt_bug)
23662@@ -950,7 +1189,7 @@ ENTRY(spurious_interrupt_bug)
23663 pushl_cfi $do_spurious_interrupt_bug
23664 jmp error_code
23665 CFI_ENDPROC
23666-END(spurious_interrupt_bug)
23667+ENDPROC(spurious_interrupt_bug)
23668
23669 #ifdef CONFIG_XEN
23670 /* Xen doesn't set %esp to be precisely what the normal sysenter
23671@@ -1056,7 +1295,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23672
23673 ENTRY(mcount)
23674 ret
23675-END(mcount)
23676+ENDPROC(mcount)
23677
23678 ENTRY(ftrace_caller)
23679 pushl %eax
23680@@ -1086,7 +1325,7 @@ ftrace_graph_call:
23681 .globl ftrace_stub
23682 ftrace_stub:
23683 ret
23684-END(ftrace_caller)
23685+ENDPROC(ftrace_caller)
23686
23687 ENTRY(ftrace_regs_caller)
23688 pushf /* push flags before compare (in cs location) */
23689@@ -1184,7 +1423,7 @@ trace:
23690 popl %ecx
23691 popl %eax
23692 jmp ftrace_stub
23693-END(mcount)
23694+ENDPROC(mcount)
23695 #endif /* CONFIG_DYNAMIC_FTRACE */
23696 #endif /* CONFIG_FUNCTION_TRACER */
23697
23698@@ -1202,7 +1441,7 @@ ENTRY(ftrace_graph_caller)
23699 popl %ecx
23700 popl %eax
23701 ret
23702-END(ftrace_graph_caller)
23703+ENDPROC(ftrace_graph_caller)
23704
23705 .globl return_to_handler
23706 return_to_handler:
23707@@ -1263,15 +1502,18 @@ error_code:
23708 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23709 REG_TO_PTGS %ecx
23710 SET_KERNEL_GS %ecx
23711- movl $(__USER_DS), %ecx
23712+ movl $(__KERNEL_DS), %ecx
23713 movl %ecx, %ds
23714 movl %ecx, %es
23715+
23716+ pax_enter_kernel
23717+
23718 TRACE_IRQS_OFF
23719 movl %esp,%eax # pt_regs pointer
23720 call *%edi
23721 jmp ret_from_exception
23722 CFI_ENDPROC
23723-END(page_fault)
23724+ENDPROC(page_fault)
23725
23726 /*
23727 * Debug traps and NMI can happen at the one SYSENTER instruction
23728@@ -1314,7 +1556,7 @@ debug_stack_correct:
23729 call do_debug
23730 jmp ret_from_exception
23731 CFI_ENDPROC
23732-END(debug)
23733+ENDPROC(debug)
23734
23735 /*
23736 * NMI is doubly nasty. It can happen _while_ we're handling
23737@@ -1354,6 +1596,9 @@ nmi_stack_correct:
23738 xorl %edx,%edx # zero error code
23739 movl %esp,%eax # pt_regs pointer
23740 call do_nmi
23741+
23742+ pax_exit_kernel
23743+
23744 jmp restore_all_notrace
23745 CFI_ENDPROC
23746
23747@@ -1391,13 +1636,16 @@ nmi_espfix_stack:
23748 FIXUP_ESPFIX_STACK # %eax == %esp
23749 xorl %edx,%edx # zero error code
23750 call do_nmi
23751+
23752+ pax_exit_kernel
23753+
23754 RESTORE_REGS
23755 lss 12+4(%esp), %esp # back to espfix stack
23756 CFI_ADJUST_CFA_OFFSET -24
23757 jmp irq_return
23758 #endif
23759 CFI_ENDPROC
23760-END(nmi)
23761+ENDPROC(nmi)
23762
23763 ENTRY(int3)
23764 RING0_INT_FRAME
23765@@ -1410,14 +1658,14 @@ ENTRY(int3)
23766 call do_int3
23767 jmp ret_from_exception
23768 CFI_ENDPROC
23769-END(int3)
23770+ENDPROC(int3)
23771
23772 ENTRY(general_protection)
23773 RING0_EC_FRAME
23774 pushl_cfi $do_general_protection
23775 jmp error_code
23776 CFI_ENDPROC
23777-END(general_protection)
23778+ENDPROC(general_protection)
23779
23780 #ifdef CONFIG_KVM_GUEST
23781 ENTRY(async_page_fault)
23782@@ -1426,6 +1674,6 @@ ENTRY(async_page_fault)
23783 pushl_cfi $do_async_page_fault
23784 jmp error_code
23785 CFI_ENDPROC
23786-END(async_page_fault)
23787+ENDPROC(async_page_fault)
23788 #endif
23789
23790diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23791index b9dde27..6e9dc4e 100644
23792--- a/arch/x86/kernel/entry_64.S
23793+++ b/arch/x86/kernel/entry_64.S
23794@@ -59,6 +59,8 @@
23795 #include <asm/smap.h>
23796 #include <asm/pgtable_types.h>
23797 #include <linux/err.h>
23798+#include <asm/pgtable.h>
23799+#include <asm/alternative-asm.h>
23800
23801 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23802 #include <linux/elf-em.h>
23803@@ -81,6 +83,430 @@ ENTRY(native_usergs_sysret64)
23804 ENDPROC(native_usergs_sysret64)
23805 #endif /* CONFIG_PARAVIRT */
23806
23807+ .macro ljmpq sel, off
23808+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23809+ .byte 0x48; ljmp *1234f(%rip)
23810+ .pushsection .rodata
23811+ .align 16
23812+ 1234: .quad \off; .word \sel
23813+ .popsection
23814+#else
23815+ pushq $\sel
23816+ pushq $\off
23817+ lretq
23818+#endif
23819+ .endm
23820+
23821+ .macro pax_enter_kernel
23822+ pax_set_fptr_mask
23823+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23824+ call pax_enter_kernel
23825+#endif
23826+ .endm
23827+
23828+ .macro pax_exit_kernel
23829+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23830+ call pax_exit_kernel
23831+#endif
23832+
23833+ .endm
23834+
23835+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23836+ENTRY(pax_enter_kernel)
23837+ pushq %rdi
23838+
23839+#ifdef CONFIG_PARAVIRT
23840+ PV_SAVE_REGS(CLBR_RDI)
23841+#endif
23842+
23843+#ifdef CONFIG_PAX_KERNEXEC
23844+ GET_CR0_INTO_RDI
23845+ bts $16,%rdi
23846+ jnc 3f
23847+ mov %cs,%edi
23848+ cmp $__KERNEL_CS,%edi
23849+ jnz 2f
23850+1:
23851+#endif
23852+
23853+#ifdef CONFIG_PAX_MEMORY_UDEREF
23854+ 661: jmp 111f
23855+ .pushsection .altinstr_replacement, "a"
23856+ 662: ASM_NOP2
23857+ .popsection
23858+ .pushsection .altinstructions, "a"
23859+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23860+ .popsection
23861+ GET_CR3_INTO_RDI
23862+ cmp $0,%dil
23863+ jnz 112f
23864+ mov $__KERNEL_DS,%edi
23865+ mov %edi,%ss
23866+ jmp 111f
23867+112: cmp $1,%dil
23868+ jz 113f
23869+ ud2
23870+113: sub $4097,%rdi
23871+ bts $63,%rdi
23872+ SET_RDI_INTO_CR3
23873+ mov $__UDEREF_KERNEL_DS,%edi
23874+ mov %edi,%ss
23875+111:
23876+#endif
23877+
23878+#ifdef CONFIG_PARAVIRT
23879+ PV_RESTORE_REGS(CLBR_RDI)
23880+#endif
23881+
23882+ popq %rdi
23883+ pax_force_retaddr
23884+ retq
23885+
23886+#ifdef CONFIG_PAX_KERNEXEC
23887+2: ljmpq __KERNEL_CS,1b
23888+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23889+4: SET_RDI_INTO_CR0
23890+ jmp 1b
23891+#endif
23892+ENDPROC(pax_enter_kernel)
23893+
23894+ENTRY(pax_exit_kernel)
23895+ pushq %rdi
23896+
23897+#ifdef CONFIG_PARAVIRT
23898+ PV_SAVE_REGS(CLBR_RDI)
23899+#endif
23900+
23901+#ifdef CONFIG_PAX_KERNEXEC
23902+ mov %cs,%rdi
23903+ cmp $__KERNEXEC_KERNEL_CS,%edi
23904+ jz 2f
23905+ GET_CR0_INTO_RDI
23906+ bts $16,%rdi
23907+ jnc 4f
23908+1:
23909+#endif
23910+
23911+#ifdef CONFIG_PAX_MEMORY_UDEREF
23912+ 661: jmp 111f
23913+ .pushsection .altinstr_replacement, "a"
23914+ 662: ASM_NOP2
23915+ .popsection
23916+ .pushsection .altinstructions, "a"
23917+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23918+ .popsection
23919+ mov %ss,%edi
23920+ cmp $__UDEREF_KERNEL_DS,%edi
23921+ jnz 111f
23922+ GET_CR3_INTO_RDI
23923+ cmp $0,%dil
23924+ jz 112f
23925+ ud2
23926+112: add $4097,%rdi
23927+ bts $63,%rdi
23928+ SET_RDI_INTO_CR3
23929+ mov $__KERNEL_DS,%edi
23930+ mov %edi,%ss
23931+111:
23932+#endif
23933+
23934+#ifdef CONFIG_PARAVIRT
23935+ PV_RESTORE_REGS(CLBR_RDI);
23936+#endif
23937+
23938+ popq %rdi
23939+ pax_force_retaddr
23940+ retq
23941+
23942+#ifdef CONFIG_PAX_KERNEXEC
23943+2: GET_CR0_INTO_RDI
23944+ btr $16,%rdi
23945+ jnc 4f
23946+ ljmpq __KERNEL_CS,3f
23947+3: SET_RDI_INTO_CR0
23948+ jmp 1b
23949+4: ud2
23950+ jmp 4b
23951+#endif
23952+ENDPROC(pax_exit_kernel)
23953+#endif
23954+
23955+ .macro pax_enter_kernel_user
23956+ pax_set_fptr_mask
23957+#ifdef CONFIG_PAX_MEMORY_UDEREF
23958+ call pax_enter_kernel_user
23959+#endif
23960+ .endm
23961+
23962+ .macro pax_exit_kernel_user
23963+#ifdef CONFIG_PAX_MEMORY_UDEREF
23964+ call pax_exit_kernel_user
23965+#endif
23966+#ifdef CONFIG_PAX_RANDKSTACK
23967+ pushq %rax
23968+ pushq %r11
23969+ call pax_randomize_kstack
23970+ popq %r11
23971+ popq %rax
23972+#endif
23973+ .endm
23974+
23975+#ifdef CONFIG_PAX_MEMORY_UDEREF
23976+ENTRY(pax_enter_kernel_user)
23977+ pushq %rdi
23978+ pushq %rbx
23979+
23980+#ifdef CONFIG_PARAVIRT
23981+ PV_SAVE_REGS(CLBR_RDI)
23982+#endif
23983+
23984+ 661: jmp 111f
23985+ .pushsection .altinstr_replacement, "a"
23986+ 662: ASM_NOP2
23987+ .popsection
23988+ .pushsection .altinstructions, "a"
23989+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23990+ .popsection
23991+ GET_CR3_INTO_RDI
23992+ cmp $1,%dil
23993+ jnz 4f
23994+ sub $4097,%rdi
23995+ bts $63,%rdi
23996+ SET_RDI_INTO_CR3
23997+ jmp 3f
23998+111:
23999+
24000+ GET_CR3_INTO_RDI
24001+ mov %rdi,%rbx
24002+ add $__START_KERNEL_map,%rbx
24003+ sub phys_base(%rip),%rbx
24004+
24005+#ifdef CONFIG_PARAVIRT
24006+ cmpl $0, pv_info+PARAVIRT_enabled
24007+ jz 1f
24008+ pushq %rdi
24009+ i = 0
24010+ .rept USER_PGD_PTRS
24011+ mov i*8(%rbx),%rsi
24012+ mov $0,%sil
24013+ lea i*8(%rbx),%rdi
24014+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
24015+ i = i + 1
24016+ .endr
24017+ popq %rdi
24018+ jmp 2f
24019+1:
24020+#endif
24021+
24022+ i = 0
24023+ .rept USER_PGD_PTRS
24024+ movb $0,i*8(%rbx)
24025+ i = i + 1
24026+ .endr
24027+
24028+2: SET_RDI_INTO_CR3
24029+
24030+#ifdef CONFIG_PAX_KERNEXEC
24031+ GET_CR0_INTO_RDI
24032+ bts $16,%rdi
24033+ SET_RDI_INTO_CR0
24034+#endif
24035+
24036+3:
24037+
24038+#ifdef CONFIG_PARAVIRT
24039+ PV_RESTORE_REGS(CLBR_RDI)
24040+#endif
24041+
24042+ popq %rbx
24043+ popq %rdi
24044+ pax_force_retaddr
24045+ retq
24046+4: ud2
24047+ENDPROC(pax_enter_kernel_user)
24048+
24049+ENTRY(pax_exit_kernel_user)
24050+ pushq %rdi
24051+ pushq %rbx
24052+
24053+#ifdef CONFIG_PARAVIRT
24054+ PV_SAVE_REGS(CLBR_RDI)
24055+#endif
24056+
24057+ GET_CR3_INTO_RDI
24058+ 661: jmp 1f
24059+ .pushsection .altinstr_replacement, "a"
24060+ 662: ASM_NOP2
24061+ .popsection
24062+ .pushsection .altinstructions, "a"
24063+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
24064+ .popsection
24065+ cmp $0,%dil
24066+ jnz 3f
24067+ add $4097,%rdi
24068+ bts $63,%rdi
24069+ SET_RDI_INTO_CR3
24070+ jmp 2f
24071+1:
24072+
24073+ mov %rdi,%rbx
24074+
24075+#ifdef CONFIG_PAX_KERNEXEC
24076+ GET_CR0_INTO_RDI
24077+ btr $16,%rdi
24078+ jnc 3f
24079+ SET_RDI_INTO_CR0
24080+#endif
24081+
24082+ add $__START_KERNEL_map,%rbx
24083+ sub phys_base(%rip),%rbx
24084+
24085+#ifdef CONFIG_PARAVIRT
24086+ cmpl $0, pv_info+PARAVIRT_enabled
24087+ jz 1f
24088+ i = 0
24089+ .rept USER_PGD_PTRS
24090+ mov i*8(%rbx),%rsi
24091+ mov $0x67,%sil
24092+ lea i*8(%rbx),%rdi
24093+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
24094+ i = i + 1
24095+ .endr
24096+ jmp 2f
24097+1:
24098+#endif
24099+
24100+ i = 0
24101+ .rept USER_PGD_PTRS
24102+ movb $0x67,i*8(%rbx)
24103+ i = i + 1
24104+ .endr
24105+2:
24106+
24107+#ifdef CONFIG_PARAVIRT
24108+ PV_RESTORE_REGS(CLBR_RDI)
24109+#endif
24110+
24111+ popq %rbx
24112+ popq %rdi
24113+ pax_force_retaddr
24114+ retq
24115+3: ud2
24116+ENDPROC(pax_exit_kernel_user)
24117+#endif
24118+
24119+ .macro pax_enter_kernel_nmi
24120+ pax_set_fptr_mask
24121+
24122+#ifdef CONFIG_PAX_KERNEXEC
24123+ GET_CR0_INTO_RDI
24124+ bts $16,%rdi
24125+ jc 110f
24126+ SET_RDI_INTO_CR0
24127+ or $2,%ebx
24128+110:
24129+#endif
24130+
24131+#ifdef CONFIG_PAX_MEMORY_UDEREF
24132+ 661: jmp 111f
24133+ .pushsection .altinstr_replacement, "a"
24134+ 662: ASM_NOP2
24135+ .popsection
24136+ .pushsection .altinstructions, "a"
24137+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
24138+ .popsection
24139+ GET_CR3_INTO_RDI
24140+ cmp $0,%dil
24141+ jz 111f
24142+ sub $4097,%rdi
24143+ or $4,%ebx
24144+ bts $63,%rdi
24145+ SET_RDI_INTO_CR3
24146+ mov $__UDEREF_KERNEL_DS,%edi
24147+ mov %edi,%ss
24148+111:
24149+#endif
24150+ .endm
24151+
24152+ .macro pax_exit_kernel_nmi
24153+#ifdef CONFIG_PAX_KERNEXEC
24154+ btr $1,%ebx
24155+ jnc 110f
24156+ GET_CR0_INTO_RDI
24157+ btr $16,%rdi
24158+ SET_RDI_INTO_CR0
24159+110:
24160+#endif
24161+
24162+#ifdef CONFIG_PAX_MEMORY_UDEREF
24163+ btr $2,%ebx
24164+ jnc 111f
24165+ GET_CR3_INTO_RDI
24166+ add $4097,%rdi
24167+ bts $63,%rdi
24168+ SET_RDI_INTO_CR3
24169+ mov $__KERNEL_DS,%edi
24170+ mov %edi,%ss
24171+111:
24172+#endif
24173+ .endm
24174+
24175+ .macro pax_erase_kstack
24176+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
24177+ call pax_erase_kstack
24178+#endif
24179+ .endm
24180+
24181+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
24182+ENTRY(pax_erase_kstack)
24183+ pushq %rdi
24184+ pushq %rcx
24185+ pushq %rax
24186+ pushq %r11
24187+
24188+ GET_THREAD_INFO(%r11)
24189+ mov TI_lowest_stack(%r11), %rdi
24190+ mov $-0xBEEF, %rax
24191+ std
24192+
24193+1: mov %edi, %ecx
24194+ and $THREAD_SIZE_asm - 1, %ecx
24195+ shr $3, %ecx
24196+ repne scasq
24197+ jecxz 2f
24198+
24199+ cmp $2*8, %ecx
24200+ jc 2f
24201+
24202+ mov $2*8, %ecx
24203+ repe scasq
24204+ jecxz 2f
24205+ jne 1b
24206+
24207+2: cld
24208+ mov %esp, %ecx
24209+ sub %edi, %ecx
24210+
24211+ cmp $THREAD_SIZE_asm, %rcx
24212+ jb 3f
24213+ ud2
24214+3:
24215+
24216+ shr $3, %ecx
24217+ rep stosq
24218+
24219+ mov TI_task_thread_sp0(%r11), %rdi
24220+ sub $256, %rdi
24221+ mov %rdi, TI_lowest_stack(%r11)
24222+
24223+ popq %r11
24224+ popq %rax
24225+ popq %rcx
24226+ popq %rdi
24227+ pax_force_retaddr
24228+ ret
24229+ENDPROC(pax_erase_kstack)
24230+#endif
24231
24232 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
24233 #ifdef CONFIG_TRACE_IRQFLAGS
24234@@ -117,7 +543,7 @@ ENDPROC(native_usergs_sysret64)
24235 .endm
24236
24237 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
24238- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
24239+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
24240 jnc 1f
24241 TRACE_IRQS_ON_DEBUG
24242 1:
24243@@ -155,27 +581,6 @@ ENDPROC(native_usergs_sysret64)
24244 movq \tmp,R11+\offset(%rsp)
24245 .endm
24246
24247- .macro FAKE_STACK_FRAME child_rip
24248- /* push in order ss, rsp, eflags, cs, rip */
24249- xorl %eax, %eax
24250- pushq_cfi $__KERNEL_DS /* ss */
24251- /*CFI_REL_OFFSET ss,0*/
24252- pushq_cfi %rax /* rsp */
24253- CFI_REL_OFFSET rsp,0
24254- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
24255- /*CFI_REL_OFFSET rflags,0*/
24256- pushq_cfi $__KERNEL_CS /* cs */
24257- /*CFI_REL_OFFSET cs,0*/
24258- pushq_cfi \child_rip /* rip */
24259- CFI_REL_OFFSET rip,0
24260- pushq_cfi %rax /* orig rax */
24261- .endm
24262-
24263- .macro UNFAKE_STACK_FRAME
24264- addq $8*6, %rsp
24265- CFI_ADJUST_CFA_OFFSET -(6*8)
24266- .endm
24267-
24268 /*
24269 * initial frame state for interrupts (and exceptions without error code)
24270 */
24271@@ -241,25 +646,26 @@ ENDPROC(native_usergs_sysret64)
24272 /* save partial stack frame */
24273 .macro SAVE_ARGS_IRQ
24274 cld
24275- /* start from rbp in pt_regs and jump over */
24276- movq_cfi rdi, (RDI-RBP)
24277- movq_cfi rsi, (RSI-RBP)
24278- movq_cfi rdx, (RDX-RBP)
24279- movq_cfi rcx, (RCX-RBP)
24280- movq_cfi rax, (RAX-RBP)
24281- movq_cfi r8, (R8-RBP)
24282- movq_cfi r9, (R9-RBP)
24283- movq_cfi r10, (R10-RBP)
24284- movq_cfi r11, (R11-RBP)
24285+ /* start from r15 in pt_regs and jump over */
24286+ movq_cfi rdi, RDI
24287+ movq_cfi rsi, RSI
24288+ movq_cfi rdx, RDX
24289+ movq_cfi rcx, RCX
24290+ movq_cfi rax, RAX
24291+ movq_cfi r8, R8
24292+ movq_cfi r9, R9
24293+ movq_cfi r10, R10
24294+ movq_cfi r11, R11
24295+ movq_cfi r12, R12
24296
24297 /* Save rbp so that we can unwind from get_irq_regs() */
24298- movq_cfi rbp, 0
24299+ movq_cfi rbp, RBP
24300
24301 /* Save previous stack value */
24302 movq %rsp, %rsi
24303
24304- leaq -RBP(%rsp),%rdi /* arg1 for handler */
24305- testl $3, CS-RBP(%rsi)
24306+ movq %rsp,%rdi /* arg1 for handler */
24307+ testb $3, CS(%rsi)
24308 je 1f
24309 SWAPGS
24310 /*
24311@@ -279,6 +685,18 @@ ENDPROC(native_usergs_sysret64)
24312 0x06 /* DW_OP_deref */, \
24313 0x08 /* DW_OP_const1u */, SS+8-RBP, \
24314 0x22 /* DW_OP_plus */
24315+
24316+#ifdef CONFIG_PAX_MEMORY_UDEREF
24317+ testb $3, CS(%rdi)
24318+ jnz 1f
24319+ pax_enter_kernel
24320+ jmp 2f
24321+1: pax_enter_kernel_user
24322+2:
24323+#else
24324+ pax_enter_kernel
24325+#endif
24326+
24327 /* We entered an interrupt context - irqs are off: */
24328 TRACE_IRQS_OFF
24329 .endm
24330@@ -308,9 +726,52 @@ ENTRY(save_paranoid)
24331 js 1f /* negative -> in kernel */
24332 SWAPGS
24333 xorl %ebx,%ebx
24334-1: ret
24335+1:
24336+#ifdef CONFIG_PAX_MEMORY_UDEREF
24337+ testb $3, CS+8(%rsp)
24338+ jnz 1f
24339+ pax_enter_kernel
24340+ jmp 2f
24341+1: pax_enter_kernel_user
24342+2:
24343+#else
24344+ pax_enter_kernel
24345+#endif
24346+ pax_force_retaddr
24347+ ret
24348 CFI_ENDPROC
24349-END(save_paranoid)
24350+ENDPROC(save_paranoid)
24351+
24352+ENTRY(save_paranoid_nmi)
24353+ XCPT_FRAME 1 RDI+8
24354+ cld
24355+ movq_cfi rdi, RDI+8
24356+ movq_cfi rsi, RSI+8
24357+ movq_cfi rdx, RDX+8
24358+ movq_cfi rcx, RCX+8
24359+ movq_cfi rax, RAX+8
24360+ movq_cfi r8, R8+8
24361+ movq_cfi r9, R9+8
24362+ movq_cfi r10, R10+8
24363+ movq_cfi r11, R11+8
24364+ movq_cfi rbx, RBX+8
24365+ movq_cfi rbp, RBP+8
24366+ movq_cfi r12, R12+8
24367+ movq_cfi r13, R13+8
24368+ movq_cfi r14, R14+8
24369+ movq_cfi r15, R15+8
24370+ movl $1,%ebx
24371+ movl $MSR_GS_BASE,%ecx
24372+ rdmsr
24373+ testl %edx,%edx
24374+ js 1f /* negative -> in kernel */
24375+ SWAPGS
24376+ xorl %ebx,%ebx
24377+1: pax_enter_kernel_nmi
24378+ pax_force_retaddr
24379+ ret
24380+ CFI_ENDPROC
24381+ENDPROC(save_paranoid_nmi)
24382
24383 /*
24384 * A newly forked process directly context switches into this address.
24385@@ -331,7 +792,7 @@ ENTRY(ret_from_fork)
24386
24387 RESTORE_REST
24388
24389- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
24390+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
24391 jz 1f
24392
24393 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
24394@@ -341,15 +802,13 @@ ENTRY(ret_from_fork)
24395 jmp ret_from_sys_call # go to the SYSRET fastpath
24396
24397 1:
24398- subq $REST_SKIP, %rsp # leave space for volatiles
24399- CFI_ADJUST_CFA_OFFSET REST_SKIP
24400 movq %rbp, %rdi
24401 call *%rbx
24402 movl $0, RAX(%rsp)
24403 RESTORE_REST
24404 jmp int_ret_from_sys_call
24405 CFI_ENDPROC
24406-END(ret_from_fork)
24407+ENDPROC(ret_from_fork)
24408
24409 /*
24410 * System call entry. Up to 6 arguments in registers are supported.
24411@@ -386,7 +845,7 @@ END(ret_from_fork)
24412 ENTRY(system_call)
24413 CFI_STARTPROC simple
24414 CFI_SIGNAL_FRAME
24415- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
24416+ CFI_DEF_CFA rsp,0
24417 CFI_REGISTER rip,rcx
24418 /*CFI_REGISTER rflags,r11*/
24419 SWAPGS_UNSAFE_STACK
24420@@ -399,16 +858,23 @@ GLOBAL(system_call_after_swapgs)
24421
24422 movq %rsp,PER_CPU_VAR(old_rsp)
24423 movq PER_CPU_VAR(kernel_stack),%rsp
24424+ SAVE_ARGS 8*6,0
24425+ pax_enter_kernel_user
24426+
24427+#ifdef CONFIG_PAX_RANDKSTACK
24428+ pax_erase_kstack
24429+#endif
24430+
24431 /*
24432 * No need to follow this irqs off/on section - it's straight
24433 * and short:
24434 */
24435 ENABLE_INTERRUPTS(CLBR_NONE)
24436- SAVE_ARGS 8,0
24437 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
24438 movq %rcx,RIP-ARGOFFSET(%rsp)
24439 CFI_REL_OFFSET rip,RIP-ARGOFFSET
24440- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
24441+ GET_THREAD_INFO(%rcx)
24442+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
24443 jnz tracesys
24444 system_call_fastpath:
24445 #if __SYSCALL_MASK == ~0
24446@@ -432,10 +898,13 @@ sysret_check:
24447 LOCKDEP_SYS_EXIT
24448 DISABLE_INTERRUPTS(CLBR_NONE)
24449 TRACE_IRQS_OFF
24450- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
24451+ GET_THREAD_INFO(%rcx)
24452+ movl TI_flags(%rcx),%edx
24453 andl %edi,%edx
24454 jnz sysret_careful
24455 CFI_REMEMBER_STATE
24456+ pax_exit_kernel_user
24457+ pax_erase_kstack
24458 /*
24459 * sysretq will re-enable interrupts:
24460 */
24461@@ -494,6 +963,9 @@ auditsys:
24462 movq %rax,%rsi /* 2nd arg: syscall number */
24463 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
24464 call __audit_syscall_entry
24465+
24466+ pax_erase_kstack
24467+
24468 LOAD_ARGS 0 /* reload call-clobbered registers */
24469 jmp system_call_fastpath
24470
24471@@ -515,7 +987,7 @@ sysret_audit:
24472 /* Do syscall tracing */
24473 tracesys:
24474 #ifdef CONFIG_AUDITSYSCALL
24475- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
24476+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
24477 jz auditsys
24478 #endif
24479 SAVE_REST
24480@@ -523,12 +995,15 @@ tracesys:
24481 FIXUP_TOP_OF_STACK %rdi
24482 movq %rsp,%rdi
24483 call syscall_trace_enter
24484+
24485+ pax_erase_kstack
24486+
24487 /*
24488 * Reload arg registers from stack in case ptrace changed them.
24489 * We don't reload %rax because syscall_trace_enter() returned
24490 * the value it wants us to use in the table lookup.
24491 */
24492- LOAD_ARGS ARGOFFSET, 1
24493+ LOAD_ARGS 1
24494 RESTORE_REST
24495 #if __SYSCALL_MASK == ~0
24496 cmpq $__NR_syscall_max,%rax
24497@@ -558,7 +1033,9 @@ GLOBAL(int_with_check)
24498 andl %edi,%edx
24499 jnz int_careful
24500 andl $~TS_COMPAT,TI_status(%rcx)
24501- jmp retint_swapgs
24502+ pax_exit_kernel_user
24503+ pax_erase_kstack
24504+ jmp retint_swapgs_pax
24505
24506 /* Either reschedule or signal or syscall exit tracking needed. */
24507 /* First do a reschedule test. */
24508@@ -604,7 +1081,7 @@ int_restore_rest:
24509 TRACE_IRQS_OFF
24510 jmp int_with_check
24511 CFI_ENDPROC
24512-END(system_call)
24513+ENDPROC(system_call)
24514
24515 .macro FORK_LIKE func
24516 ENTRY(stub_\func)
24517@@ -617,9 +1094,10 @@ ENTRY(stub_\func)
24518 DEFAULT_FRAME 0 8 /* offset 8: return address */
24519 call sys_\func
24520 RESTORE_TOP_OF_STACK %r11, 8
24521- ret $REST_SKIP /* pop extended registers */
24522+ pax_force_retaddr
24523+ ret
24524 CFI_ENDPROC
24525-END(stub_\func)
24526+ENDPROC(stub_\func)
24527 .endm
24528
24529 .macro FIXED_FRAME label,func
24530@@ -629,9 +1107,10 @@ ENTRY(\label)
24531 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
24532 call \func
24533 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
24534+ pax_force_retaddr
24535 ret
24536 CFI_ENDPROC
24537-END(\label)
24538+ENDPROC(\label)
24539 .endm
24540
24541 FORK_LIKE clone
24542@@ -639,19 +1118,6 @@ END(\label)
24543 FORK_LIKE vfork
24544 FIXED_FRAME stub_iopl, sys_iopl
24545
24546-ENTRY(ptregscall_common)
24547- DEFAULT_FRAME 1 8 /* offset 8: return address */
24548- RESTORE_TOP_OF_STACK %r11, 8
24549- movq_cfi_restore R15+8, r15
24550- movq_cfi_restore R14+8, r14
24551- movq_cfi_restore R13+8, r13
24552- movq_cfi_restore R12+8, r12
24553- movq_cfi_restore RBP+8, rbp
24554- movq_cfi_restore RBX+8, rbx
24555- ret $REST_SKIP /* pop extended registers */
24556- CFI_ENDPROC
24557-END(ptregscall_common)
24558-
24559 ENTRY(stub_execve)
24560 CFI_STARTPROC
24561 addq $8, %rsp
24562@@ -663,7 +1129,7 @@ ENTRY(stub_execve)
24563 RESTORE_REST
24564 jmp int_ret_from_sys_call
24565 CFI_ENDPROC
24566-END(stub_execve)
24567+ENDPROC(stub_execve)
24568
24569 /*
24570 * sigreturn is special because it needs to restore all registers on return.
24571@@ -680,7 +1146,7 @@ ENTRY(stub_rt_sigreturn)
24572 RESTORE_REST
24573 jmp int_ret_from_sys_call
24574 CFI_ENDPROC
24575-END(stub_rt_sigreturn)
24576+ENDPROC(stub_rt_sigreturn)
24577
24578 #ifdef CONFIG_X86_X32_ABI
24579 ENTRY(stub_x32_rt_sigreturn)
24580@@ -694,7 +1160,7 @@ ENTRY(stub_x32_rt_sigreturn)
24581 RESTORE_REST
24582 jmp int_ret_from_sys_call
24583 CFI_ENDPROC
24584-END(stub_x32_rt_sigreturn)
24585+ENDPROC(stub_x32_rt_sigreturn)
24586
24587 ENTRY(stub_x32_execve)
24588 CFI_STARTPROC
24589@@ -708,7 +1174,7 @@ ENTRY(stub_x32_execve)
24590 RESTORE_REST
24591 jmp int_ret_from_sys_call
24592 CFI_ENDPROC
24593-END(stub_x32_execve)
24594+ENDPROC(stub_x32_execve)
24595
24596 #endif
24597
24598@@ -745,7 +1211,7 @@ vector=vector+1
24599 2: jmp common_interrupt
24600 .endr
24601 CFI_ENDPROC
24602-END(irq_entries_start)
24603+ENDPROC(irq_entries_start)
24604
24605 .previous
24606 END(interrupt)
24607@@ -762,8 +1228,8 @@ END(interrupt)
24608 /* 0(%rsp): ~(interrupt number) */
24609 .macro interrupt func
24610 /* reserve pt_regs for scratch regs and rbp */
24611- subq $ORIG_RAX-RBP, %rsp
24612- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
24613+ subq $ORIG_RAX, %rsp
24614+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
24615 SAVE_ARGS_IRQ
24616 call \func
24617 .endm
24618@@ -786,14 +1252,14 @@ ret_from_intr:
24619
24620 /* Restore saved previous stack */
24621 popq %rsi
24622- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
24623- leaq ARGOFFSET-RBP(%rsi), %rsp
24624+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
24625+ movq %rsi, %rsp
24626 CFI_DEF_CFA_REGISTER rsp
24627- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
24628+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
24629
24630 exit_intr:
24631 GET_THREAD_INFO(%rcx)
24632- testl $3,CS-ARGOFFSET(%rsp)
24633+ testb $3,CS-ARGOFFSET(%rsp)
24634 je retint_kernel
24635
24636 /* Interrupt came from user space */
24637@@ -815,12 +1281,35 @@ retint_swapgs: /* return to user-space */
24638 * The iretq could re-enable interrupts:
24639 */
24640 DISABLE_INTERRUPTS(CLBR_ANY)
24641+ pax_exit_kernel_user
24642+retint_swapgs_pax:
24643 TRACE_IRQS_IRETQ
24644 SWAPGS
24645 jmp restore_args
24646
24647 retint_restore_args: /* return to kernel space */
24648 DISABLE_INTERRUPTS(CLBR_ANY)
24649+ pax_exit_kernel
24650+
24651+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
24652+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
24653+ * namely calling EFI runtime services with a phys mapping. We're
24654+ * starting off with NOPs and patch in the real instrumentation
24655+ * (BTS/OR) before starting any userland process; even before starting
24656+ * up the APs.
24657+ */
24658+ .pushsection .altinstr_replacement, "a"
24659+ 601: pax_force_retaddr (RIP-ARGOFFSET)
24660+ 602:
24661+ .popsection
24662+ 603: .fill 602b-601b, 1, 0x90
24663+ .pushsection .altinstructions, "a"
24664+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
24665+ .popsection
24666+#else
24667+ pax_force_retaddr (RIP-ARGOFFSET)
24668+#endif
24669+
24670 /*
24671 * The iretq could re-enable interrupts:
24672 */
24673@@ -920,7 +1409,7 @@ ENTRY(retint_kernel)
24674 jmp exit_intr
24675 #endif
24676 CFI_ENDPROC
24677-END(common_interrupt)
24678+ENDPROC(common_interrupt)
24679
24680 /*
24681 * APIC interrupts.
24682@@ -934,7 +1423,7 @@ ENTRY(\sym)
24683 interrupt \do_sym
24684 jmp ret_from_intr
24685 CFI_ENDPROC
24686-END(\sym)
24687+ENDPROC(\sym)
24688 .endm
24689
24690 #ifdef CONFIG_TRACING
24691@@ -1007,7 +1496,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24692 /*
24693 * Exception entry points.
24694 */
24695-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24696+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24697
24698 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24699 ENTRY(\sym)
24700@@ -1058,6 +1547,12 @@ ENTRY(\sym)
24701 .endif
24702
24703 .if \shift_ist != -1
24704+#ifdef CONFIG_SMP
24705+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24706+ lea init_tss(%r13), %r13
24707+#else
24708+ lea init_tss(%rip), %r13
24709+#endif
24710 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24711 .endif
24712
24713@@ -1074,7 +1569,7 @@ ENTRY(\sym)
24714 .endif
24715
24716 CFI_ENDPROC
24717-END(\sym)
24718+ENDPROC(\sym)
24719 .endm
24720
24721 #ifdef CONFIG_TRACING
24722@@ -1115,9 +1610,10 @@ gs_change:
24723 2: mfence /* workaround */
24724 SWAPGS
24725 popfq_cfi
24726+ pax_force_retaddr
24727 ret
24728 CFI_ENDPROC
24729-END(native_load_gs_index)
24730+ENDPROC(native_load_gs_index)
24731
24732 _ASM_EXTABLE(gs_change,bad_gs)
24733 .section .fixup,"ax"
24734@@ -1145,9 +1641,10 @@ ENTRY(do_softirq_own_stack)
24735 CFI_DEF_CFA_REGISTER rsp
24736 CFI_ADJUST_CFA_OFFSET -8
24737 decl PER_CPU_VAR(irq_count)
24738+ pax_force_retaddr
24739 ret
24740 CFI_ENDPROC
24741-END(do_softirq_own_stack)
24742+ENDPROC(do_softirq_own_stack)
24743
24744 #ifdef CONFIG_XEN
24745 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24746@@ -1185,7 +1682,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24747 decl PER_CPU_VAR(irq_count)
24748 jmp error_exit
24749 CFI_ENDPROC
24750-END(xen_do_hypervisor_callback)
24751+ENDPROC(xen_do_hypervisor_callback)
24752
24753 /*
24754 * Hypervisor uses this for application faults while it executes.
24755@@ -1244,7 +1741,7 @@ ENTRY(xen_failsafe_callback)
24756 SAVE_ALL
24757 jmp error_exit
24758 CFI_ENDPROC
24759-END(xen_failsafe_callback)
24760+ENDPROC(xen_failsafe_callback)
24761
24762 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24763 xen_hvm_callback_vector xen_evtchn_do_upcall
24764@@ -1291,18 +1788,33 @@ ENTRY(paranoid_exit)
24765 DEFAULT_FRAME
24766 DISABLE_INTERRUPTS(CLBR_NONE)
24767 TRACE_IRQS_OFF_DEBUG
24768- testl %ebx,%ebx /* swapgs needed? */
24769+ testl $1,%ebx /* swapgs needed? */
24770 jnz paranoid_restore
24771- testl $3,CS(%rsp)
24772+ testb $3,CS(%rsp)
24773 jnz paranoid_userspace
24774+#ifdef CONFIG_PAX_MEMORY_UDEREF
24775+ pax_exit_kernel
24776+ TRACE_IRQS_IRETQ 0
24777+ SWAPGS_UNSAFE_STACK
24778+ RESTORE_ALL 8
24779+ pax_force_retaddr_bts
24780+ jmp irq_return
24781+#endif
24782 paranoid_swapgs:
24783+#ifdef CONFIG_PAX_MEMORY_UDEREF
24784+ pax_exit_kernel_user
24785+#else
24786+ pax_exit_kernel
24787+#endif
24788 TRACE_IRQS_IRETQ 0
24789 SWAPGS_UNSAFE_STACK
24790 RESTORE_ALL 8
24791 jmp irq_return
24792 paranoid_restore:
24793+ pax_exit_kernel
24794 TRACE_IRQS_IRETQ_DEBUG 0
24795 RESTORE_ALL 8
24796+ pax_force_retaddr_bts
24797 jmp irq_return
24798 paranoid_userspace:
24799 GET_THREAD_INFO(%rcx)
24800@@ -1331,7 +1843,7 @@ paranoid_schedule:
24801 TRACE_IRQS_OFF
24802 jmp paranoid_userspace
24803 CFI_ENDPROC
24804-END(paranoid_exit)
24805+ENDPROC(paranoid_exit)
24806
24807 /*
24808 * Exception entry point. This expects an error code/orig_rax on the stack.
24809@@ -1358,12 +1870,23 @@ ENTRY(error_entry)
24810 movq %r14, R14+8(%rsp)
24811 movq %r15, R15+8(%rsp)
24812 xorl %ebx,%ebx
24813- testl $3,CS+8(%rsp)
24814+ testb $3,CS+8(%rsp)
24815 je error_kernelspace
24816 error_swapgs:
24817 SWAPGS
24818 error_sti:
24819+#ifdef CONFIG_PAX_MEMORY_UDEREF
24820+ testb $3, CS+8(%rsp)
24821+ jnz 1f
24822+ pax_enter_kernel
24823+ jmp 2f
24824+1: pax_enter_kernel_user
24825+2:
24826+#else
24827+ pax_enter_kernel
24828+#endif
24829 TRACE_IRQS_OFF
24830+ pax_force_retaddr
24831 ret
24832
24833 /*
24834@@ -1398,7 +1921,7 @@ error_bad_iret:
24835 decl %ebx /* Return to usergs */
24836 jmp error_sti
24837 CFI_ENDPROC
24838-END(error_entry)
24839+ENDPROC(error_entry)
24840
24841
24842 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24843@@ -1409,7 +1932,7 @@ ENTRY(error_exit)
24844 DISABLE_INTERRUPTS(CLBR_NONE)
24845 TRACE_IRQS_OFF
24846 GET_THREAD_INFO(%rcx)
24847- testl %eax,%eax
24848+ testl $1,%eax
24849 jne retint_kernel
24850 LOCKDEP_SYS_EXIT_IRQ
24851 movl TI_flags(%rcx),%edx
24852@@ -1418,7 +1941,7 @@ ENTRY(error_exit)
24853 jnz retint_careful
24854 jmp retint_swapgs
24855 CFI_ENDPROC
24856-END(error_exit)
24857+ENDPROC(error_exit)
24858
24859 /*
24860 * Test if a given stack is an NMI stack or not.
24861@@ -1476,9 +1999,11 @@ ENTRY(nmi)
24862 * If %cs was not the kernel segment, then the NMI triggered in user
24863 * space, which means it is definitely not nested.
24864 */
24865+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24866+ je 1f
24867 cmpl $__KERNEL_CS, 16(%rsp)
24868 jne first_nmi
24869-
24870+1:
24871 /*
24872 * Check the special variable on the stack to see if NMIs are
24873 * executing.
24874@@ -1512,8 +2037,7 @@ nested_nmi:
24875
24876 1:
24877 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24878- leaq -1*8(%rsp), %rdx
24879- movq %rdx, %rsp
24880+ subq $8, %rsp
24881 CFI_ADJUST_CFA_OFFSET 1*8
24882 leaq -10*8(%rsp), %rdx
24883 pushq_cfi $__KERNEL_DS
24884@@ -1531,6 +2055,7 @@ nested_nmi_out:
24885 CFI_RESTORE rdx
24886
24887 /* No need to check faults here */
24888+# pax_force_retaddr_bts
24889 INTERRUPT_RETURN
24890
24891 CFI_RESTORE_STATE
24892@@ -1627,13 +2152,13 @@ end_repeat_nmi:
24893 subq $ORIG_RAX-R15, %rsp
24894 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24895 /*
24896- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24897+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24898 * as we should not be calling schedule in NMI context.
24899 * Even with normal interrupts enabled. An NMI should not be
24900 * setting NEED_RESCHED or anything that normal interrupts and
24901 * exceptions might do.
24902 */
24903- call save_paranoid
24904+ call save_paranoid_nmi
24905 DEFAULT_FRAME 0
24906
24907 /*
24908@@ -1643,9 +2168,9 @@ end_repeat_nmi:
24909 * NMI itself takes a page fault, the page fault that was preempted
24910 * will read the information from the NMI page fault and not the
24911 * origin fault. Save it off and restore it if it changes.
24912- * Use the r12 callee-saved register.
24913+ * Use the r13 callee-saved register.
24914 */
24915- movq %cr2, %r12
24916+ movq %cr2, %r13
24917
24918 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24919 movq %rsp,%rdi
24920@@ -1654,29 +2179,34 @@ end_repeat_nmi:
24921
24922 /* Did the NMI take a page fault? Restore cr2 if it did */
24923 movq %cr2, %rcx
24924- cmpq %rcx, %r12
24925+ cmpq %rcx, %r13
24926 je 1f
24927- movq %r12, %cr2
24928+ movq %r13, %cr2
24929 1:
24930
24931- testl %ebx,%ebx /* swapgs needed? */
24932+ testl $1,%ebx /* swapgs needed? */
24933 jnz nmi_restore
24934 nmi_swapgs:
24935 SWAPGS_UNSAFE_STACK
24936 nmi_restore:
24937+ pax_exit_kernel_nmi
24938 /* Pop the extra iret frame at once */
24939 RESTORE_ALL 6*8
24940+ testb $3, 8(%rsp)
24941+ jnz 1f
24942+ pax_force_retaddr_bts
24943+1:
24944
24945 /* Clear the NMI executing stack variable */
24946 movq $0, 5*8(%rsp)
24947 jmp irq_return
24948 CFI_ENDPROC
24949-END(nmi)
24950+ENDPROC(nmi)
24951
24952 ENTRY(ignore_sysret)
24953 CFI_STARTPROC
24954 mov $-ENOSYS,%eax
24955 sysret
24956 CFI_ENDPROC
24957-END(ignore_sysret)
24958+ENDPROC(ignore_sysret)
24959
24960diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24961index 94d857f..bf1f0bf 100644
24962--- a/arch/x86/kernel/espfix_64.c
24963+++ b/arch/x86/kernel/espfix_64.c
24964@@ -197,7 +197,7 @@ void init_espfix_ap(void)
24965 set_pte(&pte_p[n*PTE_STRIDE], pte);
24966
24967 /* Job is done for this CPU and any CPU which shares this page */
24968- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24969+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24970
24971 unlock_done:
24972 mutex_unlock(&espfix_init_mutex);
24973diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24974index 3386dc9..28bdf81 100644
24975--- a/arch/x86/kernel/ftrace.c
24976+++ b/arch/x86/kernel/ftrace.c
24977@@ -88,7 +88,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24978 * kernel identity mapping to modify code.
24979 */
24980 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24981- ip = (unsigned long)__va(__pa_symbol(ip));
24982+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24983
24984 return ip;
24985 }
24986@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24987 {
24988 unsigned char replaced[MCOUNT_INSN_SIZE];
24989
24990+ ip = ktla_ktva(ip);
24991+
24992 /*
24993 * Note: Due to modules and __init, code can
24994 * disappear and change, we need to protect against faulting
24995@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
24996 unsigned char old[MCOUNT_INSN_SIZE];
24997 int ret;
24998
24999- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
25000+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
25001
25002 ftrace_update_func = ip;
25003 /* Make sure the breakpoints see the ftrace_update_func update */
25004@@ -310,7 +312,7 @@ static int add_break(unsigned long ip, const char *old)
25005 unsigned char replaced[MCOUNT_INSN_SIZE];
25006 unsigned char brk = BREAKPOINT_INSTRUCTION;
25007
25008- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
25009+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
25010 return -EFAULT;
25011
25012 /* Make sure it is what we expect it to be */
25013diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
25014index eda1a86..8f6df48 100644
25015--- a/arch/x86/kernel/head64.c
25016+++ b/arch/x86/kernel/head64.c
25017@@ -67,12 +67,12 @@ again:
25018 pgd = *pgd_p;
25019
25020 /*
25021- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
25022- * critical -- __PAGE_OFFSET would point us back into the dynamic
25023+ * The use of __early_va rather than __va here is critical:
25024+ * __va would point us back into the dynamic
25025 * range and we might end up looping forever...
25026 */
25027 if (pgd)
25028- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
25029+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
25030 else {
25031 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
25032 reset_early_page_tables();
25033@@ -82,13 +82,13 @@ again:
25034 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
25035 for (i = 0; i < PTRS_PER_PUD; i++)
25036 pud_p[i] = 0;
25037- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
25038+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
25039 }
25040 pud_p += pud_index(address);
25041 pud = *pud_p;
25042
25043 if (pud)
25044- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
25045+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
25046 else {
25047 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
25048 reset_early_page_tables();
25049@@ -98,7 +98,7 @@ again:
25050 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
25051 for (i = 0; i < PTRS_PER_PMD; i++)
25052 pmd_p[i] = 0;
25053- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
25054+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
25055 }
25056 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
25057 pmd_p[pmd_index(address)] = pmd;
25058@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
25059 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
25060 early_printk("Kernel alive\n");
25061
25062- clear_page(init_level4_pgt);
25063 /* set init_level4_pgt kernel high mapping*/
25064 init_level4_pgt[511] = early_level4_pgt[511];
25065
25066diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
25067index f36bd42..0ab4474 100644
25068--- a/arch/x86/kernel/head_32.S
25069+++ b/arch/x86/kernel/head_32.S
25070@@ -26,6 +26,12 @@
25071 /* Physical address */
25072 #define pa(X) ((X) - __PAGE_OFFSET)
25073
25074+#ifdef CONFIG_PAX_KERNEXEC
25075+#define ta(X) (X)
25076+#else
25077+#define ta(X) ((X) - __PAGE_OFFSET)
25078+#endif
25079+
25080 /*
25081 * References to members of the new_cpu_data structure.
25082 */
25083@@ -55,11 +61,7 @@
25084 * and small than max_low_pfn, otherwise will waste some page table entries
25085 */
25086
25087-#if PTRS_PER_PMD > 1
25088-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
25089-#else
25090-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
25091-#endif
25092+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
25093
25094 /* Number of possible pages in the lowmem region */
25095 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
25096@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
25097 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
25098
25099 /*
25100+ * Real beginning of normal "text" segment
25101+ */
25102+ENTRY(stext)
25103+ENTRY(_stext)
25104+
25105+/*
25106 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
25107 * %esi points to the real-mode code as a 32-bit pointer.
25108 * CS and DS must be 4 GB flat segments, but we don't depend on
25109@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
25110 * can.
25111 */
25112 __HEAD
25113+
25114+#ifdef CONFIG_PAX_KERNEXEC
25115+ jmp startup_32
25116+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
25117+.fill PAGE_SIZE-5,1,0xcc
25118+#endif
25119+
25120 ENTRY(startup_32)
25121 movl pa(stack_start),%ecx
25122
25123@@ -106,6 +121,59 @@ ENTRY(startup_32)
25124 2:
25125 leal -__PAGE_OFFSET(%ecx),%esp
25126
25127+#ifdef CONFIG_SMP
25128+ movl $pa(cpu_gdt_table),%edi
25129+ movl $__per_cpu_load,%eax
25130+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
25131+ rorl $16,%eax
25132+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
25133+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
25134+ movl $__per_cpu_end - 1,%eax
25135+ subl $__per_cpu_start,%eax
25136+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
25137+#endif
25138+
25139+#ifdef CONFIG_PAX_MEMORY_UDEREF
25140+ movl $NR_CPUS,%ecx
25141+ movl $pa(cpu_gdt_table),%edi
25142+1:
25143+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
25144+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
25145+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
25146+ addl $PAGE_SIZE_asm,%edi
25147+ loop 1b
25148+#endif
25149+
25150+#ifdef CONFIG_PAX_KERNEXEC
25151+ movl $pa(boot_gdt),%edi
25152+ movl $__LOAD_PHYSICAL_ADDR,%eax
25153+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
25154+ rorl $16,%eax
25155+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
25156+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
25157+ rorl $16,%eax
25158+
25159+ ljmp $(__BOOT_CS),$1f
25160+1:
25161+
25162+ movl $NR_CPUS,%ecx
25163+ movl $pa(cpu_gdt_table),%edi
25164+ addl $__PAGE_OFFSET,%eax
25165+1:
25166+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
25167+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
25168+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
25169+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
25170+ rorl $16,%eax
25171+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
25172+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
25173+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
25174+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
25175+ rorl $16,%eax
25176+ addl $PAGE_SIZE_asm,%edi
25177+ loop 1b
25178+#endif
25179+
25180 /*
25181 * Clear BSS first so that there are no surprises...
25182 */
25183@@ -201,8 +269,11 @@ ENTRY(startup_32)
25184 movl %eax, pa(max_pfn_mapped)
25185
25186 /* Do early initialization of the fixmap area */
25187- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
25188- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
25189+#ifdef CONFIG_COMPAT_VDSO
25190+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
25191+#else
25192+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
25193+#endif
25194 #else /* Not PAE */
25195
25196 page_pde_offset = (__PAGE_OFFSET >> 20);
25197@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
25198 movl %eax, pa(max_pfn_mapped)
25199
25200 /* Do early initialization of the fixmap area */
25201- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
25202- movl %eax,pa(initial_page_table+0xffc)
25203+#ifdef CONFIG_COMPAT_VDSO
25204+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
25205+#else
25206+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
25207+#endif
25208 #endif
25209
25210 #ifdef CONFIG_PARAVIRT
25211@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
25212 cmpl $num_subarch_entries, %eax
25213 jae bad_subarch
25214
25215- movl pa(subarch_entries)(,%eax,4), %eax
25216- subl $__PAGE_OFFSET, %eax
25217- jmp *%eax
25218+ jmp *pa(subarch_entries)(,%eax,4)
25219
25220 bad_subarch:
25221 WEAK(lguest_entry)
25222@@ -261,10 +333,10 @@ WEAK(xen_entry)
25223 __INITDATA
25224
25225 subarch_entries:
25226- .long default_entry /* normal x86/PC */
25227- .long lguest_entry /* lguest hypervisor */
25228- .long xen_entry /* Xen hypervisor */
25229- .long default_entry /* Moorestown MID */
25230+ .long ta(default_entry) /* normal x86/PC */
25231+ .long ta(lguest_entry) /* lguest hypervisor */
25232+ .long ta(xen_entry) /* Xen hypervisor */
25233+ .long ta(default_entry) /* Moorestown MID */
25234 num_subarch_entries = (. - subarch_entries) / 4
25235 .previous
25236 #else
25237@@ -354,6 +426,7 @@ default_entry:
25238 movl pa(mmu_cr4_features),%eax
25239 movl %eax,%cr4
25240
25241+#ifdef CONFIG_X86_PAE
25242 testb $X86_CR4_PAE, %al # check if PAE is enabled
25243 jz enable_paging
25244
25245@@ -382,6 +455,9 @@ default_entry:
25246 /* Make changes effective */
25247 wrmsr
25248
25249+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
25250+#endif
25251+
25252 enable_paging:
25253
25254 /*
25255@@ -449,14 +525,20 @@ is486:
25256 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
25257 movl %eax,%ss # after changing gdt.
25258
25259- movl $(__USER_DS),%eax # DS/ES contains default USER segment
25260+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
25261 movl %eax,%ds
25262 movl %eax,%es
25263
25264 movl $(__KERNEL_PERCPU), %eax
25265 movl %eax,%fs # set this cpu's percpu
25266
25267+#ifdef CONFIG_CC_STACKPROTECTOR
25268 movl $(__KERNEL_STACK_CANARY),%eax
25269+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
25270+ movl $(__USER_DS),%eax
25271+#else
25272+ xorl %eax,%eax
25273+#endif
25274 movl %eax,%gs
25275
25276 xorl %eax,%eax # Clear LDT
25277@@ -512,8 +594,11 @@ setup_once:
25278 * relocation. Manually set base address in stack canary
25279 * segment descriptor.
25280 */
25281- movl $gdt_page,%eax
25282+ movl $cpu_gdt_table,%eax
25283 movl $stack_canary,%ecx
25284+#ifdef CONFIG_SMP
25285+ addl $__per_cpu_load,%ecx
25286+#endif
25287 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
25288 shrl $16, %ecx
25289 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
25290@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
25291 cmpl $2,(%esp) # X86_TRAP_NMI
25292 je is_nmi # Ignore NMI
25293
25294- cmpl $2,%ss:early_recursion_flag
25295+ cmpl $1,%ss:early_recursion_flag
25296 je hlt_loop
25297 incl %ss:early_recursion_flag
25298
25299@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
25300 pushl (20+6*4)(%esp) /* trapno */
25301 pushl $fault_msg
25302 call printk
25303-#endif
25304 call dump_stack
25305+#endif
25306 hlt_loop:
25307 hlt
25308 jmp hlt_loop
25309@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
25310 /* This is the default interrupt "handler" :-) */
25311 ALIGN
25312 ignore_int:
25313- cld
25314 #ifdef CONFIG_PRINTK
25315+ cmpl $2,%ss:early_recursion_flag
25316+ je hlt_loop
25317+ incl %ss:early_recursion_flag
25318+ cld
25319 pushl %eax
25320 pushl %ecx
25321 pushl %edx
25322@@ -617,9 +705,6 @@ ignore_int:
25323 movl $(__KERNEL_DS),%eax
25324 movl %eax,%ds
25325 movl %eax,%es
25326- cmpl $2,early_recursion_flag
25327- je hlt_loop
25328- incl early_recursion_flag
25329 pushl 16(%esp)
25330 pushl 24(%esp)
25331 pushl 32(%esp)
25332@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
25333 /*
25334 * BSS section
25335 */
25336-__PAGE_ALIGNED_BSS
25337- .align PAGE_SIZE
25338 #ifdef CONFIG_X86_PAE
25339+.section .initial_pg_pmd,"a",@progbits
25340 initial_pg_pmd:
25341 .fill 1024*KPMDS,4,0
25342 #else
25343+.section .initial_page_table,"a",@progbits
25344 ENTRY(initial_page_table)
25345 .fill 1024,4,0
25346 #endif
25347+.section .initial_pg_fixmap,"a",@progbits
25348 initial_pg_fixmap:
25349 .fill 1024,4,0
25350+.section .empty_zero_page,"a",@progbits
25351 ENTRY(empty_zero_page)
25352 .fill 4096,1,0
25353+.section .swapper_pg_dir,"a",@progbits
25354 ENTRY(swapper_pg_dir)
25355+#ifdef CONFIG_X86_PAE
25356+ .fill 4,8,0
25357+#else
25358 .fill 1024,4,0
25359+#endif
25360
25361 /*
25362 * This starts the data section.
25363 */
25364 #ifdef CONFIG_X86_PAE
25365-__PAGE_ALIGNED_DATA
25366- /* Page-aligned for the benefit of paravirt? */
25367- .align PAGE_SIZE
25368+.section .initial_page_table,"a",@progbits
25369 ENTRY(initial_page_table)
25370 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
25371 # if KPMDS == 3
25372@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
25373 # error "Kernel PMDs should be 1, 2 or 3"
25374 # endif
25375 .align PAGE_SIZE /* needs to be page-sized too */
25376+
25377+#ifdef CONFIG_PAX_PER_CPU_PGD
25378+ENTRY(cpu_pgd)
25379+ .rept 2*NR_CPUS
25380+ .fill 4,8,0
25381+ .endr
25382+#endif
25383+
25384 #endif
25385
25386 .data
25387 .balign 4
25388 ENTRY(stack_start)
25389- .long init_thread_union+THREAD_SIZE
25390+ .long init_thread_union+THREAD_SIZE-8
25391
25392 __INITRODATA
25393 int_msg:
25394@@ -727,7 +825,7 @@ fault_msg:
25395 * segment size, and 32-bit linear address value:
25396 */
25397
25398- .data
25399+.section .rodata,"a",@progbits
25400 .globl boot_gdt_descr
25401 .globl idt_descr
25402
25403@@ -736,7 +834,7 @@ fault_msg:
25404 .word 0 # 32 bit align gdt_desc.address
25405 boot_gdt_descr:
25406 .word __BOOT_DS+7
25407- .long boot_gdt - __PAGE_OFFSET
25408+ .long pa(boot_gdt)
25409
25410 .word 0 # 32-bit align idt_desc.address
25411 idt_descr:
25412@@ -747,7 +845,7 @@ idt_descr:
25413 .word 0 # 32 bit align gdt_desc.address
25414 ENTRY(early_gdt_descr)
25415 .word GDT_ENTRIES*8-1
25416- .long gdt_page /* Overwritten for secondary CPUs */
25417+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
25418
25419 /*
25420 * The boot_gdt must mirror the equivalent in setup.S and is
25421@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
25422 .align L1_CACHE_BYTES
25423 ENTRY(boot_gdt)
25424 .fill GDT_ENTRY_BOOT_CS,8,0
25425- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
25426- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
25427+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
25428+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
25429+
25430+ .align PAGE_SIZE_asm
25431+ENTRY(cpu_gdt_table)
25432+ .rept NR_CPUS
25433+ .quad 0x0000000000000000 /* NULL descriptor */
25434+ .quad 0x0000000000000000 /* 0x0b reserved */
25435+ .quad 0x0000000000000000 /* 0x13 reserved */
25436+ .quad 0x0000000000000000 /* 0x1b reserved */
25437+
25438+#ifdef CONFIG_PAX_KERNEXEC
25439+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
25440+#else
25441+ .quad 0x0000000000000000 /* 0x20 unused */
25442+#endif
25443+
25444+ .quad 0x0000000000000000 /* 0x28 unused */
25445+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
25446+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
25447+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
25448+ .quad 0x0000000000000000 /* 0x4b reserved */
25449+ .quad 0x0000000000000000 /* 0x53 reserved */
25450+ .quad 0x0000000000000000 /* 0x5b reserved */
25451+
25452+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
25453+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
25454+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
25455+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
25456+
25457+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
25458+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
25459+
25460+ /*
25461+ * Segments used for calling PnP BIOS have byte granularity.
25462+ * The code segments and data segments have fixed 64k limits,
25463+ * the transfer segment sizes are set at run time.
25464+ */
25465+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
25466+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
25467+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
25468+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
25469+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
25470+
25471+ /*
25472+ * The APM segments have byte granularity and their bases
25473+ * are set at run time. All have 64k limits.
25474+ */
25475+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
25476+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
25477+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
25478+
25479+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
25480+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
25481+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
25482+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
25483+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
25484+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
25485+
25486+ /* Be sure this is zeroed to avoid false validations in Xen */
25487+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
25488+ .endr
25489diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
25490index a468c0a..8b5a879 100644
25491--- a/arch/x86/kernel/head_64.S
25492+++ b/arch/x86/kernel/head_64.S
25493@@ -20,6 +20,8 @@
25494 #include <asm/processor-flags.h>
25495 #include <asm/percpu.h>
25496 #include <asm/nops.h>
25497+#include <asm/cpufeature.h>
25498+#include <asm/alternative-asm.h>
25499
25500 #ifdef CONFIG_PARAVIRT
25501 #include <asm/asm-offsets.h>
25502@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
25503 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
25504 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
25505 L3_START_KERNEL = pud_index(__START_KERNEL_map)
25506+L4_VMALLOC_START = pgd_index(VMALLOC_START)
25507+L3_VMALLOC_START = pud_index(VMALLOC_START)
25508+L4_VMALLOC_END = pgd_index(VMALLOC_END)
25509+L3_VMALLOC_END = pud_index(VMALLOC_END)
25510+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
25511+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
25512
25513 .text
25514 __HEAD
25515@@ -89,11 +97,24 @@ startup_64:
25516 * Fixup the physical addresses in the page table
25517 */
25518 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
25519+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
25520+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
25521+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
25522+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
25523+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
25524
25525- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
25526- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
25527+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
25528+#ifndef CONFIG_XEN
25529+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
25530+#endif
25531+
25532+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
25533+
25534+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
25535+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
25536
25537 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
25538+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
25539
25540 /*
25541 * Set up the identity mapping for the switchover. These
25542@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
25543 * after the boot processor executes this code.
25544 */
25545
25546+ orq $-1, %rbp
25547 movq $(init_level4_pgt - __START_KERNEL_map), %rax
25548 1:
25549
25550- /* Enable PAE mode and PGE */
25551- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
25552+ /* Enable PAE mode and PSE/PGE */
25553+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
25554 movq %rcx, %cr4
25555
25556 /* Setup early boot stage 4 level pagetables. */
25557@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
25558 movl $MSR_EFER, %ecx
25559 rdmsr
25560 btsl $_EFER_SCE, %eax /* Enable System Call */
25561- btl $20,%edi /* No Execute supported? */
25562+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
25563 jnc 1f
25564 btsl $_EFER_NX, %eax
25565+ cmpq $-1, %rbp
25566+ je 1f
25567 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
25568+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
25569+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
25570+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
25571+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25572+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25573+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25574+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25575 1: wrmsr /* Make changes effective */
25576
25577 /* Setup cr0 */
25578@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
25579 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25580 * address given in m16:64.
25581 */
25582+ pax_set_fptr_mask
25583 movq initial_code(%rip),%rax
25584 pushq $0 # fake return address to stop unwinder
25585 pushq $__KERNEL_CS # set correct cs
25586@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
25587 .quad INIT_PER_CPU_VAR(irq_stack_union)
25588
25589 GLOBAL(stack_start)
25590- .quad init_thread_union+THREAD_SIZE-8
25591+ .quad init_thread_union+THREAD_SIZE-16
25592 .word 0
25593 __FINITDATA
25594
25595@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
25596 call dump_stack
25597 #ifdef CONFIG_KALLSYMS
25598 leaq early_idt_ripmsg(%rip),%rdi
25599- movq 40(%rsp),%rsi # %rip again
25600+ movq 88(%rsp),%rsi # %rip again
25601 call __print_symbol
25602 #endif
25603 #endif /* EARLY_PRINTK */
25604@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
25605 early_recursion_flag:
25606 .long 0
25607
25608+ .section .rodata,"a",@progbits
25609 #ifdef CONFIG_EARLY_PRINTK
25610 early_idt_msg:
25611 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25612@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
25613 NEXT_PAGE(early_dynamic_pgts)
25614 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25615
25616- .data
25617+ .section .rodata,"a",@progbits
25618
25619-#ifndef CONFIG_XEN
25620 NEXT_PAGE(init_level4_pgt)
25621- .fill 512,8,0
25622-#else
25623-NEXT_PAGE(init_level4_pgt)
25624- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25625 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25626 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25627+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25628+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25629+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25630+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25631+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25632+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25633 .org init_level4_pgt + L4_START_KERNEL*8, 0
25634 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25635 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25636
25637+#ifdef CONFIG_PAX_PER_CPU_PGD
25638+NEXT_PAGE(cpu_pgd)
25639+ .rept 2*NR_CPUS
25640+ .fill 512,8,0
25641+ .endr
25642+#endif
25643+
25644 NEXT_PAGE(level3_ident_pgt)
25645 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25646+#ifdef CONFIG_XEN
25647 .fill 511, 8, 0
25648+#else
25649+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25650+ .fill 510,8,0
25651+#endif
25652+
25653+NEXT_PAGE(level3_vmalloc_start_pgt)
25654+ .fill 512,8,0
25655+
25656+NEXT_PAGE(level3_vmalloc_end_pgt)
25657+ .fill 512,8,0
25658+
25659+NEXT_PAGE(level3_vmemmap_pgt)
25660+ .fill L3_VMEMMAP_START,8,0
25661+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25662+
25663 NEXT_PAGE(level2_ident_pgt)
25664- /* Since I easily can, map the first 1G.
25665+ /* Since I easily can, map the first 2G.
25666 * Don't set NX because code runs from these pages.
25667 */
25668- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25669-#endif
25670+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25671
25672 NEXT_PAGE(level3_kernel_pgt)
25673 .fill L3_START_KERNEL,8,0
25674@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25675 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25676 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25677
25678+NEXT_PAGE(level2_vmemmap_pgt)
25679+ .fill 512,8,0
25680+
25681 NEXT_PAGE(level2_kernel_pgt)
25682 /*
25683 * 512 MB kernel mapping. We spend a full page on this pagetable
25684@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25685 NEXT_PAGE(level2_fixmap_pgt)
25686 .fill 506,8,0
25687 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25688- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25689- .fill 5,8,0
25690+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25691+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25692+ .fill 4,8,0
25693
25694 NEXT_PAGE(level1_fixmap_pgt)
25695 .fill 512,8,0
25696
25697+NEXT_PAGE(level1_vsyscall_pgt)
25698+ .fill 512,8,0
25699+
25700 #undef PMDS
25701
25702- .data
25703+ .align PAGE_SIZE
25704+ENTRY(cpu_gdt_table)
25705+ .rept NR_CPUS
25706+ .quad 0x0000000000000000 /* NULL descriptor */
25707+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25708+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25709+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25710+ .quad 0x00cffb000000ffff /* __USER32_CS */
25711+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25712+ .quad 0x00affb000000ffff /* __USER_CS */
25713+
25714+#ifdef CONFIG_PAX_KERNEXEC
25715+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25716+#else
25717+ .quad 0x0 /* unused */
25718+#endif
25719+
25720+ .quad 0,0 /* TSS */
25721+ .quad 0,0 /* LDT */
25722+ .quad 0,0,0 /* three TLS descriptors */
25723+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25724+ /* asm/segment.h:GDT_ENTRIES must match this */
25725+
25726+#ifdef CONFIG_PAX_MEMORY_UDEREF
25727+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25728+#else
25729+ .quad 0x0 /* unused */
25730+#endif
25731+
25732+ /* zero the remaining page */
25733+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25734+ .endr
25735+
25736 .align 16
25737 .globl early_gdt_descr
25738 early_gdt_descr:
25739 .word GDT_ENTRIES*8-1
25740 early_gdt_descr_base:
25741- .quad INIT_PER_CPU_VAR(gdt_page)
25742+ .quad cpu_gdt_table
25743
25744 ENTRY(phys_base)
25745 /* This must match the first entry in level2_kernel_pgt */
25746 .quad 0x0000000000000000
25747
25748 #include "../../x86/xen/xen-head.S"
25749-
25750- __PAGE_ALIGNED_BSS
25751+
25752+ .section .rodata,"a",@progbits
25753 NEXT_PAGE(empty_zero_page)
25754 .skip PAGE_SIZE
25755diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25756index 05fd74f..c3548b1 100644
25757--- a/arch/x86/kernel/i386_ksyms_32.c
25758+++ b/arch/x86/kernel/i386_ksyms_32.c
25759@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25760 EXPORT_SYMBOL(cmpxchg8b_emu);
25761 #endif
25762
25763+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25764+
25765 /* Networking helper routines. */
25766 EXPORT_SYMBOL(csum_partial_copy_generic);
25767+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25768+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25769
25770 EXPORT_SYMBOL(__get_user_1);
25771 EXPORT_SYMBOL(__get_user_2);
25772@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25773 EXPORT_SYMBOL(___preempt_schedule_context);
25774 #endif
25775 #endif
25776+
25777+#ifdef CONFIG_PAX_KERNEXEC
25778+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25779+#endif
25780+
25781+#ifdef CONFIG_PAX_PER_CPU_PGD
25782+EXPORT_SYMBOL(cpu_pgd);
25783+#endif
25784diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25785index a9a4229..6f4d476 100644
25786--- a/arch/x86/kernel/i387.c
25787+++ b/arch/x86/kernel/i387.c
25788@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25789 static inline bool interrupted_user_mode(void)
25790 {
25791 struct pt_regs *regs = get_irq_regs();
25792- return regs && user_mode_vm(regs);
25793+ return regs && user_mode(regs);
25794 }
25795
25796 /*
25797diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25798index 8af8171..f8c1169 100644
25799--- a/arch/x86/kernel/i8259.c
25800+++ b/arch/x86/kernel/i8259.c
25801@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25802 static void make_8259A_irq(unsigned int irq)
25803 {
25804 disable_irq_nosync(irq);
25805- io_apic_irqs &= ~(1<<irq);
25806+ io_apic_irqs &= ~(1UL<<irq);
25807 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
25808 i8259A_chip.name);
25809 enable_irq(irq);
25810@@ -209,7 +209,7 @@ spurious_8259A_irq:
25811 "spurious 8259A interrupt: IRQ%d.\n", irq);
25812 spurious_irq_mask |= irqmask;
25813 }
25814- atomic_inc(&irq_err_count);
25815+ atomic_inc_unchecked(&irq_err_count);
25816 /*
25817 * Theoretically we do not have to handle this IRQ,
25818 * but in Linux this does not cause problems and is
25819@@ -350,14 +350,16 @@ static void init_8259A(int auto_eoi)
25820 /* (slave's support for AEOI in flat mode is to be investigated) */
25821 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25822
25823+ pax_open_kernel();
25824 if (auto_eoi)
25825 /*
25826 * In AEOI mode we just have to mask the interrupt
25827 * when acking.
25828 */
25829- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25830+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25831 else
25832- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25833+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25834+ pax_close_kernel();
25835
25836 udelay(100); /* wait for 8259A to initialize */
25837
25838diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25839index a979b5b..1d6db75 100644
25840--- a/arch/x86/kernel/io_delay.c
25841+++ b/arch/x86/kernel/io_delay.c
25842@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25843 * Quirk table for systems that misbehave (lock up, etc.) if port
25844 * 0x80 is used:
25845 */
25846-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25847+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25848 {
25849 .callback = dmi_io_delay_0xed_port,
25850 .ident = "Compaq Presario V6000",
25851diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25852index 4ddaf66..49d5c18 100644
25853--- a/arch/x86/kernel/ioport.c
25854+++ b/arch/x86/kernel/ioport.c
25855@@ -6,6 +6,7 @@
25856 #include <linux/sched.h>
25857 #include <linux/kernel.h>
25858 #include <linux/capability.h>
25859+#include <linux/security.h>
25860 #include <linux/errno.h>
25861 #include <linux/types.h>
25862 #include <linux/ioport.h>
25863@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25864 return -EINVAL;
25865 if (turn_on && !capable(CAP_SYS_RAWIO))
25866 return -EPERM;
25867+#ifdef CONFIG_GRKERNSEC_IO
25868+ if (turn_on && grsec_disable_privio) {
25869+ gr_handle_ioperm();
25870+ return -ENODEV;
25871+ }
25872+#endif
25873
25874 /*
25875 * If it's the first ioperm() call in this thread's lifetime, set the
25876@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25877 * because the ->io_bitmap_max value must match the bitmap
25878 * contents:
25879 */
25880- tss = &per_cpu(init_tss, get_cpu());
25881+ tss = init_tss + get_cpu();
25882
25883 if (turn_on)
25884 bitmap_clear(t->io_bitmap_ptr, from, num);
25885@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25886 if (level > old) {
25887 if (!capable(CAP_SYS_RAWIO))
25888 return -EPERM;
25889+#ifdef CONFIG_GRKERNSEC_IO
25890+ if (grsec_disable_privio) {
25891+ gr_handle_iopl();
25892+ return -ENODEV;
25893+ }
25894+#endif
25895 }
25896 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25897 t->iopl = level << 12;
25898diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25899index 922d285..6d20692 100644
25900--- a/arch/x86/kernel/irq.c
25901+++ b/arch/x86/kernel/irq.c
25902@@ -22,7 +22,7 @@
25903 #define CREATE_TRACE_POINTS
25904 #include <asm/trace/irq_vectors.h>
25905
25906-atomic_t irq_err_count;
25907+atomic_unchecked_t irq_err_count;
25908
25909 /* Function pointer for generic interrupt vector handling */
25910 void (*x86_platform_ipi_callback)(void) = NULL;
25911@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25912 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25913 seq_printf(p, " Hypervisor callback interrupts\n");
25914 #endif
25915- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25916+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25917 #if defined(CONFIG_X86_IO_APIC)
25918- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25919+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25920 #endif
25921 return 0;
25922 }
25923@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25924
25925 u64 arch_irq_stat(void)
25926 {
25927- u64 sum = atomic_read(&irq_err_count);
25928+ u64 sum = atomic_read_unchecked(&irq_err_count);
25929 return sum;
25930 }
25931
25932diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25933index 63ce838..2ea3e06 100644
25934--- a/arch/x86/kernel/irq_32.c
25935+++ b/arch/x86/kernel/irq_32.c
25936@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25937
25938 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25939
25940+extern void gr_handle_kernel_exploit(void);
25941+
25942 int sysctl_panic_on_stackoverflow __read_mostly;
25943
25944 /* Debugging check for stack overflow: is there less than 1KB free? */
25945@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25946 __asm__ __volatile__("andl %%esp,%0" :
25947 "=r" (sp) : "0" (THREAD_SIZE - 1));
25948
25949- return sp < (sizeof(struct thread_info) + STACK_WARN);
25950+ return sp < STACK_WARN;
25951 }
25952
25953 static void print_stack_overflow(void)
25954 {
25955 printk(KERN_WARNING "low stack detected by irq handler\n");
25956 dump_stack();
25957+ gr_handle_kernel_exploit();
25958 if (sysctl_panic_on_stackoverflow)
25959 panic("low stack detected by irq handler - check messages\n");
25960 }
25961@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25962 static inline int
25963 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25964 {
25965- struct irq_stack *curstk, *irqstk;
25966+ struct irq_stack *irqstk;
25967 u32 *isp, *prev_esp, arg1, arg2;
25968
25969- curstk = (struct irq_stack *) current_stack();
25970 irqstk = __this_cpu_read(hardirq_stack);
25971
25972 /*
25973@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25974 * handler) we can't do that and just have to keep using the
25975 * current stack (which is the irq stack already after all)
25976 */
25977- if (unlikely(curstk == irqstk))
25978+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25979 return 0;
25980
25981- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25982+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25983
25984 /* Save the next esp at the bottom of the stack */
25985 prev_esp = (u32 *)irqstk;
25986 *prev_esp = current_stack_pointer;
25987
25988+#ifdef CONFIG_PAX_MEMORY_UDEREF
25989+ __set_fs(MAKE_MM_SEG(0));
25990+#endif
25991+
25992 if (unlikely(overflow))
25993 call_on_stack(print_stack_overflow, isp);
25994
25995@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25996 : "0" (irq), "1" (desc), "2" (isp),
25997 "D" (desc->handle_irq)
25998 : "memory", "cc", "ecx");
25999+
26000+#ifdef CONFIG_PAX_MEMORY_UDEREF
26001+ __set_fs(current_thread_info()->addr_limit);
26002+#endif
26003+
26004 return 1;
26005 }
26006
26007@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
26008 */
26009 void irq_ctx_init(int cpu)
26010 {
26011- struct irq_stack *irqstk;
26012-
26013 if (per_cpu(hardirq_stack, cpu))
26014 return;
26015
26016- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
26017- THREADINFO_GFP,
26018- THREAD_SIZE_ORDER));
26019- per_cpu(hardirq_stack, cpu) = irqstk;
26020-
26021- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
26022- THREADINFO_GFP,
26023- THREAD_SIZE_ORDER));
26024- per_cpu(softirq_stack, cpu) = irqstk;
26025-
26026- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
26027- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
26028+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
26029+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
26030 }
26031
26032 void do_softirq_own_stack(void)
26033 {
26034- struct thread_info *curstk;
26035 struct irq_stack *irqstk;
26036 u32 *isp, *prev_esp;
26037
26038- curstk = current_stack();
26039 irqstk = __this_cpu_read(softirq_stack);
26040
26041 /* build the stack frame on the softirq stack */
26042@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
26043 prev_esp = (u32 *)irqstk;
26044 *prev_esp = current_stack_pointer;
26045
26046+#ifdef CONFIG_PAX_MEMORY_UDEREF
26047+ __set_fs(MAKE_MM_SEG(0));
26048+#endif
26049+
26050 call_on_stack(__do_softirq, isp);
26051+
26052+#ifdef CONFIG_PAX_MEMORY_UDEREF
26053+ __set_fs(current_thread_info()->addr_limit);
26054+#endif
26055+
26056 }
26057
26058 bool handle_irq(unsigned irq, struct pt_regs *regs)
26059@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
26060 if (unlikely(!desc))
26061 return false;
26062
26063- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
26064+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
26065 if (unlikely(overflow))
26066 print_stack_overflow();
26067 desc->handle_irq(irq, desc);
26068diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
26069index 4d1c746..55a22d6 100644
26070--- a/arch/x86/kernel/irq_64.c
26071+++ b/arch/x86/kernel/irq_64.c
26072@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
26073 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
26074 EXPORT_PER_CPU_SYMBOL(irq_regs);
26075
26076+extern void gr_handle_kernel_exploit(void);
26077+
26078 int sysctl_panic_on_stackoverflow;
26079
26080 /*
26081@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
26082 u64 estack_top, estack_bottom;
26083 u64 curbase = (u64)task_stack_page(current);
26084
26085- if (user_mode_vm(regs))
26086+ if (user_mode(regs))
26087 return;
26088
26089 if (regs->sp >= curbase + sizeof(struct thread_info) +
26090@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
26091 irq_stack_top, irq_stack_bottom,
26092 estack_top, estack_bottom);
26093
26094+ gr_handle_kernel_exploit();
26095+
26096 if (sysctl_panic_on_stackoverflow)
26097 panic("low stack detected by irq handler - check messages\n");
26098 #endif
26099diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
26100index 26d5a55..a01160a 100644
26101--- a/arch/x86/kernel/jump_label.c
26102+++ b/arch/x86/kernel/jump_label.c
26103@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
26104 * Jump label is enabled for the first time.
26105 * So we expect a default_nop...
26106 */
26107- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
26108+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
26109 != 0))
26110 bug_at((void *)entry->code, __LINE__);
26111 } else {
26112@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
26113 * ...otherwise expect an ideal_nop. Otherwise
26114 * something went horribly wrong.
26115 */
26116- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
26117+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
26118 != 0))
26119 bug_at((void *)entry->code, __LINE__);
26120 }
26121@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
26122 * are converting the default nop to the ideal nop.
26123 */
26124 if (init) {
26125- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
26126+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
26127 bug_at((void *)entry->code, __LINE__);
26128 } else {
26129 code.jump = 0xe9;
26130 code.offset = entry->target -
26131 (entry->code + JUMP_LABEL_NOP_SIZE);
26132- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
26133+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
26134 bug_at((void *)entry->code, __LINE__);
26135 }
26136 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
26137diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
26138index 7ec1d5f..5a7d130 100644
26139--- a/arch/x86/kernel/kgdb.c
26140+++ b/arch/x86/kernel/kgdb.c
26141@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
26142 #ifdef CONFIG_X86_32
26143 switch (regno) {
26144 case GDB_SS:
26145- if (!user_mode_vm(regs))
26146+ if (!user_mode(regs))
26147 *(unsigned long *)mem = __KERNEL_DS;
26148 break;
26149 case GDB_SP:
26150- if (!user_mode_vm(regs))
26151+ if (!user_mode(regs))
26152 *(unsigned long *)mem = kernel_stack_pointer(regs);
26153 break;
26154 case GDB_GS:
26155@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
26156 bp->attr.bp_addr = breakinfo[breakno].addr;
26157 bp->attr.bp_len = breakinfo[breakno].len;
26158 bp->attr.bp_type = breakinfo[breakno].type;
26159- info->address = breakinfo[breakno].addr;
26160+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
26161+ info->address = ktla_ktva(breakinfo[breakno].addr);
26162+ else
26163+ info->address = breakinfo[breakno].addr;
26164 info->len = breakinfo[breakno].len;
26165 info->type = breakinfo[breakno].type;
26166 val = arch_install_hw_breakpoint(bp);
26167@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
26168 case 'k':
26169 /* clear the trace bit */
26170 linux_regs->flags &= ~X86_EFLAGS_TF;
26171- atomic_set(&kgdb_cpu_doing_single_step, -1);
26172+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
26173
26174 /* set the trace bit if we're stepping */
26175 if (remcomInBuffer[0] == 's') {
26176 linux_regs->flags |= X86_EFLAGS_TF;
26177- atomic_set(&kgdb_cpu_doing_single_step,
26178+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
26179 raw_smp_processor_id());
26180 }
26181
26182@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
26183
26184 switch (cmd) {
26185 case DIE_DEBUG:
26186- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
26187+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
26188 if (user_mode(regs))
26189 return single_step_cont(regs, args);
26190 break;
26191@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
26192 #endif /* CONFIG_DEBUG_RODATA */
26193
26194 bpt->type = BP_BREAKPOINT;
26195- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
26196+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
26197 BREAK_INSTR_SIZE);
26198 if (err)
26199 return err;
26200- err = probe_kernel_write((char *)bpt->bpt_addr,
26201+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
26202 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
26203 #ifdef CONFIG_DEBUG_RODATA
26204 if (!err)
26205@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
26206 return -EBUSY;
26207 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
26208 BREAK_INSTR_SIZE);
26209- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
26210+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
26211 if (err)
26212 return err;
26213 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
26214@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
26215 if (mutex_is_locked(&text_mutex))
26216 goto knl_write;
26217 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
26218- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
26219+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
26220 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
26221 goto knl_write;
26222 return err;
26223 knl_write:
26224 #endif /* CONFIG_DEBUG_RODATA */
26225- return probe_kernel_write((char *)bpt->bpt_addr,
26226+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
26227 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
26228 }
26229
26230diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
26231index 67e6d19..731ed28 100644
26232--- a/arch/x86/kernel/kprobes/core.c
26233+++ b/arch/x86/kernel/kprobes/core.c
26234@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
26235 s32 raddr;
26236 } __packed *insn;
26237
26238- insn = (struct __arch_relative_insn *)from;
26239+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
26240+
26241+ pax_open_kernel();
26242 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
26243 insn->op = op;
26244+ pax_close_kernel();
26245 }
26246
26247 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
26248@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
26249 kprobe_opcode_t opcode;
26250 kprobe_opcode_t *orig_opcodes = opcodes;
26251
26252- if (search_exception_tables((unsigned long)opcodes))
26253+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
26254 return 0; /* Page fault may occur on this address. */
26255
26256 retry:
26257@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
26258 * for the first byte, we can recover the original instruction
26259 * from it and kp->opcode.
26260 */
26261- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
26262+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
26263 buf[0] = kp->opcode;
26264- return (unsigned long)buf;
26265+ return ktva_ktla((unsigned long)buf);
26266 }
26267
26268 /*
26269@@ -336,7 +339,9 @@ int __copy_instruction(u8 *dest, u8 *src)
26270 /* Another subsystem puts a breakpoint, failed to recover */
26271 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
26272 return 0;
26273+ pax_open_kernel();
26274 memcpy(dest, insn.kaddr, insn.length);
26275+ pax_close_kernel();
26276
26277 #ifdef CONFIG_X86_64
26278 if (insn_rip_relative(&insn)) {
26279@@ -363,7 +368,9 @@ int __copy_instruction(u8 *dest, u8 *src)
26280 return 0;
26281 }
26282 disp = (u8 *) dest + insn_offset_displacement(&insn);
26283+ pax_open_kernel();
26284 *(s32 *) disp = (s32) newdisp;
26285+ pax_close_kernel();
26286 }
26287 #endif
26288 return insn.length;
26289@@ -505,7 +512,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
26290 * nor set current_kprobe, because it doesn't use single
26291 * stepping.
26292 */
26293- regs->ip = (unsigned long)p->ainsn.insn;
26294+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
26295 preempt_enable_no_resched();
26296 return;
26297 }
26298@@ -522,9 +529,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
26299 regs->flags &= ~X86_EFLAGS_IF;
26300 /* single step inline if the instruction is an int3 */
26301 if (p->opcode == BREAKPOINT_INSTRUCTION)
26302- regs->ip = (unsigned long)p->addr;
26303+ regs->ip = ktla_ktva((unsigned long)p->addr);
26304 else
26305- regs->ip = (unsigned long)p->ainsn.insn;
26306+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
26307 }
26308 NOKPROBE_SYMBOL(setup_singlestep);
26309
26310@@ -574,7 +581,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
26311 struct kprobe *p;
26312 struct kprobe_ctlblk *kcb;
26313
26314- if (user_mode_vm(regs))
26315+ if (user_mode(regs))
26316 return 0;
26317
26318 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
26319@@ -609,7 +616,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
26320 setup_singlestep(p, regs, kcb, 0);
26321 return 1;
26322 }
26323- } else if (*addr != BREAKPOINT_INSTRUCTION) {
26324+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
26325 /*
26326 * The breakpoint instruction was removed right
26327 * after we hit it. Another cpu has removed
26328@@ -656,6 +663,9 @@ static void __used kretprobe_trampoline_holder(void)
26329 " movq %rax, 152(%rsp)\n"
26330 RESTORE_REGS_STRING
26331 " popfq\n"
26332+#ifdef KERNEXEC_PLUGIN
26333+ " btsq $63,(%rsp)\n"
26334+#endif
26335 #else
26336 " pushf\n"
26337 SAVE_REGS_STRING
26338@@ -796,7 +806,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
26339 struct kprobe_ctlblk *kcb)
26340 {
26341 unsigned long *tos = stack_addr(regs);
26342- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
26343+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
26344 unsigned long orig_ip = (unsigned long)p->addr;
26345 kprobe_opcode_t *insn = p->ainsn.insn;
26346
26347@@ -979,7 +989,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
26348 struct die_args *args = data;
26349 int ret = NOTIFY_DONE;
26350
26351- if (args->regs && user_mode_vm(args->regs))
26352+ if (args->regs && user_mode(args->regs))
26353 return ret;
26354
26355 if (val == DIE_GPF) {
26356diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
26357index f1314d0..15f3154 100644
26358--- a/arch/x86/kernel/kprobes/opt.c
26359+++ b/arch/x86/kernel/kprobes/opt.c
26360@@ -79,6 +79,7 @@ found:
26361 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
26362 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
26363 {
26364+ pax_open_kernel();
26365 #ifdef CONFIG_X86_64
26366 *addr++ = 0x48;
26367 *addr++ = 0xbf;
26368@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
26369 *addr++ = 0xb8;
26370 #endif
26371 *(unsigned long *)addr = val;
26372+ pax_close_kernel();
26373 }
26374
26375 asm (
26376@@ -337,7 +339,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
26377 * Verify if the address gap is in 2GB range, because this uses
26378 * a relative jump.
26379 */
26380- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
26381+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
26382 if (abs(rel) > 0x7fffffff) {
26383 __arch_remove_optimized_kprobe(op, 0);
26384 return -ERANGE;
26385@@ -354,16 +356,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
26386 op->optinsn.size = ret;
26387
26388 /* Copy arch-dep-instance from template */
26389- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
26390+ pax_open_kernel();
26391+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
26392+ pax_close_kernel();
26393
26394 /* Set probe information */
26395 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
26396
26397 /* Set probe function call */
26398- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
26399+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
26400
26401 /* Set returning jmp instruction at the tail of out-of-line buffer */
26402- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
26403+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
26404 (u8 *)op->kp.addr + op->optinsn.size);
26405
26406 flush_icache_range((unsigned long) buf,
26407@@ -388,7 +392,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
26408 WARN_ON(kprobe_disabled(&op->kp));
26409
26410 /* Backup instructions which will be replaced by jump address */
26411- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
26412+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
26413 RELATIVE_ADDR_SIZE);
26414
26415 insn_buf[0] = RELATIVEJUMP_OPCODE;
26416@@ -436,7 +440,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
26417 /* This kprobe is really able to run optimized path. */
26418 op = container_of(p, struct optimized_kprobe, kp);
26419 /* Detour through copied instructions */
26420- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
26421+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
26422 if (!reenter)
26423 reset_current_kprobe();
26424 preempt_enable_no_resched();
26425diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
26426index c2bedae..25e7ab60 100644
26427--- a/arch/x86/kernel/ksysfs.c
26428+++ b/arch/x86/kernel/ksysfs.c
26429@@ -184,7 +184,7 @@ out:
26430
26431 static struct kobj_attribute type_attr = __ATTR_RO(type);
26432
26433-static struct bin_attribute data_attr = {
26434+static bin_attribute_no_const data_attr __read_only = {
26435 .attr = {
26436 .name = "data",
26437 .mode = S_IRUGO,
26438diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
26439index c37886d..d851d32 100644
26440--- a/arch/x86/kernel/ldt.c
26441+++ b/arch/x86/kernel/ldt.c
26442@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
26443 if (reload) {
26444 #ifdef CONFIG_SMP
26445 preempt_disable();
26446- load_LDT(pc);
26447+ load_LDT_nolock(pc);
26448 if (!cpumask_equal(mm_cpumask(current->mm),
26449 cpumask_of(smp_processor_id())))
26450 smp_call_function(flush_ldt, current->mm, 1);
26451 preempt_enable();
26452 #else
26453- load_LDT(pc);
26454+ load_LDT_nolock(pc);
26455 #endif
26456 }
26457 if (oldsize) {
26458@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
26459 return err;
26460
26461 for (i = 0; i < old->size; i++)
26462- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
26463+ write_ldt_entry(new->ldt, i, old->ldt + i);
26464 return 0;
26465 }
26466
26467@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
26468 retval = copy_ldt(&mm->context, &old_mm->context);
26469 mutex_unlock(&old_mm->context.lock);
26470 }
26471+
26472+ if (tsk == current) {
26473+ mm->context.vdso = 0;
26474+
26475+#ifdef CONFIG_X86_32
26476+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26477+ mm->context.user_cs_base = 0UL;
26478+ mm->context.user_cs_limit = ~0UL;
26479+
26480+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
26481+ cpus_clear(mm->context.cpu_user_cs_mask);
26482+#endif
26483+
26484+#endif
26485+#endif
26486+
26487+ }
26488+
26489 return retval;
26490 }
26491
26492@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
26493 }
26494 }
26495
26496+#ifdef CONFIG_PAX_SEGMEXEC
26497+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
26498+ error = -EINVAL;
26499+ goto out_unlock;
26500+ }
26501+#endif
26502+
26503 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
26504 error = -EINVAL;
26505 goto out_unlock;
26506diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
26507index 1667b1d..16492c5 100644
26508--- a/arch/x86/kernel/machine_kexec_32.c
26509+++ b/arch/x86/kernel/machine_kexec_32.c
26510@@ -25,7 +25,7 @@
26511 #include <asm/cacheflush.h>
26512 #include <asm/debugreg.h>
26513
26514-static void set_idt(void *newidt, __u16 limit)
26515+static void set_idt(struct desc_struct *newidt, __u16 limit)
26516 {
26517 struct desc_ptr curidt;
26518
26519@@ -37,7 +37,7 @@ static void set_idt(void *newidt, __u16 limit)
26520 }
26521
26522
26523-static void set_gdt(void *newgdt, __u16 limit)
26524+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
26525 {
26526 struct desc_ptr curgdt;
26527
26528@@ -215,7 +215,7 @@ void machine_kexec(struct kimage *image)
26529 }
26530
26531 control_page = page_address(image->control_code_page);
26532- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
26533+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
26534
26535 relocate_kernel_ptr = control_page;
26536 page_list[PA_CONTROL_PAGE] = __pa(control_page);
26537diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
26538index c73aecf..4c63630 100644
26539--- a/arch/x86/kernel/mcount_64.S
26540+++ b/arch/x86/kernel/mcount_64.S
26541@@ -7,7 +7,7 @@
26542 #include <linux/linkage.h>
26543 #include <asm/ptrace.h>
26544 #include <asm/ftrace.h>
26545-
26546+#include <asm/alternative-asm.h>
26547
26548 .code64
26549 .section .entry.text, "ax"
26550@@ -24,8 +24,9 @@
26551 #ifdef CONFIG_DYNAMIC_FTRACE
26552
26553 ENTRY(function_hook)
26554+ pax_force_retaddr
26555 retq
26556-END(function_hook)
26557+ENDPROC(function_hook)
26558
26559 /* skip is set if stack has been adjusted */
26560 .macro ftrace_caller_setup skip=0
26561@@ -62,8 +63,9 @@ GLOBAL(ftrace_graph_call)
26562 #endif
26563
26564 GLOBAL(ftrace_stub)
26565+ pax_force_retaddr
26566 retq
26567-END(ftrace_caller)
26568+ENDPROC(ftrace_caller)
26569
26570 ENTRY(ftrace_regs_caller)
26571 /* Save the current flags before compare (in SS location)*/
26572@@ -127,7 +129,7 @@ GLOBAL(ftrace_regs_call)
26573 popfq
26574 jmp ftrace_stub
26575
26576-END(ftrace_regs_caller)
26577+ENDPROC(ftrace_regs_caller)
26578
26579
26580 #else /* ! CONFIG_DYNAMIC_FTRACE */
26581@@ -145,6 +147,7 @@ ENTRY(function_hook)
26582 #endif
26583
26584 GLOBAL(ftrace_stub)
26585+ pax_force_retaddr
26586 retq
26587
26588 trace:
26589@@ -158,12 +161,13 @@ trace:
26590 #endif
26591 subq $MCOUNT_INSN_SIZE, %rdi
26592
26593+ pax_force_fptr ftrace_trace_function
26594 call *ftrace_trace_function
26595
26596 MCOUNT_RESTORE_FRAME
26597
26598 jmp ftrace_stub
26599-END(function_hook)
26600+ENDPROC(function_hook)
26601 #endif /* CONFIG_DYNAMIC_FTRACE */
26602 #endif /* CONFIG_FUNCTION_TRACER */
26603
26604@@ -185,8 +189,9 @@ ENTRY(ftrace_graph_caller)
26605
26606 MCOUNT_RESTORE_FRAME
26607
26608+ pax_force_retaddr
26609 retq
26610-END(ftrace_graph_caller)
26611+ENDPROC(ftrace_graph_caller)
26612
26613 GLOBAL(return_to_handler)
26614 subq $24, %rsp
26615@@ -202,5 +207,7 @@ GLOBAL(return_to_handler)
26616 movq 8(%rsp), %rdx
26617 movq (%rsp), %rax
26618 addq $24, %rsp
26619+ pax_force_fptr %rdi
26620 jmp *%rdi
26621+ENDPROC(return_to_handler)
26622 #endif
26623diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26624index e69f988..da078ea 100644
26625--- a/arch/x86/kernel/module.c
26626+++ b/arch/x86/kernel/module.c
26627@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
26628 }
26629 #endif
26630
26631-void *module_alloc(unsigned long size)
26632+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26633 {
26634- if (PAGE_ALIGN(size) > MODULES_LEN)
26635+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26636 return NULL;
26637 return __vmalloc_node_range(size, 1,
26638 MODULES_VADDR + get_module_load_offset(),
26639- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26640- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
26641+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26642+ prot, NUMA_NO_NODE,
26643 __builtin_return_address(0));
26644 }
26645
26646+void *module_alloc(unsigned long size)
26647+{
26648+
26649+#ifdef CONFIG_PAX_KERNEXEC
26650+ return __module_alloc(size, PAGE_KERNEL);
26651+#else
26652+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26653+#endif
26654+
26655+}
26656+
26657+#ifdef CONFIG_PAX_KERNEXEC
26658+#ifdef CONFIG_X86_32
26659+void *module_alloc_exec(unsigned long size)
26660+{
26661+ struct vm_struct *area;
26662+
26663+ if (size == 0)
26664+ return NULL;
26665+
26666+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26667+return area ? area->addr : NULL;
26668+}
26669+EXPORT_SYMBOL(module_alloc_exec);
26670+
26671+void module_free_exec(struct module *mod, void *module_region)
26672+{
26673+ vunmap(module_region);
26674+}
26675+EXPORT_SYMBOL(module_free_exec);
26676+#else
26677+void module_free_exec(struct module *mod, void *module_region)
26678+{
26679+ module_free(mod, module_region);
26680+}
26681+EXPORT_SYMBOL(module_free_exec);
26682+
26683+void *module_alloc_exec(unsigned long size)
26684+{
26685+ return __module_alloc(size, PAGE_KERNEL_RX);
26686+}
26687+EXPORT_SYMBOL(module_alloc_exec);
26688+#endif
26689+#endif
26690+
26691 #ifdef CONFIG_X86_32
26692 int apply_relocate(Elf32_Shdr *sechdrs,
26693 const char *strtab,
26694@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26695 unsigned int i;
26696 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26697 Elf32_Sym *sym;
26698- uint32_t *location;
26699+ uint32_t *plocation, location;
26700
26701 DEBUGP("Applying relocate section %u to %u\n",
26702 relsec, sechdrs[relsec].sh_info);
26703 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26704 /* This is where to make the change */
26705- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26706- + rel[i].r_offset;
26707+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26708+ location = (uint32_t)plocation;
26709+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26710+ plocation = ktla_ktva((void *)plocation);
26711 /* This is the symbol it is referring to. Note that all
26712 undefined symbols have been resolved. */
26713 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26714@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26715 switch (ELF32_R_TYPE(rel[i].r_info)) {
26716 case R_386_32:
26717 /* We add the value into the location given */
26718- *location += sym->st_value;
26719+ pax_open_kernel();
26720+ *plocation += sym->st_value;
26721+ pax_close_kernel();
26722 break;
26723 case R_386_PC32:
26724 /* Add the value, subtract its position */
26725- *location += sym->st_value - (uint32_t)location;
26726+ pax_open_kernel();
26727+ *plocation += sym->st_value - location;
26728+ pax_close_kernel();
26729 break;
26730 default:
26731 pr_err("%s: Unknown relocation: %u\n",
26732@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26733 case R_X86_64_NONE:
26734 break;
26735 case R_X86_64_64:
26736+ pax_open_kernel();
26737 *(u64 *)loc = val;
26738+ pax_close_kernel();
26739 break;
26740 case R_X86_64_32:
26741+ pax_open_kernel();
26742 *(u32 *)loc = val;
26743+ pax_close_kernel();
26744 if (val != *(u32 *)loc)
26745 goto overflow;
26746 break;
26747 case R_X86_64_32S:
26748+ pax_open_kernel();
26749 *(s32 *)loc = val;
26750+ pax_close_kernel();
26751 if ((s64)val != *(s32 *)loc)
26752 goto overflow;
26753 break;
26754 case R_X86_64_PC32:
26755 val -= (u64)loc;
26756+ pax_open_kernel();
26757 *(u32 *)loc = val;
26758+ pax_close_kernel();
26759+
26760 #if 0
26761 if ((s64)val != *(s32 *)loc)
26762 goto overflow;
26763diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26764index c9603ac..9f88728 100644
26765--- a/arch/x86/kernel/msr.c
26766+++ b/arch/x86/kernel/msr.c
26767@@ -37,6 +37,7 @@
26768 #include <linux/notifier.h>
26769 #include <linux/uaccess.h>
26770 #include <linux/gfp.h>
26771+#include <linux/grsecurity.h>
26772
26773 #include <asm/processor.h>
26774 #include <asm/msr.h>
26775@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26776 int err = 0;
26777 ssize_t bytes = 0;
26778
26779+#ifdef CONFIG_GRKERNSEC_KMEM
26780+ gr_handle_msr_write();
26781+ return -EPERM;
26782+#endif
26783+
26784 if (count % 8)
26785 return -EINVAL; /* Invalid chunk size */
26786
26787@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26788 err = -EBADF;
26789 break;
26790 }
26791+#ifdef CONFIG_GRKERNSEC_KMEM
26792+ gr_handle_msr_write();
26793+ return -EPERM;
26794+#endif
26795 if (copy_from_user(&regs, uregs, sizeof regs)) {
26796 err = -EFAULT;
26797 break;
26798@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26799 return notifier_from_errno(err);
26800 }
26801
26802-static struct notifier_block __refdata msr_class_cpu_notifier = {
26803+static struct notifier_block msr_class_cpu_notifier = {
26804 .notifier_call = msr_class_cpu_callback,
26805 };
26806
26807diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26808index c3e985d..110a36a 100644
26809--- a/arch/x86/kernel/nmi.c
26810+++ b/arch/x86/kernel/nmi.c
26811@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26812
26813 static void nmi_max_handler(struct irq_work *w)
26814 {
26815- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26816+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26817 int remainder_ns, decimal_msecs;
26818- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26819+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26820
26821 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26822 decimal_msecs = remainder_ns / 1000;
26823
26824 printk_ratelimited(KERN_INFO
26825 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26826- a->handler, whole_msecs, decimal_msecs);
26827+ n->action->handler, whole_msecs, decimal_msecs);
26828 }
26829
26830 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26831@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26832 delta = sched_clock() - delta;
26833 trace_nmi_handler(a->handler, (int)delta, thishandled);
26834
26835- if (delta < nmi_longest_ns || delta < a->max_duration)
26836+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26837 continue;
26838
26839- a->max_duration = delta;
26840- irq_work_queue(&a->irq_work);
26841+ a->work->max_duration = delta;
26842+ irq_work_queue(&a->work->irq_work);
26843 }
26844
26845 rcu_read_unlock();
26846@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26847 }
26848 NOKPROBE_SYMBOL(nmi_handle);
26849
26850-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26851+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26852 {
26853 struct nmi_desc *desc = nmi_to_desc(type);
26854 unsigned long flags;
26855@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26856 if (!action->handler)
26857 return -EINVAL;
26858
26859- init_irq_work(&action->irq_work, nmi_max_handler);
26860+ action->work->action = action;
26861+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26862
26863 spin_lock_irqsave(&desc->lock, flags);
26864
26865@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26866 * event confuses some handlers (kdump uses this flag)
26867 */
26868 if (action->flags & NMI_FLAG_FIRST)
26869- list_add_rcu(&action->list, &desc->head);
26870+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26871 else
26872- list_add_tail_rcu(&action->list, &desc->head);
26873+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26874
26875 spin_unlock_irqrestore(&desc->lock, flags);
26876 return 0;
26877@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26878 if (!strcmp(n->name, name)) {
26879 WARN(in_nmi(),
26880 "Trying to free NMI (%s) from NMI context!\n", n->name);
26881- list_del_rcu(&n->list);
26882+ pax_list_del_rcu((struct list_head *)&n->list);
26883 break;
26884 }
26885 }
26886@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26887 dotraplinkage notrace void
26888 do_nmi(struct pt_regs *regs, long error_code)
26889 {
26890+
26891+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26892+ if (!user_mode(regs)) {
26893+ unsigned long cs = regs->cs & 0xFFFF;
26894+ unsigned long ip = ktva_ktla(regs->ip);
26895+
26896+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26897+ regs->ip = ip;
26898+ }
26899+#endif
26900+
26901 nmi_nesting_preprocess(regs);
26902
26903 nmi_enter();
26904diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26905index 6d9582e..f746287 100644
26906--- a/arch/x86/kernel/nmi_selftest.c
26907+++ b/arch/x86/kernel/nmi_selftest.c
26908@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26909 {
26910 /* trap all the unknown NMIs we may generate */
26911 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26912- __initdata);
26913+ __initconst);
26914 }
26915
26916 static void __init cleanup_nmi_testsuite(void)
26917@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26918 unsigned long timeout;
26919
26920 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26921- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26922+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26923 nmi_fail = FAILURE;
26924 return;
26925 }
26926diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26927index bbb6c73..24a58ef 100644
26928--- a/arch/x86/kernel/paravirt-spinlocks.c
26929+++ b/arch/x86/kernel/paravirt-spinlocks.c
26930@@ -8,7 +8,7 @@
26931
26932 #include <asm/paravirt.h>
26933
26934-struct pv_lock_ops pv_lock_ops = {
26935+struct pv_lock_ops pv_lock_ops __read_only = {
26936 #ifdef CONFIG_SMP
26937 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26938 .unlock_kick = paravirt_nop,
26939diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26940index 548d25f..f8fb99c 100644
26941--- a/arch/x86/kernel/paravirt.c
26942+++ b/arch/x86/kernel/paravirt.c
26943@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26944 {
26945 return x;
26946 }
26947+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26948+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26949+#endif
26950
26951 void __init default_banner(void)
26952 {
26953@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26954
26955 if (opfunc == NULL)
26956 /* If there's no function, patch it with a ud2a (BUG) */
26957- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26958- else if (opfunc == _paravirt_nop)
26959+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26960+ else if (opfunc == (void *)_paravirt_nop)
26961 /* If the operation is a nop, then nop the callsite */
26962 ret = paravirt_patch_nop();
26963
26964 /* identity functions just return their single argument */
26965- else if (opfunc == _paravirt_ident_32)
26966+ else if (opfunc == (void *)_paravirt_ident_32)
26967 ret = paravirt_patch_ident_32(insnbuf, len);
26968- else if (opfunc == _paravirt_ident_64)
26969+ else if (opfunc == (void *)_paravirt_ident_64)
26970 ret = paravirt_patch_ident_64(insnbuf, len);
26971+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26972+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26973+ ret = paravirt_patch_ident_64(insnbuf, len);
26974+#endif
26975
26976 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26977 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26978@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26979 if (insn_len > len || start == NULL)
26980 insn_len = len;
26981 else
26982- memcpy(insnbuf, start, insn_len);
26983+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26984
26985 return insn_len;
26986 }
26987@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26988 return this_cpu_read(paravirt_lazy_mode);
26989 }
26990
26991-struct pv_info pv_info = {
26992+struct pv_info pv_info __read_only = {
26993 .name = "bare hardware",
26994 .paravirt_enabled = 0,
26995 .kernel_rpl = 0,
26996@@ -311,16 +318,16 @@ struct pv_info pv_info = {
26997 #endif
26998 };
26999
27000-struct pv_init_ops pv_init_ops = {
27001+struct pv_init_ops pv_init_ops __read_only = {
27002 .patch = native_patch,
27003 };
27004
27005-struct pv_time_ops pv_time_ops = {
27006+struct pv_time_ops pv_time_ops __read_only = {
27007 .sched_clock = native_sched_clock,
27008 .steal_clock = native_steal_clock,
27009 };
27010
27011-__visible struct pv_irq_ops pv_irq_ops = {
27012+__visible struct pv_irq_ops pv_irq_ops __read_only = {
27013 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
27014 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
27015 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
27016@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
27017 #endif
27018 };
27019
27020-__visible struct pv_cpu_ops pv_cpu_ops = {
27021+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
27022 .cpuid = native_cpuid,
27023 .get_debugreg = native_get_debugreg,
27024 .set_debugreg = native_set_debugreg,
27025@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
27026 NOKPROBE_SYMBOL(native_set_debugreg);
27027 NOKPROBE_SYMBOL(native_load_idt);
27028
27029-struct pv_apic_ops pv_apic_ops = {
27030+struct pv_apic_ops pv_apic_ops __read_only= {
27031 #ifdef CONFIG_X86_LOCAL_APIC
27032 .startup_ipi_hook = paravirt_nop,
27033 #endif
27034 };
27035
27036-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
27037+#ifdef CONFIG_X86_32
27038+#ifdef CONFIG_X86_PAE
27039+/* 64-bit pagetable entries */
27040+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
27041+#else
27042 /* 32-bit pagetable entries */
27043 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
27044+#endif
27045 #else
27046 /* 64-bit pagetable entries */
27047 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
27048 #endif
27049
27050-struct pv_mmu_ops pv_mmu_ops = {
27051+struct pv_mmu_ops pv_mmu_ops __read_only = {
27052
27053 .read_cr2 = native_read_cr2,
27054 .write_cr2 = native_write_cr2,
27055@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
27056 .make_pud = PTE_IDENT,
27057
27058 .set_pgd = native_set_pgd,
27059+ .set_pgd_batched = native_set_pgd_batched,
27060 #endif
27061 #endif /* PAGETABLE_LEVELS >= 3 */
27062
27063@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
27064 },
27065
27066 .set_fixmap = native_set_fixmap,
27067+
27068+#ifdef CONFIG_PAX_KERNEXEC
27069+ .pax_open_kernel = native_pax_open_kernel,
27070+ .pax_close_kernel = native_pax_close_kernel,
27071+#endif
27072+
27073 };
27074
27075 EXPORT_SYMBOL_GPL(pv_time_ops);
27076diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
27077index 0497f71..7186c0d 100644
27078--- a/arch/x86/kernel/pci-calgary_64.c
27079+++ b/arch/x86/kernel/pci-calgary_64.c
27080@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
27081 tce_space = be64_to_cpu(readq(target));
27082 tce_space = tce_space & TAR_SW_BITS;
27083
27084- tce_space = tce_space & (~specified_table_size);
27085+ tce_space = tce_space & (~(unsigned long)specified_table_size);
27086 info->tce_space = (u64 *)__va(tce_space);
27087 }
27088 }
27089diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
27090index 35ccf75..7a15747 100644
27091--- a/arch/x86/kernel/pci-iommu_table.c
27092+++ b/arch/x86/kernel/pci-iommu_table.c
27093@@ -2,7 +2,7 @@
27094 #include <asm/iommu_table.h>
27095 #include <linux/string.h>
27096 #include <linux/kallsyms.h>
27097-
27098+#include <linux/sched.h>
27099
27100 #define DEBUG 1
27101
27102diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
27103index 77dd0ad..9ec4723 100644
27104--- a/arch/x86/kernel/pci-swiotlb.c
27105+++ b/arch/x86/kernel/pci-swiotlb.c
27106@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
27107 struct dma_attrs *attrs)
27108 {
27109 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
27110- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
27111+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
27112 else
27113 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
27114 }
27115diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
27116index ca7f0d5..8996469 100644
27117--- a/arch/x86/kernel/preempt.S
27118+++ b/arch/x86/kernel/preempt.S
27119@@ -3,12 +3,14 @@
27120 #include <asm/dwarf2.h>
27121 #include <asm/asm.h>
27122 #include <asm/calling.h>
27123+#include <asm/alternative-asm.h>
27124
27125 ENTRY(___preempt_schedule)
27126 CFI_STARTPROC
27127 SAVE_ALL
27128 call preempt_schedule
27129 RESTORE_ALL
27130+ pax_force_retaddr
27131 ret
27132 CFI_ENDPROC
27133
27134@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
27135 SAVE_ALL
27136 call preempt_schedule_context
27137 RESTORE_ALL
27138+ pax_force_retaddr
27139 ret
27140 CFI_ENDPROC
27141
27142diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
27143index f804dc9..7c62095 100644
27144--- a/arch/x86/kernel/process.c
27145+++ b/arch/x86/kernel/process.c
27146@@ -36,7 +36,8 @@
27147 * section. Since TSS's are completely CPU-local, we want them
27148 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
27149 */
27150-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
27151+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
27152+EXPORT_SYMBOL(init_tss);
27153
27154 #ifdef CONFIG_X86_64
27155 static DEFINE_PER_CPU(unsigned char, is_idle);
27156@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
27157 task_xstate_cachep =
27158 kmem_cache_create("task_xstate", xstate_size,
27159 __alignof__(union thread_xstate),
27160- SLAB_PANIC | SLAB_NOTRACK, NULL);
27161+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
27162 setup_xstate_comp();
27163 }
27164
27165@@ -106,7 +107,7 @@ void exit_thread(void)
27166 unsigned long *bp = t->io_bitmap_ptr;
27167
27168 if (bp) {
27169- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
27170+ struct tss_struct *tss = init_tss + get_cpu();
27171
27172 t->io_bitmap_ptr = NULL;
27173 clear_thread_flag(TIF_IO_BITMAP);
27174@@ -126,6 +127,9 @@ void flush_thread(void)
27175 {
27176 struct task_struct *tsk = current;
27177
27178+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
27179+ loadsegment(gs, 0);
27180+#endif
27181 flush_ptrace_hw_breakpoint(tsk);
27182 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
27183 drop_init_fpu(tsk);
27184@@ -272,7 +276,7 @@ static void __exit_idle(void)
27185 void exit_idle(void)
27186 {
27187 /* idle loop has pid 0 */
27188- if (current->pid)
27189+ if (task_pid_nr(current))
27190 return;
27191 __exit_idle();
27192 }
27193@@ -325,7 +329,7 @@ bool xen_set_default_idle(void)
27194 return ret;
27195 }
27196 #endif
27197-void stop_this_cpu(void *dummy)
27198+__noreturn void stop_this_cpu(void *dummy)
27199 {
27200 local_irq_disable();
27201 /*
27202@@ -454,16 +458,37 @@ static int __init idle_setup(char *str)
27203 }
27204 early_param("idle", idle_setup);
27205
27206-unsigned long arch_align_stack(unsigned long sp)
27207+#ifdef CONFIG_PAX_RANDKSTACK
27208+void pax_randomize_kstack(struct pt_regs *regs)
27209 {
27210- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
27211- sp -= get_random_int() % 8192;
27212- return sp & ~0xf;
27213-}
27214+ struct thread_struct *thread = &current->thread;
27215+ unsigned long time;
27216
27217-unsigned long arch_randomize_brk(struct mm_struct *mm)
27218-{
27219- unsigned long range_end = mm->brk + 0x02000000;
27220- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
27221-}
27222+ if (!randomize_va_space)
27223+ return;
27224+
27225+ if (v8086_mode(regs))
27226+ return;
27227
27228+ rdtscl(time);
27229+
27230+ /* P4 seems to return a 0 LSB, ignore it */
27231+#ifdef CONFIG_MPENTIUM4
27232+ time &= 0x3EUL;
27233+ time <<= 2;
27234+#elif defined(CONFIG_X86_64)
27235+ time &= 0xFUL;
27236+ time <<= 4;
27237+#else
27238+ time &= 0x1FUL;
27239+ time <<= 3;
27240+#endif
27241+
27242+ thread->sp0 ^= time;
27243+ load_sp0(init_tss + smp_processor_id(), thread);
27244+
27245+#ifdef CONFIG_X86_64
27246+ this_cpu_write(kernel_stack, thread->sp0);
27247+#endif
27248+}
27249+#endif
27250diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
27251index 7bc86bb..0ea06e8 100644
27252--- a/arch/x86/kernel/process_32.c
27253+++ b/arch/x86/kernel/process_32.c
27254@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
27255 unsigned long thread_saved_pc(struct task_struct *tsk)
27256 {
27257 return ((unsigned long *)tsk->thread.sp)[3];
27258+//XXX return tsk->thread.eip;
27259 }
27260
27261 void __show_regs(struct pt_regs *regs, int all)
27262@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
27263 unsigned long sp;
27264 unsigned short ss, gs;
27265
27266- if (user_mode_vm(regs)) {
27267+ if (user_mode(regs)) {
27268 sp = regs->sp;
27269 ss = regs->ss & 0xffff;
27270- gs = get_user_gs(regs);
27271 } else {
27272 sp = kernel_stack_pointer(regs);
27273 savesegment(ss, ss);
27274- savesegment(gs, gs);
27275 }
27276+ gs = get_user_gs(regs);
27277
27278 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
27279 (u16)regs->cs, regs->ip, regs->flags,
27280- smp_processor_id());
27281+ raw_smp_processor_id());
27282 print_symbol("EIP is at %s\n", regs->ip);
27283
27284 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
27285@@ -132,20 +132,21 @@ void release_thread(struct task_struct *dead_task)
27286 int copy_thread(unsigned long clone_flags, unsigned long sp,
27287 unsigned long arg, struct task_struct *p)
27288 {
27289- struct pt_regs *childregs = task_pt_regs(p);
27290+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
27291 struct task_struct *tsk;
27292 int err;
27293
27294 p->thread.sp = (unsigned long) childregs;
27295 p->thread.sp0 = (unsigned long) (childregs+1);
27296+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
27297
27298 if (unlikely(p->flags & PF_KTHREAD)) {
27299 /* kernel thread */
27300 memset(childregs, 0, sizeof(struct pt_regs));
27301 p->thread.ip = (unsigned long) ret_from_kernel_thread;
27302- task_user_gs(p) = __KERNEL_STACK_CANARY;
27303- childregs->ds = __USER_DS;
27304- childregs->es = __USER_DS;
27305+ savesegment(gs, childregs->gs);
27306+ childregs->ds = __KERNEL_DS;
27307+ childregs->es = __KERNEL_DS;
27308 childregs->fs = __KERNEL_PERCPU;
27309 childregs->bx = sp; /* function */
27310 childregs->bp = arg;
27311@@ -252,7 +253,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27312 struct thread_struct *prev = &prev_p->thread,
27313 *next = &next_p->thread;
27314 int cpu = smp_processor_id();
27315- struct tss_struct *tss = &per_cpu(init_tss, cpu);
27316+ struct tss_struct *tss = init_tss + cpu;
27317 fpu_switch_t fpu;
27318
27319 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
27320@@ -276,6 +277,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27321 */
27322 lazy_save_gs(prev->gs);
27323
27324+#ifdef CONFIG_PAX_MEMORY_UDEREF
27325+ __set_fs(task_thread_info(next_p)->addr_limit);
27326+#endif
27327+
27328 /*
27329 * Load the per-thread Thread-Local Storage descriptor.
27330 */
27331@@ -314,9 +319,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27332 */
27333 arch_end_context_switch(next_p);
27334
27335- this_cpu_write(kernel_stack,
27336- (unsigned long)task_stack_page(next_p) +
27337- THREAD_SIZE - KERNEL_STACK_OFFSET);
27338+ this_cpu_write(current_task, next_p);
27339+ this_cpu_write(current_tinfo, &next_p->tinfo);
27340+ this_cpu_write(kernel_stack, next->sp0);
27341
27342 /*
27343 * Restore %gs if needed (which is common)
27344@@ -326,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27345
27346 switch_fpu_finish(next_p, fpu);
27347
27348- this_cpu_write(current_task, next_p);
27349-
27350 return prev_p;
27351 }
27352
27353@@ -357,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
27354 } while (count++ < 16);
27355 return 0;
27356 }
27357-
27358diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
27359index ca5b02d..c0b2f6a 100644
27360--- a/arch/x86/kernel/process_64.c
27361+++ b/arch/x86/kernel/process_64.c
27362@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
27363 struct pt_regs *childregs;
27364 struct task_struct *me = current;
27365
27366- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
27367+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
27368 childregs = task_pt_regs(p);
27369 p->thread.sp = (unsigned long) childregs;
27370 p->thread.usersp = me->thread.usersp;
27371+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
27372 set_tsk_thread_flag(p, TIF_FORK);
27373 p->thread.fpu_counter = 0;
27374 p->thread.io_bitmap_ptr = NULL;
27375@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
27376 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
27377 savesegment(es, p->thread.es);
27378 savesegment(ds, p->thread.ds);
27379+ savesegment(ss, p->thread.ss);
27380+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
27381 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
27382
27383 if (unlikely(p->flags & PF_KTHREAD)) {
27384@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27385 struct thread_struct *prev = &prev_p->thread;
27386 struct thread_struct *next = &next_p->thread;
27387 int cpu = smp_processor_id();
27388- struct tss_struct *tss = &per_cpu(init_tss, cpu);
27389+ struct tss_struct *tss = init_tss + cpu;
27390 unsigned fsindex, gsindex;
27391 fpu_switch_t fpu;
27392
27393@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27394 if (unlikely(next->ds | prev->ds))
27395 loadsegment(ds, next->ds);
27396
27397+ savesegment(ss, prev->ss);
27398+ if (unlikely(next->ss != prev->ss))
27399+ loadsegment(ss, next->ss);
27400
27401 /* We must save %fs and %gs before load_TLS() because
27402 * %fs and %gs may be cleared by load_TLS().
27403@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27404 prev->usersp = this_cpu_read(old_rsp);
27405 this_cpu_write(old_rsp, next->usersp);
27406 this_cpu_write(current_task, next_p);
27407+ this_cpu_write(current_tinfo, &next_p->tinfo);
27408
27409 /*
27410 * If it were not for PREEMPT_ACTIVE we could guarantee that the
27411@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27412 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
27413 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
27414
27415- this_cpu_write(kernel_stack,
27416- (unsigned long)task_stack_page(next_p) +
27417- THREAD_SIZE - KERNEL_STACK_OFFSET);
27418+ this_cpu_write(kernel_stack, next->sp0);
27419
27420 /*
27421 * Now maybe reload the debug registers and handle I/O bitmaps
27422@@ -443,12 +448,11 @@ unsigned long get_wchan(struct task_struct *p)
27423 if (!p || p == current || p->state == TASK_RUNNING)
27424 return 0;
27425 stack = (unsigned long)task_stack_page(p);
27426- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
27427+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
27428 return 0;
27429 fp = *(u64 *)(p->thread.sp);
27430 do {
27431- if (fp < (unsigned long)stack ||
27432- fp >= (unsigned long)stack+THREAD_SIZE)
27433+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
27434 return 0;
27435 ip = *(u64 *)(fp+8);
27436 if (!in_sched_functions(ip))
27437diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
27438index b1a5dfa..ed94526 100644
27439--- a/arch/x86/kernel/ptrace.c
27440+++ b/arch/x86/kernel/ptrace.c
27441@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
27442 unsigned long sp = (unsigned long)&regs->sp;
27443 u32 *prev_esp;
27444
27445- if (context == (sp & ~(THREAD_SIZE - 1)))
27446+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
27447 return sp;
27448
27449- prev_esp = (u32 *)(context);
27450+ prev_esp = *(u32 **)(context);
27451 if (prev_esp)
27452 return (unsigned long)prev_esp;
27453
27454@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
27455 if (child->thread.gs != value)
27456 return do_arch_prctl(child, ARCH_SET_GS, value);
27457 return 0;
27458+
27459+ case offsetof(struct user_regs_struct,ip):
27460+ /*
27461+ * Protect against any attempt to set ip to an
27462+ * impossible address. There are dragons lurking if the
27463+ * address is noncanonical. (This explicitly allows
27464+ * setting ip to TASK_SIZE_MAX, because user code can do
27465+ * that all by itself by running off the end of its
27466+ * address space.
27467+ */
27468+ if (value > TASK_SIZE_MAX)
27469+ return -EIO;
27470+ break;
27471+
27472 #endif
27473 }
27474
27475@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
27476 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
27477 {
27478 int i;
27479- int dr7 = 0;
27480+ unsigned long dr7 = 0;
27481 struct arch_hw_breakpoint *info;
27482
27483 for (i = 0; i < HBP_NUM; i++) {
27484@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
27485 unsigned long addr, unsigned long data)
27486 {
27487 int ret;
27488- unsigned long __user *datap = (unsigned long __user *)data;
27489+ unsigned long __user *datap = (__force unsigned long __user *)data;
27490
27491 switch (request) {
27492 /* read the word at location addr in the USER area. */
27493@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
27494 if ((int) addr < 0)
27495 return -EIO;
27496 ret = do_get_thread_area(child, addr,
27497- (struct user_desc __user *)data);
27498+ (__force struct user_desc __user *) data);
27499 break;
27500
27501 case PTRACE_SET_THREAD_AREA:
27502 if ((int) addr < 0)
27503 return -EIO;
27504 ret = do_set_thread_area(child, addr,
27505- (struct user_desc __user *)data, 0);
27506+ (__force struct user_desc __user *) data, 0);
27507 break;
27508 #endif
27509
27510@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
27511
27512 #ifdef CONFIG_X86_64
27513
27514-static struct user_regset x86_64_regsets[] __read_mostly = {
27515+static user_regset_no_const x86_64_regsets[] __read_only = {
27516 [REGSET_GENERAL] = {
27517 .core_note_type = NT_PRSTATUS,
27518 .n = sizeof(struct user_regs_struct) / sizeof(long),
27519@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
27520 #endif /* CONFIG_X86_64 */
27521
27522 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
27523-static struct user_regset x86_32_regsets[] __read_mostly = {
27524+static user_regset_no_const x86_32_regsets[] __read_only = {
27525 [REGSET_GENERAL] = {
27526 .core_note_type = NT_PRSTATUS,
27527 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
27528@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
27529 */
27530 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
27531
27532-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27533+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27534 {
27535 #ifdef CONFIG_X86_64
27536 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
27537@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
27538 memset(info, 0, sizeof(*info));
27539 info->si_signo = SIGTRAP;
27540 info->si_code = si_code;
27541- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
27542+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
27543 }
27544
27545 void user_single_step_siginfo(struct task_struct *tsk,
27546@@ -1441,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
27547 force_sig_info(SIGTRAP, &info, tsk);
27548 }
27549
27550+#ifdef CONFIG_GRKERNSEC_SETXID
27551+extern void gr_delayed_cred_worker(void);
27552+#endif
27553+
27554 /*
27555 * We must return the syscall number to actually look up in the table.
27556 * This can be -1L to skip running any syscall at all.
27557@@ -1451,6 +1469,11 @@ long syscall_trace_enter(struct pt_regs *regs)
27558
27559 user_exit();
27560
27561+#ifdef CONFIG_GRKERNSEC_SETXID
27562+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27563+ gr_delayed_cred_worker();
27564+#endif
27565+
27566 /*
27567 * If we stepped into a sysenter/syscall insn, it trapped in
27568 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
27569@@ -1506,6 +1529,11 @@ void syscall_trace_leave(struct pt_regs *regs)
27570 */
27571 user_exit();
27572
27573+#ifdef CONFIG_GRKERNSEC_SETXID
27574+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27575+ gr_delayed_cred_worker();
27576+#endif
27577+
27578 audit_syscall_exit(regs);
27579
27580 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
27581diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
27582index 2f355d2..e75ed0a 100644
27583--- a/arch/x86/kernel/pvclock.c
27584+++ b/arch/x86/kernel/pvclock.c
27585@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
27586 reset_hung_task_detector();
27587 }
27588
27589-static atomic64_t last_value = ATOMIC64_INIT(0);
27590+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
27591
27592 void pvclock_resume(void)
27593 {
27594- atomic64_set(&last_value, 0);
27595+ atomic64_set_unchecked(&last_value, 0);
27596 }
27597
27598 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27599@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27600 * updating at the same time, and one of them could be slightly behind,
27601 * making the assumption that last_value always go forward fail to hold.
27602 */
27603- last = atomic64_read(&last_value);
27604+ last = atomic64_read_unchecked(&last_value);
27605 do {
27606 if (ret < last)
27607 return last;
27608- last = atomic64_cmpxchg(&last_value, last, ret);
27609+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27610 } while (unlikely(last != ret));
27611
27612 return ret;
27613diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27614index 17962e6..47f55db 100644
27615--- a/arch/x86/kernel/reboot.c
27616+++ b/arch/x86/kernel/reboot.c
27617@@ -69,6 +69,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27618
27619 void __noreturn machine_real_restart(unsigned int type)
27620 {
27621+
27622+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27623+ struct desc_struct *gdt;
27624+#endif
27625+
27626 local_irq_disable();
27627
27628 /*
27629@@ -96,7 +101,29 @@ void __noreturn machine_real_restart(unsigned int type)
27630
27631 /* Jump to the identity-mapped low memory code */
27632 #ifdef CONFIG_X86_32
27633- asm volatile("jmpl *%0" : :
27634+
27635+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27636+ gdt = get_cpu_gdt_table(smp_processor_id());
27637+ pax_open_kernel();
27638+#ifdef CONFIG_PAX_MEMORY_UDEREF
27639+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27640+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27641+ loadsegment(ds, __KERNEL_DS);
27642+ loadsegment(es, __KERNEL_DS);
27643+ loadsegment(ss, __KERNEL_DS);
27644+#endif
27645+#ifdef CONFIG_PAX_KERNEXEC
27646+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27647+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27648+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27649+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27650+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27651+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27652+#endif
27653+ pax_close_kernel();
27654+#endif
27655+
27656+ asm volatile("ljmpl *%0" : :
27657 "rm" (real_mode_header->machine_real_restart_asm),
27658 "a" (type));
27659 #else
27660@@ -500,7 +527,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27661 * This means that this function can never return, it can misbehave
27662 * by not rebooting properly and hanging.
27663 */
27664-static void native_machine_emergency_restart(void)
27665+static void __noreturn native_machine_emergency_restart(void)
27666 {
27667 int i;
27668 int attempt = 0;
27669@@ -620,13 +647,13 @@ void native_machine_shutdown(void)
27670 #endif
27671 }
27672
27673-static void __machine_emergency_restart(int emergency)
27674+static void __noreturn __machine_emergency_restart(int emergency)
27675 {
27676 reboot_emergency = emergency;
27677 machine_ops.emergency_restart();
27678 }
27679
27680-static void native_machine_restart(char *__unused)
27681+static void __noreturn native_machine_restart(char *__unused)
27682 {
27683 pr_notice("machine restart\n");
27684
27685@@ -635,7 +662,7 @@ static void native_machine_restart(char *__unused)
27686 __machine_emergency_restart(0);
27687 }
27688
27689-static void native_machine_halt(void)
27690+static void __noreturn native_machine_halt(void)
27691 {
27692 /* Stop other cpus and apics */
27693 machine_shutdown();
27694@@ -645,7 +672,7 @@ static void native_machine_halt(void)
27695 stop_this_cpu(NULL);
27696 }
27697
27698-static void native_machine_power_off(void)
27699+static void __noreturn native_machine_power_off(void)
27700 {
27701 if (pm_power_off) {
27702 if (!reboot_force)
27703@@ -654,9 +681,10 @@ static void native_machine_power_off(void)
27704 }
27705 /* A fallback in case there is no PM info available */
27706 tboot_shutdown(TB_SHUTDOWN_HALT);
27707+ unreachable();
27708 }
27709
27710-struct machine_ops machine_ops = {
27711+struct machine_ops machine_ops __read_only = {
27712 .power_off = native_machine_power_off,
27713 .shutdown = native_machine_shutdown,
27714 .emergency_restart = native_machine_emergency_restart,
27715diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27716index c8e41e9..64049ef 100644
27717--- a/arch/x86/kernel/reboot_fixups_32.c
27718+++ b/arch/x86/kernel/reboot_fixups_32.c
27719@@ -57,7 +57,7 @@ struct device_fixup {
27720 unsigned int vendor;
27721 unsigned int device;
27722 void (*reboot_fixup)(struct pci_dev *);
27723-};
27724+} __do_const;
27725
27726 /*
27727 * PCI ids solely used for fixups_table go here
27728diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27729index 3fd2c69..a444264 100644
27730--- a/arch/x86/kernel/relocate_kernel_64.S
27731+++ b/arch/x86/kernel/relocate_kernel_64.S
27732@@ -96,8 +96,7 @@ relocate_kernel:
27733
27734 /* jump to identity mapped page */
27735 addq $(identity_mapped - relocate_kernel), %r8
27736- pushq %r8
27737- ret
27738+ jmp *%r8
27739
27740 identity_mapped:
27741 /* set return address to 0 if not preserving context */
27742diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27743index 41ead8d..7ccde23 100644
27744--- a/arch/x86/kernel/setup.c
27745+++ b/arch/x86/kernel/setup.c
27746@@ -110,6 +110,7 @@
27747 #include <asm/mce.h>
27748 #include <asm/alternative.h>
27749 #include <asm/prom.h>
27750+#include <asm/boot.h>
27751
27752 /*
27753 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27754@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27755 #endif
27756
27757
27758-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27759-__visible unsigned long mmu_cr4_features;
27760+#ifdef CONFIG_X86_64
27761+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27762+#elif defined(CONFIG_X86_PAE)
27763+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27764 #else
27765-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27766+__visible unsigned long mmu_cr4_features __read_only;
27767 #endif
27768
27769+void set_in_cr4(unsigned long mask)
27770+{
27771+ unsigned long cr4 = read_cr4();
27772+
27773+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27774+ return;
27775+
27776+ pax_open_kernel();
27777+ mmu_cr4_features |= mask;
27778+ pax_close_kernel();
27779+
27780+ if (trampoline_cr4_features)
27781+ *trampoline_cr4_features = mmu_cr4_features;
27782+ cr4 |= mask;
27783+ write_cr4(cr4);
27784+}
27785+EXPORT_SYMBOL(set_in_cr4);
27786+
27787+void clear_in_cr4(unsigned long mask)
27788+{
27789+ unsigned long cr4 = read_cr4();
27790+
27791+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27792+ return;
27793+
27794+ pax_open_kernel();
27795+ mmu_cr4_features &= ~mask;
27796+ pax_close_kernel();
27797+
27798+ if (trampoline_cr4_features)
27799+ *trampoline_cr4_features = mmu_cr4_features;
27800+ cr4 &= ~mask;
27801+ write_cr4(cr4);
27802+}
27803+EXPORT_SYMBOL(clear_in_cr4);
27804+
27805 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27806 int bootloader_type, bootloader_version;
27807
27808@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27809 * area (640->1Mb) as ram even though it is not.
27810 * take them out.
27811 */
27812- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27813+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27814
27815 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27816 }
27817@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27818 /* called before trim_bios_range() to spare extra sanitize */
27819 static void __init e820_add_kernel_range(void)
27820 {
27821- u64 start = __pa_symbol(_text);
27822+ u64 start = __pa_symbol(ktla_ktva(_text));
27823 u64 size = __pa_symbol(_end) - start;
27824
27825 /*
27826@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27827
27828 void __init setup_arch(char **cmdline_p)
27829 {
27830+#ifdef CONFIG_X86_32
27831+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27832+#else
27833 memblock_reserve(__pa_symbol(_text),
27834 (unsigned long)__bss_stop - (unsigned long)_text);
27835+#endif
27836
27837 early_reserve_initrd();
27838
27839@@ -946,14 +989,14 @@ void __init setup_arch(char **cmdline_p)
27840
27841 if (!boot_params.hdr.root_flags)
27842 root_mountflags &= ~MS_RDONLY;
27843- init_mm.start_code = (unsigned long) _text;
27844- init_mm.end_code = (unsigned long) _etext;
27845+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27846+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27847 init_mm.end_data = (unsigned long) _edata;
27848 init_mm.brk = _brk_end;
27849
27850- code_resource.start = __pa_symbol(_text);
27851- code_resource.end = __pa_symbol(_etext)-1;
27852- data_resource.start = __pa_symbol(_etext);
27853+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27854+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27855+ data_resource.start = __pa_symbol(_sdata);
27856 data_resource.end = __pa_symbol(_edata)-1;
27857 bss_resource.start = __pa_symbol(__bss_start);
27858 bss_resource.end = __pa_symbol(__bss_stop)-1;
27859diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27860index 5cdff03..80fa283 100644
27861--- a/arch/x86/kernel/setup_percpu.c
27862+++ b/arch/x86/kernel/setup_percpu.c
27863@@ -21,19 +21,17 @@
27864 #include <asm/cpu.h>
27865 #include <asm/stackprotector.h>
27866
27867-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27868+#ifdef CONFIG_SMP
27869+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27870 EXPORT_PER_CPU_SYMBOL(cpu_number);
27871+#endif
27872
27873-#ifdef CONFIG_X86_64
27874 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27875-#else
27876-#define BOOT_PERCPU_OFFSET 0
27877-#endif
27878
27879 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27880 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27881
27882-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27883+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27884 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27885 };
27886 EXPORT_SYMBOL(__per_cpu_offset);
27887@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27888 {
27889 #ifdef CONFIG_NEED_MULTIPLE_NODES
27890 pg_data_t *last = NULL;
27891- unsigned int cpu;
27892+ int cpu;
27893
27894 for_each_possible_cpu(cpu) {
27895 int node = early_cpu_to_node(cpu);
27896@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27897 {
27898 #ifdef CONFIG_X86_32
27899 struct desc_struct gdt;
27900+ unsigned long base = per_cpu_offset(cpu);
27901
27902- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27903- 0x2 | DESCTYPE_S, 0x8);
27904- gdt.s = 1;
27905+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27906+ 0x83 | DESCTYPE_S, 0xC);
27907 write_gdt_entry(get_cpu_gdt_table(cpu),
27908 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27909 #endif
27910@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27911 /* alrighty, percpu areas up and running */
27912 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27913 for_each_possible_cpu(cpu) {
27914+#ifdef CONFIG_CC_STACKPROTECTOR
27915+#ifdef CONFIG_X86_32
27916+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27917+#endif
27918+#endif
27919 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27920 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27921 per_cpu(cpu_number, cpu) = cpu;
27922@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27923 */
27924 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27925 #endif
27926+#ifdef CONFIG_CC_STACKPROTECTOR
27927+#ifdef CONFIG_X86_32
27928+ if (!cpu)
27929+ per_cpu(stack_canary.canary, cpu) = canary;
27930+#endif
27931+#endif
27932 /*
27933 * Up to this point, the boot CPU has been using .init.data
27934 * area. Reload any changed state for the boot CPU.
27935diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27936index ed37a76..39f936e 100644
27937--- a/arch/x86/kernel/signal.c
27938+++ b/arch/x86/kernel/signal.c
27939@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27940 * Align the stack pointer according to the i386 ABI,
27941 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27942 */
27943- sp = ((sp + 4) & -16ul) - 4;
27944+ sp = ((sp - 12) & -16ul) - 4;
27945 #else /* !CONFIG_X86_32 */
27946 sp = round_down(sp, 16) - 8;
27947 #endif
27948@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27949 }
27950
27951 if (current->mm->context.vdso)
27952- restorer = current->mm->context.vdso +
27953- selected_vdso32->sym___kernel_sigreturn;
27954+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27955 else
27956- restorer = &frame->retcode;
27957+ restorer = (void __user *)&frame->retcode;
27958 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27959 restorer = ksig->ka.sa.sa_restorer;
27960
27961@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27962 * reasons and because gdb uses it as a signature to notice
27963 * signal handler stack frames.
27964 */
27965- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27966+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27967
27968 if (err)
27969 return -EFAULT;
27970@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27971 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27972
27973 /* Set up to return from userspace. */
27974- restorer = current->mm->context.vdso +
27975- selected_vdso32->sym___kernel_rt_sigreturn;
27976+ if (current->mm->context.vdso)
27977+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27978+ else
27979+ restorer = (void __user *)&frame->retcode;
27980 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27981 restorer = ksig->ka.sa.sa_restorer;
27982 put_user_ex(restorer, &frame->pretcode);
27983@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27984 * reasons and because gdb uses it as a signature to notice
27985 * signal handler stack frames.
27986 */
27987- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27988+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27989 } put_user_catch(err);
27990
27991 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
27992@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
27993 {
27994 int usig = signr_convert(ksig->sig);
27995 sigset_t *set = sigmask_to_save();
27996- compat_sigset_t *cset = (compat_sigset_t *) set;
27997+ sigset_t sigcopy;
27998+ compat_sigset_t *cset;
27999+
28000+ sigcopy = *set;
28001+
28002+ cset = (compat_sigset_t *) &sigcopy;
28003
28004 /* Set up the stack frame */
28005 if (is_ia32_frame()) {
28006@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
28007 } else if (is_x32_frame()) {
28008 return x32_setup_rt_frame(ksig, cset, regs);
28009 } else {
28010- return __setup_rt_frame(ksig->sig, ksig, set, regs);
28011+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
28012 }
28013 }
28014
28015diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
28016index be8e1bd..a3d93fa 100644
28017--- a/arch/x86/kernel/smp.c
28018+++ b/arch/x86/kernel/smp.c
28019@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
28020
28021 __setup("nonmi_ipi", nonmi_ipi_setup);
28022
28023-struct smp_ops smp_ops = {
28024+struct smp_ops smp_ops __read_only = {
28025 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
28026 .smp_prepare_cpus = native_smp_prepare_cpus,
28027 .smp_cpus_done = native_smp_cpus_done,
28028diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
28029index 42a2dca..35a07aa 100644
28030--- a/arch/x86/kernel/smpboot.c
28031+++ b/arch/x86/kernel/smpboot.c
28032@@ -226,14 +226,17 @@ static void notrace start_secondary(void *unused)
28033
28034 enable_start_cpu0 = 0;
28035
28036-#ifdef CONFIG_X86_32
28037+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
28038+ barrier();
28039+
28040 /* switch away from the initial page table */
28041+#ifdef CONFIG_PAX_PER_CPU_PGD
28042+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
28043+#else
28044 load_cr3(swapper_pg_dir);
28045+#endif
28046 __flush_tlb_all();
28047-#endif
28048
28049- /* otherwise gcc will move up smp_processor_id before the cpu_init */
28050- barrier();
28051 /*
28052 * Check TSC synchronization with the BP:
28053 */
28054@@ -760,8 +763,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
28055 alternatives_enable_smp();
28056
28057 idle->thread.sp = (unsigned long) (((struct pt_regs *)
28058- (THREAD_SIZE + task_stack_page(idle))) - 1);
28059+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
28060 per_cpu(current_task, cpu) = idle;
28061+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
28062
28063 #ifdef CONFIG_X86_32
28064 /* Stack for startup_32 can be just as for start_secondary onwards */
28065@@ -770,10 +774,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
28066 clear_tsk_thread_flag(idle, TIF_FORK);
28067 initial_gs = per_cpu_offset(cpu);
28068 #endif
28069- per_cpu(kernel_stack, cpu) =
28070- (unsigned long)task_stack_page(idle) -
28071- KERNEL_STACK_OFFSET + THREAD_SIZE;
28072+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
28073+ pax_open_kernel();
28074 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
28075+ pax_close_kernel();
28076 initial_code = (unsigned long)start_secondary;
28077 stack_start = idle->thread.sp;
28078
28079@@ -919,6 +923,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
28080 /* the FPU context is blank, nobody can own it */
28081 __cpu_disable_lazy_restore(cpu);
28082
28083+#ifdef CONFIG_PAX_PER_CPU_PGD
28084+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
28085+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28086+ KERNEL_PGD_PTRS);
28087+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
28088+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28089+ KERNEL_PGD_PTRS);
28090+#endif
28091+
28092 err = do_boot_cpu(apicid, cpu, tidle);
28093 if (err) {
28094 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
28095diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
28096index 9b4d51d..5d28b58 100644
28097--- a/arch/x86/kernel/step.c
28098+++ b/arch/x86/kernel/step.c
28099@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
28100 struct desc_struct *desc;
28101 unsigned long base;
28102
28103- seg &= ~7UL;
28104+ seg >>= 3;
28105
28106 mutex_lock(&child->mm->context.lock);
28107- if (unlikely((seg >> 3) >= child->mm->context.size))
28108+ if (unlikely(seg >= child->mm->context.size))
28109 addr = -1L; /* bogus selector, access would fault */
28110 else {
28111 desc = child->mm->context.ldt + seg;
28112@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
28113 addr += base;
28114 }
28115 mutex_unlock(&child->mm->context.lock);
28116- }
28117+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
28118+ addr = ktla_ktva(addr);
28119
28120 return addr;
28121 }
28122@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
28123 unsigned char opcode[15];
28124 unsigned long addr = convert_ip_to_linear(child, regs);
28125
28126+ if (addr == -EINVAL)
28127+ return 0;
28128+
28129 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
28130 for (i = 0; i < copied; i++) {
28131 switch (opcode[i]) {
28132diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
28133new file mode 100644
28134index 0000000..5877189
28135--- /dev/null
28136+++ b/arch/x86/kernel/sys_i386_32.c
28137@@ -0,0 +1,189 @@
28138+/*
28139+ * This file contains various random system calls that
28140+ * have a non-standard calling sequence on the Linux/i386
28141+ * platform.
28142+ */
28143+
28144+#include <linux/errno.h>
28145+#include <linux/sched.h>
28146+#include <linux/mm.h>
28147+#include <linux/fs.h>
28148+#include <linux/smp.h>
28149+#include <linux/sem.h>
28150+#include <linux/msg.h>
28151+#include <linux/shm.h>
28152+#include <linux/stat.h>
28153+#include <linux/syscalls.h>
28154+#include <linux/mman.h>
28155+#include <linux/file.h>
28156+#include <linux/utsname.h>
28157+#include <linux/ipc.h>
28158+#include <linux/elf.h>
28159+
28160+#include <linux/uaccess.h>
28161+#include <linux/unistd.h>
28162+
28163+#include <asm/syscalls.h>
28164+
28165+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
28166+{
28167+ unsigned long pax_task_size = TASK_SIZE;
28168+
28169+#ifdef CONFIG_PAX_SEGMEXEC
28170+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
28171+ pax_task_size = SEGMEXEC_TASK_SIZE;
28172+#endif
28173+
28174+ if (flags & MAP_FIXED)
28175+ if (len > pax_task_size || addr > pax_task_size - len)
28176+ return -EINVAL;
28177+
28178+ return 0;
28179+}
28180+
28181+/*
28182+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
28183+ */
28184+static unsigned long get_align_mask(void)
28185+{
28186+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
28187+ return 0;
28188+
28189+ if (!(current->flags & PF_RANDOMIZE))
28190+ return 0;
28191+
28192+ return va_align.mask;
28193+}
28194+
28195+unsigned long
28196+arch_get_unmapped_area(struct file *filp, unsigned long addr,
28197+ unsigned long len, unsigned long pgoff, unsigned long flags)
28198+{
28199+ struct mm_struct *mm = current->mm;
28200+ struct vm_area_struct *vma;
28201+ unsigned long pax_task_size = TASK_SIZE;
28202+ struct vm_unmapped_area_info info;
28203+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28204+
28205+#ifdef CONFIG_PAX_SEGMEXEC
28206+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28207+ pax_task_size = SEGMEXEC_TASK_SIZE;
28208+#endif
28209+
28210+ pax_task_size -= PAGE_SIZE;
28211+
28212+ if (len > pax_task_size)
28213+ return -ENOMEM;
28214+
28215+ if (flags & MAP_FIXED)
28216+ return addr;
28217+
28218+#ifdef CONFIG_PAX_RANDMMAP
28219+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28220+#endif
28221+
28222+ if (addr) {
28223+ addr = PAGE_ALIGN(addr);
28224+ if (pax_task_size - len >= addr) {
28225+ vma = find_vma(mm, addr);
28226+ if (check_heap_stack_gap(vma, addr, len, offset))
28227+ return addr;
28228+ }
28229+ }
28230+
28231+ info.flags = 0;
28232+ info.length = len;
28233+ info.align_mask = filp ? get_align_mask() : 0;
28234+ info.align_offset = pgoff << PAGE_SHIFT;
28235+ info.threadstack_offset = offset;
28236+
28237+#ifdef CONFIG_PAX_PAGEEXEC
28238+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
28239+ info.low_limit = 0x00110000UL;
28240+ info.high_limit = mm->start_code;
28241+
28242+#ifdef CONFIG_PAX_RANDMMAP
28243+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28244+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
28245+#endif
28246+
28247+ if (info.low_limit < info.high_limit) {
28248+ addr = vm_unmapped_area(&info);
28249+ if (!IS_ERR_VALUE(addr))
28250+ return addr;
28251+ }
28252+ } else
28253+#endif
28254+
28255+ info.low_limit = mm->mmap_base;
28256+ info.high_limit = pax_task_size;
28257+
28258+ return vm_unmapped_area(&info);
28259+}
28260+
28261+unsigned long
28262+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28263+ const unsigned long len, const unsigned long pgoff,
28264+ const unsigned long flags)
28265+{
28266+ struct vm_area_struct *vma;
28267+ struct mm_struct *mm = current->mm;
28268+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
28269+ struct vm_unmapped_area_info info;
28270+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28271+
28272+#ifdef CONFIG_PAX_SEGMEXEC
28273+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28274+ pax_task_size = SEGMEXEC_TASK_SIZE;
28275+#endif
28276+
28277+ pax_task_size -= PAGE_SIZE;
28278+
28279+ /* requested length too big for entire address space */
28280+ if (len > pax_task_size)
28281+ return -ENOMEM;
28282+
28283+ if (flags & MAP_FIXED)
28284+ return addr;
28285+
28286+#ifdef CONFIG_PAX_PAGEEXEC
28287+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
28288+ goto bottomup;
28289+#endif
28290+
28291+#ifdef CONFIG_PAX_RANDMMAP
28292+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28293+#endif
28294+
28295+ /* requesting a specific address */
28296+ if (addr) {
28297+ addr = PAGE_ALIGN(addr);
28298+ if (pax_task_size - len >= addr) {
28299+ vma = find_vma(mm, addr);
28300+ if (check_heap_stack_gap(vma, addr, len, offset))
28301+ return addr;
28302+ }
28303+ }
28304+
28305+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
28306+ info.length = len;
28307+ info.low_limit = PAGE_SIZE;
28308+ info.high_limit = mm->mmap_base;
28309+ info.align_mask = filp ? get_align_mask() : 0;
28310+ info.align_offset = pgoff << PAGE_SHIFT;
28311+ info.threadstack_offset = offset;
28312+
28313+ addr = vm_unmapped_area(&info);
28314+ if (!(addr & ~PAGE_MASK))
28315+ return addr;
28316+ VM_BUG_ON(addr != -ENOMEM);
28317+
28318+bottomup:
28319+ /*
28320+ * A failed mmap() very likely causes application failure,
28321+ * so fall back to the bottom-up function here. This scenario
28322+ * can happen with large stack limits and large mmap()
28323+ * allocations.
28324+ */
28325+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
28326+}
28327diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
28328index 30277e2..5664a29 100644
28329--- a/arch/x86/kernel/sys_x86_64.c
28330+++ b/arch/x86/kernel/sys_x86_64.c
28331@@ -81,8 +81,8 @@ out:
28332 return error;
28333 }
28334
28335-static void find_start_end(unsigned long flags, unsigned long *begin,
28336- unsigned long *end)
28337+static void find_start_end(struct mm_struct *mm, unsigned long flags,
28338+ unsigned long *begin, unsigned long *end)
28339 {
28340 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
28341 unsigned long new_begin;
28342@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
28343 *begin = new_begin;
28344 }
28345 } else {
28346- *begin = current->mm->mmap_legacy_base;
28347+ *begin = mm->mmap_legacy_base;
28348 *end = TASK_SIZE;
28349 }
28350 }
28351@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
28352 struct vm_area_struct *vma;
28353 struct vm_unmapped_area_info info;
28354 unsigned long begin, end;
28355+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28356
28357 if (flags & MAP_FIXED)
28358 return addr;
28359
28360- find_start_end(flags, &begin, &end);
28361+ find_start_end(mm, flags, &begin, &end);
28362
28363 if (len > end)
28364 return -ENOMEM;
28365
28366+#ifdef CONFIG_PAX_RANDMMAP
28367+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28368+#endif
28369+
28370 if (addr) {
28371 addr = PAGE_ALIGN(addr);
28372 vma = find_vma(mm, addr);
28373- if (end - len >= addr &&
28374- (!vma || addr + len <= vma->vm_start))
28375+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28376 return addr;
28377 }
28378
28379@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
28380 info.high_limit = end;
28381 info.align_mask = filp ? get_align_mask() : 0;
28382 info.align_offset = pgoff << PAGE_SHIFT;
28383+ info.threadstack_offset = offset;
28384 return vm_unmapped_area(&info);
28385 }
28386
28387@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28388 struct mm_struct *mm = current->mm;
28389 unsigned long addr = addr0;
28390 struct vm_unmapped_area_info info;
28391+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28392
28393 /* requested length too big for entire address space */
28394 if (len > TASK_SIZE)
28395@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28396 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
28397 goto bottomup;
28398
28399+#ifdef CONFIG_PAX_RANDMMAP
28400+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28401+#endif
28402+
28403 /* requesting a specific address */
28404 if (addr) {
28405 addr = PAGE_ALIGN(addr);
28406 vma = find_vma(mm, addr);
28407- if (TASK_SIZE - len >= addr &&
28408- (!vma || addr + len <= vma->vm_start))
28409+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28410 return addr;
28411 }
28412
28413@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28414 info.high_limit = mm->mmap_base;
28415 info.align_mask = filp ? get_align_mask() : 0;
28416 info.align_offset = pgoff << PAGE_SHIFT;
28417+ info.threadstack_offset = offset;
28418 addr = vm_unmapped_area(&info);
28419 if (!(addr & ~PAGE_MASK))
28420 return addr;
28421diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
28422index 91a4496..bb87552 100644
28423--- a/arch/x86/kernel/tboot.c
28424+++ b/arch/x86/kernel/tboot.c
28425@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
28426
28427 void tboot_shutdown(u32 shutdown_type)
28428 {
28429- void (*shutdown)(void);
28430+ void (* __noreturn shutdown)(void);
28431
28432 if (!tboot_enabled())
28433 return;
28434@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
28435
28436 switch_to_tboot_pt();
28437
28438- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
28439+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
28440 shutdown();
28441
28442 /* should not reach here */
28443@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
28444 return -ENODEV;
28445 }
28446
28447-static atomic_t ap_wfs_count;
28448+static atomic_unchecked_t ap_wfs_count;
28449
28450 static int tboot_wait_for_aps(int num_aps)
28451 {
28452@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
28453 {
28454 switch (action) {
28455 case CPU_DYING:
28456- atomic_inc(&ap_wfs_count);
28457+ atomic_inc_unchecked(&ap_wfs_count);
28458 if (num_online_cpus() == 1)
28459- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
28460+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
28461 return NOTIFY_BAD;
28462 break;
28463 }
28464@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
28465
28466 tboot_create_trampoline();
28467
28468- atomic_set(&ap_wfs_count, 0);
28469+ atomic_set_unchecked(&ap_wfs_count, 0);
28470 register_hotcpu_notifier(&tboot_cpu_notifier);
28471
28472 #ifdef CONFIG_DEBUG_FS
28473diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
28474index 0fa2960..91eabbe 100644
28475--- a/arch/x86/kernel/time.c
28476+++ b/arch/x86/kernel/time.c
28477@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
28478 {
28479 unsigned long pc = instruction_pointer(regs);
28480
28481- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
28482+ if (!user_mode(regs) && in_lock_functions(pc)) {
28483 #ifdef CONFIG_FRAME_POINTER
28484- return *(unsigned long *)(regs->bp + sizeof(long));
28485+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
28486 #else
28487 unsigned long *sp =
28488 (unsigned long *)kernel_stack_pointer(regs);
28489@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
28490 * or above a saved flags. Eflags has bits 22-31 zero,
28491 * kernel addresses don't.
28492 */
28493+
28494+#ifdef CONFIG_PAX_KERNEXEC
28495+ return ktla_ktva(sp[0]);
28496+#else
28497 if (sp[0] >> 22)
28498 return sp[0];
28499 if (sp[1] >> 22)
28500 return sp[1];
28501 #endif
28502+
28503+#endif
28504 }
28505 return pc;
28506 }
28507diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
28508index f7fec09..9991981 100644
28509--- a/arch/x86/kernel/tls.c
28510+++ b/arch/x86/kernel/tls.c
28511@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
28512 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
28513 return -EINVAL;
28514
28515+#ifdef CONFIG_PAX_SEGMEXEC
28516+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
28517+ return -EINVAL;
28518+#endif
28519+
28520 set_tls_desc(p, idx, &info, 1);
28521
28522 return 0;
28523@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
28524
28525 if (kbuf)
28526 info = kbuf;
28527- else if (__copy_from_user(infobuf, ubuf, count))
28528+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
28529 return -EFAULT;
28530 else
28531 info = infobuf;
28532diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
28533index 1c113db..287b42e 100644
28534--- a/arch/x86/kernel/tracepoint.c
28535+++ b/arch/x86/kernel/tracepoint.c
28536@@ -9,11 +9,11 @@
28537 #include <linux/atomic.h>
28538
28539 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
28540-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28541+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28542 (unsigned long) trace_idt_table };
28543
28544 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28545-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
28546+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
28547
28548 static int trace_irq_vector_refcount;
28549 static DEFINE_MUTEX(irq_vector_mutex);
28550diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
28551index de801f2..f189dcf 100644
28552--- a/arch/x86/kernel/traps.c
28553+++ b/arch/x86/kernel/traps.c
28554@@ -67,7 +67,7 @@
28555 #include <asm/proto.h>
28556
28557 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28558-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
28559+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
28560 #else
28561 #include <asm/processor-flags.h>
28562 #include <asm/setup.h>
28563@@ -76,7 +76,7 @@ asmlinkage int system_call(void);
28564 #endif
28565
28566 /* Must be page-aligned because the real IDT is used in a fixmap. */
28567-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
28568+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
28569
28570 DECLARE_BITMAP(used_vectors, NR_VECTORS);
28571 EXPORT_SYMBOL_GPL(used_vectors);
28572@@ -108,11 +108,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
28573 }
28574
28575 static nokprobe_inline int
28576-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28577+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
28578 struct pt_regs *regs, long error_code)
28579 {
28580 #ifdef CONFIG_X86_32
28581- if (regs->flags & X86_VM_MASK) {
28582+ if (v8086_mode(regs)) {
28583 /*
28584 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
28585 * On nmi (interrupt 2), do_trap should not be called.
28586@@ -125,12 +125,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28587 return -1;
28588 }
28589 #endif
28590- if (!user_mode(regs)) {
28591+ if (!user_mode_novm(regs)) {
28592 if (!fixup_exception(regs)) {
28593 tsk->thread.error_code = error_code;
28594 tsk->thread.trap_nr = trapnr;
28595+
28596+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28597+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28598+ str = "PAX: suspicious stack segment fault";
28599+#endif
28600+
28601 die(str, regs, error_code);
28602 }
28603+
28604+#ifdef CONFIG_PAX_REFCOUNT
28605+ if (trapnr == X86_TRAP_OF)
28606+ pax_report_refcount_overflow(regs);
28607+#endif
28608+
28609 return 0;
28610 }
28611
28612@@ -169,7 +181,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28613 }
28614
28615 static void
28616-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28617+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28618 long error_code, siginfo_t *info)
28619 {
28620 struct task_struct *tsk = current;
28621@@ -193,7 +205,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28622 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28623 printk_ratelimit()) {
28624 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28625- tsk->comm, tsk->pid, str,
28626+ tsk->comm, task_pid_nr(tsk), str,
28627 regs->ip, regs->sp, error_code);
28628 print_vma_addr(" in ", regs->ip);
28629 pr_cont("\n");
28630@@ -274,6 +286,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28631 tsk->thread.error_code = error_code;
28632 tsk->thread.trap_nr = X86_TRAP_DF;
28633
28634+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28635+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28636+ die("grsec: kernel stack overflow detected", regs, error_code);
28637+#endif
28638+
28639 #ifdef CONFIG_DOUBLEFAULT
28640 df_debug(regs, error_code);
28641 #endif
28642@@ -296,7 +313,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28643 conditional_sti(regs);
28644
28645 #ifdef CONFIG_X86_32
28646- if (regs->flags & X86_VM_MASK) {
28647+ if (v8086_mode(regs)) {
28648 local_irq_enable();
28649 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28650 goto exit;
28651@@ -304,18 +321,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28652 #endif
28653
28654 tsk = current;
28655- if (!user_mode(regs)) {
28656+ if (!user_mode_novm(regs)) {
28657 if (fixup_exception(regs))
28658 goto exit;
28659
28660 tsk->thread.error_code = error_code;
28661 tsk->thread.trap_nr = X86_TRAP_GP;
28662 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28663- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28664+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28665+
28666+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28667+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28668+ die("PAX: suspicious general protection fault", regs, error_code);
28669+ else
28670+#endif
28671+
28672 die("general protection fault", regs, error_code);
28673+ }
28674 goto exit;
28675 }
28676
28677+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28678+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28679+ struct mm_struct *mm = tsk->mm;
28680+ unsigned long limit;
28681+
28682+ down_write(&mm->mmap_sem);
28683+ limit = mm->context.user_cs_limit;
28684+ if (limit < TASK_SIZE) {
28685+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28686+ up_write(&mm->mmap_sem);
28687+ return;
28688+ }
28689+ up_write(&mm->mmap_sem);
28690+ }
28691+#endif
28692+
28693 tsk->thread.error_code = error_code;
28694 tsk->thread.trap_nr = X86_TRAP_GP;
28695
28696@@ -433,7 +474,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28697 /* Copy the remainder of the stack from the current stack. */
28698 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28699
28700- BUG_ON(!user_mode_vm(&new_stack->regs));
28701+ BUG_ON(!user_mode(&new_stack->regs));
28702 return new_stack;
28703 }
28704 #endif
28705@@ -518,7 +559,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28706 /* It's safe to allow irq's after DR6 has been saved */
28707 preempt_conditional_sti(regs);
28708
28709- if (regs->flags & X86_VM_MASK) {
28710+ if (v8086_mode(regs)) {
28711 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28712 X86_TRAP_DB);
28713 preempt_conditional_cli(regs);
28714@@ -533,7 +574,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28715 * We already checked v86 mode above, so we can check for kernel mode
28716 * by just checking the CPL of CS.
28717 */
28718- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28719+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28720 tsk->thread.debugreg6 &= ~DR_STEP;
28721 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28722 regs->flags &= ~X86_EFLAGS_TF;
28723@@ -566,7 +607,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28724 return;
28725 conditional_sti(regs);
28726
28727- if (!user_mode_vm(regs))
28728+ if (!user_mode(regs))
28729 {
28730 if (!fixup_exception(regs)) {
28731 task->thread.error_code = error_code;
28732diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28733index b7e50bb..f4a93ae 100644
28734--- a/arch/x86/kernel/tsc.c
28735+++ b/arch/x86/kernel/tsc.c
28736@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28737 */
28738 smp_wmb();
28739
28740- ACCESS_ONCE(c2n->head) = data;
28741+ ACCESS_ONCE_RW(c2n->head) = data;
28742 }
28743
28744 /*
28745diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28746index 5d1cbfe..2a21feb 100644
28747--- a/arch/x86/kernel/uprobes.c
28748+++ b/arch/x86/kernel/uprobes.c
28749@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28750 int ret = NOTIFY_DONE;
28751
28752 /* We are only interested in userspace traps */
28753- if (regs && !user_mode_vm(regs))
28754+ if (regs && !user_mode(regs))
28755 return NOTIFY_DONE;
28756
28757 switch (val) {
28758@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28759
28760 if (nleft != rasize) {
28761 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28762- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28763+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28764
28765 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28766 }
28767diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28768index b9242ba..50c5edd 100644
28769--- a/arch/x86/kernel/verify_cpu.S
28770+++ b/arch/x86/kernel/verify_cpu.S
28771@@ -20,6 +20,7 @@
28772 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28773 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28774 * arch/x86/kernel/head_32.S: processor startup
28775+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28776 *
28777 * verify_cpu, returns the status of longmode and SSE in register %eax.
28778 * 0: Success 1: Failure
28779diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28780index e8edcf5..27f9344 100644
28781--- a/arch/x86/kernel/vm86_32.c
28782+++ b/arch/x86/kernel/vm86_32.c
28783@@ -44,6 +44,7 @@
28784 #include <linux/ptrace.h>
28785 #include <linux/audit.h>
28786 #include <linux/stddef.h>
28787+#include <linux/grsecurity.h>
28788
28789 #include <asm/uaccess.h>
28790 #include <asm/io.h>
28791@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28792 do_exit(SIGSEGV);
28793 }
28794
28795- tss = &per_cpu(init_tss, get_cpu());
28796+ tss = init_tss + get_cpu();
28797 current->thread.sp0 = current->thread.saved_sp0;
28798 current->thread.sysenter_cs = __KERNEL_CS;
28799 load_sp0(tss, &current->thread);
28800@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28801
28802 if (tsk->thread.saved_sp0)
28803 return -EPERM;
28804+
28805+#ifdef CONFIG_GRKERNSEC_VM86
28806+ if (!capable(CAP_SYS_RAWIO)) {
28807+ gr_handle_vm86();
28808+ return -EPERM;
28809+ }
28810+#endif
28811+
28812 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28813 offsetof(struct kernel_vm86_struct, vm86plus) -
28814 sizeof(info.regs));
28815@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28816 int tmp;
28817 struct vm86plus_struct __user *v86;
28818
28819+#ifdef CONFIG_GRKERNSEC_VM86
28820+ if (!capable(CAP_SYS_RAWIO)) {
28821+ gr_handle_vm86();
28822+ return -EPERM;
28823+ }
28824+#endif
28825+
28826 tsk = current;
28827 switch (cmd) {
28828 case VM86_REQUEST_IRQ:
28829@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28830 tsk->thread.saved_fs = info->regs32->fs;
28831 tsk->thread.saved_gs = get_user_gs(info->regs32);
28832
28833- tss = &per_cpu(init_tss, get_cpu());
28834+ tss = init_tss + get_cpu();
28835 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28836 if (cpu_has_sep)
28837 tsk->thread.sysenter_cs = 0;
28838@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28839 goto cannot_handle;
28840 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28841 goto cannot_handle;
28842- intr_ptr = (unsigned long __user *) (i << 2);
28843+ intr_ptr = (__force unsigned long __user *) (i << 2);
28844 if (get_user(segoffs, intr_ptr))
28845 goto cannot_handle;
28846 if ((segoffs >> 16) == BIOSSEG)
28847diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28848index 49edf2d..c0d1362 100644
28849--- a/arch/x86/kernel/vmlinux.lds.S
28850+++ b/arch/x86/kernel/vmlinux.lds.S
28851@@ -26,6 +26,13 @@
28852 #include <asm/page_types.h>
28853 #include <asm/cache.h>
28854 #include <asm/boot.h>
28855+#include <asm/segment.h>
28856+
28857+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28858+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28859+#else
28860+#define __KERNEL_TEXT_OFFSET 0
28861+#endif
28862
28863 #undef i386 /* in case the preprocessor is a 32bit one */
28864
28865@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28866
28867 PHDRS {
28868 text PT_LOAD FLAGS(5); /* R_E */
28869+#ifdef CONFIG_X86_32
28870+ module PT_LOAD FLAGS(5); /* R_E */
28871+#endif
28872+#ifdef CONFIG_XEN
28873+ rodata PT_LOAD FLAGS(5); /* R_E */
28874+#else
28875+ rodata PT_LOAD FLAGS(4); /* R__ */
28876+#endif
28877 data PT_LOAD FLAGS(6); /* RW_ */
28878-#ifdef CONFIG_X86_64
28879+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28880 #ifdef CONFIG_SMP
28881 percpu PT_LOAD FLAGS(6); /* RW_ */
28882 #endif
28883+ text.init PT_LOAD FLAGS(5); /* R_E */
28884+ text.exit PT_LOAD FLAGS(5); /* R_E */
28885 init PT_LOAD FLAGS(7); /* RWE */
28886-#endif
28887 note PT_NOTE FLAGS(0); /* ___ */
28888 }
28889
28890 SECTIONS
28891 {
28892 #ifdef CONFIG_X86_32
28893- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28894- phys_startup_32 = startup_32 - LOAD_OFFSET;
28895+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28896 #else
28897- . = __START_KERNEL;
28898- phys_startup_64 = startup_64 - LOAD_OFFSET;
28899+ . = __START_KERNEL;
28900 #endif
28901
28902 /* Text and read-only data */
28903- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28904- _text = .;
28905+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28906 /* bootstrapping code */
28907+#ifdef CONFIG_X86_32
28908+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28909+#else
28910+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28911+#endif
28912+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28913+ _text = .;
28914 HEAD_TEXT
28915 . = ALIGN(8);
28916 _stext = .;
28917@@ -104,13 +124,47 @@ SECTIONS
28918 IRQENTRY_TEXT
28919 *(.fixup)
28920 *(.gnu.warning)
28921- /* End of text section */
28922- _etext = .;
28923 } :text = 0x9090
28924
28925- NOTES :text :note
28926+ . += __KERNEL_TEXT_OFFSET;
28927
28928- EXCEPTION_TABLE(16) :text = 0x9090
28929+#ifdef CONFIG_X86_32
28930+ . = ALIGN(PAGE_SIZE);
28931+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28932+
28933+#ifdef CONFIG_PAX_KERNEXEC
28934+ MODULES_EXEC_VADDR = .;
28935+ BYTE(0)
28936+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28937+ . = ALIGN(HPAGE_SIZE) - 1;
28938+ MODULES_EXEC_END = .;
28939+#endif
28940+
28941+ } :module
28942+#endif
28943+
28944+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28945+ /* End of text section */
28946+ BYTE(0)
28947+ _etext = . - __KERNEL_TEXT_OFFSET;
28948+ }
28949+
28950+#ifdef CONFIG_X86_32
28951+ . = ALIGN(PAGE_SIZE);
28952+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28953+ . = ALIGN(PAGE_SIZE);
28954+ *(.empty_zero_page)
28955+ *(.initial_pg_fixmap)
28956+ *(.initial_pg_pmd)
28957+ *(.initial_page_table)
28958+ *(.swapper_pg_dir)
28959+ } :rodata
28960+#endif
28961+
28962+ . = ALIGN(PAGE_SIZE);
28963+ NOTES :rodata :note
28964+
28965+ EXCEPTION_TABLE(16) :rodata
28966
28967 #if defined(CONFIG_DEBUG_RODATA)
28968 /* .text should occupy whole number of pages */
28969@@ -122,16 +176,20 @@ SECTIONS
28970
28971 /* Data */
28972 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28973+
28974+#ifdef CONFIG_PAX_KERNEXEC
28975+ . = ALIGN(HPAGE_SIZE);
28976+#else
28977+ . = ALIGN(PAGE_SIZE);
28978+#endif
28979+
28980 /* Start of data section */
28981 _sdata = .;
28982
28983 /* init_task */
28984 INIT_TASK_DATA(THREAD_SIZE)
28985
28986-#ifdef CONFIG_X86_32
28987- /* 32 bit has nosave before _edata */
28988 NOSAVE_DATA
28989-#endif
28990
28991 PAGE_ALIGNED_DATA(PAGE_SIZE)
28992
28993@@ -174,12 +232,19 @@ SECTIONS
28994 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
28995
28996 /* Init code and data - will be freed after init */
28997- . = ALIGN(PAGE_SIZE);
28998 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
28999+ BYTE(0)
29000+
29001+#ifdef CONFIG_PAX_KERNEXEC
29002+ . = ALIGN(HPAGE_SIZE);
29003+#else
29004+ . = ALIGN(PAGE_SIZE);
29005+#endif
29006+
29007 __init_begin = .; /* paired with __init_end */
29008- }
29009+ } :init.begin
29010
29011-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
29012+#ifdef CONFIG_SMP
29013 /*
29014 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
29015 * output PHDR, so the next output section - .init.text - should
29016@@ -188,12 +253,27 @@ SECTIONS
29017 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
29018 #endif
29019
29020- INIT_TEXT_SECTION(PAGE_SIZE)
29021-#ifdef CONFIG_X86_64
29022- :init
29023-#endif
29024+ . = ALIGN(PAGE_SIZE);
29025+ init_begin = .;
29026+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
29027+ VMLINUX_SYMBOL(_sinittext) = .;
29028+ INIT_TEXT
29029+ VMLINUX_SYMBOL(_einittext) = .;
29030+ . = ALIGN(PAGE_SIZE);
29031+ } :text.init
29032
29033- INIT_DATA_SECTION(16)
29034+ /*
29035+ * .exit.text is discard at runtime, not link time, to deal with
29036+ * references from .altinstructions and .eh_frame
29037+ */
29038+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
29039+ EXIT_TEXT
29040+ . = ALIGN(16);
29041+ } :text.exit
29042+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
29043+
29044+ . = ALIGN(PAGE_SIZE);
29045+ INIT_DATA_SECTION(16) :init
29046
29047 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
29048 __x86_cpu_dev_start = .;
29049@@ -264,19 +344,12 @@ SECTIONS
29050 }
29051
29052 . = ALIGN(8);
29053- /*
29054- * .exit.text is discard at runtime, not link time, to deal with
29055- * references from .altinstructions and .eh_frame
29056- */
29057- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
29058- EXIT_TEXT
29059- }
29060
29061 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
29062 EXIT_DATA
29063 }
29064
29065-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
29066+#ifndef CONFIG_SMP
29067 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
29068 #endif
29069
29070@@ -295,16 +368,10 @@ SECTIONS
29071 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
29072 __smp_locks = .;
29073 *(.smp_locks)
29074- . = ALIGN(PAGE_SIZE);
29075 __smp_locks_end = .;
29076+ . = ALIGN(PAGE_SIZE);
29077 }
29078
29079-#ifdef CONFIG_X86_64
29080- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
29081- NOSAVE_DATA
29082- }
29083-#endif
29084-
29085 /* BSS */
29086 . = ALIGN(PAGE_SIZE);
29087 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
29088@@ -320,6 +387,7 @@ SECTIONS
29089 __brk_base = .;
29090 . += 64 * 1024; /* 64k alignment slop space */
29091 *(.brk_reservation) /* areas brk users have reserved */
29092+ . = ALIGN(HPAGE_SIZE);
29093 __brk_limit = .;
29094 }
29095
29096@@ -346,13 +414,12 @@ SECTIONS
29097 * for the boot processor.
29098 */
29099 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
29100-INIT_PER_CPU(gdt_page);
29101 INIT_PER_CPU(irq_stack_union);
29102
29103 /*
29104 * Build-time check on the image size:
29105 */
29106-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
29107+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
29108 "kernel image bigger than KERNEL_IMAGE_SIZE");
29109
29110 #ifdef CONFIG_SMP
29111diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
29112index e1e1e80..1400089 100644
29113--- a/arch/x86/kernel/vsyscall_64.c
29114+++ b/arch/x86/kernel/vsyscall_64.c
29115@@ -54,15 +54,13 @@
29116
29117 DEFINE_VVAR(int, vgetcpu_mode);
29118
29119-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
29120+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
29121
29122 static int __init vsyscall_setup(char *str)
29123 {
29124 if (str) {
29125 if (!strcmp("emulate", str))
29126 vsyscall_mode = EMULATE;
29127- else if (!strcmp("native", str))
29128- vsyscall_mode = NATIVE;
29129 else if (!strcmp("none", str))
29130 vsyscall_mode = NONE;
29131 else
29132@@ -279,8 +277,7 @@ do_ret:
29133 return true;
29134
29135 sigsegv:
29136- force_sig(SIGSEGV, current);
29137- return true;
29138+ do_group_exit(SIGKILL);
29139 }
29140
29141 /*
29142@@ -331,10 +328,7 @@ void __init map_vsyscall(void)
29143 extern char __vsyscall_page;
29144 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
29145
29146- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
29147- vsyscall_mode == NATIVE
29148- ? PAGE_KERNEL_VSYSCALL
29149- : PAGE_KERNEL_VVAR);
29150+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
29151 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
29152 (unsigned long)VSYSCALL_ADDR);
29153 }
29154diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
29155index 04068192..4d75aa6 100644
29156--- a/arch/x86/kernel/x8664_ksyms_64.c
29157+++ b/arch/x86/kernel/x8664_ksyms_64.c
29158@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
29159 EXPORT_SYMBOL(copy_user_generic_unrolled);
29160 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
29161 EXPORT_SYMBOL(__copy_user_nocache);
29162-EXPORT_SYMBOL(_copy_from_user);
29163-EXPORT_SYMBOL(_copy_to_user);
29164
29165 EXPORT_SYMBOL(copy_page);
29166 EXPORT_SYMBOL(clear_page);
29167@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
29168 EXPORT_SYMBOL(___preempt_schedule_context);
29169 #endif
29170 #endif
29171+
29172+#ifdef CONFIG_PAX_PER_CPU_PGD
29173+EXPORT_SYMBOL(cpu_pgd);
29174+#endif
29175diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
29176index e48b674..a451dd9 100644
29177--- a/arch/x86/kernel/x86_init.c
29178+++ b/arch/x86/kernel/x86_init.c
29179@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
29180 static void default_nmi_init(void) { };
29181 static int default_i8042_detect(void) { return 1; };
29182
29183-struct x86_platform_ops x86_platform = {
29184+struct x86_platform_ops x86_platform __read_only = {
29185 .calibrate_tsc = native_calibrate_tsc,
29186 .get_wallclock = mach_get_cmos_time,
29187 .set_wallclock = mach_set_rtc_mmss,
29188@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
29189 EXPORT_SYMBOL_GPL(x86_platform);
29190
29191 #if defined(CONFIG_PCI_MSI)
29192-struct x86_msi_ops x86_msi = {
29193+struct x86_msi_ops x86_msi __read_only = {
29194 .setup_msi_irqs = native_setup_msi_irqs,
29195 .compose_msi_msg = native_compose_msi_msg,
29196 .teardown_msi_irq = native_teardown_msi_irq,
29197@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
29198 }
29199 #endif
29200
29201-struct x86_io_apic_ops x86_io_apic_ops = {
29202+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
29203 .init = native_io_apic_init_mappings,
29204 .read = native_io_apic_read,
29205 .write = native_io_apic_write,
29206diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
29207index 4c540c4..0b985b0 100644
29208--- a/arch/x86/kernel/xsave.c
29209+++ b/arch/x86/kernel/xsave.c
29210@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
29211
29212 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
29213 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
29214- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
29215+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
29216
29217 if (!use_xsave())
29218 return err;
29219
29220- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
29221+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
29222
29223 /*
29224 * Read the xstate_bv which we copied (directly from the cpu or
29225 * from the state in task struct) to the user buffers.
29226 */
29227- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
29228+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
29229
29230 /*
29231 * For legacy compatible, we always set FP/SSE bits in the bit
29232@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
29233 */
29234 xstate_bv |= XSTATE_FPSSE;
29235
29236- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
29237+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
29238
29239 return err;
29240 }
29241@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
29242 {
29243 int err;
29244
29245+ buf = (struct xsave_struct __user *)____m(buf);
29246 if (use_xsave())
29247 err = xsave_user(buf);
29248 else if (use_fxsr())
29249@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
29250 */
29251 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
29252 {
29253+ buf = (void __user *)____m(buf);
29254 if (use_xsave()) {
29255 if ((unsigned long)buf % 64 || fx_only) {
29256 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
29257diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
29258index 38a0afe..94421a9 100644
29259--- a/arch/x86/kvm/cpuid.c
29260+++ b/arch/x86/kvm/cpuid.c
29261@@ -166,15 +166,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
29262 struct kvm_cpuid2 *cpuid,
29263 struct kvm_cpuid_entry2 __user *entries)
29264 {
29265- int r;
29266+ int r, i;
29267
29268 r = -E2BIG;
29269 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
29270 goto out;
29271 r = -EFAULT;
29272- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
29273- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
29274+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
29275 goto out;
29276+ for (i = 0; i < cpuid->nent; ++i) {
29277+ struct kvm_cpuid_entry2 cpuid_entry;
29278+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
29279+ goto out;
29280+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
29281+ }
29282 vcpu->arch.cpuid_nent = cpuid->nent;
29283 kvm_apic_set_version(vcpu);
29284 kvm_x86_ops->cpuid_update(vcpu);
29285@@ -189,15 +194,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
29286 struct kvm_cpuid2 *cpuid,
29287 struct kvm_cpuid_entry2 __user *entries)
29288 {
29289- int r;
29290+ int r, i;
29291
29292 r = -E2BIG;
29293 if (cpuid->nent < vcpu->arch.cpuid_nent)
29294 goto out;
29295 r = -EFAULT;
29296- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
29297- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
29298+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
29299 goto out;
29300+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
29301+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
29302+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
29303+ goto out;
29304+ }
29305 return 0;
29306
29307 out:
29308diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
29309index 08e8a89..0e9183e 100644
29310--- a/arch/x86/kvm/lapic.c
29311+++ b/arch/x86/kvm/lapic.c
29312@@ -55,7 +55,7 @@
29313 #define APIC_BUS_CYCLE_NS 1
29314
29315 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
29316-#define apic_debug(fmt, arg...)
29317+#define apic_debug(fmt, arg...) do {} while (0)
29318
29319 #define APIC_LVT_NUM 6
29320 /* 14 is the version for Xeon and Pentium 8.4.8*/
29321diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
29322index 4107765..d9eb358 100644
29323--- a/arch/x86/kvm/paging_tmpl.h
29324+++ b/arch/x86/kvm/paging_tmpl.h
29325@@ -331,7 +331,7 @@ retry_walk:
29326 if (unlikely(kvm_is_error_hva(host_addr)))
29327 goto error;
29328
29329- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
29330+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
29331 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
29332 goto error;
29333 walker->ptep_user[walker->level - 1] = ptep_user;
29334diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
29335index 78dadc3..fd84599 100644
29336--- a/arch/x86/kvm/svm.c
29337+++ b/arch/x86/kvm/svm.c
29338@@ -3547,7 +3547,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
29339 int cpu = raw_smp_processor_id();
29340
29341 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
29342+
29343+ pax_open_kernel();
29344 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
29345+ pax_close_kernel();
29346+
29347 load_TR_desc();
29348 }
29349
29350@@ -3948,6 +3952,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
29351 #endif
29352 #endif
29353
29354+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29355+ __set_fs(current_thread_info()->addr_limit);
29356+#endif
29357+
29358 reload_tss(vcpu);
29359
29360 local_irq_disable();
29361diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
29362index 41a5426..c0b3c00 100644
29363--- a/arch/x86/kvm/vmx.c
29364+++ b/arch/x86/kvm/vmx.c
29365@@ -1341,12 +1341,12 @@ static void vmcs_write64(unsigned long field, u64 value)
29366 #endif
29367 }
29368
29369-static void vmcs_clear_bits(unsigned long field, u32 mask)
29370+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
29371 {
29372 vmcs_writel(field, vmcs_readl(field) & ~mask);
29373 }
29374
29375-static void vmcs_set_bits(unsigned long field, u32 mask)
29376+static void vmcs_set_bits(unsigned long field, unsigned long mask)
29377 {
29378 vmcs_writel(field, vmcs_readl(field) | mask);
29379 }
29380@@ -1606,7 +1606,11 @@ static void reload_tss(void)
29381 struct desc_struct *descs;
29382
29383 descs = (void *)gdt->address;
29384+
29385+ pax_open_kernel();
29386 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
29387+ pax_close_kernel();
29388+
29389 load_TR_desc();
29390 }
29391
29392@@ -1834,6 +1838,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
29393 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
29394 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
29395
29396+#ifdef CONFIG_PAX_PER_CPU_PGD
29397+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
29398+#endif
29399+
29400 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
29401 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
29402 vmx->loaded_vmcs->cpu = cpu;
29403@@ -2123,7 +2131,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
29404 * reads and returns guest's timestamp counter "register"
29405 * guest_tsc = host_tsc + tsc_offset -- 21.3
29406 */
29407-static u64 guest_read_tsc(void)
29408+static u64 __intentional_overflow(-1) guest_read_tsc(void)
29409 {
29410 u64 host_tsc, tsc_offset;
29411
29412@@ -3114,8 +3122,11 @@ static __init int hardware_setup(void)
29413 if (!cpu_has_vmx_flexpriority())
29414 flexpriority_enabled = 0;
29415
29416- if (!cpu_has_vmx_tpr_shadow())
29417- kvm_x86_ops->update_cr8_intercept = NULL;
29418+ if (!cpu_has_vmx_tpr_shadow()) {
29419+ pax_open_kernel();
29420+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
29421+ pax_close_kernel();
29422+ }
29423
29424 if (enable_ept && !cpu_has_vmx_ept_2m_page())
29425 kvm_disable_largepages();
29426@@ -3126,13 +3137,15 @@ static __init int hardware_setup(void)
29427 if (!cpu_has_vmx_apicv())
29428 enable_apicv = 0;
29429
29430+ pax_open_kernel();
29431 if (enable_apicv)
29432- kvm_x86_ops->update_cr8_intercept = NULL;
29433+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
29434 else {
29435- kvm_x86_ops->hwapic_irr_update = NULL;
29436- kvm_x86_ops->deliver_posted_interrupt = NULL;
29437- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
29438+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
29439+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
29440+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
29441 }
29442+ pax_close_kernel();
29443
29444 if (nested)
29445 nested_vmx_setup_ctls_msrs();
29446@@ -4242,7 +4255,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
29447 unsigned long cr4;
29448
29449 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
29450+
29451+#ifndef CONFIG_PAX_PER_CPU_PGD
29452 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
29453+#endif
29454
29455 /* Save the most likely value for this task's CR4 in the VMCS. */
29456 cr4 = read_cr4();
29457@@ -4269,7 +4285,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
29458 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
29459 vmx->host_idt_base = dt.address;
29460
29461- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
29462+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
29463
29464 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
29465 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
29466@@ -7475,6 +7491,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29467 "jmp 2f \n\t"
29468 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
29469 "2: "
29470+
29471+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29472+ "ljmp %[cs],$3f\n\t"
29473+ "3: "
29474+#endif
29475+
29476 /* Save guest registers, load host registers, keep flags */
29477 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
29478 "pop %0 \n\t"
29479@@ -7527,6 +7549,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29480 #endif
29481 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
29482 [wordsize]"i"(sizeof(ulong))
29483+
29484+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29485+ ,[cs]"i"(__KERNEL_CS)
29486+#endif
29487+
29488 : "cc", "memory"
29489 #ifdef CONFIG_X86_64
29490 , "rax", "rbx", "rdi", "rsi"
29491@@ -7540,7 +7567,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29492 if (debugctlmsr)
29493 update_debugctlmsr(debugctlmsr);
29494
29495-#ifndef CONFIG_X86_64
29496+#ifdef CONFIG_X86_32
29497 /*
29498 * The sysexit path does not restore ds/es, so we must set them to
29499 * a reasonable value ourselves.
29500@@ -7549,8 +7576,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29501 * may be executed in interrupt context, which saves and restore segments
29502 * around it, nullifying its effect.
29503 */
29504- loadsegment(ds, __USER_DS);
29505- loadsegment(es, __USER_DS);
29506+ loadsegment(ds, __KERNEL_DS);
29507+ loadsegment(es, __KERNEL_DS);
29508+ loadsegment(ss, __KERNEL_DS);
29509+
29510+#ifdef CONFIG_PAX_KERNEXEC
29511+ loadsegment(fs, __KERNEL_PERCPU);
29512+#endif
29513+
29514+#ifdef CONFIG_PAX_MEMORY_UDEREF
29515+ __set_fs(current_thread_info()->addr_limit);
29516+#endif
29517+
29518 #endif
29519
29520 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
29521diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
29522index d6aeccf..cea125a 100644
29523--- a/arch/x86/kvm/x86.c
29524+++ b/arch/x86/kvm/x86.c
29525@@ -1857,8 +1857,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
29526 {
29527 struct kvm *kvm = vcpu->kvm;
29528 int lm = is_long_mode(vcpu);
29529- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29530- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29531+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29532+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29533 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
29534 : kvm->arch.xen_hvm_config.blob_size_32;
29535 u32 page_num = data & ~PAGE_MASK;
29536@@ -2779,6 +2779,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
29537 if (n < msr_list.nmsrs)
29538 goto out;
29539 r = -EFAULT;
29540+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
29541+ goto out;
29542 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
29543 num_msrs_to_save * sizeof(u32)))
29544 goto out;
29545@@ -5639,7 +5641,7 @@ static struct notifier_block pvclock_gtod_notifier = {
29546 };
29547 #endif
29548
29549-int kvm_arch_init(void *opaque)
29550+int kvm_arch_init(const void *opaque)
29551 {
29552 int r;
29553 struct kvm_x86_ops *ops = opaque;
29554diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
29555index aae9413..d11e829 100644
29556--- a/arch/x86/lguest/boot.c
29557+++ b/arch/x86/lguest/boot.c
29558@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
29559 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
29560 * Launcher to reboot us.
29561 */
29562-static void lguest_restart(char *reason)
29563+static __noreturn void lguest_restart(char *reason)
29564 {
29565 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
29566+ BUG();
29567 }
29568
29569 /*G:050
29570diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29571index 00933d5..3a64af9 100644
29572--- a/arch/x86/lib/atomic64_386_32.S
29573+++ b/arch/x86/lib/atomic64_386_32.S
29574@@ -48,6 +48,10 @@ BEGIN(read)
29575 movl (v), %eax
29576 movl 4(v), %edx
29577 RET_ENDP
29578+BEGIN(read_unchecked)
29579+ movl (v), %eax
29580+ movl 4(v), %edx
29581+RET_ENDP
29582 #undef v
29583
29584 #define v %esi
29585@@ -55,6 +59,10 @@ BEGIN(set)
29586 movl %ebx, (v)
29587 movl %ecx, 4(v)
29588 RET_ENDP
29589+BEGIN(set_unchecked)
29590+ movl %ebx, (v)
29591+ movl %ecx, 4(v)
29592+RET_ENDP
29593 #undef v
29594
29595 #define v %esi
29596@@ -70,6 +78,20 @@ RET_ENDP
29597 BEGIN(add)
29598 addl %eax, (v)
29599 adcl %edx, 4(v)
29600+
29601+#ifdef CONFIG_PAX_REFCOUNT
29602+ jno 0f
29603+ subl %eax, (v)
29604+ sbbl %edx, 4(v)
29605+ int $4
29606+0:
29607+ _ASM_EXTABLE(0b, 0b)
29608+#endif
29609+
29610+RET_ENDP
29611+BEGIN(add_unchecked)
29612+ addl %eax, (v)
29613+ adcl %edx, 4(v)
29614 RET_ENDP
29615 #undef v
29616
29617@@ -77,6 +99,24 @@ RET_ENDP
29618 BEGIN(add_return)
29619 addl (v), %eax
29620 adcl 4(v), %edx
29621+
29622+#ifdef CONFIG_PAX_REFCOUNT
29623+ into
29624+1234:
29625+ _ASM_EXTABLE(1234b, 2f)
29626+#endif
29627+
29628+ movl %eax, (v)
29629+ movl %edx, 4(v)
29630+
29631+#ifdef CONFIG_PAX_REFCOUNT
29632+2:
29633+#endif
29634+
29635+RET_ENDP
29636+BEGIN(add_return_unchecked)
29637+ addl (v), %eax
29638+ adcl 4(v), %edx
29639 movl %eax, (v)
29640 movl %edx, 4(v)
29641 RET_ENDP
29642@@ -86,6 +126,20 @@ RET_ENDP
29643 BEGIN(sub)
29644 subl %eax, (v)
29645 sbbl %edx, 4(v)
29646+
29647+#ifdef CONFIG_PAX_REFCOUNT
29648+ jno 0f
29649+ addl %eax, (v)
29650+ adcl %edx, 4(v)
29651+ int $4
29652+0:
29653+ _ASM_EXTABLE(0b, 0b)
29654+#endif
29655+
29656+RET_ENDP
29657+BEGIN(sub_unchecked)
29658+ subl %eax, (v)
29659+ sbbl %edx, 4(v)
29660 RET_ENDP
29661 #undef v
29662
29663@@ -96,6 +150,27 @@ BEGIN(sub_return)
29664 sbbl $0, %edx
29665 addl (v), %eax
29666 adcl 4(v), %edx
29667+
29668+#ifdef CONFIG_PAX_REFCOUNT
29669+ into
29670+1234:
29671+ _ASM_EXTABLE(1234b, 2f)
29672+#endif
29673+
29674+ movl %eax, (v)
29675+ movl %edx, 4(v)
29676+
29677+#ifdef CONFIG_PAX_REFCOUNT
29678+2:
29679+#endif
29680+
29681+RET_ENDP
29682+BEGIN(sub_return_unchecked)
29683+ negl %edx
29684+ negl %eax
29685+ sbbl $0, %edx
29686+ addl (v), %eax
29687+ adcl 4(v), %edx
29688 movl %eax, (v)
29689 movl %edx, 4(v)
29690 RET_ENDP
29691@@ -105,6 +180,20 @@ RET_ENDP
29692 BEGIN(inc)
29693 addl $1, (v)
29694 adcl $0, 4(v)
29695+
29696+#ifdef CONFIG_PAX_REFCOUNT
29697+ jno 0f
29698+ subl $1, (v)
29699+ sbbl $0, 4(v)
29700+ int $4
29701+0:
29702+ _ASM_EXTABLE(0b, 0b)
29703+#endif
29704+
29705+RET_ENDP
29706+BEGIN(inc_unchecked)
29707+ addl $1, (v)
29708+ adcl $0, 4(v)
29709 RET_ENDP
29710 #undef v
29711
29712@@ -114,6 +203,26 @@ BEGIN(inc_return)
29713 movl 4(v), %edx
29714 addl $1, %eax
29715 adcl $0, %edx
29716+
29717+#ifdef CONFIG_PAX_REFCOUNT
29718+ into
29719+1234:
29720+ _ASM_EXTABLE(1234b, 2f)
29721+#endif
29722+
29723+ movl %eax, (v)
29724+ movl %edx, 4(v)
29725+
29726+#ifdef CONFIG_PAX_REFCOUNT
29727+2:
29728+#endif
29729+
29730+RET_ENDP
29731+BEGIN(inc_return_unchecked)
29732+ movl (v), %eax
29733+ movl 4(v), %edx
29734+ addl $1, %eax
29735+ adcl $0, %edx
29736 movl %eax, (v)
29737 movl %edx, 4(v)
29738 RET_ENDP
29739@@ -123,6 +232,20 @@ RET_ENDP
29740 BEGIN(dec)
29741 subl $1, (v)
29742 sbbl $0, 4(v)
29743+
29744+#ifdef CONFIG_PAX_REFCOUNT
29745+ jno 0f
29746+ addl $1, (v)
29747+ adcl $0, 4(v)
29748+ int $4
29749+0:
29750+ _ASM_EXTABLE(0b, 0b)
29751+#endif
29752+
29753+RET_ENDP
29754+BEGIN(dec_unchecked)
29755+ subl $1, (v)
29756+ sbbl $0, 4(v)
29757 RET_ENDP
29758 #undef v
29759
29760@@ -132,6 +255,26 @@ BEGIN(dec_return)
29761 movl 4(v), %edx
29762 subl $1, %eax
29763 sbbl $0, %edx
29764+
29765+#ifdef CONFIG_PAX_REFCOUNT
29766+ into
29767+1234:
29768+ _ASM_EXTABLE(1234b, 2f)
29769+#endif
29770+
29771+ movl %eax, (v)
29772+ movl %edx, 4(v)
29773+
29774+#ifdef CONFIG_PAX_REFCOUNT
29775+2:
29776+#endif
29777+
29778+RET_ENDP
29779+BEGIN(dec_return_unchecked)
29780+ movl (v), %eax
29781+ movl 4(v), %edx
29782+ subl $1, %eax
29783+ sbbl $0, %edx
29784 movl %eax, (v)
29785 movl %edx, 4(v)
29786 RET_ENDP
29787@@ -143,6 +286,13 @@ BEGIN(add_unless)
29788 adcl %edx, %edi
29789 addl (v), %eax
29790 adcl 4(v), %edx
29791+
29792+#ifdef CONFIG_PAX_REFCOUNT
29793+ into
29794+1234:
29795+ _ASM_EXTABLE(1234b, 2f)
29796+#endif
29797+
29798 cmpl %eax, %ecx
29799 je 3f
29800 1:
29801@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29802 1:
29803 addl $1, %eax
29804 adcl $0, %edx
29805+
29806+#ifdef CONFIG_PAX_REFCOUNT
29807+ into
29808+1234:
29809+ _ASM_EXTABLE(1234b, 2f)
29810+#endif
29811+
29812 movl %eax, (v)
29813 movl %edx, 4(v)
29814 movl $1, %eax
29815@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29816 movl 4(v), %edx
29817 subl $1, %eax
29818 sbbl $0, %edx
29819+
29820+#ifdef CONFIG_PAX_REFCOUNT
29821+ into
29822+1234:
29823+ _ASM_EXTABLE(1234b, 1f)
29824+#endif
29825+
29826 js 1f
29827 movl %eax, (v)
29828 movl %edx, 4(v)
29829diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29830index f5cc9eb..51fa319 100644
29831--- a/arch/x86/lib/atomic64_cx8_32.S
29832+++ b/arch/x86/lib/atomic64_cx8_32.S
29833@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29834 CFI_STARTPROC
29835
29836 read64 %ecx
29837+ pax_force_retaddr
29838 ret
29839 CFI_ENDPROC
29840 ENDPROC(atomic64_read_cx8)
29841
29842+ENTRY(atomic64_read_unchecked_cx8)
29843+ CFI_STARTPROC
29844+
29845+ read64 %ecx
29846+ pax_force_retaddr
29847+ ret
29848+ CFI_ENDPROC
29849+ENDPROC(atomic64_read_unchecked_cx8)
29850+
29851 ENTRY(atomic64_set_cx8)
29852 CFI_STARTPROC
29853
29854@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29855 cmpxchg8b (%esi)
29856 jne 1b
29857
29858+ pax_force_retaddr
29859 ret
29860 CFI_ENDPROC
29861 ENDPROC(atomic64_set_cx8)
29862
29863+ENTRY(atomic64_set_unchecked_cx8)
29864+ CFI_STARTPROC
29865+
29866+1:
29867+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29868+ * are atomic on 586 and newer */
29869+ cmpxchg8b (%esi)
29870+ jne 1b
29871+
29872+ pax_force_retaddr
29873+ ret
29874+ CFI_ENDPROC
29875+ENDPROC(atomic64_set_unchecked_cx8)
29876+
29877 ENTRY(atomic64_xchg_cx8)
29878 CFI_STARTPROC
29879
29880@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29881 cmpxchg8b (%esi)
29882 jne 1b
29883
29884+ pax_force_retaddr
29885 ret
29886 CFI_ENDPROC
29887 ENDPROC(atomic64_xchg_cx8)
29888
29889-.macro addsub_return func ins insc
29890-ENTRY(atomic64_\func\()_return_cx8)
29891+.macro addsub_return func ins insc unchecked=""
29892+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29893 CFI_STARTPROC
29894 SAVE ebp
29895 SAVE ebx
29896@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29897 movl %edx, %ecx
29898 \ins\()l %esi, %ebx
29899 \insc\()l %edi, %ecx
29900+
29901+.ifb \unchecked
29902+#ifdef CONFIG_PAX_REFCOUNT
29903+ into
29904+2:
29905+ _ASM_EXTABLE(2b, 3f)
29906+#endif
29907+.endif
29908+
29909 LOCK_PREFIX
29910 cmpxchg8b (%ebp)
29911 jne 1b
29912-
29913-10:
29914 movl %ebx, %eax
29915 movl %ecx, %edx
29916+
29917+.ifb \unchecked
29918+#ifdef CONFIG_PAX_REFCOUNT
29919+3:
29920+#endif
29921+.endif
29922+
29923 RESTORE edi
29924 RESTORE esi
29925 RESTORE ebx
29926 RESTORE ebp
29927+ pax_force_retaddr
29928 ret
29929 CFI_ENDPROC
29930-ENDPROC(atomic64_\func\()_return_cx8)
29931+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29932 .endm
29933
29934 addsub_return add add adc
29935 addsub_return sub sub sbb
29936+addsub_return add add adc _unchecked
29937+addsub_return sub sub sbb _unchecked
29938
29939-.macro incdec_return func ins insc
29940-ENTRY(atomic64_\func\()_return_cx8)
29941+.macro incdec_return func ins insc unchecked=""
29942+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29943 CFI_STARTPROC
29944 SAVE ebx
29945
29946@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29947 movl %edx, %ecx
29948 \ins\()l $1, %ebx
29949 \insc\()l $0, %ecx
29950+
29951+.ifb \unchecked
29952+#ifdef CONFIG_PAX_REFCOUNT
29953+ into
29954+2:
29955+ _ASM_EXTABLE(2b, 3f)
29956+#endif
29957+.endif
29958+
29959 LOCK_PREFIX
29960 cmpxchg8b (%esi)
29961 jne 1b
29962
29963-10:
29964 movl %ebx, %eax
29965 movl %ecx, %edx
29966+
29967+.ifb \unchecked
29968+#ifdef CONFIG_PAX_REFCOUNT
29969+3:
29970+#endif
29971+.endif
29972+
29973 RESTORE ebx
29974+ pax_force_retaddr
29975 ret
29976 CFI_ENDPROC
29977-ENDPROC(atomic64_\func\()_return_cx8)
29978+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29979 .endm
29980
29981 incdec_return inc add adc
29982 incdec_return dec sub sbb
29983+incdec_return inc add adc _unchecked
29984+incdec_return dec sub sbb _unchecked
29985
29986 ENTRY(atomic64_dec_if_positive_cx8)
29987 CFI_STARTPROC
29988@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29989 movl %edx, %ecx
29990 subl $1, %ebx
29991 sbb $0, %ecx
29992+
29993+#ifdef CONFIG_PAX_REFCOUNT
29994+ into
29995+1234:
29996+ _ASM_EXTABLE(1234b, 2f)
29997+#endif
29998+
29999 js 2f
30000 LOCK_PREFIX
30001 cmpxchg8b (%esi)
30002@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
30003 movl %ebx, %eax
30004 movl %ecx, %edx
30005 RESTORE ebx
30006+ pax_force_retaddr
30007 ret
30008 CFI_ENDPROC
30009 ENDPROC(atomic64_dec_if_positive_cx8)
30010@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
30011 movl %edx, %ecx
30012 addl %ebp, %ebx
30013 adcl %edi, %ecx
30014+
30015+#ifdef CONFIG_PAX_REFCOUNT
30016+ into
30017+1234:
30018+ _ASM_EXTABLE(1234b, 3f)
30019+#endif
30020+
30021 LOCK_PREFIX
30022 cmpxchg8b (%esi)
30023 jne 1b
30024@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
30025 CFI_ADJUST_CFA_OFFSET -8
30026 RESTORE ebx
30027 RESTORE ebp
30028+ pax_force_retaddr
30029 ret
30030 4:
30031 cmpl %edx, 4(%esp)
30032@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
30033 xorl %ecx, %ecx
30034 addl $1, %ebx
30035 adcl %edx, %ecx
30036+
30037+#ifdef CONFIG_PAX_REFCOUNT
30038+ into
30039+1234:
30040+ _ASM_EXTABLE(1234b, 3f)
30041+#endif
30042+
30043 LOCK_PREFIX
30044 cmpxchg8b (%esi)
30045 jne 1b
30046@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
30047 movl $1, %eax
30048 3:
30049 RESTORE ebx
30050+ pax_force_retaddr
30051 ret
30052 CFI_ENDPROC
30053 ENDPROC(atomic64_inc_not_zero_cx8)
30054diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
30055index e78b8eee..7e173a8 100644
30056--- a/arch/x86/lib/checksum_32.S
30057+++ b/arch/x86/lib/checksum_32.S
30058@@ -29,7 +29,8 @@
30059 #include <asm/dwarf2.h>
30060 #include <asm/errno.h>
30061 #include <asm/asm.h>
30062-
30063+#include <asm/segment.h>
30064+
30065 /*
30066 * computes a partial checksum, e.g. for TCP/UDP fragments
30067 */
30068@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
30069
30070 #define ARGBASE 16
30071 #define FP 12
30072-
30073-ENTRY(csum_partial_copy_generic)
30074+
30075+ENTRY(csum_partial_copy_generic_to_user)
30076 CFI_STARTPROC
30077+
30078+#ifdef CONFIG_PAX_MEMORY_UDEREF
30079+ pushl_cfi %gs
30080+ popl_cfi %es
30081+ jmp csum_partial_copy_generic
30082+#endif
30083+
30084+ENTRY(csum_partial_copy_generic_from_user)
30085+
30086+#ifdef CONFIG_PAX_MEMORY_UDEREF
30087+ pushl_cfi %gs
30088+ popl_cfi %ds
30089+#endif
30090+
30091+ENTRY(csum_partial_copy_generic)
30092 subl $4,%esp
30093 CFI_ADJUST_CFA_OFFSET 4
30094 pushl_cfi %edi
30095@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
30096 jmp 4f
30097 SRC(1: movw (%esi), %bx )
30098 addl $2, %esi
30099-DST( movw %bx, (%edi) )
30100+DST( movw %bx, %es:(%edi) )
30101 addl $2, %edi
30102 addw %bx, %ax
30103 adcl $0, %eax
30104@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
30105 SRC(1: movl (%esi), %ebx )
30106 SRC( movl 4(%esi), %edx )
30107 adcl %ebx, %eax
30108-DST( movl %ebx, (%edi) )
30109+DST( movl %ebx, %es:(%edi) )
30110 adcl %edx, %eax
30111-DST( movl %edx, 4(%edi) )
30112+DST( movl %edx, %es:4(%edi) )
30113
30114 SRC( movl 8(%esi), %ebx )
30115 SRC( movl 12(%esi), %edx )
30116 adcl %ebx, %eax
30117-DST( movl %ebx, 8(%edi) )
30118+DST( movl %ebx, %es:8(%edi) )
30119 adcl %edx, %eax
30120-DST( movl %edx, 12(%edi) )
30121+DST( movl %edx, %es:12(%edi) )
30122
30123 SRC( movl 16(%esi), %ebx )
30124 SRC( movl 20(%esi), %edx )
30125 adcl %ebx, %eax
30126-DST( movl %ebx, 16(%edi) )
30127+DST( movl %ebx, %es:16(%edi) )
30128 adcl %edx, %eax
30129-DST( movl %edx, 20(%edi) )
30130+DST( movl %edx, %es:20(%edi) )
30131
30132 SRC( movl 24(%esi), %ebx )
30133 SRC( movl 28(%esi), %edx )
30134 adcl %ebx, %eax
30135-DST( movl %ebx, 24(%edi) )
30136+DST( movl %ebx, %es:24(%edi) )
30137 adcl %edx, %eax
30138-DST( movl %edx, 28(%edi) )
30139+DST( movl %edx, %es:28(%edi) )
30140
30141 lea 32(%esi), %esi
30142 lea 32(%edi), %edi
30143@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
30144 shrl $2, %edx # This clears CF
30145 SRC(3: movl (%esi), %ebx )
30146 adcl %ebx, %eax
30147-DST( movl %ebx, (%edi) )
30148+DST( movl %ebx, %es:(%edi) )
30149 lea 4(%esi), %esi
30150 lea 4(%edi), %edi
30151 dec %edx
30152@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
30153 jb 5f
30154 SRC( movw (%esi), %cx )
30155 leal 2(%esi), %esi
30156-DST( movw %cx, (%edi) )
30157+DST( movw %cx, %es:(%edi) )
30158 leal 2(%edi), %edi
30159 je 6f
30160 shll $16,%ecx
30161 SRC(5: movb (%esi), %cl )
30162-DST( movb %cl, (%edi) )
30163+DST( movb %cl, %es:(%edi) )
30164 6: addl %ecx, %eax
30165 adcl $0, %eax
30166 7:
30167@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
30168
30169 6001:
30170 movl ARGBASE+20(%esp), %ebx # src_err_ptr
30171- movl $-EFAULT, (%ebx)
30172+ movl $-EFAULT, %ss:(%ebx)
30173
30174 # zero the complete destination - computing the rest
30175 # is too much work
30176@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
30177
30178 6002:
30179 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
30180- movl $-EFAULT,(%ebx)
30181+ movl $-EFAULT,%ss:(%ebx)
30182 jmp 5000b
30183
30184 .previous
30185
30186+ pushl_cfi %ss
30187+ popl_cfi %ds
30188+ pushl_cfi %ss
30189+ popl_cfi %es
30190 popl_cfi %ebx
30191 CFI_RESTORE ebx
30192 popl_cfi %esi
30193@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
30194 popl_cfi %ecx # equivalent to addl $4,%esp
30195 ret
30196 CFI_ENDPROC
30197-ENDPROC(csum_partial_copy_generic)
30198+ENDPROC(csum_partial_copy_generic_to_user)
30199
30200 #else
30201
30202 /* Version for PentiumII/PPro */
30203
30204 #define ROUND1(x) \
30205+ nop; nop; nop; \
30206 SRC(movl x(%esi), %ebx ) ; \
30207 addl %ebx, %eax ; \
30208- DST(movl %ebx, x(%edi) ) ;
30209+ DST(movl %ebx, %es:x(%edi)) ;
30210
30211 #define ROUND(x) \
30212+ nop; nop; nop; \
30213 SRC(movl x(%esi), %ebx ) ; \
30214 adcl %ebx, %eax ; \
30215- DST(movl %ebx, x(%edi) ) ;
30216+ DST(movl %ebx, %es:x(%edi)) ;
30217
30218 #define ARGBASE 12
30219-
30220-ENTRY(csum_partial_copy_generic)
30221+
30222+ENTRY(csum_partial_copy_generic_to_user)
30223 CFI_STARTPROC
30224+
30225+#ifdef CONFIG_PAX_MEMORY_UDEREF
30226+ pushl_cfi %gs
30227+ popl_cfi %es
30228+ jmp csum_partial_copy_generic
30229+#endif
30230+
30231+ENTRY(csum_partial_copy_generic_from_user)
30232+
30233+#ifdef CONFIG_PAX_MEMORY_UDEREF
30234+ pushl_cfi %gs
30235+ popl_cfi %ds
30236+#endif
30237+
30238+ENTRY(csum_partial_copy_generic)
30239 pushl_cfi %ebx
30240 CFI_REL_OFFSET ebx, 0
30241 pushl_cfi %edi
30242@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
30243 subl %ebx, %edi
30244 lea -1(%esi),%edx
30245 andl $-32,%edx
30246- lea 3f(%ebx,%ebx), %ebx
30247+ lea 3f(%ebx,%ebx,2), %ebx
30248 testl %esi, %esi
30249 jmp *%ebx
30250 1: addl $64,%esi
30251@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
30252 jb 5f
30253 SRC( movw (%esi), %dx )
30254 leal 2(%esi), %esi
30255-DST( movw %dx, (%edi) )
30256+DST( movw %dx, %es:(%edi) )
30257 leal 2(%edi), %edi
30258 je 6f
30259 shll $16,%edx
30260 5:
30261 SRC( movb (%esi), %dl )
30262-DST( movb %dl, (%edi) )
30263+DST( movb %dl, %es:(%edi) )
30264 6: addl %edx, %eax
30265 adcl $0, %eax
30266 7:
30267 .section .fixup, "ax"
30268 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
30269- movl $-EFAULT, (%ebx)
30270+ movl $-EFAULT, %ss:(%ebx)
30271 # zero the complete destination (computing the rest is too much work)
30272 movl ARGBASE+8(%esp),%edi # dst
30273 movl ARGBASE+12(%esp),%ecx # len
30274@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
30275 rep; stosb
30276 jmp 7b
30277 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
30278- movl $-EFAULT, (%ebx)
30279+ movl $-EFAULT, %ss:(%ebx)
30280 jmp 7b
30281 .previous
30282
30283+#ifdef CONFIG_PAX_MEMORY_UDEREF
30284+ pushl_cfi %ss
30285+ popl_cfi %ds
30286+ pushl_cfi %ss
30287+ popl_cfi %es
30288+#endif
30289+
30290 popl_cfi %esi
30291 CFI_RESTORE esi
30292 popl_cfi %edi
30293@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
30294 CFI_RESTORE ebx
30295 ret
30296 CFI_ENDPROC
30297-ENDPROC(csum_partial_copy_generic)
30298+ENDPROC(csum_partial_copy_generic_to_user)
30299
30300 #undef ROUND
30301 #undef ROUND1
30302diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
30303index f2145cf..cea889d 100644
30304--- a/arch/x86/lib/clear_page_64.S
30305+++ b/arch/x86/lib/clear_page_64.S
30306@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
30307 movl $4096/8,%ecx
30308 xorl %eax,%eax
30309 rep stosq
30310+ pax_force_retaddr
30311 ret
30312 CFI_ENDPROC
30313 ENDPROC(clear_page_c)
30314@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
30315 movl $4096,%ecx
30316 xorl %eax,%eax
30317 rep stosb
30318+ pax_force_retaddr
30319 ret
30320 CFI_ENDPROC
30321 ENDPROC(clear_page_c_e)
30322@@ -43,6 +45,7 @@ ENTRY(clear_page)
30323 leaq 64(%rdi),%rdi
30324 jnz .Lloop
30325 nop
30326+ pax_force_retaddr
30327 ret
30328 CFI_ENDPROC
30329 .Lclear_page_end:
30330@@ -58,7 +61,7 @@ ENDPROC(clear_page)
30331
30332 #include <asm/cpufeature.h>
30333
30334- .section .altinstr_replacement,"ax"
30335+ .section .altinstr_replacement,"a"
30336 1: .byte 0xeb /* jmp <disp8> */
30337 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
30338 2: .byte 0xeb /* jmp <disp8> */
30339diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
30340index 1e572c5..2a162cd 100644
30341--- a/arch/x86/lib/cmpxchg16b_emu.S
30342+++ b/arch/x86/lib/cmpxchg16b_emu.S
30343@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
30344
30345 popf
30346 mov $1, %al
30347+ pax_force_retaddr
30348 ret
30349
30350 not_same:
30351 popf
30352 xor %al,%al
30353+ pax_force_retaddr
30354 ret
30355
30356 CFI_ENDPROC
30357diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
30358index 176cca6..e0d658e 100644
30359--- a/arch/x86/lib/copy_page_64.S
30360+++ b/arch/x86/lib/copy_page_64.S
30361@@ -9,6 +9,7 @@ copy_page_rep:
30362 CFI_STARTPROC
30363 movl $4096/8, %ecx
30364 rep movsq
30365+ pax_force_retaddr
30366 ret
30367 CFI_ENDPROC
30368 ENDPROC(copy_page_rep)
30369@@ -24,8 +25,8 @@ ENTRY(copy_page)
30370 CFI_ADJUST_CFA_OFFSET 2*8
30371 movq %rbx, (%rsp)
30372 CFI_REL_OFFSET rbx, 0
30373- movq %r12, 1*8(%rsp)
30374- CFI_REL_OFFSET r12, 1*8
30375+ movq %r13, 1*8(%rsp)
30376+ CFI_REL_OFFSET r13, 1*8
30377
30378 movl $(4096/64)-5, %ecx
30379 .p2align 4
30380@@ -38,7 +39,7 @@ ENTRY(copy_page)
30381 movq 0x8*4(%rsi), %r9
30382 movq 0x8*5(%rsi), %r10
30383 movq 0x8*6(%rsi), %r11
30384- movq 0x8*7(%rsi), %r12
30385+ movq 0x8*7(%rsi), %r13
30386
30387 prefetcht0 5*64(%rsi)
30388
30389@@ -49,7 +50,7 @@ ENTRY(copy_page)
30390 movq %r9, 0x8*4(%rdi)
30391 movq %r10, 0x8*5(%rdi)
30392 movq %r11, 0x8*6(%rdi)
30393- movq %r12, 0x8*7(%rdi)
30394+ movq %r13, 0x8*7(%rdi)
30395
30396 leaq 64 (%rsi), %rsi
30397 leaq 64 (%rdi), %rdi
30398@@ -68,7 +69,7 @@ ENTRY(copy_page)
30399 movq 0x8*4(%rsi), %r9
30400 movq 0x8*5(%rsi), %r10
30401 movq 0x8*6(%rsi), %r11
30402- movq 0x8*7(%rsi), %r12
30403+ movq 0x8*7(%rsi), %r13
30404
30405 movq %rax, 0x8*0(%rdi)
30406 movq %rbx, 0x8*1(%rdi)
30407@@ -77,7 +78,7 @@ ENTRY(copy_page)
30408 movq %r9, 0x8*4(%rdi)
30409 movq %r10, 0x8*5(%rdi)
30410 movq %r11, 0x8*6(%rdi)
30411- movq %r12, 0x8*7(%rdi)
30412+ movq %r13, 0x8*7(%rdi)
30413
30414 leaq 64(%rdi), %rdi
30415 leaq 64(%rsi), %rsi
30416@@ -85,10 +86,11 @@ ENTRY(copy_page)
30417
30418 movq (%rsp), %rbx
30419 CFI_RESTORE rbx
30420- movq 1*8(%rsp), %r12
30421- CFI_RESTORE r12
30422+ movq 1*8(%rsp), %r13
30423+ CFI_RESTORE r13
30424 addq $2*8, %rsp
30425 CFI_ADJUST_CFA_OFFSET -2*8
30426+ pax_force_retaddr
30427 ret
30428 .Lcopy_page_end:
30429 CFI_ENDPROC
30430@@ -99,7 +101,7 @@ ENDPROC(copy_page)
30431
30432 #include <asm/cpufeature.h>
30433
30434- .section .altinstr_replacement,"ax"
30435+ .section .altinstr_replacement,"a"
30436 1: .byte 0xeb /* jmp <disp8> */
30437 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
30438 2:
30439diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
30440index dee945d..a84067b 100644
30441--- a/arch/x86/lib/copy_user_64.S
30442+++ b/arch/x86/lib/copy_user_64.S
30443@@ -18,31 +18,7 @@
30444 #include <asm/alternative-asm.h>
30445 #include <asm/asm.h>
30446 #include <asm/smap.h>
30447-
30448-/*
30449- * By placing feature2 after feature1 in altinstructions section, we logically
30450- * implement:
30451- * If CPU has feature2, jmp to alt2 is used
30452- * else if CPU has feature1, jmp to alt1 is used
30453- * else jmp to orig is used.
30454- */
30455- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
30456-0:
30457- .byte 0xe9 /* 32bit jump */
30458- .long \orig-1f /* by default jump to orig */
30459-1:
30460- .section .altinstr_replacement,"ax"
30461-2: .byte 0xe9 /* near jump with 32bit immediate */
30462- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
30463-3: .byte 0xe9 /* near jump with 32bit immediate */
30464- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
30465- .previous
30466-
30467- .section .altinstructions,"a"
30468- altinstruction_entry 0b,2b,\feature1,5,5
30469- altinstruction_entry 0b,3b,\feature2,5,5
30470- .previous
30471- .endm
30472+#include <asm/pgtable.h>
30473
30474 .macro ALIGN_DESTINATION
30475 #ifdef FIX_ALIGNMENT
30476@@ -70,52 +46,6 @@
30477 #endif
30478 .endm
30479
30480-/* Standard copy_to_user with segment limit checking */
30481-ENTRY(_copy_to_user)
30482- CFI_STARTPROC
30483- GET_THREAD_INFO(%rax)
30484- movq %rdi,%rcx
30485- addq %rdx,%rcx
30486- jc bad_to_user
30487- cmpq TI_addr_limit(%rax),%rcx
30488- ja bad_to_user
30489- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30490- copy_user_generic_unrolled,copy_user_generic_string, \
30491- copy_user_enhanced_fast_string
30492- CFI_ENDPROC
30493-ENDPROC(_copy_to_user)
30494-
30495-/* Standard copy_from_user with segment limit checking */
30496-ENTRY(_copy_from_user)
30497- CFI_STARTPROC
30498- GET_THREAD_INFO(%rax)
30499- movq %rsi,%rcx
30500- addq %rdx,%rcx
30501- jc bad_from_user
30502- cmpq TI_addr_limit(%rax),%rcx
30503- ja bad_from_user
30504- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30505- copy_user_generic_unrolled,copy_user_generic_string, \
30506- copy_user_enhanced_fast_string
30507- CFI_ENDPROC
30508-ENDPROC(_copy_from_user)
30509-
30510- .section .fixup,"ax"
30511- /* must zero dest */
30512-ENTRY(bad_from_user)
30513-bad_from_user:
30514- CFI_STARTPROC
30515- movl %edx,%ecx
30516- xorl %eax,%eax
30517- rep
30518- stosb
30519-bad_to_user:
30520- movl %edx,%eax
30521- ret
30522- CFI_ENDPROC
30523-ENDPROC(bad_from_user)
30524- .previous
30525-
30526 /*
30527 * copy_user_generic_unrolled - memory copy with exception handling.
30528 * This version is for CPUs like P4 that don't have efficient micro
30529@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
30530 */
30531 ENTRY(copy_user_generic_unrolled)
30532 CFI_STARTPROC
30533+ ASM_PAX_OPEN_USERLAND
30534 ASM_STAC
30535 cmpl $8,%edx
30536 jb 20f /* less then 8 bytes, go to byte copy loop */
30537@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
30538 jnz 21b
30539 23: xor %eax,%eax
30540 ASM_CLAC
30541+ ASM_PAX_CLOSE_USERLAND
30542+ pax_force_retaddr
30543 ret
30544
30545 .section .fixup,"ax"
30546@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
30547 */
30548 ENTRY(copy_user_generic_string)
30549 CFI_STARTPROC
30550+ ASM_PAX_OPEN_USERLAND
30551 ASM_STAC
30552 cmpl $8,%edx
30553 jb 2f /* less than 8 bytes, go to byte copy loop */
30554@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
30555 movsb
30556 xorl %eax,%eax
30557 ASM_CLAC
30558+ ASM_PAX_CLOSE_USERLAND
30559+ pax_force_retaddr
30560 ret
30561
30562 .section .fixup,"ax"
30563@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
30564 */
30565 ENTRY(copy_user_enhanced_fast_string)
30566 CFI_STARTPROC
30567+ ASM_PAX_OPEN_USERLAND
30568 ASM_STAC
30569 movl %edx,%ecx
30570 1: rep
30571 movsb
30572 xorl %eax,%eax
30573 ASM_CLAC
30574+ ASM_PAX_CLOSE_USERLAND
30575+ pax_force_retaddr
30576 ret
30577
30578 .section .fixup,"ax"
30579diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30580index 6a4f43c..c70fb52 100644
30581--- a/arch/x86/lib/copy_user_nocache_64.S
30582+++ b/arch/x86/lib/copy_user_nocache_64.S
30583@@ -8,6 +8,7 @@
30584
30585 #include <linux/linkage.h>
30586 #include <asm/dwarf2.h>
30587+#include <asm/alternative-asm.h>
30588
30589 #define FIX_ALIGNMENT 1
30590
30591@@ -16,6 +17,7 @@
30592 #include <asm/thread_info.h>
30593 #include <asm/asm.h>
30594 #include <asm/smap.h>
30595+#include <asm/pgtable.h>
30596
30597 .macro ALIGN_DESTINATION
30598 #ifdef FIX_ALIGNMENT
30599@@ -49,6 +51,16 @@
30600 */
30601 ENTRY(__copy_user_nocache)
30602 CFI_STARTPROC
30603+
30604+#ifdef CONFIG_PAX_MEMORY_UDEREF
30605+ mov pax_user_shadow_base,%rcx
30606+ cmp %rcx,%rsi
30607+ jae 1f
30608+ add %rcx,%rsi
30609+1:
30610+#endif
30611+
30612+ ASM_PAX_OPEN_USERLAND
30613 ASM_STAC
30614 cmpl $8,%edx
30615 jb 20f /* less then 8 bytes, go to byte copy loop */
30616@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30617 jnz 21b
30618 23: xorl %eax,%eax
30619 ASM_CLAC
30620+ ASM_PAX_CLOSE_USERLAND
30621 sfence
30622+ pax_force_retaddr
30623 ret
30624
30625 .section .fixup,"ax"
30626diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30627index 2419d5f..fe52d0e 100644
30628--- a/arch/x86/lib/csum-copy_64.S
30629+++ b/arch/x86/lib/csum-copy_64.S
30630@@ -9,6 +9,7 @@
30631 #include <asm/dwarf2.h>
30632 #include <asm/errno.h>
30633 #include <asm/asm.h>
30634+#include <asm/alternative-asm.h>
30635
30636 /*
30637 * Checksum copy with exception handling.
30638@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30639 CFI_ADJUST_CFA_OFFSET 7*8
30640 movq %rbx, 2*8(%rsp)
30641 CFI_REL_OFFSET rbx, 2*8
30642- movq %r12, 3*8(%rsp)
30643- CFI_REL_OFFSET r12, 3*8
30644+ movq %r15, 3*8(%rsp)
30645+ CFI_REL_OFFSET r15, 3*8
30646 movq %r14, 4*8(%rsp)
30647 CFI_REL_OFFSET r14, 4*8
30648 movq %r13, 5*8(%rsp)
30649@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30650 movl %edx, %ecx
30651
30652 xorl %r9d, %r9d
30653- movq %rcx, %r12
30654+ movq %rcx, %r15
30655
30656- shrq $6, %r12
30657+ shrq $6, %r15
30658 jz .Lhandle_tail /* < 64 */
30659
30660 clc
30661
30662 /* main loop. clear in 64 byte blocks */
30663 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30664- /* r11: temp3, rdx: temp4, r12 loopcnt */
30665+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30666 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30667 .p2align 4
30668 .Lloop:
30669@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30670 adcq %r14, %rax
30671 adcq %r13, %rax
30672
30673- decl %r12d
30674+ decl %r15d
30675
30676 dest
30677 movq %rbx, (%rsi)
30678@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30679 .Lende:
30680 movq 2*8(%rsp), %rbx
30681 CFI_RESTORE rbx
30682- movq 3*8(%rsp), %r12
30683- CFI_RESTORE r12
30684+ movq 3*8(%rsp), %r15
30685+ CFI_RESTORE r15
30686 movq 4*8(%rsp), %r14
30687 CFI_RESTORE r14
30688 movq 5*8(%rsp), %r13
30689@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30690 CFI_RESTORE rbp
30691 addq $7*8, %rsp
30692 CFI_ADJUST_CFA_OFFSET -7*8
30693+ pax_force_retaddr
30694 ret
30695 CFI_RESTORE_STATE
30696
30697diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30698index 7609e0e..b449b98 100644
30699--- a/arch/x86/lib/csum-wrappers_64.c
30700+++ b/arch/x86/lib/csum-wrappers_64.c
30701@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30702 len -= 2;
30703 }
30704 }
30705+ pax_open_userland();
30706 stac();
30707- isum = csum_partial_copy_generic((__force const void *)src,
30708+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30709 dst, len, isum, errp, NULL);
30710 clac();
30711+ pax_close_userland();
30712 if (unlikely(*errp))
30713 goto out_err;
30714
30715@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30716 }
30717
30718 *errp = 0;
30719+ pax_open_userland();
30720 stac();
30721- ret = csum_partial_copy_generic(src, (void __force *)dst,
30722+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30723 len, isum, NULL, errp);
30724 clac();
30725+ pax_close_userland();
30726 return ret;
30727 }
30728 EXPORT_SYMBOL(csum_partial_copy_to_user);
30729diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30730index a451235..1daa956 100644
30731--- a/arch/x86/lib/getuser.S
30732+++ b/arch/x86/lib/getuser.S
30733@@ -33,17 +33,40 @@
30734 #include <asm/thread_info.h>
30735 #include <asm/asm.h>
30736 #include <asm/smap.h>
30737+#include <asm/segment.h>
30738+#include <asm/pgtable.h>
30739+#include <asm/alternative-asm.h>
30740+
30741+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30742+#define __copyuser_seg gs;
30743+#else
30744+#define __copyuser_seg
30745+#endif
30746
30747 .text
30748 ENTRY(__get_user_1)
30749 CFI_STARTPROC
30750+
30751+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30752 GET_THREAD_INFO(%_ASM_DX)
30753 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30754 jae bad_get_user
30755 ASM_STAC
30756-1: movzbl (%_ASM_AX),%edx
30757+
30758+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30759+ mov pax_user_shadow_base,%_ASM_DX
30760+ cmp %_ASM_DX,%_ASM_AX
30761+ jae 1234f
30762+ add %_ASM_DX,%_ASM_AX
30763+1234:
30764+#endif
30765+
30766+#endif
30767+
30768+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30769 xor %eax,%eax
30770 ASM_CLAC
30771+ pax_force_retaddr
30772 ret
30773 CFI_ENDPROC
30774 ENDPROC(__get_user_1)
30775@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30776 ENTRY(__get_user_2)
30777 CFI_STARTPROC
30778 add $1,%_ASM_AX
30779+
30780+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30781 jc bad_get_user
30782 GET_THREAD_INFO(%_ASM_DX)
30783 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30784 jae bad_get_user
30785 ASM_STAC
30786-2: movzwl -1(%_ASM_AX),%edx
30787+
30788+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30789+ mov pax_user_shadow_base,%_ASM_DX
30790+ cmp %_ASM_DX,%_ASM_AX
30791+ jae 1234f
30792+ add %_ASM_DX,%_ASM_AX
30793+1234:
30794+#endif
30795+
30796+#endif
30797+
30798+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30799 xor %eax,%eax
30800 ASM_CLAC
30801+ pax_force_retaddr
30802 ret
30803 CFI_ENDPROC
30804 ENDPROC(__get_user_2)
30805@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30806 ENTRY(__get_user_4)
30807 CFI_STARTPROC
30808 add $3,%_ASM_AX
30809+
30810+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30811 jc bad_get_user
30812 GET_THREAD_INFO(%_ASM_DX)
30813 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30814 jae bad_get_user
30815 ASM_STAC
30816-3: movl -3(%_ASM_AX),%edx
30817+
30818+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30819+ mov pax_user_shadow_base,%_ASM_DX
30820+ cmp %_ASM_DX,%_ASM_AX
30821+ jae 1234f
30822+ add %_ASM_DX,%_ASM_AX
30823+1234:
30824+#endif
30825+
30826+#endif
30827+
30828+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30829 xor %eax,%eax
30830 ASM_CLAC
30831+ pax_force_retaddr
30832 ret
30833 CFI_ENDPROC
30834 ENDPROC(__get_user_4)
30835@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30836 GET_THREAD_INFO(%_ASM_DX)
30837 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30838 jae bad_get_user
30839+
30840+#ifdef CONFIG_PAX_MEMORY_UDEREF
30841+ mov pax_user_shadow_base,%_ASM_DX
30842+ cmp %_ASM_DX,%_ASM_AX
30843+ jae 1234f
30844+ add %_ASM_DX,%_ASM_AX
30845+1234:
30846+#endif
30847+
30848 ASM_STAC
30849 4: movq -7(%_ASM_AX),%rdx
30850 xor %eax,%eax
30851 ASM_CLAC
30852+ pax_force_retaddr
30853 ret
30854 #else
30855 add $7,%_ASM_AX
30856@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30857 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30858 jae bad_get_user_8
30859 ASM_STAC
30860-4: movl -7(%_ASM_AX),%edx
30861-5: movl -3(%_ASM_AX),%ecx
30862+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30863+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30864 xor %eax,%eax
30865 ASM_CLAC
30866+ pax_force_retaddr
30867 ret
30868 #endif
30869 CFI_ENDPROC
30870@@ -113,6 +175,7 @@ bad_get_user:
30871 xor %edx,%edx
30872 mov $(-EFAULT),%_ASM_AX
30873 ASM_CLAC
30874+ pax_force_retaddr
30875 ret
30876 CFI_ENDPROC
30877 END(bad_get_user)
30878@@ -124,6 +187,7 @@ bad_get_user_8:
30879 xor %ecx,%ecx
30880 mov $(-EFAULT),%_ASM_AX
30881 ASM_CLAC
30882+ pax_force_retaddr
30883 ret
30884 CFI_ENDPROC
30885 END(bad_get_user_8)
30886diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30887index 54fcffe..7be149e 100644
30888--- a/arch/x86/lib/insn.c
30889+++ b/arch/x86/lib/insn.c
30890@@ -20,8 +20,10 @@
30891
30892 #ifdef __KERNEL__
30893 #include <linux/string.h>
30894+#include <asm/pgtable_types.h>
30895 #else
30896 #include <string.h>
30897+#define ktla_ktva(addr) addr
30898 #endif
30899 #include <asm/inat.h>
30900 #include <asm/insn.h>
30901@@ -53,8 +55,8 @@
30902 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
30903 {
30904 memset(insn, 0, sizeof(*insn));
30905- insn->kaddr = kaddr;
30906- insn->next_byte = kaddr;
30907+ insn->kaddr = ktla_ktva(kaddr);
30908+ insn->next_byte = ktla_ktva(kaddr);
30909 insn->x86_64 = x86_64 ? 1 : 0;
30910 insn->opnd_bytes = 4;
30911 if (x86_64)
30912diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30913index 05a95e7..326f2fa 100644
30914--- a/arch/x86/lib/iomap_copy_64.S
30915+++ b/arch/x86/lib/iomap_copy_64.S
30916@@ -17,6 +17,7 @@
30917
30918 #include <linux/linkage.h>
30919 #include <asm/dwarf2.h>
30920+#include <asm/alternative-asm.h>
30921
30922 /*
30923 * override generic version in lib/iomap_copy.c
30924@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30925 CFI_STARTPROC
30926 movl %edx,%ecx
30927 rep movsd
30928+ pax_force_retaddr
30929 ret
30930 CFI_ENDPROC
30931 ENDPROC(__iowrite32_copy)
30932diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30933index 56313a3..0db417e 100644
30934--- a/arch/x86/lib/memcpy_64.S
30935+++ b/arch/x86/lib/memcpy_64.S
30936@@ -24,7 +24,7 @@
30937 * This gets patched over the unrolled variant (below) via the
30938 * alternative instructions framework:
30939 */
30940- .section .altinstr_replacement, "ax", @progbits
30941+ .section .altinstr_replacement, "a", @progbits
30942 .Lmemcpy_c:
30943 movq %rdi, %rax
30944 movq %rdx, %rcx
30945@@ -33,6 +33,7 @@
30946 rep movsq
30947 movl %edx, %ecx
30948 rep movsb
30949+ pax_force_retaddr
30950 ret
30951 .Lmemcpy_e:
30952 .previous
30953@@ -44,11 +45,12 @@
30954 * This gets patched over the unrolled variant (below) via the
30955 * alternative instructions framework:
30956 */
30957- .section .altinstr_replacement, "ax", @progbits
30958+ .section .altinstr_replacement, "a", @progbits
30959 .Lmemcpy_c_e:
30960 movq %rdi, %rax
30961 movq %rdx, %rcx
30962 rep movsb
30963+ pax_force_retaddr
30964 ret
30965 .Lmemcpy_e_e:
30966 .previous
30967@@ -136,6 +138,7 @@ ENTRY(memcpy)
30968 movq %r9, 1*8(%rdi)
30969 movq %r10, -2*8(%rdi, %rdx)
30970 movq %r11, -1*8(%rdi, %rdx)
30971+ pax_force_retaddr
30972 retq
30973 .p2align 4
30974 .Lless_16bytes:
30975@@ -148,6 +151,7 @@ ENTRY(memcpy)
30976 movq -1*8(%rsi, %rdx), %r9
30977 movq %r8, 0*8(%rdi)
30978 movq %r9, -1*8(%rdi, %rdx)
30979+ pax_force_retaddr
30980 retq
30981 .p2align 4
30982 .Lless_8bytes:
30983@@ -161,6 +165,7 @@ ENTRY(memcpy)
30984 movl -4(%rsi, %rdx), %r8d
30985 movl %ecx, (%rdi)
30986 movl %r8d, -4(%rdi, %rdx)
30987+ pax_force_retaddr
30988 retq
30989 .p2align 4
30990 .Lless_3bytes:
30991@@ -179,6 +184,7 @@ ENTRY(memcpy)
30992 movb %cl, (%rdi)
30993
30994 .Lend:
30995+ pax_force_retaddr
30996 retq
30997 CFI_ENDPROC
30998 ENDPROC(memcpy)
30999diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
31000index 65268a6..dd1de11 100644
31001--- a/arch/x86/lib/memmove_64.S
31002+++ b/arch/x86/lib/memmove_64.S
31003@@ -202,14 +202,16 @@ ENTRY(memmove)
31004 movb (%rsi), %r11b
31005 movb %r11b, (%rdi)
31006 13:
31007+ pax_force_retaddr
31008 retq
31009 CFI_ENDPROC
31010
31011- .section .altinstr_replacement,"ax"
31012+ .section .altinstr_replacement,"a"
31013 .Lmemmove_begin_forward_efs:
31014 /* Forward moving data. */
31015 movq %rdx, %rcx
31016 rep movsb
31017+ pax_force_retaddr
31018 retq
31019 .Lmemmove_end_forward_efs:
31020 .previous
31021diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
31022index 2dcb380..2eb79fe 100644
31023--- a/arch/x86/lib/memset_64.S
31024+++ b/arch/x86/lib/memset_64.S
31025@@ -16,7 +16,7 @@
31026 *
31027 * rax original destination
31028 */
31029- .section .altinstr_replacement, "ax", @progbits
31030+ .section .altinstr_replacement, "a", @progbits
31031 .Lmemset_c:
31032 movq %rdi,%r9
31033 movq %rdx,%rcx
31034@@ -30,6 +30,7 @@
31035 movl %edx,%ecx
31036 rep stosb
31037 movq %r9,%rax
31038+ pax_force_retaddr
31039 ret
31040 .Lmemset_e:
31041 .previous
31042@@ -45,13 +46,14 @@
31043 *
31044 * rax original destination
31045 */
31046- .section .altinstr_replacement, "ax", @progbits
31047+ .section .altinstr_replacement, "a", @progbits
31048 .Lmemset_c_e:
31049 movq %rdi,%r9
31050 movb %sil,%al
31051 movq %rdx,%rcx
31052 rep stosb
31053 movq %r9,%rax
31054+ pax_force_retaddr
31055 ret
31056 .Lmemset_e_e:
31057 .previous
31058@@ -118,6 +120,7 @@ ENTRY(__memset)
31059
31060 .Lende:
31061 movq %r10,%rax
31062+ pax_force_retaddr
31063 ret
31064
31065 CFI_RESTORE_STATE
31066diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
31067index c9f2d9b..e7fd2c0 100644
31068--- a/arch/x86/lib/mmx_32.c
31069+++ b/arch/x86/lib/mmx_32.c
31070@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
31071 {
31072 void *p;
31073 int i;
31074+ unsigned long cr0;
31075
31076 if (unlikely(in_interrupt()))
31077 return __memcpy(to, from, len);
31078@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
31079 kernel_fpu_begin();
31080
31081 __asm__ __volatile__ (
31082- "1: prefetch (%0)\n" /* This set is 28 bytes */
31083- " prefetch 64(%0)\n"
31084- " prefetch 128(%0)\n"
31085- " prefetch 192(%0)\n"
31086- " prefetch 256(%0)\n"
31087+ "1: prefetch (%1)\n" /* This set is 28 bytes */
31088+ " prefetch 64(%1)\n"
31089+ " prefetch 128(%1)\n"
31090+ " prefetch 192(%1)\n"
31091+ " prefetch 256(%1)\n"
31092 "2: \n"
31093 ".section .fixup, \"ax\"\n"
31094- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31095+ "3: \n"
31096+
31097+#ifdef CONFIG_PAX_KERNEXEC
31098+ " movl %%cr0, %0\n"
31099+ " movl %0, %%eax\n"
31100+ " andl $0xFFFEFFFF, %%eax\n"
31101+ " movl %%eax, %%cr0\n"
31102+#endif
31103+
31104+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31105+
31106+#ifdef CONFIG_PAX_KERNEXEC
31107+ " movl %0, %%cr0\n"
31108+#endif
31109+
31110 " jmp 2b\n"
31111 ".previous\n"
31112 _ASM_EXTABLE(1b, 3b)
31113- : : "r" (from));
31114+ : "=&r" (cr0) : "r" (from) : "ax");
31115
31116 for ( ; i > 5; i--) {
31117 __asm__ __volatile__ (
31118- "1: prefetch 320(%0)\n"
31119- "2: movq (%0), %%mm0\n"
31120- " movq 8(%0), %%mm1\n"
31121- " movq 16(%0), %%mm2\n"
31122- " movq 24(%0), %%mm3\n"
31123- " movq %%mm0, (%1)\n"
31124- " movq %%mm1, 8(%1)\n"
31125- " movq %%mm2, 16(%1)\n"
31126- " movq %%mm3, 24(%1)\n"
31127- " movq 32(%0), %%mm0\n"
31128- " movq 40(%0), %%mm1\n"
31129- " movq 48(%0), %%mm2\n"
31130- " movq 56(%0), %%mm3\n"
31131- " movq %%mm0, 32(%1)\n"
31132- " movq %%mm1, 40(%1)\n"
31133- " movq %%mm2, 48(%1)\n"
31134- " movq %%mm3, 56(%1)\n"
31135+ "1: prefetch 320(%1)\n"
31136+ "2: movq (%1), %%mm0\n"
31137+ " movq 8(%1), %%mm1\n"
31138+ " movq 16(%1), %%mm2\n"
31139+ " movq 24(%1), %%mm3\n"
31140+ " movq %%mm0, (%2)\n"
31141+ " movq %%mm1, 8(%2)\n"
31142+ " movq %%mm2, 16(%2)\n"
31143+ " movq %%mm3, 24(%2)\n"
31144+ " movq 32(%1), %%mm0\n"
31145+ " movq 40(%1), %%mm1\n"
31146+ " movq 48(%1), %%mm2\n"
31147+ " movq 56(%1), %%mm3\n"
31148+ " movq %%mm0, 32(%2)\n"
31149+ " movq %%mm1, 40(%2)\n"
31150+ " movq %%mm2, 48(%2)\n"
31151+ " movq %%mm3, 56(%2)\n"
31152 ".section .fixup, \"ax\"\n"
31153- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31154+ "3:\n"
31155+
31156+#ifdef CONFIG_PAX_KERNEXEC
31157+ " movl %%cr0, %0\n"
31158+ " movl %0, %%eax\n"
31159+ " andl $0xFFFEFFFF, %%eax\n"
31160+ " movl %%eax, %%cr0\n"
31161+#endif
31162+
31163+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31164+
31165+#ifdef CONFIG_PAX_KERNEXEC
31166+ " movl %0, %%cr0\n"
31167+#endif
31168+
31169 " jmp 2b\n"
31170 ".previous\n"
31171 _ASM_EXTABLE(1b, 3b)
31172- : : "r" (from), "r" (to) : "memory");
31173+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
31174
31175 from += 64;
31176 to += 64;
31177@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
31178 static void fast_copy_page(void *to, void *from)
31179 {
31180 int i;
31181+ unsigned long cr0;
31182
31183 kernel_fpu_begin();
31184
31185@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
31186 * but that is for later. -AV
31187 */
31188 __asm__ __volatile__(
31189- "1: prefetch (%0)\n"
31190- " prefetch 64(%0)\n"
31191- " prefetch 128(%0)\n"
31192- " prefetch 192(%0)\n"
31193- " prefetch 256(%0)\n"
31194+ "1: prefetch (%1)\n"
31195+ " prefetch 64(%1)\n"
31196+ " prefetch 128(%1)\n"
31197+ " prefetch 192(%1)\n"
31198+ " prefetch 256(%1)\n"
31199 "2: \n"
31200 ".section .fixup, \"ax\"\n"
31201- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31202+ "3: \n"
31203+
31204+#ifdef CONFIG_PAX_KERNEXEC
31205+ " movl %%cr0, %0\n"
31206+ " movl %0, %%eax\n"
31207+ " andl $0xFFFEFFFF, %%eax\n"
31208+ " movl %%eax, %%cr0\n"
31209+#endif
31210+
31211+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31212+
31213+#ifdef CONFIG_PAX_KERNEXEC
31214+ " movl %0, %%cr0\n"
31215+#endif
31216+
31217 " jmp 2b\n"
31218 ".previous\n"
31219- _ASM_EXTABLE(1b, 3b) : : "r" (from));
31220+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
31221
31222 for (i = 0; i < (4096-320)/64; i++) {
31223 __asm__ __volatile__ (
31224- "1: prefetch 320(%0)\n"
31225- "2: movq (%0), %%mm0\n"
31226- " movntq %%mm0, (%1)\n"
31227- " movq 8(%0), %%mm1\n"
31228- " movntq %%mm1, 8(%1)\n"
31229- " movq 16(%0), %%mm2\n"
31230- " movntq %%mm2, 16(%1)\n"
31231- " movq 24(%0), %%mm3\n"
31232- " movntq %%mm3, 24(%1)\n"
31233- " movq 32(%0), %%mm4\n"
31234- " movntq %%mm4, 32(%1)\n"
31235- " movq 40(%0), %%mm5\n"
31236- " movntq %%mm5, 40(%1)\n"
31237- " movq 48(%0), %%mm6\n"
31238- " movntq %%mm6, 48(%1)\n"
31239- " movq 56(%0), %%mm7\n"
31240- " movntq %%mm7, 56(%1)\n"
31241+ "1: prefetch 320(%1)\n"
31242+ "2: movq (%1), %%mm0\n"
31243+ " movntq %%mm0, (%2)\n"
31244+ " movq 8(%1), %%mm1\n"
31245+ " movntq %%mm1, 8(%2)\n"
31246+ " movq 16(%1), %%mm2\n"
31247+ " movntq %%mm2, 16(%2)\n"
31248+ " movq 24(%1), %%mm3\n"
31249+ " movntq %%mm3, 24(%2)\n"
31250+ " movq 32(%1), %%mm4\n"
31251+ " movntq %%mm4, 32(%2)\n"
31252+ " movq 40(%1), %%mm5\n"
31253+ " movntq %%mm5, 40(%2)\n"
31254+ " movq 48(%1), %%mm6\n"
31255+ " movntq %%mm6, 48(%2)\n"
31256+ " movq 56(%1), %%mm7\n"
31257+ " movntq %%mm7, 56(%2)\n"
31258 ".section .fixup, \"ax\"\n"
31259- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31260+ "3:\n"
31261+
31262+#ifdef CONFIG_PAX_KERNEXEC
31263+ " movl %%cr0, %0\n"
31264+ " movl %0, %%eax\n"
31265+ " andl $0xFFFEFFFF, %%eax\n"
31266+ " movl %%eax, %%cr0\n"
31267+#endif
31268+
31269+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31270+
31271+#ifdef CONFIG_PAX_KERNEXEC
31272+ " movl %0, %%cr0\n"
31273+#endif
31274+
31275 " jmp 2b\n"
31276 ".previous\n"
31277- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
31278+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
31279
31280 from += 64;
31281 to += 64;
31282@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
31283 static void fast_copy_page(void *to, void *from)
31284 {
31285 int i;
31286+ unsigned long cr0;
31287
31288 kernel_fpu_begin();
31289
31290 __asm__ __volatile__ (
31291- "1: prefetch (%0)\n"
31292- " prefetch 64(%0)\n"
31293- " prefetch 128(%0)\n"
31294- " prefetch 192(%0)\n"
31295- " prefetch 256(%0)\n"
31296+ "1: prefetch (%1)\n"
31297+ " prefetch 64(%1)\n"
31298+ " prefetch 128(%1)\n"
31299+ " prefetch 192(%1)\n"
31300+ " prefetch 256(%1)\n"
31301 "2: \n"
31302 ".section .fixup, \"ax\"\n"
31303- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31304+ "3: \n"
31305+
31306+#ifdef CONFIG_PAX_KERNEXEC
31307+ " movl %%cr0, %0\n"
31308+ " movl %0, %%eax\n"
31309+ " andl $0xFFFEFFFF, %%eax\n"
31310+ " movl %%eax, %%cr0\n"
31311+#endif
31312+
31313+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31314+
31315+#ifdef CONFIG_PAX_KERNEXEC
31316+ " movl %0, %%cr0\n"
31317+#endif
31318+
31319 " jmp 2b\n"
31320 ".previous\n"
31321- _ASM_EXTABLE(1b, 3b) : : "r" (from));
31322+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
31323
31324 for (i = 0; i < 4096/64; i++) {
31325 __asm__ __volatile__ (
31326- "1: prefetch 320(%0)\n"
31327- "2: movq (%0), %%mm0\n"
31328- " movq 8(%0), %%mm1\n"
31329- " movq 16(%0), %%mm2\n"
31330- " movq 24(%0), %%mm3\n"
31331- " movq %%mm0, (%1)\n"
31332- " movq %%mm1, 8(%1)\n"
31333- " movq %%mm2, 16(%1)\n"
31334- " movq %%mm3, 24(%1)\n"
31335- " movq 32(%0), %%mm0\n"
31336- " movq 40(%0), %%mm1\n"
31337- " movq 48(%0), %%mm2\n"
31338- " movq 56(%0), %%mm3\n"
31339- " movq %%mm0, 32(%1)\n"
31340- " movq %%mm1, 40(%1)\n"
31341- " movq %%mm2, 48(%1)\n"
31342- " movq %%mm3, 56(%1)\n"
31343+ "1: prefetch 320(%1)\n"
31344+ "2: movq (%1), %%mm0\n"
31345+ " movq 8(%1), %%mm1\n"
31346+ " movq 16(%1), %%mm2\n"
31347+ " movq 24(%1), %%mm3\n"
31348+ " movq %%mm0, (%2)\n"
31349+ " movq %%mm1, 8(%2)\n"
31350+ " movq %%mm2, 16(%2)\n"
31351+ " movq %%mm3, 24(%2)\n"
31352+ " movq 32(%1), %%mm0\n"
31353+ " movq 40(%1), %%mm1\n"
31354+ " movq 48(%1), %%mm2\n"
31355+ " movq 56(%1), %%mm3\n"
31356+ " movq %%mm0, 32(%2)\n"
31357+ " movq %%mm1, 40(%2)\n"
31358+ " movq %%mm2, 48(%2)\n"
31359+ " movq %%mm3, 56(%2)\n"
31360 ".section .fixup, \"ax\"\n"
31361- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31362+ "3:\n"
31363+
31364+#ifdef CONFIG_PAX_KERNEXEC
31365+ " movl %%cr0, %0\n"
31366+ " movl %0, %%eax\n"
31367+ " andl $0xFFFEFFFF, %%eax\n"
31368+ " movl %%eax, %%cr0\n"
31369+#endif
31370+
31371+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31372+
31373+#ifdef CONFIG_PAX_KERNEXEC
31374+ " movl %0, %%cr0\n"
31375+#endif
31376+
31377 " jmp 2b\n"
31378 ".previous\n"
31379 _ASM_EXTABLE(1b, 3b)
31380- : : "r" (from), "r" (to) : "memory");
31381+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
31382
31383 from += 64;
31384 to += 64;
31385diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
31386index f6d13ee..d789440 100644
31387--- a/arch/x86/lib/msr-reg.S
31388+++ b/arch/x86/lib/msr-reg.S
31389@@ -3,6 +3,7 @@
31390 #include <asm/dwarf2.h>
31391 #include <asm/asm.h>
31392 #include <asm/msr.h>
31393+#include <asm/alternative-asm.h>
31394
31395 #ifdef CONFIG_X86_64
31396 /*
31397@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
31398 movl %edi, 28(%r10)
31399 popq_cfi %rbp
31400 popq_cfi %rbx
31401+ pax_force_retaddr
31402 ret
31403 3:
31404 CFI_RESTORE_STATE
31405diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
31406index fc6ba17..d4d989d 100644
31407--- a/arch/x86/lib/putuser.S
31408+++ b/arch/x86/lib/putuser.S
31409@@ -16,7 +16,9 @@
31410 #include <asm/errno.h>
31411 #include <asm/asm.h>
31412 #include <asm/smap.h>
31413-
31414+#include <asm/segment.h>
31415+#include <asm/pgtable.h>
31416+#include <asm/alternative-asm.h>
31417
31418 /*
31419 * __put_user_X
31420@@ -30,57 +32,125 @@
31421 * as they get called from within inline assembly.
31422 */
31423
31424-#define ENTER CFI_STARTPROC ; \
31425- GET_THREAD_INFO(%_ASM_BX)
31426-#define EXIT ASM_CLAC ; \
31427- ret ; \
31428+#define ENTER CFI_STARTPROC
31429+#define EXIT ASM_CLAC ; \
31430+ pax_force_retaddr ; \
31431+ ret ; \
31432 CFI_ENDPROC
31433
31434+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31435+#define _DEST %_ASM_CX,%_ASM_BX
31436+#else
31437+#define _DEST %_ASM_CX
31438+#endif
31439+
31440+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
31441+#define __copyuser_seg gs;
31442+#else
31443+#define __copyuser_seg
31444+#endif
31445+
31446 .text
31447 ENTRY(__put_user_1)
31448 ENTER
31449+
31450+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31451+ GET_THREAD_INFO(%_ASM_BX)
31452 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
31453 jae bad_put_user
31454 ASM_STAC
31455-1: movb %al,(%_ASM_CX)
31456+
31457+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31458+ mov pax_user_shadow_base,%_ASM_BX
31459+ cmp %_ASM_BX,%_ASM_CX
31460+ jb 1234f
31461+ xor %ebx,%ebx
31462+1234:
31463+#endif
31464+
31465+#endif
31466+
31467+1: __copyuser_seg movb %al,(_DEST)
31468 xor %eax,%eax
31469 EXIT
31470 ENDPROC(__put_user_1)
31471
31472 ENTRY(__put_user_2)
31473 ENTER
31474+
31475+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31476+ GET_THREAD_INFO(%_ASM_BX)
31477 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31478 sub $1,%_ASM_BX
31479 cmp %_ASM_BX,%_ASM_CX
31480 jae bad_put_user
31481 ASM_STAC
31482-2: movw %ax,(%_ASM_CX)
31483+
31484+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31485+ mov pax_user_shadow_base,%_ASM_BX
31486+ cmp %_ASM_BX,%_ASM_CX
31487+ jb 1234f
31488+ xor %ebx,%ebx
31489+1234:
31490+#endif
31491+
31492+#endif
31493+
31494+2: __copyuser_seg movw %ax,(_DEST)
31495 xor %eax,%eax
31496 EXIT
31497 ENDPROC(__put_user_2)
31498
31499 ENTRY(__put_user_4)
31500 ENTER
31501+
31502+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31503+ GET_THREAD_INFO(%_ASM_BX)
31504 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31505 sub $3,%_ASM_BX
31506 cmp %_ASM_BX,%_ASM_CX
31507 jae bad_put_user
31508 ASM_STAC
31509-3: movl %eax,(%_ASM_CX)
31510+
31511+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31512+ mov pax_user_shadow_base,%_ASM_BX
31513+ cmp %_ASM_BX,%_ASM_CX
31514+ jb 1234f
31515+ xor %ebx,%ebx
31516+1234:
31517+#endif
31518+
31519+#endif
31520+
31521+3: __copyuser_seg movl %eax,(_DEST)
31522 xor %eax,%eax
31523 EXIT
31524 ENDPROC(__put_user_4)
31525
31526 ENTRY(__put_user_8)
31527 ENTER
31528+
31529+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31530+ GET_THREAD_INFO(%_ASM_BX)
31531 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31532 sub $7,%_ASM_BX
31533 cmp %_ASM_BX,%_ASM_CX
31534 jae bad_put_user
31535 ASM_STAC
31536-4: mov %_ASM_AX,(%_ASM_CX)
31537+
31538+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31539+ mov pax_user_shadow_base,%_ASM_BX
31540+ cmp %_ASM_BX,%_ASM_CX
31541+ jb 1234f
31542+ xor %ebx,%ebx
31543+1234:
31544+#endif
31545+
31546+#endif
31547+
31548+4: __copyuser_seg mov %_ASM_AX,(_DEST)
31549 #ifdef CONFIG_X86_32
31550-5: movl %edx,4(%_ASM_CX)
31551+5: __copyuser_seg movl %edx,4(_DEST)
31552 #endif
31553 xor %eax,%eax
31554 EXIT
31555diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
31556index 1cad221..de671ee 100644
31557--- a/arch/x86/lib/rwlock.S
31558+++ b/arch/x86/lib/rwlock.S
31559@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
31560 FRAME
31561 0: LOCK_PREFIX
31562 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
31563+
31564+#ifdef CONFIG_PAX_REFCOUNT
31565+ jno 1234f
31566+ LOCK_PREFIX
31567+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31568+ int $4
31569+1234:
31570+ _ASM_EXTABLE(1234b, 1234b)
31571+#endif
31572+
31573 1: rep; nop
31574 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
31575 jne 1b
31576 LOCK_PREFIX
31577 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31578+
31579+#ifdef CONFIG_PAX_REFCOUNT
31580+ jno 1234f
31581+ LOCK_PREFIX
31582+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
31583+ int $4
31584+1234:
31585+ _ASM_EXTABLE(1234b, 1234b)
31586+#endif
31587+
31588 jnz 0b
31589 ENDFRAME
31590+ pax_force_retaddr
31591 ret
31592 CFI_ENDPROC
31593 END(__write_lock_failed)
31594@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
31595 FRAME
31596 0: LOCK_PREFIX
31597 READ_LOCK_SIZE(inc) (%__lock_ptr)
31598+
31599+#ifdef CONFIG_PAX_REFCOUNT
31600+ jno 1234f
31601+ LOCK_PREFIX
31602+ READ_LOCK_SIZE(dec) (%__lock_ptr)
31603+ int $4
31604+1234:
31605+ _ASM_EXTABLE(1234b, 1234b)
31606+#endif
31607+
31608 1: rep; nop
31609 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
31610 js 1b
31611 LOCK_PREFIX
31612 READ_LOCK_SIZE(dec) (%__lock_ptr)
31613+
31614+#ifdef CONFIG_PAX_REFCOUNT
31615+ jno 1234f
31616+ LOCK_PREFIX
31617+ READ_LOCK_SIZE(inc) (%__lock_ptr)
31618+ int $4
31619+1234:
31620+ _ASM_EXTABLE(1234b, 1234b)
31621+#endif
31622+
31623 js 0b
31624 ENDFRAME
31625+ pax_force_retaddr
31626 ret
31627 CFI_ENDPROC
31628 END(__read_lock_failed)
31629diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
31630index 5dff5f0..cadebf4 100644
31631--- a/arch/x86/lib/rwsem.S
31632+++ b/arch/x86/lib/rwsem.S
31633@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
31634 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31635 CFI_RESTORE __ASM_REG(dx)
31636 restore_common_regs
31637+ pax_force_retaddr
31638 ret
31639 CFI_ENDPROC
31640 ENDPROC(call_rwsem_down_read_failed)
31641@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31642 movq %rax,%rdi
31643 call rwsem_down_write_failed
31644 restore_common_regs
31645+ pax_force_retaddr
31646 ret
31647 CFI_ENDPROC
31648 ENDPROC(call_rwsem_down_write_failed)
31649@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31650 movq %rax,%rdi
31651 call rwsem_wake
31652 restore_common_regs
31653-1: ret
31654+1: pax_force_retaddr
31655+ ret
31656 CFI_ENDPROC
31657 ENDPROC(call_rwsem_wake)
31658
31659@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31660 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31661 CFI_RESTORE __ASM_REG(dx)
31662 restore_common_regs
31663+ pax_force_retaddr
31664 ret
31665 CFI_ENDPROC
31666 ENDPROC(call_rwsem_downgrade_wake)
31667diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31668index 92d9fea..b2762c8 100644
31669--- a/arch/x86/lib/thunk_64.S
31670+++ b/arch/x86/lib/thunk_64.S
31671@@ -9,6 +9,7 @@
31672 #include <asm/dwarf2.h>
31673 #include <asm/calling.h>
31674 #include <asm/asm.h>
31675+#include <asm/alternative-asm.h>
31676
31677 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31678 .macro THUNK name, func, put_ret_addr_in_rdi=0
31679@@ -16,11 +17,11 @@
31680 \name:
31681 CFI_STARTPROC
31682
31683- /* this one pushes 9 elems, the next one would be %rIP */
31684- SAVE_ARGS
31685+ /* this one pushes 15+1 elems, the next one would be %rIP */
31686+ SAVE_ARGS 8
31687
31688 .if \put_ret_addr_in_rdi
31689- movq_cfi_restore 9*8, rdi
31690+ movq_cfi_restore RIP, rdi
31691 .endif
31692
31693 call \func
31694@@ -40,9 +41,10 @@
31695
31696 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31697 CFI_STARTPROC
31698- SAVE_ARGS
31699+ SAVE_ARGS 8
31700 restore:
31701- RESTORE_ARGS
31702+ RESTORE_ARGS 1,8
31703+ pax_force_retaddr
31704 ret
31705 CFI_ENDPROC
31706 _ASM_NOKPROBE(restore)
31707diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31708index e2f5e21..4b22130 100644
31709--- a/arch/x86/lib/usercopy_32.c
31710+++ b/arch/x86/lib/usercopy_32.c
31711@@ -42,11 +42,13 @@ do { \
31712 int __d0; \
31713 might_fault(); \
31714 __asm__ __volatile__( \
31715+ __COPYUSER_SET_ES \
31716 ASM_STAC "\n" \
31717 "0: rep; stosl\n" \
31718 " movl %2,%0\n" \
31719 "1: rep; stosb\n" \
31720 "2: " ASM_CLAC "\n" \
31721+ __COPYUSER_RESTORE_ES \
31722 ".section .fixup,\"ax\"\n" \
31723 "3: lea 0(%2,%0,4),%0\n" \
31724 " jmp 2b\n" \
31725@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31726
31727 #ifdef CONFIG_X86_INTEL_USERCOPY
31728 static unsigned long
31729-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31730+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31731 {
31732 int d0, d1;
31733 __asm__ __volatile__(
31734@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31735 " .align 2,0x90\n"
31736 "3: movl 0(%4), %%eax\n"
31737 "4: movl 4(%4), %%edx\n"
31738- "5: movl %%eax, 0(%3)\n"
31739- "6: movl %%edx, 4(%3)\n"
31740+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31741+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31742 "7: movl 8(%4), %%eax\n"
31743 "8: movl 12(%4),%%edx\n"
31744- "9: movl %%eax, 8(%3)\n"
31745- "10: movl %%edx, 12(%3)\n"
31746+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31747+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31748 "11: movl 16(%4), %%eax\n"
31749 "12: movl 20(%4), %%edx\n"
31750- "13: movl %%eax, 16(%3)\n"
31751- "14: movl %%edx, 20(%3)\n"
31752+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31753+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31754 "15: movl 24(%4), %%eax\n"
31755 "16: movl 28(%4), %%edx\n"
31756- "17: movl %%eax, 24(%3)\n"
31757- "18: movl %%edx, 28(%3)\n"
31758+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31759+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31760 "19: movl 32(%4), %%eax\n"
31761 "20: movl 36(%4), %%edx\n"
31762- "21: movl %%eax, 32(%3)\n"
31763- "22: movl %%edx, 36(%3)\n"
31764+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31765+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31766 "23: movl 40(%4), %%eax\n"
31767 "24: movl 44(%4), %%edx\n"
31768- "25: movl %%eax, 40(%3)\n"
31769- "26: movl %%edx, 44(%3)\n"
31770+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31771+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31772 "27: movl 48(%4), %%eax\n"
31773 "28: movl 52(%4), %%edx\n"
31774- "29: movl %%eax, 48(%3)\n"
31775- "30: movl %%edx, 52(%3)\n"
31776+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31777+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31778 "31: movl 56(%4), %%eax\n"
31779 "32: movl 60(%4), %%edx\n"
31780- "33: movl %%eax, 56(%3)\n"
31781- "34: movl %%edx, 60(%3)\n"
31782+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31783+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31784 " addl $-64, %0\n"
31785 " addl $64, %4\n"
31786 " addl $64, %3\n"
31787@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31788 " shrl $2, %0\n"
31789 " andl $3, %%eax\n"
31790 " cld\n"
31791+ __COPYUSER_SET_ES
31792 "99: rep; movsl\n"
31793 "36: movl %%eax, %0\n"
31794 "37: rep; movsb\n"
31795 "100:\n"
31796+ __COPYUSER_RESTORE_ES
31797+ ".section .fixup,\"ax\"\n"
31798+ "101: lea 0(%%eax,%0,4),%0\n"
31799+ " jmp 100b\n"
31800+ ".previous\n"
31801+ _ASM_EXTABLE(1b,100b)
31802+ _ASM_EXTABLE(2b,100b)
31803+ _ASM_EXTABLE(3b,100b)
31804+ _ASM_EXTABLE(4b,100b)
31805+ _ASM_EXTABLE(5b,100b)
31806+ _ASM_EXTABLE(6b,100b)
31807+ _ASM_EXTABLE(7b,100b)
31808+ _ASM_EXTABLE(8b,100b)
31809+ _ASM_EXTABLE(9b,100b)
31810+ _ASM_EXTABLE(10b,100b)
31811+ _ASM_EXTABLE(11b,100b)
31812+ _ASM_EXTABLE(12b,100b)
31813+ _ASM_EXTABLE(13b,100b)
31814+ _ASM_EXTABLE(14b,100b)
31815+ _ASM_EXTABLE(15b,100b)
31816+ _ASM_EXTABLE(16b,100b)
31817+ _ASM_EXTABLE(17b,100b)
31818+ _ASM_EXTABLE(18b,100b)
31819+ _ASM_EXTABLE(19b,100b)
31820+ _ASM_EXTABLE(20b,100b)
31821+ _ASM_EXTABLE(21b,100b)
31822+ _ASM_EXTABLE(22b,100b)
31823+ _ASM_EXTABLE(23b,100b)
31824+ _ASM_EXTABLE(24b,100b)
31825+ _ASM_EXTABLE(25b,100b)
31826+ _ASM_EXTABLE(26b,100b)
31827+ _ASM_EXTABLE(27b,100b)
31828+ _ASM_EXTABLE(28b,100b)
31829+ _ASM_EXTABLE(29b,100b)
31830+ _ASM_EXTABLE(30b,100b)
31831+ _ASM_EXTABLE(31b,100b)
31832+ _ASM_EXTABLE(32b,100b)
31833+ _ASM_EXTABLE(33b,100b)
31834+ _ASM_EXTABLE(34b,100b)
31835+ _ASM_EXTABLE(35b,100b)
31836+ _ASM_EXTABLE(36b,100b)
31837+ _ASM_EXTABLE(37b,100b)
31838+ _ASM_EXTABLE(99b,101b)
31839+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31840+ : "1"(to), "2"(from), "0"(size)
31841+ : "eax", "edx", "memory");
31842+ return size;
31843+}
31844+
31845+static unsigned long
31846+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31847+{
31848+ int d0, d1;
31849+ __asm__ __volatile__(
31850+ " .align 2,0x90\n"
31851+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31852+ " cmpl $67, %0\n"
31853+ " jbe 3f\n"
31854+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31855+ " .align 2,0x90\n"
31856+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31857+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31858+ "5: movl %%eax, 0(%3)\n"
31859+ "6: movl %%edx, 4(%3)\n"
31860+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31861+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31862+ "9: movl %%eax, 8(%3)\n"
31863+ "10: movl %%edx, 12(%3)\n"
31864+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31865+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31866+ "13: movl %%eax, 16(%3)\n"
31867+ "14: movl %%edx, 20(%3)\n"
31868+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31869+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31870+ "17: movl %%eax, 24(%3)\n"
31871+ "18: movl %%edx, 28(%3)\n"
31872+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31873+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31874+ "21: movl %%eax, 32(%3)\n"
31875+ "22: movl %%edx, 36(%3)\n"
31876+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31877+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31878+ "25: movl %%eax, 40(%3)\n"
31879+ "26: movl %%edx, 44(%3)\n"
31880+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31881+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31882+ "29: movl %%eax, 48(%3)\n"
31883+ "30: movl %%edx, 52(%3)\n"
31884+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31885+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31886+ "33: movl %%eax, 56(%3)\n"
31887+ "34: movl %%edx, 60(%3)\n"
31888+ " addl $-64, %0\n"
31889+ " addl $64, %4\n"
31890+ " addl $64, %3\n"
31891+ " cmpl $63, %0\n"
31892+ " ja 1b\n"
31893+ "35: movl %0, %%eax\n"
31894+ " shrl $2, %0\n"
31895+ " andl $3, %%eax\n"
31896+ " cld\n"
31897+ "99: rep; "__copyuser_seg" movsl\n"
31898+ "36: movl %%eax, %0\n"
31899+ "37: rep; "__copyuser_seg" movsb\n"
31900+ "100:\n"
31901 ".section .fixup,\"ax\"\n"
31902 "101: lea 0(%%eax,%0,4),%0\n"
31903 " jmp 100b\n"
31904@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31905 int d0, d1;
31906 __asm__ __volatile__(
31907 " .align 2,0x90\n"
31908- "0: movl 32(%4), %%eax\n"
31909+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31910 " cmpl $67, %0\n"
31911 " jbe 2f\n"
31912- "1: movl 64(%4), %%eax\n"
31913+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31914 " .align 2,0x90\n"
31915- "2: movl 0(%4), %%eax\n"
31916- "21: movl 4(%4), %%edx\n"
31917+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31918+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31919 " movl %%eax, 0(%3)\n"
31920 " movl %%edx, 4(%3)\n"
31921- "3: movl 8(%4), %%eax\n"
31922- "31: movl 12(%4),%%edx\n"
31923+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31924+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31925 " movl %%eax, 8(%3)\n"
31926 " movl %%edx, 12(%3)\n"
31927- "4: movl 16(%4), %%eax\n"
31928- "41: movl 20(%4), %%edx\n"
31929+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31930+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31931 " movl %%eax, 16(%3)\n"
31932 " movl %%edx, 20(%3)\n"
31933- "10: movl 24(%4), %%eax\n"
31934- "51: movl 28(%4), %%edx\n"
31935+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31936+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31937 " movl %%eax, 24(%3)\n"
31938 " movl %%edx, 28(%3)\n"
31939- "11: movl 32(%4), %%eax\n"
31940- "61: movl 36(%4), %%edx\n"
31941+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31942+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31943 " movl %%eax, 32(%3)\n"
31944 " movl %%edx, 36(%3)\n"
31945- "12: movl 40(%4), %%eax\n"
31946- "71: movl 44(%4), %%edx\n"
31947+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31948+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31949 " movl %%eax, 40(%3)\n"
31950 " movl %%edx, 44(%3)\n"
31951- "13: movl 48(%4), %%eax\n"
31952- "81: movl 52(%4), %%edx\n"
31953+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31954+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31955 " movl %%eax, 48(%3)\n"
31956 " movl %%edx, 52(%3)\n"
31957- "14: movl 56(%4), %%eax\n"
31958- "91: movl 60(%4), %%edx\n"
31959+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31960+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31961 " movl %%eax, 56(%3)\n"
31962 " movl %%edx, 60(%3)\n"
31963 " addl $-64, %0\n"
31964@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31965 " shrl $2, %0\n"
31966 " andl $3, %%eax\n"
31967 " cld\n"
31968- "6: rep; movsl\n"
31969+ "6: rep; "__copyuser_seg" movsl\n"
31970 " movl %%eax,%0\n"
31971- "7: rep; movsb\n"
31972+ "7: rep; "__copyuser_seg" movsb\n"
31973 "8:\n"
31974 ".section .fixup,\"ax\"\n"
31975 "9: lea 0(%%eax,%0,4),%0\n"
31976@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31977
31978 __asm__ __volatile__(
31979 " .align 2,0x90\n"
31980- "0: movl 32(%4), %%eax\n"
31981+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31982 " cmpl $67, %0\n"
31983 " jbe 2f\n"
31984- "1: movl 64(%4), %%eax\n"
31985+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31986 " .align 2,0x90\n"
31987- "2: movl 0(%4), %%eax\n"
31988- "21: movl 4(%4), %%edx\n"
31989+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31990+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31991 " movnti %%eax, 0(%3)\n"
31992 " movnti %%edx, 4(%3)\n"
31993- "3: movl 8(%4), %%eax\n"
31994- "31: movl 12(%4),%%edx\n"
31995+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31996+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31997 " movnti %%eax, 8(%3)\n"
31998 " movnti %%edx, 12(%3)\n"
31999- "4: movl 16(%4), %%eax\n"
32000- "41: movl 20(%4), %%edx\n"
32001+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
32002+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
32003 " movnti %%eax, 16(%3)\n"
32004 " movnti %%edx, 20(%3)\n"
32005- "10: movl 24(%4), %%eax\n"
32006- "51: movl 28(%4), %%edx\n"
32007+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
32008+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
32009 " movnti %%eax, 24(%3)\n"
32010 " movnti %%edx, 28(%3)\n"
32011- "11: movl 32(%4), %%eax\n"
32012- "61: movl 36(%4), %%edx\n"
32013+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
32014+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
32015 " movnti %%eax, 32(%3)\n"
32016 " movnti %%edx, 36(%3)\n"
32017- "12: movl 40(%4), %%eax\n"
32018- "71: movl 44(%4), %%edx\n"
32019+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
32020+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
32021 " movnti %%eax, 40(%3)\n"
32022 " movnti %%edx, 44(%3)\n"
32023- "13: movl 48(%4), %%eax\n"
32024- "81: movl 52(%4), %%edx\n"
32025+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
32026+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
32027 " movnti %%eax, 48(%3)\n"
32028 " movnti %%edx, 52(%3)\n"
32029- "14: movl 56(%4), %%eax\n"
32030- "91: movl 60(%4), %%edx\n"
32031+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
32032+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
32033 " movnti %%eax, 56(%3)\n"
32034 " movnti %%edx, 60(%3)\n"
32035 " addl $-64, %0\n"
32036@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
32037 " shrl $2, %0\n"
32038 " andl $3, %%eax\n"
32039 " cld\n"
32040- "6: rep; movsl\n"
32041+ "6: rep; "__copyuser_seg" movsl\n"
32042 " movl %%eax,%0\n"
32043- "7: rep; movsb\n"
32044+ "7: rep; "__copyuser_seg" movsb\n"
32045 "8:\n"
32046 ".section .fixup,\"ax\"\n"
32047 "9: lea 0(%%eax,%0,4),%0\n"
32048@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
32049
32050 __asm__ __volatile__(
32051 " .align 2,0x90\n"
32052- "0: movl 32(%4), %%eax\n"
32053+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
32054 " cmpl $67, %0\n"
32055 " jbe 2f\n"
32056- "1: movl 64(%4), %%eax\n"
32057+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
32058 " .align 2,0x90\n"
32059- "2: movl 0(%4), %%eax\n"
32060- "21: movl 4(%4), %%edx\n"
32061+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
32062+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
32063 " movnti %%eax, 0(%3)\n"
32064 " movnti %%edx, 4(%3)\n"
32065- "3: movl 8(%4), %%eax\n"
32066- "31: movl 12(%4),%%edx\n"
32067+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
32068+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
32069 " movnti %%eax, 8(%3)\n"
32070 " movnti %%edx, 12(%3)\n"
32071- "4: movl 16(%4), %%eax\n"
32072- "41: movl 20(%4), %%edx\n"
32073+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
32074+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
32075 " movnti %%eax, 16(%3)\n"
32076 " movnti %%edx, 20(%3)\n"
32077- "10: movl 24(%4), %%eax\n"
32078- "51: movl 28(%4), %%edx\n"
32079+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
32080+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
32081 " movnti %%eax, 24(%3)\n"
32082 " movnti %%edx, 28(%3)\n"
32083- "11: movl 32(%4), %%eax\n"
32084- "61: movl 36(%4), %%edx\n"
32085+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
32086+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
32087 " movnti %%eax, 32(%3)\n"
32088 " movnti %%edx, 36(%3)\n"
32089- "12: movl 40(%4), %%eax\n"
32090- "71: movl 44(%4), %%edx\n"
32091+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
32092+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
32093 " movnti %%eax, 40(%3)\n"
32094 " movnti %%edx, 44(%3)\n"
32095- "13: movl 48(%4), %%eax\n"
32096- "81: movl 52(%4), %%edx\n"
32097+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
32098+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
32099 " movnti %%eax, 48(%3)\n"
32100 " movnti %%edx, 52(%3)\n"
32101- "14: movl 56(%4), %%eax\n"
32102- "91: movl 60(%4), %%edx\n"
32103+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
32104+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
32105 " movnti %%eax, 56(%3)\n"
32106 " movnti %%edx, 60(%3)\n"
32107 " addl $-64, %0\n"
32108@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
32109 " shrl $2, %0\n"
32110 " andl $3, %%eax\n"
32111 " cld\n"
32112- "6: rep; movsl\n"
32113+ "6: rep; "__copyuser_seg" movsl\n"
32114 " movl %%eax,%0\n"
32115- "7: rep; movsb\n"
32116+ "7: rep; "__copyuser_seg" movsb\n"
32117 "8:\n"
32118 ".section .fixup,\"ax\"\n"
32119 "9: lea 0(%%eax,%0,4),%0\n"
32120@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
32121 */
32122 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
32123 unsigned long size);
32124-unsigned long __copy_user_intel(void __user *to, const void *from,
32125+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
32126+ unsigned long size);
32127+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
32128 unsigned long size);
32129 unsigned long __copy_user_zeroing_intel_nocache(void *to,
32130 const void __user *from, unsigned long size);
32131 #endif /* CONFIG_X86_INTEL_USERCOPY */
32132
32133 /* Generic arbitrary sized copy. */
32134-#define __copy_user(to, from, size) \
32135+#define __copy_user(to, from, size, prefix, set, restore) \
32136 do { \
32137 int __d0, __d1, __d2; \
32138 __asm__ __volatile__( \
32139+ set \
32140 " cmp $7,%0\n" \
32141 " jbe 1f\n" \
32142 " movl %1,%0\n" \
32143 " negl %0\n" \
32144 " andl $7,%0\n" \
32145 " subl %0,%3\n" \
32146- "4: rep; movsb\n" \
32147+ "4: rep; "prefix"movsb\n" \
32148 " movl %3,%0\n" \
32149 " shrl $2,%0\n" \
32150 " andl $3,%3\n" \
32151 " .align 2,0x90\n" \
32152- "0: rep; movsl\n" \
32153+ "0: rep; "prefix"movsl\n" \
32154 " movl %3,%0\n" \
32155- "1: rep; movsb\n" \
32156+ "1: rep; "prefix"movsb\n" \
32157 "2:\n" \
32158+ restore \
32159 ".section .fixup,\"ax\"\n" \
32160 "5: addl %3,%0\n" \
32161 " jmp 2b\n" \
32162@@ -538,14 +650,14 @@ do { \
32163 " negl %0\n" \
32164 " andl $7,%0\n" \
32165 " subl %0,%3\n" \
32166- "4: rep; movsb\n" \
32167+ "4: rep; "__copyuser_seg"movsb\n" \
32168 " movl %3,%0\n" \
32169 " shrl $2,%0\n" \
32170 " andl $3,%3\n" \
32171 " .align 2,0x90\n" \
32172- "0: rep; movsl\n" \
32173+ "0: rep; "__copyuser_seg"movsl\n" \
32174 " movl %3,%0\n" \
32175- "1: rep; movsb\n" \
32176+ "1: rep; "__copyuser_seg"movsb\n" \
32177 "2:\n" \
32178 ".section .fixup,\"ax\"\n" \
32179 "5: addl %3,%0\n" \
32180@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
32181 {
32182 stac();
32183 if (movsl_is_ok(to, from, n))
32184- __copy_user(to, from, n);
32185+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
32186 else
32187- n = __copy_user_intel(to, from, n);
32188+ n = __generic_copy_to_user_intel(to, from, n);
32189 clac();
32190 return n;
32191 }
32192@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
32193 {
32194 stac();
32195 if (movsl_is_ok(to, from, n))
32196- __copy_user(to, from, n);
32197+ __copy_user(to, from, n, __copyuser_seg, "", "");
32198 else
32199- n = __copy_user_intel((void __user *)to,
32200- (const void *)from, n);
32201+ n = __generic_copy_from_user_intel(to, from, n);
32202 clac();
32203 return n;
32204 }
32205@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
32206 if (n > 64 && cpu_has_xmm2)
32207 n = __copy_user_intel_nocache(to, from, n);
32208 else
32209- __copy_user(to, from, n);
32210+ __copy_user(to, from, n, __copyuser_seg, "", "");
32211 #else
32212- __copy_user(to, from, n);
32213+ __copy_user(to, from, n, __copyuser_seg, "", "");
32214 #endif
32215 clac();
32216 return n;
32217 }
32218 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
32219
32220-/**
32221- * copy_to_user: - Copy a block of data into user space.
32222- * @to: Destination address, in user space.
32223- * @from: Source address, in kernel space.
32224- * @n: Number of bytes to copy.
32225- *
32226- * Context: User context only. This function may sleep.
32227- *
32228- * Copy data from kernel space to user space.
32229- *
32230- * Returns number of bytes that could not be copied.
32231- * On success, this will be zero.
32232- */
32233-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
32234+#ifdef CONFIG_PAX_MEMORY_UDEREF
32235+void __set_fs(mm_segment_t x)
32236 {
32237- if (access_ok(VERIFY_WRITE, to, n))
32238- n = __copy_to_user(to, from, n);
32239- return n;
32240+ switch (x.seg) {
32241+ case 0:
32242+ loadsegment(gs, 0);
32243+ break;
32244+ case TASK_SIZE_MAX:
32245+ loadsegment(gs, __USER_DS);
32246+ break;
32247+ case -1UL:
32248+ loadsegment(gs, __KERNEL_DS);
32249+ break;
32250+ default:
32251+ BUG();
32252+ }
32253 }
32254-EXPORT_SYMBOL(_copy_to_user);
32255+EXPORT_SYMBOL(__set_fs);
32256
32257-/**
32258- * copy_from_user: - Copy a block of data from user space.
32259- * @to: Destination address, in kernel space.
32260- * @from: Source address, in user space.
32261- * @n: Number of bytes to copy.
32262- *
32263- * Context: User context only. This function may sleep.
32264- *
32265- * Copy data from user space to kernel space.
32266- *
32267- * Returns number of bytes that could not be copied.
32268- * On success, this will be zero.
32269- *
32270- * If some data could not be copied, this function will pad the copied
32271- * data to the requested size using zero bytes.
32272- */
32273-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
32274+void set_fs(mm_segment_t x)
32275 {
32276- if (access_ok(VERIFY_READ, from, n))
32277- n = __copy_from_user(to, from, n);
32278- else
32279- memset(to, 0, n);
32280- return n;
32281+ current_thread_info()->addr_limit = x;
32282+ __set_fs(x);
32283 }
32284-EXPORT_SYMBOL(_copy_from_user);
32285+EXPORT_SYMBOL(set_fs);
32286+#endif
32287diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
32288index c905e89..01ab928 100644
32289--- a/arch/x86/lib/usercopy_64.c
32290+++ b/arch/x86/lib/usercopy_64.c
32291@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
32292 might_fault();
32293 /* no memory constraint because it doesn't change any memory gcc knows
32294 about */
32295+ pax_open_userland();
32296 stac();
32297 asm volatile(
32298 " testq %[size8],%[size8]\n"
32299@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
32300 _ASM_EXTABLE(0b,3b)
32301 _ASM_EXTABLE(1b,2b)
32302 : [size8] "=&c"(size), [dst] "=&D" (__d0)
32303- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
32304+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
32305 [zero] "r" (0UL), [eight] "r" (8UL));
32306 clac();
32307+ pax_close_userland();
32308 return size;
32309 }
32310 EXPORT_SYMBOL(__clear_user);
32311@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
32312 }
32313 EXPORT_SYMBOL(clear_user);
32314
32315-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
32316+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
32317 {
32318- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
32319- return copy_user_generic((__force void *)to, (__force void *)from, len);
32320- }
32321- return len;
32322+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
32323+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
32324+ return len;
32325 }
32326 EXPORT_SYMBOL(copy_in_user);
32327
32328@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
32329 * it is not necessary to optimize tail handling.
32330 */
32331 __visible unsigned long
32332-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
32333+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
32334 {
32335 char c;
32336 unsigned zero_len;
32337
32338+ clac();
32339+ pax_close_userland();
32340 for (; len; --len, to++) {
32341 if (__get_user_nocheck(c, from++, sizeof(char)))
32342 break;
32343@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
32344 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
32345 if (__put_user_nocheck(c, to++, sizeof(char)))
32346 break;
32347- clac();
32348 return len;
32349 }
32350diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
32351index 6a19ad9..1c48f9a 100644
32352--- a/arch/x86/mm/Makefile
32353+++ b/arch/x86/mm/Makefile
32354@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
32355 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
32356
32357 obj-$(CONFIG_MEMTEST) += memtest.o
32358+
32359+quote:="
32360+obj-$(CONFIG_X86_64) += uderef_64.o
32361+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
32362diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
32363index 903ec1e..c4166b2 100644
32364--- a/arch/x86/mm/extable.c
32365+++ b/arch/x86/mm/extable.c
32366@@ -6,12 +6,24 @@
32367 static inline unsigned long
32368 ex_insn_addr(const struct exception_table_entry *x)
32369 {
32370- return (unsigned long)&x->insn + x->insn;
32371+ unsigned long reloc = 0;
32372+
32373+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32374+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
32375+#endif
32376+
32377+ return (unsigned long)&x->insn + x->insn + reloc;
32378 }
32379 static inline unsigned long
32380 ex_fixup_addr(const struct exception_table_entry *x)
32381 {
32382- return (unsigned long)&x->fixup + x->fixup;
32383+ unsigned long reloc = 0;
32384+
32385+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32386+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
32387+#endif
32388+
32389+ return (unsigned long)&x->fixup + x->fixup + reloc;
32390 }
32391
32392 int fixup_exception(struct pt_regs *regs)
32393@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
32394 unsigned long new_ip;
32395
32396 #ifdef CONFIG_PNPBIOS
32397- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
32398+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
32399 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
32400 extern u32 pnp_bios_is_utter_crap;
32401 pnp_bios_is_utter_crap = 1;
32402@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
32403 i += 4;
32404 p->fixup -= i;
32405 i += 4;
32406+
32407+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32408+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
32409+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
32410+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
32411+#endif
32412+
32413 }
32414 }
32415
32416diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
32417index a241946..d7a04cf 100644
32418--- a/arch/x86/mm/fault.c
32419+++ b/arch/x86/mm/fault.c
32420@@ -14,12 +14,19 @@
32421 #include <linux/hugetlb.h> /* hstate_index_to_shift */
32422 #include <linux/prefetch.h> /* prefetchw */
32423 #include <linux/context_tracking.h> /* exception_enter(), ... */
32424+#include <linux/unistd.h>
32425+#include <linux/compiler.h>
32426
32427 #include <asm/traps.h> /* dotraplinkage, ... */
32428 #include <asm/pgalloc.h> /* pgd_*(), ... */
32429 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
32430 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
32431 #include <asm/vsyscall.h> /* emulate_vsyscall */
32432+#include <asm/tlbflush.h>
32433+
32434+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32435+#include <asm/stacktrace.h>
32436+#endif
32437
32438 #define CREATE_TRACE_POINTS
32439 #include <asm/trace/exceptions.h>
32440@@ -60,7 +67,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
32441 int ret = 0;
32442
32443 /* kprobe_running() needs smp_processor_id() */
32444- if (kprobes_built_in() && !user_mode_vm(regs)) {
32445+ if (kprobes_built_in() && !user_mode(regs)) {
32446 preempt_disable();
32447 if (kprobe_running() && kprobe_fault_handler(regs, 14))
32448 ret = 1;
32449@@ -121,7 +128,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
32450 return !instr_lo || (instr_lo>>1) == 1;
32451 case 0x00:
32452 /* Prefetch instruction is 0x0F0D or 0x0F18 */
32453- if (probe_kernel_address(instr, opcode))
32454+ if (user_mode(regs)) {
32455+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
32456+ return 0;
32457+ } else if (probe_kernel_address(instr, opcode))
32458 return 0;
32459
32460 *prefetch = (instr_lo == 0xF) &&
32461@@ -155,7 +165,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
32462 while (instr < max_instr) {
32463 unsigned char opcode;
32464
32465- if (probe_kernel_address(instr, opcode))
32466+ if (user_mode(regs)) {
32467+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
32468+ break;
32469+ } else if (probe_kernel_address(instr, opcode))
32470 break;
32471
32472 instr++;
32473@@ -186,6 +199,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
32474 force_sig_info(si_signo, &info, tsk);
32475 }
32476
32477+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32478+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
32479+#endif
32480+
32481+#ifdef CONFIG_PAX_EMUTRAMP
32482+static int pax_handle_fetch_fault(struct pt_regs *regs);
32483+#endif
32484+
32485+#ifdef CONFIG_PAX_PAGEEXEC
32486+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
32487+{
32488+ pgd_t *pgd;
32489+ pud_t *pud;
32490+ pmd_t *pmd;
32491+
32492+ pgd = pgd_offset(mm, address);
32493+ if (!pgd_present(*pgd))
32494+ return NULL;
32495+ pud = pud_offset(pgd, address);
32496+ if (!pud_present(*pud))
32497+ return NULL;
32498+ pmd = pmd_offset(pud, address);
32499+ if (!pmd_present(*pmd))
32500+ return NULL;
32501+ return pmd;
32502+}
32503+#endif
32504+
32505 DEFINE_SPINLOCK(pgd_lock);
32506 LIST_HEAD(pgd_list);
32507
32508@@ -236,10 +277,27 @@ void vmalloc_sync_all(void)
32509 for (address = VMALLOC_START & PMD_MASK;
32510 address >= TASK_SIZE && address < FIXADDR_TOP;
32511 address += PMD_SIZE) {
32512+
32513+#ifdef CONFIG_PAX_PER_CPU_PGD
32514+ unsigned long cpu;
32515+#else
32516 struct page *page;
32517+#endif
32518
32519 spin_lock(&pgd_lock);
32520+
32521+#ifdef CONFIG_PAX_PER_CPU_PGD
32522+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32523+ pgd_t *pgd = get_cpu_pgd(cpu, user);
32524+ pmd_t *ret;
32525+
32526+ ret = vmalloc_sync_one(pgd, address);
32527+ if (!ret)
32528+ break;
32529+ pgd = get_cpu_pgd(cpu, kernel);
32530+#else
32531 list_for_each_entry(page, &pgd_list, lru) {
32532+ pgd_t *pgd;
32533 spinlock_t *pgt_lock;
32534 pmd_t *ret;
32535
32536@@ -247,8 +305,14 @@ void vmalloc_sync_all(void)
32537 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
32538
32539 spin_lock(pgt_lock);
32540- ret = vmalloc_sync_one(page_address(page), address);
32541+ pgd = page_address(page);
32542+#endif
32543+
32544+ ret = vmalloc_sync_one(pgd, address);
32545+
32546+#ifndef CONFIG_PAX_PER_CPU_PGD
32547 spin_unlock(pgt_lock);
32548+#endif
32549
32550 if (!ret)
32551 break;
32552@@ -282,6 +346,12 @@ static noinline int vmalloc_fault(unsigned long address)
32553 * an interrupt in the middle of a task switch..
32554 */
32555 pgd_paddr = read_cr3();
32556+
32557+#ifdef CONFIG_PAX_PER_CPU_PGD
32558+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
32559+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
32560+#endif
32561+
32562 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
32563 if (!pmd_k)
32564 return -1;
32565@@ -378,11 +448,25 @@ static noinline int vmalloc_fault(unsigned long address)
32566 * happen within a race in page table update. In the later
32567 * case just flush:
32568 */
32569- pgd = pgd_offset(current->active_mm, address);
32570+
32571 pgd_ref = pgd_offset_k(address);
32572 if (pgd_none(*pgd_ref))
32573 return -1;
32574
32575+#ifdef CONFIG_PAX_PER_CPU_PGD
32576+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
32577+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
32578+ if (pgd_none(*pgd)) {
32579+ set_pgd(pgd, *pgd_ref);
32580+ arch_flush_lazy_mmu_mode();
32581+ } else {
32582+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
32583+ }
32584+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
32585+#else
32586+ pgd = pgd_offset(current->active_mm, address);
32587+#endif
32588+
32589 if (pgd_none(*pgd)) {
32590 set_pgd(pgd, *pgd_ref);
32591 arch_flush_lazy_mmu_mode();
32592@@ -549,7 +633,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
32593 static int is_errata100(struct pt_regs *regs, unsigned long address)
32594 {
32595 #ifdef CONFIG_X86_64
32596- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
32597+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
32598 return 1;
32599 #endif
32600 return 0;
32601@@ -576,9 +660,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
32602 }
32603
32604 static const char nx_warning[] = KERN_CRIT
32605-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
32606+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
32607 static const char smep_warning[] = KERN_CRIT
32608-"unable to execute userspace code (SMEP?) (uid: %d)\n";
32609+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
32610
32611 static void
32612 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32613@@ -587,7 +671,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32614 if (!oops_may_print())
32615 return;
32616
32617- if (error_code & PF_INSTR) {
32618+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
32619 unsigned int level;
32620 pgd_t *pgd;
32621 pte_t *pte;
32622@@ -598,13 +682,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32623 pte = lookup_address_in_pgd(pgd, address, &level);
32624
32625 if (pte && pte_present(*pte) && !pte_exec(*pte))
32626- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
32627+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32628 if (pte && pte_present(*pte) && pte_exec(*pte) &&
32629 (pgd_flags(*pgd) & _PAGE_USER) &&
32630 (read_cr4() & X86_CR4_SMEP))
32631- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
32632+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32633 }
32634
32635+#ifdef CONFIG_PAX_KERNEXEC
32636+ if (init_mm.start_code <= address && address < init_mm.end_code) {
32637+ if (current->signal->curr_ip)
32638+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32639+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
32640+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32641+ else
32642+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32643+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32644+ }
32645+#endif
32646+
32647 printk(KERN_ALERT "BUG: unable to handle kernel ");
32648 if (address < PAGE_SIZE)
32649 printk(KERN_CONT "NULL pointer dereference");
32650@@ -785,6 +881,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32651 return;
32652 }
32653 #endif
32654+
32655+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32656+ if (pax_is_fetch_fault(regs, error_code, address)) {
32657+
32658+#ifdef CONFIG_PAX_EMUTRAMP
32659+ switch (pax_handle_fetch_fault(regs)) {
32660+ case 2:
32661+ return;
32662+ }
32663+#endif
32664+
32665+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32666+ do_group_exit(SIGKILL);
32667+ }
32668+#endif
32669+
32670 /* Kernel addresses are always protection faults: */
32671 if (address >= TASK_SIZE)
32672 error_code |= PF_PROT;
32673@@ -870,7 +982,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32674 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32675 printk(KERN_ERR
32676 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32677- tsk->comm, tsk->pid, address);
32678+ tsk->comm, task_pid_nr(tsk), address);
32679 code = BUS_MCEERR_AR;
32680 }
32681 #endif
32682@@ -924,6 +1036,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32683 return 1;
32684 }
32685
32686+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32687+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32688+{
32689+ pte_t *pte;
32690+ pmd_t *pmd;
32691+ spinlock_t *ptl;
32692+ unsigned char pte_mask;
32693+
32694+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32695+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32696+ return 0;
32697+
32698+ /* PaX: it's our fault, let's handle it if we can */
32699+
32700+ /* PaX: take a look at read faults before acquiring any locks */
32701+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32702+ /* instruction fetch attempt from a protected page in user mode */
32703+ up_read(&mm->mmap_sem);
32704+
32705+#ifdef CONFIG_PAX_EMUTRAMP
32706+ switch (pax_handle_fetch_fault(regs)) {
32707+ case 2:
32708+ return 1;
32709+ }
32710+#endif
32711+
32712+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32713+ do_group_exit(SIGKILL);
32714+ }
32715+
32716+ pmd = pax_get_pmd(mm, address);
32717+ if (unlikely(!pmd))
32718+ return 0;
32719+
32720+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32721+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32722+ pte_unmap_unlock(pte, ptl);
32723+ return 0;
32724+ }
32725+
32726+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32727+ /* write attempt to a protected page in user mode */
32728+ pte_unmap_unlock(pte, ptl);
32729+ return 0;
32730+ }
32731+
32732+#ifdef CONFIG_SMP
32733+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32734+#else
32735+ if (likely(address > get_limit(regs->cs)))
32736+#endif
32737+ {
32738+ set_pte(pte, pte_mkread(*pte));
32739+ __flush_tlb_one(address);
32740+ pte_unmap_unlock(pte, ptl);
32741+ up_read(&mm->mmap_sem);
32742+ return 1;
32743+ }
32744+
32745+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32746+
32747+ /*
32748+ * PaX: fill DTLB with user rights and retry
32749+ */
32750+ __asm__ __volatile__ (
32751+ "orb %2,(%1)\n"
32752+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32753+/*
32754+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32755+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32756+ * page fault when examined during a TLB load attempt. this is true not only
32757+ * for PTEs holding a non-present entry but also present entries that will
32758+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32759+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32760+ * for our target pages since their PTEs are simply not in the TLBs at all.
32761+
32762+ * the best thing in omitting it is that we gain around 15-20% speed in the
32763+ * fast path of the page fault handler and can get rid of tracing since we
32764+ * can no longer flush unintended entries.
32765+ */
32766+ "invlpg (%0)\n"
32767+#endif
32768+ __copyuser_seg"testb $0,(%0)\n"
32769+ "xorb %3,(%1)\n"
32770+ :
32771+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32772+ : "memory", "cc");
32773+ pte_unmap_unlock(pte, ptl);
32774+ up_read(&mm->mmap_sem);
32775+ return 1;
32776+}
32777+#endif
32778+
32779 /*
32780 * Handle a spurious fault caused by a stale TLB entry.
32781 *
32782@@ -991,6 +1196,9 @@ int show_unhandled_signals = 1;
32783 static inline int
32784 access_error(unsigned long error_code, struct vm_area_struct *vma)
32785 {
32786+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32787+ return 1;
32788+
32789 if (error_code & PF_WRITE) {
32790 /* write, present and write, not present: */
32791 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32792@@ -1025,7 +1233,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32793 if (error_code & PF_USER)
32794 return false;
32795
32796- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32797+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32798 return false;
32799
32800 return true;
32801@@ -1053,6 +1261,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32802 tsk = current;
32803 mm = tsk->mm;
32804
32805+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32806+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32807+ if (!search_exception_tables(regs->ip)) {
32808+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32809+ bad_area_nosemaphore(regs, error_code, address);
32810+ return;
32811+ }
32812+ if (address < pax_user_shadow_base) {
32813+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32814+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32815+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32816+ } else
32817+ address -= pax_user_shadow_base;
32818+ }
32819+#endif
32820+
32821 /*
32822 * Detect and handle instructions that would cause a page fault for
32823 * both a tracked kernel page and a userspace page.
32824@@ -1130,7 +1354,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32825 * User-mode registers count as a user access even for any
32826 * potential system fault or CPU buglet:
32827 */
32828- if (user_mode_vm(regs)) {
32829+ if (user_mode(regs)) {
32830 local_irq_enable();
32831 error_code |= PF_USER;
32832 flags |= FAULT_FLAG_USER;
32833@@ -1177,6 +1401,11 @@ retry:
32834 might_sleep();
32835 }
32836
32837+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32838+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32839+ return;
32840+#endif
32841+
32842 vma = find_vma(mm, address);
32843 if (unlikely(!vma)) {
32844 bad_area(regs, error_code, address);
32845@@ -1188,18 +1417,24 @@ retry:
32846 bad_area(regs, error_code, address);
32847 return;
32848 }
32849- if (error_code & PF_USER) {
32850- /*
32851- * Accessing the stack below %sp is always a bug.
32852- * The large cushion allows instructions like enter
32853- * and pusha to work. ("enter $65535, $31" pushes
32854- * 32 pointers and then decrements %sp by 65535.)
32855- */
32856- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32857- bad_area(regs, error_code, address);
32858- return;
32859- }
32860+ /*
32861+ * Accessing the stack below %sp is always a bug.
32862+ * The large cushion allows instructions like enter
32863+ * and pusha to work. ("enter $65535, $31" pushes
32864+ * 32 pointers and then decrements %sp by 65535.)
32865+ */
32866+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32867+ bad_area(regs, error_code, address);
32868+ return;
32869 }
32870+
32871+#ifdef CONFIG_PAX_SEGMEXEC
32872+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32873+ bad_area(regs, error_code, address);
32874+ return;
32875+ }
32876+#endif
32877+
32878 if (unlikely(expand_stack(vma, address))) {
32879 bad_area(regs, error_code, address);
32880 return;
32881@@ -1316,3 +1551,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32882 }
32883 NOKPROBE_SYMBOL(trace_do_page_fault);
32884 #endif /* CONFIG_TRACING */
32885+
32886+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32887+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32888+{
32889+ struct mm_struct *mm = current->mm;
32890+ unsigned long ip = regs->ip;
32891+
32892+ if (v8086_mode(regs))
32893+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32894+
32895+#ifdef CONFIG_PAX_PAGEEXEC
32896+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32897+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32898+ return true;
32899+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32900+ return true;
32901+ return false;
32902+ }
32903+#endif
32904+
32905+#ifdef CONFIG_PAX_SEGMEXEC
32906+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32907+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32908+ return true;
32909+ return false;
32910+ }
32911+#endif
32912+
32913+ return false;
32914+}
32915+#endif
32916+
32917+#ifdef CONFIG_PAX_EMUTRAMP
32918+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32919+{
32920+ int err;
32921+
32922+ do { /* PaX: libffi trampoline emulation */
32923+ unsigned char mov, jmp;
32924+ unsigned int addr1, addr2;
32925+
32926+#ifdef CONFIG_X86_64
32927+ if ((regs->ip + 9) >> 32)
32928+ break;
32929+#endif
32930+
32931+ err = get_user(mov, (unsigned char __user *)regs->ip);
32932+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32933+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32934+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32935+
32936+ if (err)
32937+ break;
32938+
32939+ if (mov == 0xB8 && jmp == 0xE9) {
32940+ regs->ax = addr1;
32941+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32942+ return 2;
32943+ }
32944+ } while (0);
32945+
32946+ do { /* PaX: gcc trampoline emulation #1 */
32947+ unsigned char mov1, mov2;
32948+ unsigned short jmp;
32949+ unsigned int addr1, addr2;
32950+
32951+#ifdef CONFIG_X86_64
32952+ if ((regs->ip + 11) >> 32)
32953+ break;
32954+#endif
32955+
32956+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32957+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32958+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32959+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32960+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32961+
32962+ if (err)
32963+ break;
32964+
32965+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32966+ regs->cx = addr1;
32967+ regs->ax = addr2;
32968+ regs->ip = addr2;
32969+ return 2;
32970+ }
32971+ } while (0);
32972+
32973+ do { /* PaX: gcc trampoline emulation #2 */
32974+ unsigned char mov, jmp;
32975+ unsigned int addr1, addr2;
32976+
32977+#ifdef CONFIG_X86_64
32978+ if ((regs->ip + 9) >> 32)
32979+ break;
32980+#endif
32981+
32982+ err = get_user(mov, (unsigned char __user *)regs->ip);
32983+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32984+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32985+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32986+
32987+ if (err)
32988+ break;
32989+
32990+ if (mov == 0xB9 && jmp == 0xE9) {
32991+ regs->cx = addr1;
32992+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32993+ return 2;
32994+ }
32995+ } while (0);
32996+
32997+ return 1; /* PaX in action */
32998+}
32999+
33000+#ifdef CONFIG_X86_64
33001+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
33002+{
33003+ int err;
33004+
33005+ do { /* PaX: libffi trampoline emulation */
33006+ unsigned short mov1, mov2, jmp1;
33007+ unsigned char stcclc, jmp2;
33008+ unsigned long addr1, addr2;
33009+
33010+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33011+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
33012+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
33013+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
33014+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
33015+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
33016+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
33017+
33018+ if (err)
33019+ break;
33020+
33021+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33022+ regs->r11 = addr1;
33023+ regs->r10 = addr2;
33024+ if (stcclc == 0xF8)
33025+ regs->flags &= ~X86_EFLAGS_CF;
33026+ else
33027+ regs->flags |= X86_EFLAGS_CF;
33028+ regs->ip = addr1;
33029+ return 2;
33030+ }
33031+ } while (0);
33032+
33033+ do { /* PaX: gcc trampoline emulation #1 */
33034+ unsigned short mov1, mov2, jmp1;
33035+ unsigned char jmp2;
33036+ unsigned int addr1;
33037+ unsigned long addr2;
33038+
33039+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33040+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
33041+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
33042+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
33043+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
33044+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
33045+
33046+ if (err)
33047+ break;
33048+
33049+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33050+ regs->r11 = addr1;
33051+ regs->r10 = addr2;
33052+ regs->ip = addr1;
33053+ return 2;
33054+ }
33055+ } while (0);
33056+
33057+ do { /* PaX: gcc trampoline emulation #2 */
33058+ unsigned short mov1, mov2, jmp1;
33059+ unsigned char jmp2;
33060+ unsigned long addr1, addr2;
33061+
33062+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33063+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
33064+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
33065+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
33066+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
33067+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
33068+
33069+ if (err)
33070+ break;
33071+
33072+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33073+ regs->r11 = addr1;
33074+ regs->r10 = addr2;
33075+ regs->ip = addr1;
33076+ return 2;
33077+ }
33078+ } while (0);
33079+
33080+ return 1; /* PaX in action */
33081+}
33082+#endif
33083+
33084+/*
33085+ * PaX: decide what to do with offenders (regs->ip = fault address)
33086+ *
33087+ * returns 1 when task should be killed
33088+ * 2 when gcc trampoline was detected
33089+ */
33090+static int pax_handle_fetch_fault(struct pt_regs *regs)
33091+{
33092+ if (v8086_mode(regs))
33093+ return 1;
33094+
33095+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
33096+ return 1;
33097+
33098+#ifdef CONFIG_X86_32
33099+ return pax_handle_fetch_fault_32(regs);
33100+#else
33101+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
33102+ return pax_handle_fetch_fault_32(regs);
33103+ else
33104+ return pax_handle_fetch_fault_64(regs);
33105+#endif
33106+}
33107+#endif
33108+
33109+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33110+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
33111+{
33112+ long i;
33113+
33114+ printk(KERN_ERR "PAX: bytes at PC: ");
33115+ for (i = 0; i < 20; i++) {
33116+ unsigned char c;
33117+ if (get_user(c, (unsigned char __force_user *)pc+i))
33118+ printk(KERN_CONT "?? ");
33119+ else
33120+ printk(KERN_CONT "%02x ", c);
33121+ }
33122+ printk("\n");
33123+
33124+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
33125+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
33126+ unsigned long c;
33127+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
33128+#ifdef CONFIG_X86_32
33129+ printk(KERN_CONT "???????? ");
33130+#else
33131+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
33132+ printk(KERN_CONT "???????? ???????? ");
33133+ else
33134+ printk(KERN_CONT "???????????????? ");
33135+#endif
33136+ } else {
33137+#ifdef CONFIG_X86_64
33138+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
33139+ printk(KERN_CONT "%08x ", (unsigned int)c);
33140+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
33141+ } else
33142+#endif
33143+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
33144+ }
33145+ }
33146+ printk("\n");
33147+}
33148+#endif
33149+
33150+/**
33151+ * probe_kernel_write(): safely attempt to write to a location
33152+ * @dst: address to write to
33153+ * @src: pointer to the data that shall be written
33154+ * @size: size of the data chunk
33155+ *
33156+ * Safely write to address @dst from the buffer at @src. If a kernel fault
33157+ * happens, handle that and return -EFAULT.
33158+ */
33159+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
33160+{
33161+ long ret;
33162+ mm_segment_t old_fs = get_fs();
33163+
33164+ set_fs(KERNEL_DS);
33165+ pagefault_disable();
33166+ pax_open_kernel();
33167+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
33168+ pax_close_kernel();
33169+ pagefault_enable();
33170+ set_fs(old_fs);
33171+
33172+ return ret ? -EFAULT : 0;
33173+}
33174diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
33175index 207d9aef..69030980 100644
33176--- a/arch/x86/mm/gup.c
33177+++ b/arch/x86/mm/gup.c
33178@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
33179 addr = start;
33180 len = (unsigned long) nr_pages << PAGE_SHIFT;
33181 end = start + len;
33182- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
33183+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
33184 (void __user *)start, len)))
33185 return 0;
33186
33187@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
33188 goto slow_irqon;
33189 #endif
33190
33191+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
33192+ (void __user *)start, len)))
33193+ return 0;
33194+
33195 /*
33196 * XXX: batch / limit 'nr', to avoid large irq off latency
33197 * needs some instrumenting to determine the common sizes used by
33198diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
33199index 4500142..53a363c 100644
33200--- a/arch/x86/mm/highmem_32.c
33201+++ b/arch/x86/mm/highmem_32.c
33202@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
33203 idx = type + KM_TYPE_NR*smp_processor_id();
33204 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33205 BUG_ON(!pte_none(*(kmap_pte-idx)));
33206+
33207+ pax_open_kernel();
33208 set_pte(kmap_pte-idx, mk_pte(page, prot));
33209+ pax_close_kernel();
33210+
33211 arch_flush_lazy_mmu_mode();
33212
33213 return (void *)vaddr;
33214diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
33215index 8b977eb..4732c33 100644
33216--- a/arch/x86/mm/hugetlbpage.c
33217+++ b/arch/x86/mm/hugetlbpage.c
33218@@ -80,23 +80,24 @@ int pud_huge(pud_t pud)
33219 #ifdef CONFIG_HUGETLB_PAGE
33220 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
33221 unsigned long addr, unsigned long len,
33222- unsigned long pgoff, unsigned long flags)
33223+ unsigned long pgoff, unsigned long flags, unsigned long offset)
33224 {
33225 struct hstate *h = hstate_file(file);
33226 struct vm_unmapped_area_info info;
33227-
33228+
33229 info.flags = 0;
33230 info.length = len;
33231 info.low_limit = current->mm->mmap_legacy_base;
33232 info.high_limit = TASK_SIZE;
33233 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
33234 info.align_offset = 0;
33235+ info.threadstack_offset = offset;
33236 return vm_unmapped_area(&info);
33237 }
33238
33239 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33240 unsigned long addr0, unsigned long len,
33241- unsigned long pgoff, unsigned long flags)
33242+ unsigned long pgoff, unsigned long flags, unsigned long offset)
33243 {
33244 struct hstate *h = hstate_file(file);
33245 struct vm_unmapped_area_info info;
33246@@ -108,6 +109,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33247 info.high_limit = current->mm->mmap_base;
33248 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
33249 info.align_offset = 0;
33250+ info.threadstack_offset = offset;
33251 addr = vm_unmapped_area(&info);
33252
33253 /*
33254@@ -120,6 +122,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33255 VM_BUG_ON(addr != -ENOMEM);
33256 info.flags = 0;
33257 info.low_limit = TASK_UNMAPPED_BASE;
33258+
33259+#ifdef CONFIG_PAX_RANDMMAP
33260+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
33261+ info.low_limit += current->mm->delta_mmap;
33262+#endif
33263+
33264 info.high_limit = TASK_SIZE;
33265 addr = vm_unmapped_area(&info);
33266 }
33267@@ -134,10 +142,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
33268 struct hstate *h = hstate_file(file);
33269 struct mm_struct *mm = current->mm;
33270 struct vm_area_struct *vma;
33271+ unsigned long pax_task_size = TASK_SIZE;
33272+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
33273
33274 if (len & ~huge_page_mask(h))
33275 return -EINVAL;
33276- if (len > TASK_SIZE)
33277+
33278+#ifdef CONFIG_PAX_SEGMEXEC
33279+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33280+ pax_task_size = SEGMEXEC_TASK_SIZE;
33281+#endif
33282+
33283+ pax_task_size -= PAGE_SIZE;
33284+
33285+ if (len > pax_task_size)
33286 return -ENOMEM;
33287
33288 if (flags & MAP_FIXED) {
33289@@ -146,19 +164,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
33290 return addr;
33291 }
33292
33293+#ifdef CONFIG_PAX_RANDMMAP
33294+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
33295+#endif
33296+
33297 if (addr) {
33298 addr = ALIGN(addr, huge_page_size(h));
33299 vma = find_vma(mm, addr);
33300- if (TASK_SIZE - len >= addr &&
33301- (!vma || addr + len <= vma->vm_start))
33302+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
33303 return addr;
33304 }
33305 if (mm->get_unmapped_area == arch_get_unmapped_area)
33306 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
33307- pgoff, flags);
33308+ pgoff, flags, offset);
33309 else
33310 return hugetlb_get_unmapped_area_topdown(file, addr, len,
33311- pgoff, flags);
33312+ pgoff, flags, offset);
33313 }
33314 #endif /* CONFIG_HUGETLB_PAGE */
33315
33316diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
33317index 66dba36..f8082ec 100644
33318--- a/arch/x86/mm/init.c
33319+++ b/arch/x86/mm/init.c
33320@@ -4,6 +4,7 @@
33321 #include <linux/swap.h>
33322 #include <linux/memblock.h>
33323 #include <linux/bootmem.h> /* for max_low_pfn */
33324+#include <linux/tboot.h>
33325
33326 #include <asm/cacheflush.h>
33327 #include <asm/e820.h>
33328@@ -17,6 +18,8 @@
33329 #include <asm/proto.h>
33330 #include <asm/dma.h> /* for MAX_DMA_PFN */
33331 #include <asm/microcode.h>
33332+#include <asm/desc.h>
33333+#include <asm/bios_ebda.h>
33334
33335 /*
33336 * We need to define the tracepoints somewhere, and tlb.c
33337@@ -570,7 +573,18 @@ void __init init_mem_mapping(void)
33338 early_ioremap_page_table_range_init();
33339 #endif
33340
33341+#ifdef CONFIG_PAX_PER_CPU_PGD
33342+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
33343+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
33344+ KERNEL_PGD_PTRS);
33345+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
33346+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
33347+ KERNEL_PGD_PTRS);
33348+ load_cr3(get_cpu_pgd(0, kernel));
33349+#else
33350 load_cr3(swapper_pg_dir);
33351+#endif
33352+
33353 __flush_tlb_all();
33354
33355 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
33356@@ -586,10 +600,40 @@ void __init init_mem_mapping(void)
33357 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
33358 * mmio resources as well as potential bios/acpi data regions.
33359 */
33360+
33361+#ifdef CONFIG_GRKERNSEC_KMEM
33362+static unsigned int ebda_start __read_only;
33363+static unsigned int ebda_end __read_only;
33364+#endif
33365+
33366 int devmem_is_allowed(unsigned long pagenr)
33367 {
33368- if (pagenr < 256)
33369+#ifdef CONFIG_GRKERNSEC_KMEM
33370+ /* allow BDA */
33371+ if (!pagenr)
33372 return 1;
33373+ /* allow EBDA */
33374+ if (pagenr >= ebda_start && pagenr < ebda_end)
33375+ return 1;
33376+ /* if tboot is in use, allow access to its hardcoded serial log range */
33377+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
33378+ return 1;
33379+#else
33380+ if (!pagenr)
33381+ return 1;
33382+#ifdef CONFIG_VM86
33383+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
33384+ return 1;
33385+#endif
33386+#endif
33387+
33388+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
33389+ return 1;
33390+#ifdef CONFIG_GRKERNSEC_KMEM
33391+ /* throw out everything else below 1MB */
33392+ if (pagenr <= 256)
33393+ return 0;
33394+#endif
33395 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
33396 return 0;
33397 if (!page_is_ram(pagenr))
33398@@ -635,8 +679,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
33399 #endif
33400 }
33401
33402+#ifdef CONFIG_GRKERNSEC_KMEM
33403+static inline void gr_init_ebda(void)
33404+{
33405+ unsigned int ebda_addr;
33406+ unsigned int ebda_size = 0;
33407+
33408+ ebda_addr = get_bios_ebda();
33409+ if (ebda_addr) {
33410+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
33411+ ebda_size <<= 10;
33412+ }
33413+ if (ebda_addr && ebda_size) {
33414+ ebda_start = ebda_addr >> PAGE_SHIFT;
33415+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
33416+ } else {
33417+ ebda_start = 0x9f000 >> PAGE_SHIFT;
33418+ ebda_end = 0xa0000 >> PAGE_SHIFT;
33419+ }
33420+}
33421+#else
33422+static inline void gr_init_ebda(void) { }
33423+#endif
33424+
33425 void free_initmem(void)
33426 {
33427+#ifdef CONFIG_PAX_KERNEXEC
33428+#ifdef CONFIG_X86_32
33429+ /* PaX: limit KERNEL_CS to actual size */
33430+ unsigned long addr, limit;
33431+ struct desc_struct d;
33432+ int cpu;
33433+#else
33434+ pgd_t *pgd;
33435+ pud_t *pud;
33436+ pmd_t *pmd;
33437+ unsigned long addr, end;
33438+#endif
33439+#endif
33440+
33441+ gr_init_ebda();
33442+
33443+#ifdef CONFIG_PAX_KERNEXEC
33444+#ifdef CONFIG_X86_32
33445+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
33446+ limit = (limit - 1UL) >> PAGE_SHIFT;
33447+
33448+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
33449+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
33450+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
33451+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
33452+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
33453+ }
33454+
33455+ /* PaX: make KERNEL_CS read-only */
33456+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
33457+ if (!paravirt_enabled())
33458+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
33459+/*
33460+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
33461+ pgd = pgd_offset_k(addr);
33462+ pud = pud_offset(pgd, addr);
33463+ pmd = pmd_offset(pud, addr);
33464+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
33465+ }
33466+*/
33467+#ifdef CONFIG_X86_PAE
33468+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
33469+/*
33470+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
33471+ pgd = pgd_offset_k(addr);
33472+ pud = pud_offset(pgd, addr);
33473+ pmd = pmd_offset(pud, addr);
33474+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
33475+ }
33476+*/
33477+#endif
33478+
33479+#ifdef CONFIG_MODULES
33480+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
33481+#endif
33482+
33483+#else
33484+ /* PaX: make kernel code/rodata read-only, rest non-executable */
33485+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
33486+ pgd = pgd_offset_k(addr);
33487+ pud = pud_offset(pgd, addr);
33488+ pmd = pmd_offset(pud, addr);
33489+ if (!pmd_present(*pmd))
33490+ continue;
33491+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
33492+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
33493+ else
33494+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
33495+ }
33496+
33497+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
33498+ end = addr + KERNEL_IMAGE_SIZE;
33499+ for (; addr < end; addr += PMD_SIZE) {
33500+ pgd = pgd_offset_k(addr);
33501+ pud = pud_offset(pgd, addr);
33502+ pmd = pmd_offset(pud, addr);
33503+ if (!pmd_present(*pmd))
33504+ continue;
33505+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
33506+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
33507+ }
33508+#endif
33509+
33510+ flush_tlb_all();
33511+#endif
33512+
33513 free_init_pages("unused kernel",
33514 (unsigned long)(&__init_begin),
33515 (unsigned long)(&__init_end));
33516diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
33517index 7d05565..bfc5338 100644
33518--- a/arch/x86/mm/init_32.c
33519+++ b/arch/x86/mm/init_32.c
33520@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
33521 bool __read_mostly __vmalloc_start_set = false;
33522
33523 /*
33524- * Creates a middle page table and puts a pointer to it in the
33525- * given global directory entry. This only returns the gd entry
33526- * in non-PAE compilation mode, since the middle layer is folded.
33527- */
33528-static pmd_t * __init one_md_table_init(pgd_t *pgd)
33529-{
33530- pud_t *pud;
33531- pmd_t *pmd_table;
33532-
33533-#ifdef CONFIG_X86_PAE
33534- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
33535- pmd_table = (pmd_t *)alloc_low_page();
33536- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
33537- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
33538- pud = pud_offset(pgd, 0);
33539- BUG_ON(pmd_table != pmd_offset(pud, 0));
33540-
33541- return pmd_table;
33542- }
33543-#endif
33544- pud = pud_offset(pgd, 0);
33545- pmd_table = pmd_offset(pud, 0);
33546-
33547- return pmd_table;
33548-}
33549-
33550-/*
33551 * Create a page table and place a pointer to it in a middle page
33552 * directory entry:
33553 */
33554@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
33555 pte_t *page_table = (pte_t *)alloc_low_page();
33556
33557 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
33558+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33559+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
33560+#else
33561 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
33562+#endif
33563 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
33564 }
33565
33566 return pte_offset_kernel(pmd, 0);
33567 }
33568
33569+static pmd_t * __init one_md_table_init(pgd_t *pgd)
33570+{
33571+ pud_t *pud;
33572+ pmd_t *pmd_table;
33573+
33574+ pud = pud_offset(pgd, 0);
33575+ pmd_table = pmd_offset(pud, 0);
33576+
33577+ return pmd_table;
33578+}
33579+
33580 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
33581 {
33582 int pgd_idx = pgd_index(vaddr);
33583@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33584 int pgd_idx, pmd_idx;
33585 unsigned long vaddr;
33586 pgd_t *pgd;
33587+ pud_t *pud;
33588 pmd_t *pmd;
33589 pte_t *pte = NULL;
33590 unsigned long count = page_table_range_init_count(start, end);
33591@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33592 pgd = pgd_base + pgd_idx;
33593
33594 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
33595- pmd = one_md_table_init(pgd);
33596- pmd = pmd + pmd_index(vaddr);
33597+ pud = pud_offset(pgd, vaddr);
33598+ pmd = pmd_offset(pud, vaddr);
33599+
33600+#ifdef CONFIG_X86_PAE
33601+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33602+#endif
33603+
33604 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
33605 pmd++, pmd_idx++) {
33606 pte = page_table_kmap_check(one_page_table_init(pmd),
33607@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33608 }
33609 }
33610
33611-static inline int is_kernel_text(unsigned long addr)
33612+static inline int is_kernel_text(unsigned long start, unsigned long end)
33613 {
33614- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
33615- return 1;
33616- return 0;
33617+ if ((start >= ktla_ktva((unsigned long)_etext) ||
33618+ end <= ktla_ktva((unsigned long)_stext)) &&
33619+ (start >= ktla_ktva((unsigned long)_einittext) ||
33620+ end <= ktla_ktva((unsigned long)_sinittext)) &&
33621+
33622+#ifdef CONFIG_ACPI_SLEEP
33623+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
33624+#endif
33625+
33626+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
33627+ return 0;
33628+ return 1;
33629 }
33630
33631 /*
33632@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33633 unsigned long last_map_addr = end;
33634 unsigned long start_pfn, end_pfn;
33635 pgd_t *pgd_base = swapper_pg_dir;
33636- int pgd_idx, pmd_idx, pte_ofs;
33637+ unsigned int pgd_idx, pmd_idx, pte_ofs;
33638 unsigned long pfn;
33639 pgd_t *pgd;
33640+ pud_t *pud;
33641 pmd_t *pmd;
33642 pte_t *pte;
33643 unsigned pages_2m, pages_4k;
33644@@ -291,8 +295,13 @@ repeat:
33645 pfn = start_pfn;
33646 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33647 pgd = pgd_base + pgd_idx;
33648- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33649- pmd = one_md_table_init(pgd);
33650+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33651+ pud = pud_offset(pgd, 0);
33652+ pmd = pmd_offset(pud, 0);
33653+
33654+#ifdef CONFIG_X86_PAE
33655+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33656+#endif
33657
33658 if (pfn >= end_pfn)
33659 continue;
33660@@ -304,14 +313,13 @@ repeat:
33661 #endif
33662 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33663 pmd++, pmd_idx++) {
33664- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33665+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33666
33667 /*
33668 * Map with big pages if possible, otherwise
33669 * create normal page tables:
33670 */
33671 if (use_pse) {
33672- unsigned int addr2;
33673 pgprot_t prot = PAGE_KERNEL_LARGE;
33674 /*
33675 * first pass will use the same initial
33676@@ -322,11 +330,7 @@ repeat:
33677 _PAGE_PSE);
33678
33679 pfn &= PMD_MASK >> PAGE_SHIFT;
33680- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33681- PAGE_OFFSET + PAGE_SIZE-1;
33682-
33683- if (is_kernel_text(addr) ||
33684- is_kernel_text(addr2))
33685+ if (is_kernel_text(address, address + PMD_SIZE))
33686 prot = PAGE_KERNEL_LARGE_EXEC;
33687
33688 pages_2m++;
33689@@ -343,7 +347,7 @@ repeat:
33690 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33691 pte += pte_ofs;
33692 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33693- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33694+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33695 pgprot_t prot = PAGE_KERNEL;
33696 /*
33697 * first pass will use the same initial
33698@@ -351,7 +355,7 @@ repeat:
33699 */
33700 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33701
33702- if (is_kernel_text(addr))
33703+ if (is_kernel_text(address, address + PAGE_SIZE))
33704 prot = PAGE_KERNEL_EXEC;
33705
33706 pages_4k++;
33707@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33708
33709 pud = pud_offset(pgd, va);
33710 pmd = pmd_offset(pud, va);
33711- if (!pmd_present(*pmd))
33712+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33713 break;
33714
33715 /* should not be large page here */
33716@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33717
33718 static void __init pagetable_init(void)
33719 {
33720- pgd_t *pgd_base = swapper_pg_dir;
33721-
33722- permanent_kmaps_init(pgd_base);
33723+ permanent_kmaps_init(swapper_pg_dir);
33724 }
33725
33726-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33727+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33728 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33729
33730 /* user-defined highmem size */
33731@@ -787,10 +789,10 @@ void __init mem_init(void)
33732 ((unsigned long)&__init_end -
33733 (unsigned long)&__init_begin) >> 10,
33734
33735- (unsigned long)&_etext, (unsigned long)&_edata,
33736- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33737+ (unsigned long)&_sdata, (unsigned long)&_edata,
33738+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33739
33740- (unsigned long)&_text, (unsigned long)&_etext,
33741+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33742 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33743
33744 /*
33745@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33746 if (!kernel_set_to_readonly)
33747 return;
33748
33749+ start = ktla_ktva(start);
33750 pr_debug("Set kernel text: %lx - %lx for read write\n",
33751 start, start+size);
33752
33753@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33754 if (!kernel_set_to_readonly)
33755 return;
33756
33757+ start = ktla_ktva(start);
33758 pr_debug("Set kernel text: %lx - %lx for read only\n",
33759 start, start+size);
33760
33761@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33762 unsigned long start = PFN_ALIGN(_text);
33763 unsigned long size = PFN_ALIGN(_etext) - start;
33764
33765+ start = ktla_ktva(start);
33766 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33767 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33768 size >> 10);
33769diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33770index ac7de5f..ceb56df 100644
33771--- a/arch/x86/mm/init_64.c
33772+++ b/arch/x86/mm/init_64.c
33773@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33774 * around without checking the pgd every time.
33775 */
33776
33777-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
33778+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
33779 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33780
33781 int force_personality32;
33782@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33783
33784 for (address = start; address <= end; address += PGDIR_SIZE) {
33785 const pgd_t *pgd_ref = pgd_offset_k(address);
33786+
33787+#ifdef CONFIG_PAX_PER_CPU_PGD
33788+ unsigned long cpu;
33789+#else
33790 struct page *page;
33791+#endif
33792
33793 if (pgd_none(*pgd_ref))
33794 continue;
33795
33796 spin_lock(&pgd_lock);
33797+
33798+#ifdef CONFIG_PAX_PER_CPU_PGD
33799+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33800+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33801+
33802+ if (pgd_none(*pgd))
33803+ set_pgd(pgd, *pgd_ref);
33804+ else
33805+ BUG_ON(pgd_page_vaddr(*pgd)
33806+ != pgd_page_vaddr(*pgd_ref));
33807+ pgd = pgd_offset_cpu(cpu, kernel, address);
33808+#else
33809 list_for_each_entry(page, &pgd_list, lru) {
33810 pgd_t *pgd;
33811 spinlock_t *pgt_lock;
33812@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33813 /* the pgt_lock only for Xen */
33814 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33815 spin_lock(pgt_lock);
33816+#endif
33817
33818 if (pgd_none(*pgd))
33819 set_pgd(pgd, *pgd_ref);
33820@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33821 BUG_ON(pgd_page_vaddr(*pgd)
33822 != pgd_page_vaddr(*pgd_ref));
33823
33824+#ifndef CONFIG_PAX_PER_CPU_PGD
33825 spin_unlock(pgt_lock);
33826+#endif
33827+
33828 }
33829 spin_unlock(&pgd_lock);
33830 }
33831@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33832 {
33833 if (pgd_none(*pgd)) {
33834 pud_t *pud = (pud_t *)spp_getpage();
33835- pgd_populate(&init_mm, pgd, pud);
33836+ pgd_populate_kernel(&init_mm, pgd, pud);
33837 if (pud != pud_offset(pgd, 0))
33838 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33839 pud, pud_offset(pgd, 0));
33840@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33841 {
33842 if (pud_none(*pud)) {
33843 pmd_t *pmd = (pmd_t *) spp_getpage();
33844- pud_populate(&init_mm, pud, pmd);
33845+ pud_populate_kernel(&init_mm, pud, pmd);
33846 if (pmd != pmd_offset(pud, 0))
33847 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33848 pmd, pmd_offset(pud, 0));
33849@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33850 pmd = fill_pmd(pud, vaddr);
33851 pte = fill_pte(pmd, vaddr);
33852
33853+ pax_open_kernel();
33854 set_pte(pte, new_pte);
33855+ pax_close_kernel();
33856
33857 /*
33858 * It's enough to flush this one mapping.
33859@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33860 pgd = pgd_offset_k((unsigned long)__va(phys));
33861 if (pgd_none(*pgd)) {
33862 pud = (pud_t *) spp_getpage();
33863- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33864- _PAGE_USER));
33865+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33866 }
33867 pud = pud_offset(pgd, (unsigned long)__va(phys));
33868 if (pud_none(*pud)) {
33869 pmd = (pmd_t *) spp_getpage();
33870- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33871- _PAGE_USER));
33872+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33873 }
33874 pmd = pmd_offset(pud, phys);
33875 BUG_ON(!pmd_none(*pmd));
33876@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33877 prot);
33878
33879 spin_lock(&init_mm.page_table_lock);
33880- pud_populate(&init_mm, pud, pmd);
33881+ pud_populate_kernel(&init_mm, pud, pmd);
33882 spin_unlock(&init_mm.page_table_lock);
33883 }
33884 __flush_tlb_all();
33885@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
33886 page_size_mask);
33887
33888 spin_lock(&init_mm.page_table_lock);
33889- pgd_populate(&init_mm, pgd, pud);
33890+ pgd_populate_kernel(&init_mm, pgd, pud);
33891 spin_unlock(&init_mm.page_table_lock);
33892 pgd_changed = true;
33893 }
33894@@ -1205,8 +1226,8 @@ static struct vm_operations_struct gate_vma_ops = {
33895 static struct vm_area_struct gate_vma = {
33896 .vm_start = VSYSCALL_ADDR,
33897 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
33898- .vm_page_prot = PAGE_READONLY_EXEC,
33899- .vm_flags = VM_READ | VM_EXEC,
33900+ .vm_page_prot = PAGE_READONLY,
33901+ .vm_flags = VM_READ,
33902 .vm_ops = &gate_vma_ops,
33903 };
33904
33905diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33906index 7b179b49..6bd17777 100644
33907--- a/arch/x86/mm/iomap_32.c
33908+++ b/arch/x86/mm/iomap_32.c
33909@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33910 type = kmap_atomic_idx_push();
33911 idx = type + KM_TYPE_NR * smp_processor_id();
33912 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33913+
33914+ pax_open_kernel();
33915 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33916+ pax_close_kernel();
33917+
33918 arch_flush_lazy_mmu_mode();
33919
33920 return (void *)vaddr;
33921diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33922index baff1da..2816ef4 100644
33923--- a/arch/x86/mm/ioremap.c
33924+++ b/arch/x86/mm/ioremap.c
33925@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33926 unsigned long i;
33927
33928 for (i = 0; i < nr_pages; ++i)
33929- if (pfn_valid(start_pfn + i) &&
33930- !PageReserved(pfn_to_page(start_pfn + i)))
33931+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33932+ !PageReserved(pfn_to_page(start_pfn + i))))
33933 return 1;
33934
33935 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33936@@ -268,7 +268,7 @@ EXPORT_SYMBOL(ioremap_prot);
33937 *
33938 * Caller must ensure there is only one unmapping for the same pointer.
33939 */
33940-void iounmap(volatile void __iomem *addr)
33941+void iounmap(const volatile void __iomem *addr)
33942 {
33943 struct vm_struct *p, *o;
33944
33945@@ -322,6 +322,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33946
33947 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33948 if (page_is_ram(start >> PAGE_SHIFT))
33949+#ifdef CONFIG_HIGHMEM
33950+ if ((start >> PAGE_SHIFT) < max_low_pfn)
33951+#endif
33952 return __va(phys);
33953
33954 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33955@@ -334,13 +337,16 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33956 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
33957 {
33958 if (page_is_ram(phys >> PAGE_SHIFT))
33959+#ifdef CONFIG_HIGHMEM
33960+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33961+#endif
33962 return;
33963
33964 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33965 return;
33966 }
33967
33968-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33969+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33970
33971 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33972 {
33973@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
33974 early_ioremap_setup();
33975
33976 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33977- memset(bm_pte, 0, sizeof(bm_pte));
33978- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33979+ pmd_populate_user(&init_mm, pmd, bm_pte);
33980
33981 /*
33982 * The boot-ioremap range spans multiple pmds, for which
33983diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33984index dd89a13..d77bdcc 100644
33985--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33986+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33987@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33988 * memory (e.g. tracked pages)? For now, we need this to avoid
33989 * invoking kmemcheck for PnP BIOS calls.
33990 */
33991- if (regs->flags & X86_VM_MASK)
33992+ if (v8086_mode(regs))
33993 return false;
33994- if (regs->cs != __KERNEL_CS)
33995+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
33996 return false;
33997
33998 pte = kmemcheck_pte_lookup(address);
33999diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
34000index 919b912..9267313 100644
34001--- a/arch/x86/mm/mmap.c
34002+++ b/arch/x86/mm/mmap.c
34003@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
34004 * Leave an at least ~128 MB hole with possible stack randomization.
34005 */
34006 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
34007-#define MAX_GAP (TASK_SIZE/6*5)
34008+#define MAX_GAP (pax_task_size/6*5)
34009
34010 static int mmap_is_legacy(void)
34011 {
34012@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
34013 return rnd << PAGE_SHIFT;
34014 }
34015
34016-static unsigned long mmap_base(void)
34017+static unsigned long mmap_base(struct mm_struct *mm)
34018 {
34019 unsigned long gap = rlimit(RLIMIT_STACK);
34020+ unsigned long pax_task_size = TASK_SIZE;
34021+
34022+#ifdef CONFIG_PAX_SEGMEXEC
34023+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
34024+ pax_task_size = SEGMEXEC_TASK_SIZE;
34025+#endif
34026
34027 if (gap < MIN_GAP)
34028 gap = MIN_GAP;
34029 else if (gap > MAX_GAP)
34030 gap = MAX_GAP;
34031
34032- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
34033+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
34034 }
34035
34036 /*
34037 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
34038 * does, but not when emulating X86_32
34039 */
34040-static unsigned long mmap_legacy_base(void)
34041+static unsigned long mmap_legacy_base(struct mm_struct *mm)
34042 {
34043- if (mmap_is_ia32())
34044+ if (mmap_is_ia32()) {
34045+
34046+#ifdef CONFIG_PAX_SEGMEXEC
34047+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
34048+ return SEGMEXEC_TASK_UNMAPPED_BASE;
34049+ else
34050+#endif
34051+
34052 return TASK_UNMAPPED_BASE;
34053- else
34054+ } else
34055 return TASK_UNMAPPED_BASE + mmap_rnd();
34056 }
34057
34058@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
34059 */
34060 void arch_pick_mmap_layout(struct mm_struct *mm)
34061 {
34062- mm->mmap_legacy_base = mmap_legacy_base();
34063- mm->mmap_base = mmap_base();
34064+ mm->mmap_legacy_base = mmap_legacy_base(mm);
34065+ mm->mmap_base = mmap_base(mm);
34066+
34067+#ifdef CONFIG_PAX_RANDMMAP
34068+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
34069+ mm->mmap_legacy_base += mm->delta_mmap;
34070+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
34071+ }
34072+#endif
34073
34074 if (mmap_is_legacy()) {
34075 mm->mmap_base = mm->mmap_legacy_base;
34076diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
34077index 0057a7a..95c7edd 100644
34078--- a/arch/x86/mm/mmio-mod.c
34079+++ b/arch/x86/mm/mmio-mod.c
34080@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
34081 break;
34082 default:
34083 {
34084- unsigned char *ip = (unsigned char *)instptr;
34085+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
34086 my_trace->opcode = MMIO_UNKNOWN_OP;
34087 my_trace->width = 0;
34088 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
34089@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
34090 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
34091 void __iomem *addr)
34092 {
34093- static atomic_t next_id;
34094+ static atomic_unchecked_t next_id;
34095 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
34096 /* These are page-unaligned. */
34097 struct mmiotrace_map map = {
34098@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
34099 .private = trace
34100 },
34101 .phys = offset,
34102- .id = atomic_inc_return(&next_id)
34103+ .id = atomic_inc_return_unchecked(&next_id)
34104 };
34105 map.map_id = trace->id;
34106
34107@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
34108 ioremap_trace_core(offset, size, addr);
34109 }
34110
34111-static void iounmap_trace_core(volatile void __iomem *addr)
34112+static void iounmap_trace_core(const volatile void __iomem *addr)
34113 {
34114 struct mmiotrace_map map = {
34115 .phys = 0,
34116@@ -328,7 +328,7 @@ not_enabled:
34117 }
34118 }
34119
34120-void mmiotrace_iounmap(volatile void __iomem *addr)
34121+void mmiotrace_iounmap(const volatile void __iomem *addr)
34122 {
34123 might_sleep();
34124 if (is_enabled()) /* recheck and proper locking in *_core() */
34125diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
34126index a32b706..efb308b 100644
34127--- a/arch/x86/mm/numa.c
34128+++ b/arch/x86/mm/numa.c
34129@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
34130 return true;
34131 }
34132
34133-static int __init numa_register_memblks(struct numa_meminfo *mi)
34134+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
34135 {
34136 unsigned long uninitialized_var(pfn_align);
34137 int i, nid;
34138diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
34139index 36de293..b820ddc 100644
34140--- a/arch/x86/mm/pageattr.c
34141+++ b/arch/x86/mm/pageattr.c
34142@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34143 */
34144 #ifdef CONFIG_PCI_BIOS
34145 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
34146- pgprot_val(forbidden) |= _PAGE_NX;
34147+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34148 #endif
34149
34150 /*
34151@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34152 * Does not cover __inittext since that is gone later on. On
34153 * 64bit we do not enforce !NX on the low mapping
34154 */
34155- if (within(address, (unsigned long)_text, (unsigned long)_etext))
34156- pgprot_val(forbidden) |= _PAGE_NX;
34157+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
34158+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34159
34160+#ifdef CONFIG_DEBUG_RODATA
34161 /*
34162 * The .rodata section needs to be read-only. Using the pfn
34163 * catches all aliases.
34164@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34165 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
34166 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
34167 pgprot_val(forbidden) |= _PAGE_RW;
34168+#endif
34169
34170 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
34171 /*
34172@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34173 }
34174 #endif
34175
34176+#ifdef CONFIG_PAX_KERNEXEC
34177+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
34178+ pgprot_val(forbidden) |= _PAGE_RW;
34179+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34180+ }
34181+#endif
34182+
34183 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
34184
34185 return prot;
34186@@ -420,23 +429,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
34187 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
34188 {
34189 /* change init_mm */
34190+ pax_open_kernel();
34191 set_pte_atomic(kpte, pte);
34192+
34193 #ifdef CONFIG_X86_32
34194 if (!SHARED_KERNEL_PMD) {
34195+
34196+#ifdef CONFIG_PAX_PER_CPU_PGD
34197+ unsigned long cpu;
34198+#else
34199 struct page *page;
34200+#endif
34201
34202+#ifdef CONFIG_PAX_PER_CPU_PGD
34203+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
34204+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
34205+#else
34206 list_for_each_entry(page, &pgd_list, lru) {
34207- pgd_t *pgd;
34208+ pgd_t *pgd = (pgd_t *)page_address(page);
34209+#endif
34210+
34211 pud_t *pud;
34212 pmd_t *pmd;
34213
34214- pgd = (pgd_t *)page_address(page) + pgd_index(address);
34215+ pgd += pgd_index(address);
34216 pud = pud_offset(pgd, address);
34217 pmd = pmd_offset(pud, address);
34218 set_pte_atomic((pte_t *)pmd, pte);
34219 }
34220 }
34221 #endif
34222+ pax_close_kernel();
34223 }
34224
34225 static int
34226diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
34227index 6574388..87e9bef 100644
34228--- a/arch/x86/mm/pat.c
34229+++ b/arch/x86/mm/pat.c
34230@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
34231
34232 if (!entry) {
34233 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
34234- current->comm, current->pid, start, end - 1);
34235+ current->comm, task_pid_nr(current), start, end - 1);
34236 return -EINVAL;
34237 }
34238
34239@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34240
34241 while (cursor < to) {
34242 if (!devmem_is_allowed(pfn)) {
34243- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
34244- current->comm, from, to - 1);
34245+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
34246+ current->comm, from, to - 1, cursor);
34247 return 0;
34248 }
34249 cursor += PAGE_SIZE;
34250@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
34251 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
34252 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
34253 "for [mem %#010Lx-%#010Lx]\n",
34254- current->comm, current->pid,
34255+ current->comm, task_pid_nr(current),
34256 cattr_name(flags),
34257 base, (unsigned long long)(base + size-1));
34258 return -EINVAL;
34259@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
34260 flags = lookup_memtype(paddr);
34261 if (want_flags != flags) {
34262 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
34263- current->comm, current->pid,
34264+ current->comm, task_pid_nr(current),
34265 cattr_name(want_flags),
34266 (unsigned long long)paddr,
34267 (unsigned long long)(paddr + size - 1),
34268@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
34269 free_memtype(paddr, paddr + size);
34270 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
34271 " for [mem %#010Lx-%#010Lx], got %s\n",
34272- current->comm, current->pid,
34273+ current->comm, task_pid_nr(current),
34274 cattr_name(want_flags),
34275 (unsigned long long)paddr,
34276 (unsigned long long)(paddr + size - 1),
34277diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
34278index 415f6c4..d319983 100644
34279--- a/arch/x86/mm/pat_rbtree.c
34280+++ b/arch/x86/mm/pat_rbtree.c
34281@@ -160,7 +160,7 @@ success:
34282
34283 failure:
34284 printk(KERN_INFO "%s:%d conflicting memory types "
34285- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
34286+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
34287 end, cattr_name(found_type), cattr_name(match->type));
34288 return -EBUSY;
34289 }
34290diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
34291index 9f0614d..92ae64a 100644
34292--- a/arch/x86/mm/pf_in.c
34293+++ b/arch/x86/mm/pf_in.c
34294@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
34295 int i;
34296 enum reason_type rv = OTHERS;
34297
34298- p = (unsigned char *)ins_addr;
34299+ p = (unsigned char *)ktla_ktva(ins_addr);
34300 p += skip_prefix(p, &prf);
34301 p += get_opcode(p, &opcode);
34302
34303@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
34304 struct prefix_bits prf;
34305 int i;
34306
34307- p = (unsigned char *)ins_addr;
34308+ p = (unsigned char *)ktla_ktva(ins_addr);
34309 p += skip_prefix(p, &prf);
34310 p += get_opcode(p, &opcode);
34311
34312@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
34313 struct prefix_bits prf;
34314 int i;
34315
34316- p = (unsigned char *)ins_addr;
34317+ p = (unsigned char *)ktla_ktva(ins_addr);
34318 p += skip_prefix(p, &prf);
34319 p += get_opcode(p, &opcode);
34320
34321@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
34322 struct prefix_bits prf;
34323 int i;
34324
34325- p = (unsigned char *)ins_addr;
34326+ p = (unsigned char *)ktla_ktva(ins_addr);
34327 p += skip_prefix(p, &prf);
34328 p += get_opcode(p, &opcode);
34329 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
34330@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
34331 struct prefix_bits prf;
34332 int i;
34333
34334- p = (unsigned char *)ins_addr;
34335+ p = (unsigned char *)ktla_ktva(ins_addr);
34336 p += skip_prefix(p, &prf);
34337 p += get_opcode(p, &opcode);
34338 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
34339diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
34340index 6fb6927..4fc13c0 100644
34341--- a/arch/x86/mm/pgtable.c
34342+++ b/arch/x86/mm/pgtable.c
34343@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
34344 list_del(&page->lru);
34345 }
34346
34347-#define UNSHARED_PTRS_PER_PGD \
34348- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
34349+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
34350+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
34351
34352+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
34353+{
34354+ unsigned int count = USER_PGD_PTRS;
34355
34356+ if (!pax_user_shadow_base)
34357+ return;
34358+
34359+ while (count--)
34360+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
34361+}
34362+#endif
34363+
34364+#ifdef CONFIG_PAX_PER_CPU_PGD
34365+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
34366+{
34367+ unsigned int count = USER_PGD_PTRS;
34368+
34369+ while (count--) {
34370+ pgd_t pgd;
34371+
34372+#ifdef CONFIG_X86_64
34373+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
34374+#else
34375+ pgd = *src++;
34376+#endif
34377+
34378+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
34379+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
34380+#endif
34381+
34382+ *dst++ = pgd;
34383+ }
34384+
34385+}
34386+#endif
34387+
34388+#ifdef CONFIG_X86_64
34389+#define pxd_t pud_t
34390+#define pyd_t pgd_t
34391+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
34392+#define pgtable_pxd_page_ctor(page) true
34393+#define pgtable_pxd_page_dtor(page)
34394+#define pxd_free(mm, pud) pud_free((mm), (pud))
34395+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
34396+#define pyd_offset(mm, address) pgd_offset((mm), (address))
34397+#define PYD_SIZE PGDIR_SIZE
34398+#else
34399+#define pxd_t pmd_t
34400+#define pyd_t pud_t
34401+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
34402+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
34403+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
34404+#define pxd_free(mm, pud) pmd_free((mm), (pud))
34405+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
34406+#define pyd_offset(mm, address) pud_offset((mm), (address))
34407+#define PYD_SIZE PUD_SIZE
34408+#endif
34409+
34410+#ifdef CONFIG_PAX_PER_CPU_PGD
34411+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
34412+static inline void pgd_dtor(pgd_t *pgd) {}
34413+#else
34414 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
34415 {
34416 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
34417@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
34418 pgd_list_del(pgd);
34419 spin_unlock(&pgd_lock);
34420 }
34421+#endif
34422
34423 /*
34424 * List of all pgd's needed for non-PAE so it can invalidate entries
34425@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
34426 * -- nyc
34427 */
34428
34429-#ifdef CONFIG_X86_PAE
34430+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
34431 /*
34432 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
34433 * updating the top-level pagetable entries to guarantee the
34434@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
34435 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
34436 * and initialize the kernel pmds here.
34437 */
34438-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
34439+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
34440
34441 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
34442 {
34443@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
34444 */
34445 flush_tlb_mm(mm);
34446 }
34447+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
34448+#define PREALLOCATED_PXDS USER_PGD_PTRS
34449 #else /* !CONFIG_X86_PAE */
34450
34451 /* No need to prepopulate any pagetable entries in non-PAE modes. */
34452-#define PREALLOCATED_PMDS 0
34453+#define PREALLOCATED_PXDS 0
34454
34455 #endif /* CONFIG_X86_PAE */
34456
34457-static void free_pmds(pmd_t *pmds[])
34458+static void free_pxds(pxd_t *pxds[])
34459 {
34460 int i;
34461
34462- for(i = 0; i < PREALLOCATED_PMDS; i++)
34463- if (pmds[i]) {
34464- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
34465- free_page((unsigned long)pmds[i]);
34466+ for(i = 0; i < PREALLOCATED_PXDS; i++)
34467+ if (pxds[i]) {
34468+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
34469+ free_page((unsigned long)pxds[i]);
34470 }
34471 }
34472
34473-static int preallocate_pmds(pmd_t *pmds[])
34474+static int preallocate_pxds(pxd_t *pxds[])
34475 {
34476 int i;
34477 bool failed = false;
34478
34479- for(i = 0; i < PREALLOCATED_PMDS; i++) {
34480- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
34481- if (!pmd)
34482+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
34483+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
34484+ if (!pxd)
34485 failed = true;
34486- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
34487- free_page((unsigned long)pmd);
34488- pmd = NULL;
34489+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
34490+ free_page((unsigned long)pxd);
34491+ pxd = NULL;
34492 failed = true;
34493 }
34494- pmds[i] = pmd;
34495+ pxds[i] = pxd;
34496 }
34497
34498 if (failed) {
34499- free_pmds(pmds);
34500+ free_pxds(pxds);
34501 return -ENOMEM;
34502 }
34503
34504@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
34505 * preallocate which never got a corresponding vma will need to be
34506 * freed manually.
34507 */
34508-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
34509+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
34510 {
34511 int i;
34512
34513- for(i = 0; i < PREALLOCATED_PMDS; i++) {
34514+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
34515 pgd_t pgd = pgdp[i];
34516
34517 if (pgd_val(pgd) != 0) {
34518- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
34519+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
34520
34521- pgdp[i] = native_make_pgd(0);
34522+ set_pgd(pgdp + i, native_make_pgd(0));
34523
34524- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
34525- pmd_free(mm, pmd);
34526+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
34527+ pxd_free(mm, pxd);
34528 }
34529 }
34530 }
34531
34532-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
34533+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
34534 {
34535- pud_t *pud;
34536+ pyd_t *pyd;
34537 int i;
34538
34539- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
34540+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
34541 return;
34542
34543- pud = pud_offset(pgd, 0);
34544-
34545- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
34546- pmd_t *pmd = pmds[i];
34547+#ifdef CONFIG_X86_64
34548+ pyd = pyd_offset(mm, 0L);
34549+#else
34550+ pyd = pyd_offset(pgd, 0L);
34551+#endif
34552
34553+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
34554+ pxd_t *pxd = pxds[i];
34555 if (i >= KERNEL_PGD_BOUNDARY)
34556- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34557- sizeof(pmd_t) * PTRS_PER_PMD);
34558+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34559+ sizeof(pxd_t) * PTRS_PER_PMD);
34560
34561- pud_populate(mm, pud, pmd);
34562+ pyd_populate(mm, pyd, pxd);
34563 }
34564 }
34565
34566 pgd_t *pgd_alloc(struct mm_struct *mm)
34567 {
34568 pgd_t *pgd;
34569- pmd_t *pmds[PREALLOCATED_PMDS];
34570+ pxd_t *pxds[PREALLOCATED_PXDS];
34571
34572 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
34573
34574@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34575
34576 mm->pgd = pgd;
34577
34578- if (preallocate_pmds(pmds) != 0)
34579+ if (preallocate_pxds(pxds) != 0)
34580 goto out_free_pgd;
34581
34582 if (paravirt_pgd_alloc(mm) != 0)
34583- goto out_free_pmds;
34584+ goto out_free_pxds;
34585
34586 /*
34587 * Make sure that pre-populating the pmds is atomic with
34588@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34589 spin_lock(&pgd_lock);
34590
34591 pgd_ctor(mm, pgd);
34592- pgd_prepopulate_pmd(mm, pgd, pmds);
34593+ pgd_prepopulate_pxd(mm, pgd, pxds);
34594
34595 spin_unlock(&pgd_lock);
34596
34597 return pgd;
34598
34599-out_free_pmds:
34600- free_pmds(pmds);
34601+out_free_pxds:
34602+ free_pxds(pxds);
34603 out_free_pgd:
34604 free_page((unsigned long)pgd);
34605 out:
34606@@ -313,7 +380,7 @@ out:
34607
34608 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34609 {
34610- pgd_mop_up_pmds(mm, pgd);
34611+ pgd_mop_up_pxds(mm, pgd);
34612 pgd_dtor(pgd);
34613 paravirt_pgd_free(mm, pgd);
34614 free_page((unsigned long)pgd);
34615diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34616index 4dd8cf6..f9d143e 100644
34617--- a/arch/x86/mm/pgtable_32.c
34618+++ b/arch/x86/mm/pgtable_32.c
34619@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34620 return;
34621 }
34622 pte = pte_offset_kernel(pmd, vaddr);
34623+
34624+ pax_open_kernel();
34625 if (pte_val(pteval))
34626 set_pte_at(&init_mm, vaddr, pte, pteval);
34627 else
34628 pte_clear(&init_mm, vaddr, pte);
34629+ pax_close_kernel();
34630
34631 /*
34632 * It's enough to flush this one mapping.
34633diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34634index e666cbb..61788c45 100644
34635--- a/arch/x86/mm/physaddr.c
34636+++ b/arch/x86/mm/physaddr.c
34637@@ -10,7 +10,7 @@
34638 #ifdef CONFIG_X86_64
34639
34640 #ifdef CONFIG_DEBUG_VIRTUAL
34641-unsigned long __phys_addr(unsigned long x)
34642+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34643 {
34644 unsigned long y = x - __START_KERNEL_map;
34645
34646@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34647 #else
34648
34649 #ifdef CONFIG_DEBUG_VIRTUAL
34650-unsigned long __phys_addr(unsigned long x)
34651+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34652 {
34653 unsigned long phys_addr = x - PAGE_OFFSET;
34654 /* VMALLOC_* aren't constants */
34655diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34656index 90555bf..f5f1828 100644
34657--- a/arch/x86/mm/setup_nx.c
34658+++ b/arch/x86/mm/setup_nx.c
34659@@ -5,8 +5,10 @@
34660 #include <asm/pgtable.h>
34661 #include <asm/proto.h>
34662
34663+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34664 static int disable_nx;
34665
34666+#ifndef CONFIG_PAX_PAGEEXEC
34667 /*
34668 * noexec = on|off
34669 *
34670@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34671 return 0;
34672 }
34673 early_param("noexec", noexec_setup);
34674+#endif
34675+
34676+#endif
34677
34678 void x86_configure_nx(void)
34679 {
34680+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34681 if (cpu_has_nx && !disable_nx)
34682 __supported_pte_mask |= _PAGE_NX;
34683 else
34684+#endif
34685 __supported_pte_mask &= ~_PAGE_NX;
34686 }
34687
34688diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34689index ee61c36..e6fedeb 100644
34690--- a/arch/x86/mm/tlb.c
34691+++ b/arch/x86/mm/tlb.c
34692@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34693 BUG();
34694 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34695 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34696+
34697+#ifndef CONFIG_PAX_PER_CPU_PGD
34698 load_cr3(swapper_pg_dir);
34699+#endif
34700+
34701 /*
34702 * This gets called in the idle path where RCU
34703 * functions differently. Tracing normally
34704diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34705new file mode 100644
34706index 0000000..dace51c
34707--- /dev/null
34708+++ b/arch/x86/mm/uderef_64.c
34709@@ -0,0 +1,37 @@
34710+#include <linux/mm.h>
34711+#include <asm/pgtable.h>
34712+#include <asm/uaccess.h>
34713+
34714+#ifdef CONFIG_PAX_MEMORY_UDEREF
34715+/* PaX: due to the special call convention these functions must
34716+ * - remain leaf functions under all configurations,
34717+ * - never be called directly, only dereferenced from the wrappers.
34718+ */
34719+void __pax_open_userland(void)
34720+{
34721+ unsigned int cpu;
34722+
34723+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34724+ return;
34725+
34726+ cpu = raw_get_cpu();
34727+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34728+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34729+ raw_put_cpu_no_resched();
34730+}
34731+EXPORT_SYMBOL(__pax_open_userland);
34732+
34733+void __pax_close_userland(void)
34734+{
34735+ unsigned int cpu;
34736+
34737+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34738+ return;
34739+
34740+ cpu = raw_get_cpu();
34741+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34742+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34743+ raw_put_cpu_no_resched();
34744+}
34745+EXPORT_SYMBOL(__pax_close_userland);
34746+#endif
34747diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34748index 6440221..f84b5c7 100644
34749--- a/arch/x86/net/bpf_jit.S
34750+++ b/arch/x86/net/bpf_jit.S
34751@@ -9,6 +9,7 @@
34752 */
34753 #include <linux/linkage.h>
34754 #include <asm/dwarf2.h>
34755+#include <asm/alternative-asm.h>
34756
34757 /*
34758 * Calling convention :
34759@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34760 jle bpf_slow_path_word
34761 mov (SKBDATA,%rsi),%eax
34762 bswap %eax /* ntohl() */
34763+ pax_force_retaddr
34764 ret
34765
34766 sk_load_half:
34767@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34768 jle bpf_slow_path_half
34769 movzwl (SKBDATA,%rsi),%eax
34770 rol $8,%ax # ntohs()
34771+ pax_force_retaddr
34772 ret
34773
34774 sk_load_byte:
34775@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34776 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34777 jle bpf_slow_path_byte
34778 movzbl (SKBDATA,%rsi),%eax
34779+ pax_force_retaddr
34780 ret
34781
34782 /* rsi contains offset and can be scratched */
34783@@ -90,6 +94,7 @@ bpf_slow_path_word:
34784 js bpf_error
34785 mov - MAX_BPF_STACK + 32(%rbp),%eax
34786 bswap %eax
34787+ pax_force_retaddr
34788 ret
34789
34790 bpf_slow_path_half:
34791@@ -98,12 +103,14 @@ bpf_slow_path_half:
34792 mov - MAX_BPF_STACK + 32(%rbp),%ax
34793 rol $8,%ax
34794 movzwl %ax,%eax
34795+ pax_force_retaddr
34796 ret
34797
34798 bpf_slow_path_byte:
34799 bpf_slow_path_common(1)
34800 js bpf_error
34801 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34802+ pax_force_retaddr
34803 ret
34804
34805 #define sk_negative_common(SIZE) \
34806@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34807 sk_negative_common(4)
34808 mov (%rax), %eax
34809 bswap %eax
34810+ pax_force_retaddr
34811 ret
34812
34813 bpf_slow_path_half_neg:
34814@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34815 mov (%rax),%ax
34816 rol $8,%ax
34817 movzwl %ax,%eax
34818+ pax_force_retaddr
34819 ret
34820
34821 bpf_slow_path_byte_neg:
34822@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34823 .globl sk_load_byte_negative_offset
34824 sk_negative_common(1)
34825 movzbl (%rax), %eax
34826+ pax_force_retaddr
34827 ret
34828
34829 bpf_error:
34830@@ -156,4 +166,5 @@ bpf_error:
34831 mov - MAX_BPF_STACK + 16(%rbp),%r14
34832 mov - MAX_BPF_STACK + 24(%rbp),%r15
34833 leaveq
34834+ pax_force_retaddr
34835 ret
34836diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34837index c881ba8..71aca2e 100644
34838--- a/arch/x86/net/bpf_jit_comp.c
34839+++ b/arch/x86/net/bpf_jit_comp.c
34840@@ -15,7 +15,11 @@
34841 #include <linux/if_vlan.h>
34842 #include <linux/random.h>
34843
34844+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34845+int bpf_jit_enable __read_only;
34846+#else
34847 int bpf_jit_enable __read_mostly;
34848+#endif
34849
34850 /*
34851 * assembly code in arch/x86/net/bpf_jit.S
34852@@ -109,36 +113,32 @@ static inline void bpf_flush_icache(void *start, void *end)
34853 #define CHOOSE_LOAD_FUNC(K, func) \
34854 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
34855
34856-struct bpf_binary_header {
34857- unsigned int pages;
34858- /* Note : for security reasons, bpf code will follow a randomly
34859- * sized amount of int3 instructions
34860- */
34861- u8 image[];
34862-};
34863-
34864-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
34865+/* Note : for security reasons, bpf code will follow a randomly
34866+ * sized amount of int3 instructions
34867+ */
34868+static u8 *bpf_alloc_binary(unsigned int proglen,
34869 u8 **image_ptr)
34870 {
34871 unsigned int sz, hole;
34872- struct bpf_binary_header *header;
34873+ u8 *header;
34874
34875 /* Most of BPF filters are really small,
34876 * but if some of them fill a page, allow at least
34877 * 128 extra bytes to insert a random section of int3
34878 */
34879- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
34880- header = module_alloc(sz);
34881+ sz = round_up(proglen + 128, PAGE_SIZE);
34882+ header = module_alloc_exec(sz);
34883 if (!header)
34884 return NULL;
34885
34886+ pax_open_kernel();
34887 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
34888+ pax_close_kernel();
34889
34890- header->pages = sz / PAGE_SIZE;
34891- hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
34892+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
34893
34894 /* insert a random number of int3 instructions before BPF code */
34895- *image_ptr = &header->image[prandom_u32() % hole];
34896+ *image_ptr = &header[prandom_u32() % hole];
34897 return header;
34898 }
34899
34900@@ -864,7 +864,9 @@ common_load:
34901 pr_err("bpf_jit_compile fatal error\n");
34902 return -EFAULT;
34903 }
34904+ pax_open_kernel();
34905 memcpy(image + proglen, temp, ilen);
34906+ pax_close_kernel();
34907 }
34908 proglen += ilen;
34909 addrs[i] = proglen;
34910@@ -879,7 +881,7 @@ void bpf_jit_compile(struct bpf_prog *prog)
34911
34912 void bpf_int_jit_compile(struct bpf_prog *prog)
34913 {
34914- struct bpf_binary_header *header = NULL;
34915+ u8 *header = NULL;
34916 int proglen, oldproglen = 0;
34917 struct jit_context ctx = {};
34918 u8 *image = NULL;
34919@@ -911,7 +913,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34920 if (proglen <= 0) {
34921 image = NULL;
34922 if (header)
34923- module_free(NULL, header);
34924+ module_free_exec(NULL, image);
34925 goto out;
34926 }
34927 if (image) {
34928@@ -935,7 +937,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34929
34930 if (image) {
34931 bpf_flush_icache(header, image + proglen);
34932- set_memory_ro((unsigned long)header, header->pages);
34933 prog->bpf_func = (void *)image;
34934 prog->jited = 1;
34935 }
34936@@ -943,23 +944,15 @@ out:
34937 kfree(addrs);
34938 }
34939
34940-static void bpf_jit_free_deferred(struct work_struct *work)
34941-{
34942- struct bpf_prog *fp = container_of(work, struct bpf_prog, work);
34943- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34944- struct bpf_binary_header *header = (void *)addr;
34945-
34946- set_memory_rw(addr, header->pages);
34947- module_free(NULL, header);
34948- kfree(fp);
34949-}
34950-
34951 void bpf_jit_free(struct bpf_prog *fp)
34952 {
34953- if (fp->jited) {
34954- INIT_WORK(&fp->work, bpf_jit_free_deferred);
34955- schedule_work(&fp->work);
34956- } else {
34957- kfree(fp);
34958- }
34959+ unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34960+
34961+ if (!fp->jited)
34962+ goto free_filter;
34963+
34964+ module_free_exec(NULL, (void *)addr);
34965+
34966+free_filter:
34967+ bpf_prog_unlock_free(fp);
34968 }
34969diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34970index 5d04be5..2beeaa2 100644
34971--- a/arch/x86/oprofile/backtrace.c
34972+++ b/arch/x86/oprofile/backtrace.c
34973@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34974 struct stack_frame_ia32 *fp;
34975 unsigned long bytes;
34976
34977- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34978+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34979 if (bytes != 0)
34980 return NULL;
34981
34982- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34983+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34984
34985 oprofile_add_trace(bufhead[0].return_address);
34986
34987@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34988 struct stack_frame bufhead[2];
34989 unsigned long bytes;
34990
34991- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34992+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34993 if (bytes != 0)
34994 return NULL;
34995
34996@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
34997 {
34998 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
34999
35000- if (!user_mode_vm(regs)) {
35001+ if (!user_mode(regs)) {
35002 unsigned long stack = kernel_stack_pointer(regs);
35003 if (depth)
35004 dump_trace(NULL, regs, (unsigned long *)stack, 0,
35005diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
35006index 379e8bd..6386e09 100644
35007--- a/arch/x86/oprofile/nmi_int.c
35008+++ b/arch/x86/oprofile/nmi_int.c
35009@@ -23,6 +23,7 @@
35010 #include <asm/nmi.h>
35011 #include <asm/msr.h>
35012 #include <asm/apic.h>
35013+#include <asm/pgtable.h>
35014
35015 #include "op_counter.h"
35016 #include "op_x86_model.h"
35017@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
35018 if (ret)
35019 return ret;
35020
35021- if (!model->num_virt_counters)
35022- model->num_virt_counters = model->num_counters;
35023+ if (!model->num_virt_counters) {
35024+ pax_open_kernel();
35025+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
35026+ pax_close_kernel();
35027+ }
35028
35029 mux_init(ops);
35030
35031diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
35032index 50d86c0..7985318 100644
35033--- a/arch/x86/oprofile/op_model_amd.c
35034+++ b/arch/x86/oprofile/op_model_amd.c
35035@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
35036 num_counters = AMD64_NUM_COUNTERS;
35037 }
35038
35039- op_amd_spec.num_counters = num_counters;
35040- op_amd_spec.num_controls = num_counters;
35041- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
35042+ pax_open_kernel();
35043+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
35044+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
35045+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
35046+ pax_close_kernel();
35047
35048 return 0;
35049 }
35050diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
35051index d90528e..0127e2b 100644
35052--- a/arch/x86/oprofile/op_model_ppro.c
35053+++ b/arch/x86/oprofile/op_model_ppro.c
35054@@ -19,6 +19,7 @@
35055 #include <asm/msr.h>
35056 #include <asm/apic.h>
35057 #include <asm/nmi.h>
35058+#include <asm/pgtable.h>
35059
35060 #include "op_x86_model.h"
35061 #include "op_counter.h"
35062@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
35063
35064 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
35065
35066- op_arch_perfmon_spec.num_counters = num_counters;
35067- op_arch_perfmon_spec.num_controls = num_counters;
35068+ pax_open_kernel();
35069+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
35070+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
35071+ pax_close_kernel();
35072 }
35073
35074 static int arch_perfmon_init(struct oprofile_operations *ignore)
35075diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
35076index 71e8a67..6a313bb 100644
35077--- a/arch/x86/oprofile/op_x86_model.h
35078+++ b/arch/x86/oprofile/op_x86_model.h
35079@@ -52,7 +52,7 @@ struct op_x86_model_spec {
35080 void (*switch_ctrl)(struct op_x86_model_spec const *model,
35081 struct op_msrs const * const msrs);
35082 #endif
35083-};
35084+} __do_const;
35085
35086 struct op_counter_config;
35087
35088diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
35089index b9958c3..24229ab 100644
35090--- a/arch/x86/pci/intel_mid_pci.c
35091+++ b/arch/x86/pci/intel_mid_pci.c
35092@@ -250,7 +250,7 @@ int __init intel_mid_pci_init(void)
35093 pci_mmcfg_late_init();
35094 pcibios_enable_irq = intel_mid_pci_irq_enable;
35095 pcibios_disable_irq = intel_mid_pci_irq_disable;
35096- pci_root_ops = intel_mid_pci_ops;
35097+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
35098 pci_soc_mode = 1;
35099 /* Continue with standard init */
35100 return 1;
35101diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
35102index eb500c2..eab9e70 100644
35103--- a/arch/x86/pci/irq.c
35104+++ b/arch/x86/pci/irq.c
35105@@ -51,7 +51,7 @@ struct irq_router {
35106 struct irq_router_handler {
35107 u16 vendor;
35108 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
35109-};
35110+} __do_const;
35111
35112 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
35113 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
35114@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
35115 return 0;
35116 }
35117
35118-static __initdata struct irq_router_handler pirq_routers[] = {
35119+static __initconst const struct irq_router_handler pirq_routers[] = {
35120 { PCI_VENDOR_ID_INTEL, intel_router_probe },
35121 { PCI_VENDOR_ID_AL, ali_router_probe },
35122 { PCI_VENDOR_ID_ITE, ite_router_probe },
35123@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
35124 static void __init pirq_find_router(struct irq_router *r)
35125 {
35126 struct irq_routing_table *rt = pirq_table;
35127- struct irq_router_handler *h;
35128+ const struct irq_router_handler *h;
35129
35130 #ifdef CONFIG_PCI_BIOS
35131 if (!rt->signature) {
35132@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
35133 return 0;
35134 }
35135
35136-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
35137+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
35138 {
35139 .callback = fix_broken_hp_bios_irq9,
35140 .ident = "HP Pavilion N5400 Series Laptop",
35141diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
35142index c77b24a..c979855 100644
35143--- a/arch/x86/pci/pcbios.c
35144+++ b/arch/x86/pci/pcbios.c
35145@@ -79,7 +79,7 @@ union bios32 {
35146 static struct {
35147 unsigned long address;
35148 unsigned short segment;
35149-} bios32_indirect = { 0, __KERNEL_CS };
35150+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
35151
35152 /*
35153 * Returns the entry point for the given service, NULL on error
35154@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
35155 unsigned long length; /* %ecx */
35156 unsigned long entry; /* %edx */
35157 unsigned long flags;
35158+ struct desc_struct d, *gdt;
35159
35160 local_irq_save(flags);
35161- __asm__("lcall *(%%edi); cld"
35162+
35163+ gdt = get_cpu_gdt_table(smp_processor_id());
35164+
35165+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
35166+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
35167+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
35168+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
35169+
35170+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
35171 : "=a" (return_code),
35172 "=b" (address),
35173 "=c" (length),
35174 "=d" (entry)
35175 : "0" (service),
35176 "1" (0),
35177- "D" (&bios32_indirect));
35178+ "D" (&bios32_indirect),
35179+ "r"(__PCIBIOS_DS)
35180+ : "memory");
35181+
35182+ pax_open_kernel();
35183+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
35184+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
35185+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
35186+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
35187+ pax_close_kernel();
35188+
35189 local_irq_restore(flags);
35190
35191 switch (return_code) {
35192- case 0:
35193- return address + entry;
35194- case 0x80: /* Not present */
35195- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
35196- return 0;
35197- default: /* Shouldn't happen */
35198- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
35199- service, return_code);
35200+ case 0: {
35201+ int cpu;
35202+ unsigned char flags;
35203+
35204+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
35205+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
35206+ printk(KERN_WARNING "bios32_service: not valid\n");
35207 return 0;
35208+ }
35209+ address = address + PAGE_OFFSET;
35210+ length += 16UL; /* some BIOSs underreport this... */
35211+ flags = 4;
35212+ if (length >= 64*1024*1024) {
35213+ length >>= PAGE_SHIFT;
35214+ flags |= 8;
35215+ }
35216+
35217+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
35218+ gdt = get_cpu_gdt_table(cpu);
35219+ pack_descriptor(&d, address, length, 0x9b, flags);
35220+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
35221+ pack_descriptor(&d, address, length, 0x93, flags);
35222+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
35223+ }
35224+ return entry;
35225+ }
35226+ case 0x80: /* Not present */
35227+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
35228+ return 0;
35229+ default: /* Shouldn't happen */
35230+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
35231+ service, return_code);
35232+ return 0;
35233 }
35234 }
35235
35236 static struct {
35237 unsigned long address;
35238 unsigned short segment;
35239-} pci_indirect = { 0, __KERNEL_CS };
35240+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
35241
35242-static int pci_bios_present;
35243+static int pci_bios_present __read_only;
35244
35245 static int check_pcibios(void)
35246 {
35247@@ -131,11 +174,13 @@ static int check_pcibios(void)
35248 unsigned long flags, pcibios_entry;
35249
35250 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
35251- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
35252+ pci_indirect.address = pcibios_entry;
35253
35254 local_irq_save(flags);
35255- __asm__(
35256- "lcall *(%%edi); cld\n\t"
35257+ __asm__("movw %w6, %%ds\n\t"
35258+ "lcall *%%ss:(%%edi); cld\n\t"
35259+ "push %%ss\n\t"
35260+ "pop %%ds\n\t"
35261 "jc 1f\n\t"
35262 "xor %%ah, %%ah\n"
35263 "1:"
35264@@ -144,7 +189,8 @@ static int check_pcibios(void)
35265 "=b" (ebx),
35266 "=c" (ecx)
35267 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
35268- "D" (&pci_indirect)
35269+ "D" (&pci_indirect),
35270+ "r" (__PCIBIOS_DS)
35271 : "memory");
35272 local_irq_restore(flags);
35273
35274@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35275
35276 switch (len) {
35277 case 1:
35278- __asm__("lcall *(%%esi); cld\n\t"
35279+ __asm__("movw %w6, %%ds\n\t"
35280+ "lcall *%%ss:(%%esi); cld\n\t"
35281+ "push %%ss\n\t"
35282+ "pop %%ds\n\t"
35283 "jc 1f\n\t"
35284 "xor %%ah, %%ah\n"
35285 "1:"
35286@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35287 : "1" (PCIBIOS_READ_CONFIG_BYTE),
35288 "b" (bx),
35289 "D" ((long)reg),
35290- "S" (&pci_indirect));
35291+ "S" (&pci_indirect),
35292+ "r" (__PCIBIOS_DS));
35293 /*
35294 * Zero-extend the result beyond 8 bits, do not trust the
35295 * BIOS having done it:
35296@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35297 *value &= 0xff;
35298 break;
35299 case 2:
35300- __asm__("lcall *(%%esi); cld\n\t"
35301+ __asm__("movw %w6, %%ds\n\t"
35302+ "lcall *%%ss:(%%esi); cld\n\t"
35303+ "push %%ss\n\t"
35304+ "pop %%ds\n\t"
35305 "jc 1f\n\t"
35306 "xor %%ah, %%ah\n"
35307 "1:"
35308@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35309 : "1" (PCIBIOS_READ_CONFIG_WORD),
35310 "b" (bx),
35311 "D" ((long)reg),
35312- "S" (&pci_indirect));
35313+ "S" (&pci_indirect),
35314+ "r" (__PCIBIOS_DS));
35315 /*
35316 * Zero-extend the result beyond 16 bits, do not trust the
35317 * BIOS having done it:
35318@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35319 *value &= 0xffff;
35320 break;
35321 case 4:
35322- __asm__("lcall *(%%esi); cld\n\t"
35323+ __asm__("movw %w6, %%ds\n\t"
35324+ "lcall *%%ss:(%%esi); cld\n\t"
35325+ "push %%ss\n\t"
35326+ "pop %%ds\n\t"
35327 "jc 1f\n\t"
35328 "xor %%ah, %%ah\n"
35329 "1:"
35330@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35331 : "1" (PCIBIOS_READ_CONFIG_DWORD),
35332 "b" (bx),
35333 "D" ((long)reg),
35334- "S" (&pci_indirect));
35335+ "S" (&pci_indirect),
35336+ "r" (__PCIBIOS_DS));
35337 break;
35338 }
35339
35340@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
35341
35342 switch (len) {
35343 case 1:
35344- __asm__("lcall *(%%esi); cld\n\t"
35345+ __asm__("movw %w6, %%ds\n\t"
35346+ "lcall *%%ss:(%%esi); cld\n\t"
35347+ "push %%ss\n\t"
35348+ "pop %%ds\n\t"
35349 "jc 1f\n\t"
35350 "xor %%ah, %%ah\n"
35351 "1:"
35352@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
35353 "c" (value),
35354 "b" (bx),
35355 "D" ((long)reg),
35356- "S" (&pci_indirect));
35357+ "S" (&pci_indirect),
35358+ "r" (__PCIBIOS_DS));
35359 break;
35360 case 2:
35361- __asm__("lcall *(%%esi); cld\n\t"
35362+ __asm__("movw %w6, %%ds\n\t"
35363+ "lcall *%%ss:(%%esi); cld\n\t"
35364+ "push %%ss\n\t"
35365+ "pop %%ds\n\t"
35366 "jc 1f\n\t"
35367 "xor %%ah, %%ah\n"
35368 "1:"
35369@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
35370 "c" (value),
35371 "b" (bx),
35372 "D" ((long)reg),
35373- "S" (&pci_indirect));
35374+ "S" (&pci_indirect),
35375+ "r" (__PCIBIOS_DS));
35376 break;
35377 case 4:
35378- __asm__("lcall *(%%esi); cld\n\t"
35379+ __asm__("movw %w6, %%ds\n\t"
35380+ "lcall *%%ss:(%%esi); cld\n\t"
35381+ "push %%ss\n\t"
35382+ "pop %%ds\n\t"
35383 "jc 1f\n\t"
35384 "xor %%ah, %%ah\n"
35385 "1:"
35386@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
35387 "c" (value),
35388 "b" (bx),
35389 "D" ((long)reg),
35390- "S" (&pci_indirect));
35391+ "S" (&pci_indirect),
35392+ "r" (__PCIBIOS_DS));
35393 break;
35394 }
35395
35396@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
35397
35398 DBG("PCI: Fetching IRQ routing table... ");
35399 __asm__("push %%es\n\t"
35400+ "movw %w8, %%ds\n\t"
35401 "push %%ds\n\t"
35402 "pop %%es\n\t"
35403- "lcall *(%%esi); cld\n\t"
35404+ "lcall *%%ss:(%%esi); cld\n\t"
35405 "pop %%es\n\t"
35406+ "push %%ss\n\t"
35407+ "pop %%ds\n"
35408 "jc 1f\n\t"
35409 "xor %%ah, %%ah\n"
35410 "1:"
35411@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
35412 "1" (0),
35413 "D" ((long) &opt),
35414 "S" (&pci_indirect),
35415- "m" (opt)
35416+ "m" (opt),
35417+ "r" (__PCIBIOS_DS)
35418 : "memory");
35419 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
35420 if (ret & 0xff00)
35421@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
35422 {
35423 int ret;
35424
35425- __asm__("lcall *(%%esi); cld\n\t"
35426+ __asm__("movw %w5, %%ds\n\t"
35427+ "lcall *%%ss:(%%esi); cld\n\t"
35428+ "push %%ss\n\t"
35429+ "pop %%ds\n"
35430 "jc 1f\n\t"
35431 "xor %%ah, %%ah\n"
35432 "1:"
35433@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
35434 : "0" (PCIBIOS_SET_PCI_HW_INT),
35435 "b" ((dev->bus->number << 8) | dev->devfn),
35436 "c" ((irq << 8) | (pin + 10)),
35437- "S" (&pci_indirect));
35438+ "S" (&pci_indirect),
35439+ "r" (__PCIBIOS_DS));
35440 return !(ret & 0xff00);
35441 }
35442 EXPORT_SYMBOL(pcibios_set_irq_routing);
35443diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
35444index 9ee3491..872192f 100644
35445--- a/arch/x86/platform/efi/efi_32.c
35446+++ b/arch/x86/platform/efi/efi_32.c
35447@@ -59,11 +59,22 @@ void efi_call_phys_prelog(void)
35448 {
35449 struct desc_ptr gdt_descr;
35450
35451+#ifdef CONFIG_PAX_KERNEXEC
35452+ struct desc_struct d;
35453+#endif
35454+
35455 local_irq_save(efi_rt_eflags);
35456
35457 load_cr3(initial_page_table);
35458 __flush_tlb_all();
35459
35460+#ifdef CONFIG_PAX_KERNEXEC
35461+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
35462+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
35463+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
35464+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
35465+#endif
35466+
35467 gdt_descr.address = __pa(get_cpu_gdt_table(0));
35468 gdt_descr.size = GDT_SIZE - 1;
35469 load_gdt(&gdt_descr);
35470@@ -73,11 +84,24 @@ void efi_call_phys_epilog(void)
35471 {
35472 struct desc_ptr gdt_descr;
35473
35474+#ifdef CONFIG_PAX_KERNEXEC
35475+ struct desc_struct d;
35476+
35477+ memset(&d, 0, sizeof d);
35478+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
35479+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
35480+#endif
35481+
35482 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
35483 gdt_descr.size = GDT_SIZE - 1;
35484 load_gdt(&gdt_descr);
35485
35486+#ifdef CONFIG_PAX_PER_CPU_PGD
35487+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
35488+#else
35489 load_cr3(swapper_pg_dir);
35490+#endif
35491+
35492 __flush_tlb_all();
35493
35494 local_irq_restore(efi_rt_eflags);
35495diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
35496index 290d397..e09d270 100644
35497--- a/arch/x86/platform/efi/efi_64.c
35498+++ b/arch/x86/platform/efi/efi_64.c
35499@@ -99,6 +99,11 @@ void __init efi_call_phys_prelog(void)
35500 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
35501 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
35502 }
35503+
35504+#ifdef CONFIG_PAX_PER_CPU_PGD
35505+ load_cr3(swapper_pg_dir);
35506+#endif
35507+
35508 __flush_tlb_all();
35509 }
35510
35511@@ -116,6 +121,11 @@ void __init efi_call_phys_epilog(void)
35512 for (pgd = 0; pgd < n_pgds; pgd++)
35513 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
35514 kfree(save_pgd);
35515+
35516+#ifdef CONFIG_PAX_PER_CPU_PGD
35517+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
35518+#endif
35519+
35520 __flush_tlb_all();
35521 local_irq_restore(efi_flags);
35522 early_code_mapping_set_exec(0);
35523@@ -146,8 +156,23 @@ int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
35524 unsigned npages;
35525 pgd_t *pgd;
35526
35527- if (efi_enabled(EFI_OLD_MEMMAP))
35528+ if (efi_enabled(EFI_OLD_MEMMAP)) {
35529+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
35530+ * able to execute the EFI services.
35531+ */
35532+ if (__supported_pte_mask & _PAGE_NX) {
35533+ unsigned long addr = (unsigned long) __va(0);
35534+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
35535+
35536+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
35537+#ifdef CONFIG_PAX_PER_CPU_PGD
35538+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
35539+#endif
35540+ set_pgd(pgd_offset_k(addr), pe);
35541+ }
35542+
35543 return 0;
35544+ }
35545
35546 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
35547 pgd = __va(efi_scratch.efi_pgt);
35548diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
35549index fbe66e6..eae5e38 100644
35550--- a/arch/x86/platform/efi/efi_stub_32.S
35551+++ b/arch/x86/platform/efi/efi_stub_32.S
35552@@ -6,7 +6,9 @@
35553 */
35554
35555 #include <linux/linkage.h>
35556+#include <linux/init.h>
35557 #include <asm/page_types.h>
35558+#include <asm/segment.h>
35559
35560 /*
35561 * efi_call_phys(void *, ...) is a function with variable parameters.
35562@@ -20,7 +22,7 @@
35563 * service functions will comply with gcc calling convention, too.
35564 */
35565
35566-.text
35567+__INIT
35568 ENTRY(efi_call_phys)
35569 /*
35570 * 0. The function can only be called in Linux kernel. So CS has been
35571@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
35572 * The mapping of lower virtual memory has been created in prelog and
35573 * epilog.
35574 */
35575- movl $1f, %edx
35576- subl $__PAGE_OFFSET, %edx
35577- jmp *%edx
35578+#ifdef CONFIG_PAX_KERNEXEC
35579+ movl $(__KERNEXEC_EFI_DS), %edx
35580+ mov %edx, %ds
35581+ mov %edx, %es
35582+ mov %edx, %ss
35583+ addl $2f,(1f)
35584+ ljmp *(1f)
35585+
35586+__INITDATA
35587+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
35588+.previous
35589+
35590+2:
35591+ subl $2b,(1b)
35592+#else
35593+ jmp 1f-__PAGE_OFFSET
35594 1:
35595+#endif
35596
35597 /*
35598 * 2. Now on the top of stack is the return
35599@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
35600 * parameter 2, ..., param n. To make things easy, we save the return
35601 * address of efi_call_phys in a global variable.
35602 */
35603- popl %edx
35604- movl %edx, saved_return_addr
35605- /* get the function pointer into ECX*/
35606- popl %ecx
35607- movl %ecx, efi_rt_function_ptr
35608- movl $2f, %edx
35609- subl $__PAGE_OFFSET, %edx
35610- pushl %edx
35611+ popl (saved_return_addr)
35612+ popl (efi_rt_function_ptr)
35613
35614 /*
35615 * 3. Clear PG bit in %CR0.
35616@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
35617 /*
35618 * 5. Call the physical function.
35619 */
35620- jmp *%ecx
35621+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
35622
35623-2:
35624 /*
35625 * 6. After EFI runtime service returns, control will return to
35626 * following instruction. We'd better readjust stack pointer first.
35627@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
35628 movl %cr0, %edx
35629 orl $0x80000000, %edx
35630 movl %edx, %cr0
35631- jmp 1f
35632-1:
35633+
35634 /*
35635 * 8. Now restore the virtual mode from flat mode by
35636 * adding EIP with PAGE_OFFSET.
35637 */
35638- movl $1f, %edx
35639- jmp *%edx
35640+#ifdef CONFIG_PAX_KERNEXEC
35641+ movl $(__KERNEL_DS), %edx
35642+ mov %edx, %ds
35643+ mov %edx, %es
35644+ mov %edx, %ss
35645+ ljmp $(__KERNEL_CS),$1f
35646+#else
35647+ jmp 1f+__PAGE_OFFSET
35648+#endif
35649 1:
35650
35651 /*
35652 * 9. Balance the stack. And because EAX contain the return value,
35653 * we'd better not clobber it.
35654 */
35655- leal efi_rt_function_ptr, %edx
35656- movl (%edx), %ecx
35657- pushl %ecx
35658+ pushl (efi_rt_function_ptr)
35659
35660 /*
35661- * 10. Push the saved return address onto the stack and return.
35662+ * 10. Return to the saved return address.
35663 */
35664- leal saved_return_addr, %edx
35665- movl (%edx), %ecx
35666- pushl %ecx
35667- ret
35668+ jmpl *(saved_return_addr)
35669 ENDPROC(efi_call_phys)
35670 .previous
35671
35672-.data
35673+__INITDATA
35674 saved_return_addr:
35675 .long 0
35676 efi_rt_function_ptr:
35677diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35678index 5fcda72..cd4dc41 100644
35679--- a/arch/x86/platform/efi/efi_stub_64.S
35680+++ b/arch/x86/platform/efi/efi_stub_64.S
35681@@ -11,6 +11,7 @@
35682 #include <asm/msr.h>
35683 #include <asm/processor-flags.h>
35684 #include <asm/page_types.h>
35685+#include <asm/alternative-asm.h>
35686
35687 #define SAVE_XMM \
35688 mov %rsp, %rax; \
35689@@ -88,6 +89,7 @@ ENTRY(efi_call)
35690 RESTORE_PGT
35691 addq $48, %rsp
35692 RESTORE_XMM
35693+ pax_force_retaddr 0, 1
35694 ret
35695 ENDPROC(efi_call)
35696
35697@@ -245,8 +247,8 @@ efi_gdt64:
35698 .long 0 /* Filled out by user */
35699 .word 0
35700 .quad 0x0000000000000000 /* NULL descriptor */
35701- .quad 0x00af9a000000ffff /* __KERNEL_CS */
35702- .quad 0x00cf92000000ffff /* __KERNEL_DS */
35703+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
35704+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
35705 .quad 0x0080890000000000 /* TS descriptor */
35706 .quad 0x0000000000000000 /* TS continued */
35707 efi_gdt64_end:
35708diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35709index 1bbedc4..eb795b5 100644
35710--- a/arch/x86/platform/intel-mid/intel-mid.c
35711+++ b/arch/x86/platform/intel-mid/intel-mid.c
35712@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35713 {
35714 };
35715
35716-static void intel_mid_reboot(void)
35717+static void __noreturn intel_mid_reboot(void)
35718 {
35719 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35720+ BUG();
35721 }
35722
35723 static unsigned long __init intel_mid_calibrate_tsc(void)
35724diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35725index 46aa25c..59a68ed 100644
35726--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35727+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35728@@ -10,10 +10,9 @@
35729 */
35730
35731
35732-/* __attribute__((weak)) makes these declarations overridable */
35733 /* For every CPU addition a new get_<cpuname>_ops interface needs
35734 * to be added.
35735 */
35736-extern void *get_penwell_ops(void) __attribute__((weak));
35737-extern void *get_cloverview_ops(void) __attribute__((weak));
35738-extern void *get_tangier_ops(void) __attribute__((weak));
35739+extern const void *get_penwell_ops(void);
35740+extern const void *get_cloverview_ops(void);
35741+extern const void *get_tangier_ops(void);
35742diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35743index 23381d2..8ddc10e 100644
35744--- a/arch/x86/platform/intel-mid/mfld.c
35745+++ b/arch/x86/platform/intel-mid/mfld.c
35746@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35747 pm_power_off = mfld_power_off;
35748 }
35749
35750-void *get_penwell_ops(void)
35751+const void *get_penwell_ops(void)
35752 {
35753 return &penwell_ops;
35754 }
35755
35756-void *get_cloverview_ops(void)
35757+const void *get_cloverview_ops(void)
35758 {
35759 return &penwell_ops;
35760 }
35761diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35762index aaca917..66eadbc 100644
35763--- a/arch/x86/platform/intel-mid/mrfl.c
35764+++ b/arch/x86/platform/intel-mid/mrfl.c
35765@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35766 .arch_setup = tangier_arch_setup,
35767 };
35768
35769-void *get_tangier_ops(void)
35770+const void *get_tangier_ops(void)
35771 {
35772 return &tangier_ops;
35773 }
35774diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35775index d6ee929..3637cb5 100644
35776--- a/arch/x86/platform/olpc/olpc_dt.c
35777+++ b/arch/x86/platform/olpc/olpc_dt.c
35778@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35779 return res;
35780 }
35781
35782-static struct of_pdt_ops prom_olpc_ops __initdata = {
35783+static struct of_pdt_ops prom_olpc_ops __initconst = {
35784 .nextprop = olpc_dt_nextprop,
35785 .getproplen = olpc_dt_getproplen,
35786 .getproperty = olpc_dt_getproperty,
35787diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35788index 6ec7910..ecdbb11 100644
35789--- a/arch/x86/power/cpu.c
35790+++ b/arch/x86/power/cpu.c
35791@@ -137,11 +137,8 @@ static void do_fpu_end(void)
35792 static void fix_processor_context(void)
35793 {
35794 int cpu = smp_processor_id();
35795- struct tss_struct *t = &per_cpu(init_tss, cpu);
35796-#ifdef CONFIG_X86_64
35797- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35798- tss_desc tss;
35799-#endif
35800+ struct tss_struct *t = init_tss + cpu;
35801+
35802 set_tss_desc(cpu, t); /*
35803 * This just modifies memory; should not be
35804 * necessary. But... This is necessary, because
35805@@ -150,10 +147,6 @@ static void fix_processor_context(void)
35806 */
35807
35808 #ifdef CONFIG_X86_64
35809- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35810- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35811- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35812-
35813 syscall_init(); /* This sets MSR_*STAR and related */
35814 #endif
35815 load_TR_desc(); /* This does ltr */
35816diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35817index bad628a..a102610 100644
35818--- a/arch/x86/realmode/init.c
35819+++ b/arch/x86/realmode/init.c
35820@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35821 __va(real_mode_header->trampoline_header);
35822
35823 #ifdef CONFIG_X86_32
35824- trampoline_header->start = __pa_symbol(startup_32_smp);
35825+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35826+
35827+#ifdef CONFIG_PAX_KERNEXEC
35828+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35829+#endif
35830+
35831+ trampoline_header->boot_cs = __BOOT_CS;
35832 trampoline_header->gdt_limit = __BOOT_DS + 7;
35833 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35834 #else
35835@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35836 *trampoline_cr4_features = read_cr4();
35837
35838 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35839- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35840+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35841 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35842 #endif
35843 }
35844diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35845index 7c0d7be..d24dc88 100644
35846--- a/arch/x86/realmode/rm/Makefile
35847+++ b/arch/x86/realmode/rm/Makefile
35848@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35849
35850 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35851 -I$(srctree)/arch/x86/boot
35852+ifdef CONSTIFY_PLUGIN
35853+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35854+endif
35855 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35856 GCOV_PROFILE := n
35857diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35858index a28221d..93c40f1 100644
35859--- a/arch/x86/realmode/rm/header.S
35860+++ b/arch/x86/realmode/rm/header.S
35861@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35862 #endif
35863 /* APM/BIOS reboot */
35864 .long pa_machine_real_restart_asm
35865-#ifdef CONFIG_X86_64
35866+#ifdef CONFIG_X86_32
35867+ .long __KERNEL_CS
35868+#else
35869 .long __KERNEL32_CS
35870 #endif
35871 END(real_mode_header)
35872diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35873index 48ddd76..c26749f 100644
35874--- a/arch/x86/realmode/rm/trampoline_32.S
35875+++ b/arch/x86/realmode/rm/trampoline_32.S
35876@@ -24,6 +24,12 @@
35877 #include <asm/page_types.h>
35878 #include "realmode.h"
35879
35880+#ifdef CONFIG_PAX_KERNEXEC
35881+#define ta(X) (X)
35882+#else
35883+#define ta(X) (pa_ ## X)
35884+#endif
35885+
35886 .text
35887 .code16
35888
35889@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35890
35891 cli # We should be safe anyway
35892
35893- movl tr_start, %eax # where we need to go
35894-
35895 movl $0xA5A5A5A5, trampoline_status
35896 # write marker for master knows we're running
35897
35898@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35899 movw $1, %dx # protected mode (PE) bit
35900 lmsw %dx # into protected mode
35901
35902- ljmpl $__BOOT_CS, $pa_startup_32
35903+ ljmpl *(trampoline_header)
35904
35905 .section ".text32","ax"
35906 .code32
35907@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35908 .balign 8
35909 GLOBAL(trampoline_header)
35910 tr_start: .space 4
35911- tr_gdt_pad: .space 2
35912+ tr_boot_cs: .space 2
35913 tr_gdt: .space 6
35914 END(trampoline_header)
35915
35916diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35917index dac7b20..72dbaca 100644
35918--- a/arch/x86/realmode/rm/trampoline_64.S
35919+++ b/arch/x86/realmode/rm/trampoline_64.S
35920@@ -93,6 +93,7 @@ ENTRY(startup_32)
35921 movl %edx, %gs
35922
35923 movl pa_tr_cr4, %eax
35924+ andl $~X86_CR4_PCIDE, %eax
35925 movl %eax, %cr4 # Enable PAE mode
35926
35927 # Setup trampoline 4 level pagetables
35928@@ -106,7 +107,7 @@ ENTRY(startup_32)
35929 wrmsr
35930
35931 # Enable paging and in turn activate Long Mode
35932- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35933+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35934 movl %eax, %cr0
35935
35936 /*
35937diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35938index 9e7e147..25a4158 100644
35939--- a/arch/x86/realmode/rm/wakeup_asm.S
35940+++ b/arch/x86/realmode/rm/wakeup_asm.S
35941@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35942 lgdtl pmode_gdt
35943
35944 /* This really couldn't... */
35945- movl pmode_entry, %eax
35946 movl pmode_cr0, %ecx
35947 movl %ecx, %cr0
35948- ljmpl $__KERNEL_CS, $pa_startup_32
35949- /* -> jmp *%eax in trampoline_32.S */
35950+
35951+ ljmpl *pmode_entry
35952 #else
35953 jmp trampoline_start
35954 #endif
35955diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35956index 604a37e..e49702a 100644
35957--- a/arch/x86/tools/Makefile
35958+++ b/arch/x86/tools/Makefile
35959@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35960
35961 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35962
35963-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35964+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35965 hostprogs-y += relocs
35966 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35967 PHONY += relocs
35968diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35969index bbb1d22..e505211 100644
35970--- a/arch/x86/tools/relocs.c
35971+++ b/arch/x86/tools/relocs.c
35972@@ -1,5 +1,7 @@
35973 /* This is included from relocs_32/64.c */
35974
35975+#include "../../../include/generated/autoconf.h"
35976+
35977 #define ElfW(type) _ElfW(ELF_BITS, type)
35978 #define _ElfW(bits, type) __ElfW(bits, type)
35979 #define __ElfW(bits, type) Elf##bits##_##type
35980@@ -11,6 +13,7 @@
35981 #define Elf_Sym ElfW(Sym)
35982
35983 static Elf_Ehdr ehdr;
35984+static Elf_Phdr *phdr;
35985
35986 struct relocs {
35987 uint32_t *offset;
35988@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
35989 }
35990 }
35991
35992+static void read_phdrs(FILE *fp)
35993+{
35994+ unsigned int i;
35995+
35996+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
35997+ if (!phdr) {
35998+ die("Unable to allocate %d program headers\n",
35999+ ehdr.e_phnum);
36000+ }
36001+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
36002+ die("Seek to %d failed: %s\n",
36003+ ehdr.e_phoff, strerror(errno));
36004+ }
36005+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
36006+ die("Cannot read ELF program headers: %s\n",
36007+ strerror(errno));
36008+ }
36009+ for(i = 0; i < ehdr.e_phnum; i++) {
36010+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
36011+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
36012+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
36013+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
36014+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
36015+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
36016+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
36017+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
36018+ }
36019+
36020+}
36021+
36022 static void read_shdrs(FILE *fp)
36023 {
36024- int i;
36025+ unsigned int i;
36026 Elf_Shdr shdr;
36027
36028 secs = calloc(ehdr.e_shnum, sizeof(struct section));
36029@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
36030
36031 static void read_strtabs(FILE *fp)
36032 {
36033- int i;
36034+ unsigned int i;
36035 for (i = 0; i < ehdr.e_shnum; i++) {
36036 struct section *sec = &secs[i];
36037 if (sec->shdr.sh_type != SHT_STRTAB) {
36038@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
36039
36040 static void read_symtabs(FILE *fp)
36041 {
36042- int i,j;
36043+ unsigned int i,j;
36044 for (i = 0; i < ehdr.e_shnum; i++) {
36045 struct section *sec = &secs[i];
36046 if (sec->shdr.sh_type != SHT_SYMTAB) {
36047@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
36048 }
36049
36050
36051-static void read_relocs(FILE *fp)
36052+static void read_relocs(FILE *fp, int use_real_mode)
36053 {
36054- int i,j;
36055+ unsigned int i,j;
36056+ uint32_t base;
36057+
36058 for (i = 0; i < ehdr.e_shnum; i++) {
36059 struct section *sec = &secs[i];
36060 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36061@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
36062 die("Cannot read symbol table: %s\n",
36063 strerror(errno));
36064 }
36065+ base = 0;
36066+
36067+#ifdef CONFIG_X86_32
36068+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
36069+ if (phdr[j].p_type != PT_LOAD )
36070+ continue;
36071+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
36072+ continue;
36073+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
36074+ break;
36075+ }
36076+#endif
36077+
36078 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
36079 Elf_Rel *rel = &sec->reltab[j];
36080- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
36081+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
36082 rel->r_info = elf_xword_to_cpu(rel->r_info);
36083 #if (SHT_REL_TYPE == SHT_RELA)
36084 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
36085@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
36086
36087 static void print_absolute_symbols(void)
36088 {
36089- int i;
36090+ unsigned int i;
36091 const char *format;
36092
36093 if (ELF_BITS == 64)
36094@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
36095 for (i = 0; i < ehdr.e_shnum; i++) {
36096 struct section *sec = &secs[i];
36097 char *sym_strtab;
36098- int j;
36099+ unsigned int j;
36100
36101 if (sec->shdr.sh_type != SHT_SYMTAB) {
36102 continue;
36103@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
36104
36105 static void print_absolute_relocs(void)
36106 {
36107- int i, printed = 0;
36108+ unsigned int i, printed = 0;
36109 const char *format;
36110
36111 if (ELF_BITS == 64)
36112@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
36113 struct section *sec_applies, *sec_symtab;
36114 char *sym_strtab;
36115 Elf_Sym *sh_symtab;
36116- int j;
36117+ unsigned int j;
36118 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36119 continue;
36120 }
36121@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
36122 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
36123 Elf_Sym *sym, const char *symname))
36124 {
36125- int i;
36126+ unsigned int i;
36127 /* Walk through the relocations */
36128 for (i = 0; i < ehdr.e_shnum; i++) {
36129 char *sym_strtab;
36130 Elf_Sym *sh_symtab;
36131 struct section *sec_applies, *sec_symtab;
36132- int j;
36133+ unsigned int j;
36134 struct section *sec = &secs[i];
36135
36136 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36137@@ -822,6 +870,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
36138 {
36139 unsigned r_type = ELF32_R_TYPE(rel->r_info);
36140 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
36141+ char *sym_strtab = sec->link->link->strtab;
36142+
36143+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
36144+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
36145+ return 0;
36146+
36147+#ifdef CONFIG_PAX_KERNEXEC
36148+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
36149+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
36150+ return 0;
36151+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
36152+ return 0;
36153+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
36154+ return 0;
36155+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
36156+ return 0;
36157+#endif
36158
36159 switch (r_type) {
36160 case R_386_NONE:
36161@@ -960,7 +1025,7 @@ static int write32_as_text(uint32_t v, FILE *f)
36162
36163 static void emit_relocs(int as_text, int use_real_mode)
36164 {
36165- int i;
36166+ unsigned int i;
36167 int (*write_reloc)(uint32_t, FILE *) = write32;
36168 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
36169 const char *symname);
36170@@ -1060,10 +1125,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
36171 {
36172 regex_init(use_real_mode);
36173 read_ehdr(fp);
36174+ read_phdrs(fp);
36175 read_shdrs(fp);
36176 read_strtabs(fp);
36177 read_symtabs(fp);
36178- read_relocs(fp);
36179+ read_relocs(fp, use_real_mode);
36180 if (ELF_BITS == 64)
36181 percpu_init();
36182 if (show_absolute_syms) {
36183diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
36184index f40281e..92728c9 100644
36185--- a/arch/x86/um/mem_32.c
36186+++ b/arch/x86/um/mem_32.c
36187@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
36188 gate_vma.vm_start = FIXADDR_USER_START;
36189 gate_vma.vm_end = FIXADDR_USER_END;
36190 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
36191- gate_vma.vm_page_prot = __P101;
36192+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
36193
36194 return 0;
36195 }
36196diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
36197index 80ffa5b..a33bd15 100644
36198--- a/arch/x86/um/tls_32.c
36199+++ b/arch/x86/um/tls_32.c
36200@@ -260,7 +260,7 @@ out:
36201 if (unlikely(task == current &&
36202 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
36203 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
36204- "without flushed TLS.", current->pid);
36205+ "without flushed TLS.", task_pid_nr(current));
36206 }
36207
36208 return 0;
36209diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
36210index 5a4affe..9e2d522 100644
36211--- a/arch/x86/vdso/Makefile
36212+++ b/arch/x86/vdso/Makefile
36213@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
36214 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
36215 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
36216
36217-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
36218+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
36219 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
36220 GCOV_PROFILE := n
36221
36222diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
36223index e904c27..b9eaa03 100644
36224--- a/arch/x86/vdso/vdso32-setup.c
36225+++ b/arch/x86/vdso/vdso32-setup.c
36226@@ -14,6 +14,7 @@
36227 #include <asm/cpufeature.h>
36228 #include <asm/processor.h>
36229 #include <asm/vdso.h>
36230+#include <asm/mman.h>
36231
36232 #ifdef CONFIG_COMPAT_VDSO
36233 #define VDSO_DEFAULT 0
36234diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
36235index 970463b..da82d3e 100644
36236--- a/arch/x86/vdso/vma.c
36237+++ b/arch/x86/vdso/vma.c
36238@@ -16,10 +16,9 @@
36239 #include <asm/vdso.h>
36240 #include <asm/page.h>
36241 #include <asm/hpet.h>
36242+#include <asm/mman.h>
36243
36244 #if defined(CONFIG_X86_64)
36245-unsigned int __read_mostly vdso64_enabled = 1;
36246-
36247 extern unsigned short vdso_sync_cpuid;
36248 #endif
36249
36250@@ -101,6 +100,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
36251 .pages = no_pages,
36252 };
36253
36254+#ifdef CONFIG_PAX_RANDMMAP
36255+ if (mm->pax_flags & MF_PAX_RANDMMAP)
36256+ calculate_addr = false;
36257+#endif
36258+
36259 if (calculate_addr) {
36260 addr = vdso_addr(current->mm->start_stack,
36261 image->size - image->sym_vvar_start);
36262@@ -111,14 +115,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
36263 down_write(&mm->mmap_sem);
36264
36265 addr = get_unmapped_area(NULL, addr,
36266- image->size - image->sym_vvar_start, 0, 0);
36267+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
36268 if (IS_ERR_VALUE(addr)) {
36269 ret = addr;
36270 goto up_fail;
36271 }
36272
36273 text_start = addr - image->sym_vvar_start;
36274- current->mm->context.vdso = (void __user *)text_start;
36275+ mm->context.vdso = text_start;
36276
36277 /*
36278 * MAYWRITE to allow gdb to COW and set breakpoints
36279@@ -163,15 +167,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
36280 hpet_address >> PAGE_SHIFT,
36281 PAGE_SIZE,
36282 pgprot_noncached(PAGE_READONLY));
36283-
36284- if (ret)
36285- goto up_fail;
36286 }
36287 #endif
36288
36289 up_fail:
36290 if (ret)
36291- current->mm->context.vdso = NULL;
36292+ current->mm->context.vdso = 0;
36293
36294 up_write(&mm->mmap_sem);
36295 return ret;
36296@@ -191,8 +192,8 @@ static int load_vdso32(void)
36297
36298 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
36299 current_thread_info()->sysenter_return =
36300- current->mm->context.vdso +
36301- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
36302+ (void __force_user *)(current->mm->context.vdso +
36303+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
36304
36305 return 0;
36306 }
36307@@ -201,9 +202,6 @@ static int load_vdso32(void)
36308 #ifdef CONFIG_X86_64
36309 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
36310 {
36311- if (!vdso64_enabled)
36312- return 0;
36313-
36314 return map_vdso(&vdso_image_64, true);
36315 }
36316
36317@@ -212,12 +210,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
36318 int uses_interp)
36319 {
36320 #ifdef CONFIG_X86_X32_ABI
36321- if (test_thread_flag(TIF_X32)) {
36322- if (!vdso64_enabled)
36323- return 0;
36324-
36325+ if (test_thread_flag(TIF_X32))
36326 return map_vdso(&vdso_image_x32, true);
36327- }
36328 #endif
36329
36330 return load_vdso32();
36331@@ -229,12 +223,3 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
36332 return load_vdso32();
36333 }
36334 #endif
36335-
36336-#ifdef CONFIG_X86_64
36337-static __init int vdso_setup(char *s)
36338-{
36339- vdso64_enabled = simple_strtoul(s, NULL, 0);
36340- return 0;
36341-}
36342-__setup("vdso=", vdso_setup);
36343-#endif
36344diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
36345index e88fda8..76ce7ce 100644
36346--- a/arch/x86/xen/Kconfig
36347+++ b/arch/x86/xen/Kconfig
36348@@ -9,6 +9,7 @@ config XEN
36349 select XEN_HAVE_PVMMU
36350 depends on X86_64 || (X86_32 && X86_PAE)
36351 depends on X86_TSC
36352+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
36353 help
36354 This is the Linux Xen port. Enabling this will allow the
36355 kernel to boot in a paravirtualized environment under the
36356diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
36357index c0cb11f..bed56ff 100644
36358--- a/arch/x86/xen/enlighten.c
36359+++ b/arch/x86/xen/enlighten.c
36360@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
36361
36362 struct shared_info xen_dummy_shared_info;
36363
36364-void *xen_initial_gdt;
36365-
36366 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
36367 __read_mostly int xen_have_vector_callback;
36368 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
36369@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
36370 {
36371 unsigned long va = dtr->address;
36372 unsigned int size = dtr->size + 1;
36373- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
36374- unsigned long frames[pages];
36375+ unsigned long frames[65536 / PAGE_SIZE];
36376 int f;
36377
36378 /*
36379@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
36380 {
36381 unsigned long va = dtr->address;
36382 unsigned int size = dtr->size + 1;
36383- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
36384- unsigned long frames[pages];
36385+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
36386 int f;
36387
36388 /*
36389@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
36390 * 8-byte entries, or 16 4k pages..
36391 */
36392
36393- BUG_ON(size > 65536);
36394+ BUG_ON(size > GDT_SIZE);
36395 BUG_ON(va & ~PAGE_MASK);
36396
36397 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
36398@@ -989,7 +985,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
36399 return 0;
36400 }
36401
36402-static void set_xen_basic_apic_ops(void)
36403+static void __init set_xen_basic_apic_ops(void)
36404 {
36405 apic->read = xen_apic_read;
36406 apic->write = xen_apic_write;
36407@@ -1295,30 +1291,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
36408 #endif
36409 };
36410
36411-static void xen_reboot(int reason)
36412+static __noreturn void xen_reboot(int reason)
36413 {
36414 struct sched_shutdown r = { .reason = reason };
36415
36416- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
36417- BUG();
36418+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
36419+ BUG();
36420 }
36421
36422-static void xen_restart(char *msg)
36423+static __noreturn void xen_restart(char *msg)
36424 {
36425 xen_reboot(SHUTDOWN_reboot);
36426 }
36427
36428-static void xen_emergency_restart(void)
36429+static __noreturn void xen_emergency_restart(void)
36430 {
36431 xen_reboot(SHUTDOWN_reboot);
36432 }
36433
36434-static void xen_machine_halt(void)
36435+static __noreturn void xen_machine_halt(void)
36436 {
36437 xen_reboot(SHUTDOWN_poweroff);
36438 }
36439
36440-static void xen_machine_power_off(void)
36441+static __noreturn void xen_machine_power_off(void)
36442 {
36443 if (pm_power_off)
36444 pm_power_off();
36445@@ -1568,7 +1564,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
36446 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
36447
36448 /* Work out if we support NX */
36449- x86_configure_nx();
36450+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
36451+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
36452+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
36453+ unsigned l, h;
36454+
36455+ __supported_pte_mask |= _PAGE_NX;
36456+ rdmsr(MSR_EFER, l, h);
36457+ l |= EFER_NX;
36458+ wrmsr(MSR_EFER, l, h);
36459+ }
36460+#endif
36461
36462 /* Get mfn list */
36463 xen_build_dynamic_phys_to_machine();
36464@@ -1596,13 +1602,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
36465
36466 machine_ops = xen_machine_ops;
36467
36468- /*
36469- * The only reliable way to retain the initial address of the
36470- * percpu gdt_page is to remember it here, so we can go and
36471- * mark it RW later, when the initial percpu area is freed.
36472- */
36473- xen_initial_gdt = &per_cpu(gdt_page, 0);
36474-
36475 xen_smp_init();
36476
36477 #ifdef CONFIG_ACPI_NUMA
36478diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
36479index 16fb009..02b7801 100644
36480--- a/arch/x86/xen/mmu.c
36481+++ b/arch/x86/xen/mmu.c
36482@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
36483 return val;
36484 }
36485
36486-static pteval_t pte_pfn_to_mfn(pteval_t val)
36487+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
36488 {
36489 if (val & _PAGE_PRESENT) {
36490 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
36491@@ -1904,7 +1904,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
36492 * L3_k[511] -> level2_fixmap_pgt */
36493 convert_pfn_mfn(level3_kernel_pgt);
36494
36495+ convert_pfn_mfn(level3_vmalloc_start_pgt);
36496+ convert_pfn_mfn(level3_vmalloc_end_pgt);
36497+ convert_pfn_mfn(level3_vmemmap_pgt);
36498 /* L3_k[511][506] -> level1_fixmap_pgt */
36499+ /* L3_k[511][507] -> level1_vsyscall_pgt */
36500 convert_pfn_mfn(level2_fixmap_pgt);
36501 }
36502 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
36503@@ -1929,11 +1933,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
36504 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
36505 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
36506 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
36507+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
36508+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
36509+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
36510 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
36511 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
36512+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
36513 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
36514 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
36515 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
36516+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
36517
36518 /* Pin down new L4 */
36519 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
36520@@ -2117,6 +2126,7 @@ static void __init xen_post_allocator_init(void)
36521 pv_mmu_ops.set_pud = xen_set_pud;
36522 #if PAGETABLE_LEVELS == 4
36523 pv_mmu_ops.set_pgd = xen_set_pgd;
36524+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
36525 #endif
36526
36527 /* This will work as long as patching hasn't happened yet
36528@@ -2195,6 +2205,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
36529 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
36530 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
36531 .set_pgd = xen_set_pgd_hyper,
36532+ .set_pgd_batched = xen_set_pgd_hyper,
36533
36534 .alloc_pud = xen_alloc_pmd_init,
36535 .release_pud = xen_release_pmd_init,
36536diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
36537index 7005974..54fb05f 100644
36538--- a/arch/x86/xen/smp.c
36539+++ b/arch/x86/xen/smp.c
36540@@ -283,17 +283,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
36541
36542 if (xen_pv_domain()) {
36543 if (!xen_feature(XENFEAT_writable_page_tables))
36544- /* We've switched to the "real" per-cpu gdt, so make
36545- * sure the old memory can be recycled. */
36546- make_lowmem_page_readwrite(xen_initial_gdt);
36547-
36548 #ifdef CONFIG_X86_32
36549 /*
36550 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
36551 * expects __USER_DS
36552 */
36553- loadsegment(ds, __USER_DS);
36554- loadsegment(es, __USER_DS);
36555+ loadsegment(ds, __KERNEL_DS);
36556+ loadsegment(es, __KERNEL_DS);
36557 #endif
36558
36559 xen_filter_cpu_maps();
36560@@ -372,7 +368,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36561 #ifdef CONFIG_X86_32
36562 /* Note: PVH is not yet supported on x86_32. */
36563 ctxt->user_regs.fs = __KERNEL_PERCPU;
36564- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
36565+ savesegment(gs, ctxt->user_regs.gs);
36566 #endif
36567 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
36568
36569@@ -381,8 +377,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36570 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
36571 ctxt->flags = VGCF_IN_KERNEL;
36572 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
36573- ctxt->user_regs.ds = __USER_DS;
36574- ctxt->user_regs.es = __USER_DS;
36575+ ctxt->user_regs.ds = __KERNEL_DS;
36576+ ctxt->user_regs.es = __KERNEL_DS;
36577 ctxt->user_regs.ss = __KERNEL_DS;
36578
36579 xen_copy_trap_info(ctxt->trap_ctxt);
36580@@ -437,14 +433,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
36581 int rc;
36582
36583 per_cpu(current_task, cpu) = idle;
36584+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
36585 #ifdef CONFIG_X86_32
36586 irq_ctx_init(cpu);
36587 #else
36588 clear_tsk_thread_flag(idle, TIF_FORK);
36589 #endif
36590- per_cpu(kernel_stack, cpu) =
36591- (unsigned long)task_stack_page(idle) -
36592- KERNEL_STACK_OFFSET + THREAD_SIZE;
36593+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
36594
36595 xen_setup_runstate_info(cpu);
36596 xen_setup_timer(cpu);
36597@@ -720,7 +715,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
36598
36599 void __init xen_smp_init(void)
36600 {
36601- smp_ops = xen_smp_ops;
36602+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
36603 xen_fill_possible_map();
36604 }
36605
36606diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
36607index fd92a64..1f72641 100644
36608--- a/arch/x86/xen/xen-asm_32.S
36609+++ b/arch/x86/xen/xen-asm_32.S
36610@@ -99,7 +99,7 @@ ENTRY(xen_iret)
36611 pushw %fs
36612 movl $(__KERNEL_PERCPU), %eax
36613 movl %eax, %fs
36614- movl %fs:xen_vcpu, %eax
36615+ mov PER_CPU_VAR(xen_vcpu), %eax
36616 POP_FS
36617 #else
36618 movl %ss:xen_vcpu, %eax
36619diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36620index 485b695..fda3e7c 100644
36621--- a/arch/x86/xen/xen-head.S
36622+++ b/arch/x86/xen/xen-head.S
36623@@ -39,6 +39,17 @@ ENTRY(startup_xen)
36624 #ifdef CONFIG_X86_32
36625 mov %esi,xen_start_info
36626 mov $init_thread_union+THREAD_SIZE,%esp
36627+#ifdef CONFIG_SMP
36628+ movl $cpu_gdt_table,%edi
36629+ movl $__per_cpu_load,%eax
36630+ movw %ax,__KERNEL_PERCPU + 2(%edi)
36631+ rorl $16,%eax
36632+ movb %al,__KERNEL_PERCPU + 4(%edi)
36633+ movb %ah,__KERNEL_PERCPU + 7(%edi)
36634+ movl $__per_cpu_end - 1,%eax
36635+ subl $__per_cpu_start,%eax
36636+ movw %ax,__KERNEL_PERCPU + 0(%edi)
36637+#endif
36638 #else
36639 mov %rsi,xen_start_info
36640 mov $init_thread_union+THREAD_SIZE,%rsp
36641diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36642index 28c7e0b..2acfec7 100644
36643--- a/arch/x86/xen/xen-ops.h
36644+++ b/arch/x86/xen/xen-ops.h
36645@@ -10,8 +10,6 @@
36646 extern const char xen_hypervisor_callback[];
36647 extern const char xen_failsafe_callback[];
36648
36649-extern void *xen_initial_gdt;
36650-
36651 struct trap_info;
36652 void xen_copy_trap_info(struct trap_info *traps);
36653
36654diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36655index 525bd3d..ef888b1 100644
36656--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36657+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36658@@ -119,9 +119,9 @@
36659 ----------------------------------------------------------------------*/
36660
36661 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36662-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36663 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36664 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36665+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36666
36667 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36668 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36669diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36670index 2f33760..835e50a 100644
36671--- a/arch/xtensa/variants/fsf/include/variant/core.h
36672+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36673@@ -11,6 +11,7 @@
36674 #ifndef _XTENSA_CORE_H
36675 #define _XTENSA_CORE_H
36676
36677+#include <linux/const.h>
36678
36679 /****************************************************************************
36680 Parameters Useful for Any Code, USER or PRIVILEGED
36681@@ -112,9 +113,9 @@
36682 ----------------------------------------------------------------------*/
36683
36684 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36685-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36686 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36687 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36688+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36689
36690 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36691 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
36692diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
36693index af00795..2bb8105 100644
36694--- a/arch/xtensa/variants/s6000/include/variant/core.h
36695+++ b/arch/xtensa/variants/s6000/include/variant/core.h
36696@@ -11,6 +11,7 @@
36697 #ifndef _XTENSA_CORE_CONFIGURATION_H
36698 #define _XTENSA_CORE_CONFIGURATION_H
36699
36700+#include <linux/const.h>
36701
36702 /****************************************************************************
36703 Parameters Useful for Any Code, USER or PRIVILEGED
36704@@ -118,9 +119,9 @@
36705 ----------------------------------------------------------------------*/
36706
36707 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36708-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36709 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36710 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36711+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36712
36713 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
36714 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
36715diff --git a/block/bio.c b/block/bio.c
36716index 3e6331d..f970433 100644
36717--- a/block/bio.c
36718+++ b/block/bio.c
36719@@ -1160,7 +1160,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36720 /*
36721 * Overflow, abort
36722 */
36723- if (end < start)
36724+ if (end < start || end - start > INT_MAX - nr_pages)
36725 return ERR_PTR(-EINVAL);
36726
36727 nr_pages += end - start;
36728@@ -1294,7 +1294,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
36729 /*
36730 * Overflow, abort
36731 */
36732- if (end < start)
36733+ if (end < start || end - start > INT_MAX - nr_pages)
36734 return ERR_PTR(-EINVAL);
36735
36736 nr_pages += end - start;
36737@@ -1556,7 +1556,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
36738 const int read = bio_data_dir(bio) == READ;
36739 struct bio_map_data *bmd = bio->bi_private;
36740 int i;
36741- char *p = bmd->sgvecs[0].iov_base;
36742+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
36743
36744 bio_for_each_segment_all(bvec, bio, i) {
36745 char *addr = page_address(bvec->bv_page);
36746diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
36747index e17da94..e01cce1 100644
36748--- a/block/blk-cgroup.c
36749+++ b/block/blk-cgroup.c
36750@@ -822,7 +822,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
36751 static struct cgroup_subsys_state *
36752 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
36753 {
36754- static atomic64_t id_seq = ATOMIC64_INIT(0);
36755+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
36756 struct blkcg *blkcg;
36757
36758 if (!parent_css) {
36759@@ -836,7 +836,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
36760
36761 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
36762 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
36763- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
36764+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
36765 done:
36766 spin_lock_init(&blkcg->lock);
36767 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
36768diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36769index 0736729..2ec3b48 100644
36770--- a/block/blk-iopoll.c
36771+++ b/block/blk-iopoll.c
36772@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36773 }
36774 EXPORT_SYMBOL(blk_iopoll_complete);
36775
36776-static void blk_iopoll_softirq(struct softirq_action *h)
36777+static __latent_entropy void blk_iopoll_softirq(void)
36778 {
36779 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36780 int rearm = 0, budget = blk_iopoll_budget;
36781diff --git a/block/blk-map.c b/block/blk-map.c
36782index f890d43..97b0482 100644
36783--- a/block/blk-map.c
36784+++ b/block/blk-map.c
36785@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36786 if (!len || !kbuf)
36787 return -EINVAL;
36788
36789- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36790+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36791 if (do_copy)
36792 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36793 else
36794diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36795index 53b1737..08177d2e 100644
36796--- a/block/blk-softirq.c
36797+++ b/block/blk-softirq.c
36798@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36799 * Softirq action handler - move entries to local list and loop over them
36800 * while passing them to the queue registered handler.
36801 */
36802-static void blk_done_softirq(struct softirq_action *h)
36803+static __latent_entropy void blk_done_softirq(void)
36804 {
36805 struct list_head *cpu_list, local_list;
36806
36807diff --git a/block/bsg.c b/block/bsg.c
36808index ff46add..c4ba8ee 100644
36809--- a/block/bsg.c
36810+++ b/block/bsg.c
36811@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36812 struct sg_io_v4 *hdr, struct bsg_device *bd,
36813 fmode_t has_write_perm)
36814 {
36815+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36816+ unsigned char *cmdptr;
36817+
36818 if (hdr->request_len > BLK_MAX_CDB) {
36819 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36820 if (!rq->cmd)
36821 return -ENOMEM;
36822- }
36823+ cmdptr = rq->cmd;
36824+ } else
36825+ cmdptr = tmpcmd;
36826
36827- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36828+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36829 hdr->request_len))
36830 return -EFAULT;
36831
36832+ if (cmdptr != rq->cmd)
36833+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36834+
36835 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36836 if (blk_verify_command(rq->cmd, has_write_perm))
36837 return -EPERM;
36838diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36839index 18b282c..050dbe5 100644
36840--- a/block/compat_ioctl.c
36841+++ b/block/compat_ioctl.c
36842@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36843 cgc = compat_alloc_user_space(sizeof(*cgc));
36844 cgc32 = compat_ptr(arg);
36845
36846- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36847+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36848 get_user(data, &cgc32->buffer) ||
36849 put_user(compat_ptr(data), &cgc->buffer) ||
36850 copy_in_user(&cgc->buflen, &cgc32->buflen,
36851@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36852 err |= __get_user(f->spec1, &uf->spec1);
36853 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36854 err |= __get_user(name, &uf->name);
36855- f->name = compat_ptr(name);
36856+ f->name = (void __force_kernel *)compat_ptr(name);
36857 if (err) {
36858 err = -EFAULT;
36859 goto out;
36860diff --git a/block/genhd.c b/block/genhd.c
36861index e6723bd..703e4ac 100644
36862--- a/block/genhd.c
36863+++ b/block/genhd.c
36864@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36865
36866 /*
36867 * Register device numbers dev..(dev+range-1)
36868- * range must be nonzero
36869+ * Noop if @range is zero.
36870 * The hash chain is sorted on range, so that subranges can override.
36871 */
36872 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36873 struct kobject *(*probe)(dev_t, int *, void *),
36874 int (*lock)(dev_t, void *), void *data)
36875 {
36876- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36877+ if (range)
36878+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36879 }
36880
36881 EXPORT_SYMBOL(blk_register_region);
36882
36883+/* undo blk_register_region(), noop if @range is zero */
36884 void blk_unregister_region(dev_t devt, unsigned long range)
36885 {
36886- kobj_unmap(bdev_map, devt, range);
36887+ if (range)
36888+ kobj_unmap(bdev_map, devt, range);
36889 }
36890
36891 EXPORT_SYMBOL(blk_unregister_region);
36892diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36893index 56d08fd..2e07090 100644
36894--- a/block/partitions/efi.c
36895+++ b/block/partitions/efi.c
36896@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36897 if (!gpt)
36898 return NULL;
36899
36900+ if (!le32_to_cpu(gpt->num_partition_entries))
36901+ return NULL;
36902+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36903+ if (!pte)
36904+ return NULL;
36905+
36906 count = le32_to_cpu(gpt->num_partition_entries) *
36907 le32_to_cpu(gpt->sizeof_partition_entry);
36908- if (!count)
36909- return NULL;
36910- pte = kmalloc(count, GFP_KERNEL);
36911- if (!pte)
36912- return NULL;
36913-
36914 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36915 (u8 *) pte, count) < count) {
36916 kfree(pte);
36917diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36918index a6d6270..c4bb72f 100644
36919--- a/block/scsi_ioctl.c
36920+++ b/block/scsi_ioctl.c
36921@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36922 return put_user(0, p);
36923 }
36924
36925-static int sg_get_timeout(struct request_queue *q)
36926+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36927 {
36928 return jiffies_to_clock_t(q->sg_timeout);
36929 }
36930@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36931 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36932 struct sg_io_hdr *hdr, fmode_t mode)
36933 {
36934- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36935+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36936+ unsigned char *cmdptr;
36937+
36938+ if (rq->cmd != rq->__cmd)
36939+ cmdptr = rq->cmd;
36940+ else
36941+ cmdptr = tmpcmd;
36942+
36943+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36944 return -EFAULT;
36945+
36946+ if (cmdptr != rq->cmd)
36947+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36948+
36949 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36950 return -EPERM;
36951
36952@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36953 int err;
36954 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36955 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36956+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36957+ unsigned char *cmdptr;
36958
36959 if (!sic)
36960 return -EINVAL;
36961@@ -470,9 +484,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36962 */
36963 err = -EFAULT;
36964 rq->cmd_len = cmdlen;
36965- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36966+
36967+ if (rq->cmd != rq->__cmd)
36968+ cmdptr = rq->cmd;
36969+ else
36970+ cmdptr = tmpcmd;
36971+
36972+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36973 goto error;
36974
36975+ if (rq->cmd != cmdptr)
36976+ memcpy(rq->cmd, cmdptr, cmdlen);
36977+
36978 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36979 goto error;
36980
36981diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36982index e592c90..c566114 100644
36983--- a/crypto/cryptd.c
36984+++ b/crypto/cryptd.c
36985@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36986
36987 struct cryptd_blkcipher_request_ctx {
36988 crypto_completion_t complete;
36989-};
36990+} __no_const;
36991
36992 struct cryptd_hash_ctx {
36993 struct crypto_shash *child;
36994@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
36995
36996 struct cryptd_aead_request_ctx {
36997 crypto_completion_t complete;
36998-};
36999+} __no_const;
37000
37001 static void cryptd_queue_worker(struct work_struct *work);
37002
37003diff --git a/crypto/cts.c b/crypto/cts.c
37004index 042223f..133f087 100644
37005--- a/crypto/cts.c
37006+++ b/crypto/cts.c
37007@@ -202,7 +202,8 @@ static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
37008 /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
37009 memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
37010 /* 6. Decrypt En to create Pn-1 */
37011- memset(iv, 0, sizeof(iv));
37012+ memzero_explicit(iv, sizeof(iv));
37013+
37014 sg_set_buf(&sgsrc[0], s + bsize, bsize);
37015 sg_set_buf(&sgdst[0], d, bsize);
37016 err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
37017diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
37018index 309d345..1632720 100644
37019--- a/crypto/pcrypt.c
37020+++ b/crypto/pcrypt.c
37021@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
37022 int ret;
37023
37024 pinst->kobj.kset = pcrypt_kset;
37025- ret = kobject_add(&pinst->kobj, NULL, name);
37026+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
37027 if (!ret)
37028 kobject_uevent(&pinst->kobj, KOBJ_ADD);
37029
37030diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
37031index 4279480..7bb0474 100644
37032--- a/crypto/sha1_generic.c
37033+++ b/crypto/sha1_generic.c
37034@@ -64,7 +64,7 @@ int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
37035 src = data + done;
37036 } while (done + SHA1_BLOCK_SIZE <= len);
37037
37038- memset(temp, 0, sizeof(temp));
37039+ memzero_explicit(temp, sizeof(temp));
37040 partial = 0;
37041 }
37042 memcpy(sctx->buffer + partial, src, len - done);
37043diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
37044index 5433667..32c5e5e 100644
37045--- a/crypto/sha256_generic.c
37046+++ b/crypto/sha256_generic.c
37047@@ -210,10 +210,9 @@ static void sha256_transform(u32 *state, const u8 *input)
37048
37049 /* clear any sensitive info... */
37050 a = b = c = d = e = f = g = h = t1 = t2 = 0;
37051- memset(W, 0, 64 * sizeof(u32));
37052+ memzero_explicit(W, 64 * sizeof(u32));
37053 }
37054
37055-
37056 static int sha224_init(struct shash_desc *desc)
37057 {
37058 struct sha256_state *sctx = shash_desc_ctx(desc);
37059@@ -316,7 +315,7 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
37060 sha256_final(desc, D);
37061
37062 memcpy(hash, D, SHA224_DIGEST_SIZE);
37063- memset(D, 0, SHA256_DIGEST_SIZE);
37064+ memzero_explicit(D, SHA256_DIGEST_SIZE);
37065
37066 return 0;
37067 }
37068diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
37069index 6ed124f..04d295a 100644
37070--- a/crypto/sha512_generic.c
37071+++ b/crypto/sha512_generic.c
37072@@ -238,7 +238,7 @@ static int sha384_final(struct shash_desc *desc, u8 *hash)
37073 sha512_final(desc, D);
37074
37075 memcpy(hash, D, 48);
37076- memset(D, 0, 64);
37077+ memzero_explicit(D, 64);
37078
37079 return 0;
37080 }
37081diff --git a/crypto/tgr192.c b/crypto/tgr192.c
37082index 8740355..3c7af0d 100644
37083--- a/crypto/tgr192.c
37084+++ b/crypto/tgr192.c
37085@@ -612,7 +612,7 @@ static int tgr160_final(struct shash_desc *desc, u8 * out)
37086
37087 tgr192_final(desc, D);
37088 memcpy(out, D, TGR160_DIGEST_SIZE);
37089- memset(D, 0, TGR192_DIGEST_SIZE);
37090+ memzero_explicit(D, TGR192_DIGEST_SIZE);
37091
37092 return 0;
37093 }
37094@@ -623,7 +623,7 @@ static int tgr128_final(struct shash_desc *desc, u8 * out)
37095
37096 tgr192_final(desc, D);
37097 memcpy(out, D, TGR128_DIGEST_SIZE);
37098- memset(D, 0, TGR192_DIGEST_SIZE);
37099+ memzero_explicit(D, TGR192_DIGEST_SIZE);
37100
37101 return 0;
37102 }
37103diff --git a/crypto/vmac.c b/crypto/vmac.c
37104index 2eb11a3..d84c24b 100644
37105--- a/crypto/vmac.c
37106+++ b/crypto/vmac.c
37107@@ -613,7 +613,7 @@ static int vmac_final(struct shash_desc *pdesc, u8 *out)
37108 }
37109 mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
37110 memcpy(out, &mac, sizeof(vmac_t));
37111- memset(&mac, 0, sizeof(vmac_t));
37112+ memzero_explicit(&mac, sizeof(vmac_t));
37113 memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
37114 ctx->partial_size = 0;
37115 return 0;
37116diff --git a/crypto/wp512.c b/crypto/wp512.c
37117index 180f1d6..ec64e77 100644
37118--- a/crypto/wp512.c
37119+++ b/crypto/wp512.c
37120@@ -1102,8 +1102,8 @@ static int wp384_final(struct shash_desc *desc, u8 *out)
37121 u8 D[64];
37122
37123 wp512_final(desc, D);
37124- memcpy (out, D, WP384_DIGEST_SIZE);
37125- memset (D, 0, WP512_DIGEST_SIZE);
37126+ memcpy(out, D, WP384_DIGEST_SIZE);
37127+ memzero_explicit(D, WP512_DIGEST_SIZE);
37128
37129 return 0;
37130 }
37131@@ -1113,8 +1113,8 @@ static int wp256_final(struct shash_desc *desc, u8 *out)
37132 u8 D[64];
37133
37134 wp512_final(desc, D);
37135- memcpy (out, D, WP256_DIGEST_SIZE);
37136- memset (D, 0, WP512_DIGEST_SIZE);
37137+ memcpy(out, D, WP256_DIGEST_SIZE);
37138+ memzero_explicit(D, WP512_DIGEST_SIZE);
37139
37140 return 0;
37141 }
37142diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
37143index 6921c7f..78e1af7 100644
37144--- a/drivers/acpi/acpica/hwxfsleep.c
37145+++ b/drivers/acpi/acpica/hwxfsleep.c
37146@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
37147 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
37148
37149 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
37150- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
37151- acpi_hw_extended_sleep},
37152- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
37153- acpi_hw_extended_wake_prep},
37154- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
37155+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
37156+ .extended_function = acpi_hw_extended_sleep},
37157+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
37158+ .extended_function = acpi_hw_extended_wake_prep},
37159+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
37160+ .extended_function = acpi_hw_extended_wake}
37161 };
37162
37163 /*
37164diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
37165index 16129c7..8b675cd 100644
37166--- a/drivers/acpi/apei/apei-internal.h
37167+++ b/drivers/acpi/apei/apei-internal.h
37168@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
37169 struct apei_exec_ins_type {
37170 u32 flags;
37171 apei_exec_ins_func_t run;
37172-};
37173+} __do_const;
37174
37175 struct apei_exec_context {
37176 u32 ip;
37177diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
37178index fc5f780..e5ac91a 100644
37179--- a/drivers/acpi/apei/ghes.c
37180+++ b/drivers/acpi/apei/ghes.c
37181@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
37182 const struct acpi_hest_generic *generic,
37183 const struct acpi_hest_generic_status *estatus)
37184 {
37185- static atomic_t seqno;
37186+ static atomic_unchecked_t seqno;
37187 unsigned int curr_seqno;
37188 char pfx_seq[64];
37189
37190@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
37191 else
37192 pfx = KERN_ERR;
37193 }
37194- curr_seqno = atomic_inc_return(&seqno);
37195+ curr_seqno = atomic_inc_return_unchecked(&seqno);
37196 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
37197 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
37198 pfx_seq, generic->header.source_id);
37199diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
37200index a83e3c6..c3d617f 100644
37201--- a/drivers/acpi/bgrt.c
37202+++ b/drivers/acpi/bgrt.c
37203@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
37204 if (!bgrt_image)
37205 return -ENODEV;
37206
37207- bin_attr_image.private = bgrt_image;
37208- bin_attr_image.size = bgrt_image_size;
37209+ pax_open_kernel();
37210+ *(void **)&bin_attr_image.private = bgrt_image;
37211+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
37212+ pax_close_kernel();
37213
37214 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
37215 if (!bgrt_kobj)
37216diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
37217index 36eb42e..3b2f47e 100644
37218--- a/drivers/acpi/blacklist.c
37219+++ b/drivers/acpi/blacklist.c
37220@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
37221 u32 is_critical_error;
37222 };
37223
37224-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
37225+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
37226
37227 /*
37228 * POLICY: If *anything* doesn't work, put it on the blacklist.
37229@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
37230 return 0;
37231 }
37232
37233-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
37234+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
37235 {
37236 .callback = dmi_disable_osi_vista,
37237 .ident = "Fujitsu Siemens",
37238diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
37239index c68e724..e863008 100644
37240--- a/drivers/acpi/custom_method.c
37241+++ b/drivers/acpi/custom_method.c
37242@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
37243 struct acpi_table_header table;
37244 acpi_status status;
37245
37246+#ifdef CONFIG_GRKERNSEC_KMEM
37247+ return -EPERM;
37248+#endif
37249+
37250 if (!(*ppos)) {
37251 /* parse the table header to get the table length */
37252 if (count <= sizeof(struct acpi_table_header))
37253diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
37254index 17f9ec5..d9a455e 100644
37255--- a/drivers/acpi/processor_idle.c
37256+++ b/drivers/acpi/processor_idle.c
37257@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
37258 {
37259 int i, count = CPUIDLE_DRIVER_STATE_START;
37260 struct acpi_processor_cx *cx;
37261- struct cpuidle_state *state;
37262+ cpuidle_state_no_const *state;
37263 struct cpuidle_driver *drv = &acpi_idle_driver;
37264
37265 if (!pr->flags.power_setup_done)
37266diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
37267index 38cb978..352c761 100644
37268--- a/drivers/acpi/sysfs.c
37269+++ b/drivers/acpi/sysfs.c
37270@@ -423,11 +423,11 @@ static u32 num_counters;
37271 static struct attribute **all_attrs;
37272 static u32 acpi_gpe_count;
37273
37274-static struct attribute_group interrupt_stats_attr_group = {
37275+static attribute_group_no_const interrupt_stats_attr_group = {
37276 .name = "interrupts",
37277 };
37278
37279-static struct kobj_attribute *counter_attrs;
37280+static kobj_attribute_no_const *counter_attrs;
37281
37282 static void delete_gpe_attr_array(void)
37283 {
37284diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
37285index b784e9d..a69a049 100644
37286--- a/drivers/ata/libahci.c
37287+++ b/drivers/ata/libahci.c
37288@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
37289 }
37290 EXPORT_SYMBOL_GPL(ahci_kick_engine);
37291
37292-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
37293+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
37294 struct ata_taskfile *tf, int is_cmd, u16 flags,
37295 unsigned long timeout_msec)
37296 {
37297diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
37298index 6f67490..f951ead 100644
37299--- a/drivers/ata/libata-core.c
37300+++ b/drivers/ata/libata-core.c
37301@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
37302 static void ata_dev_xfermask(struct ata_device *dev);
37303 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
37304
37305-atomic_t ata_print_id = ATOMIC_INIT(0);
37306+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
37307
37308 struct ata_force_param {
37309 const char *name;
37310@@ -4797,7 +4797,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
37311 struct ata_port *ap;
37312 unsigned int tag;
37313
37314- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
37315+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
37316 ap = qc->ap;
37317
37318 qc->flags = 0;
37319@@ -4813,7 +4813,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
37320 struct ata_port *ap;
37321 struct ata_link *link;
37322
37323- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
37324+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
37325 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
37326 ap = qc->ap;
37327 link = qc->dev->link;
37328@@ -5917,6 +5917,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
37329 return;
37330
37331 spin_lock(&lock);
37332+ pax_open_kernel();
37333
37334 for (cur = ops->inherits; cur; cur = cur->inherits) {
37335 void **inherit = (void **)cur;
37336@@ -5930,8 +5931,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
37337 if (IS_ERR(*pp))
37338 *pp = NULL;
37339
37340- ops->inherits = NULL;
37341+ *(struct ata_port_operations **)&ops->inherits = NULL;
37342
37343+ pax_close_kernel();
37344 spin_unlock(&lock);
37345 }
37346
37347@@ -6127,7 +6129,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
37348
37349 /* give ports names and add SCSI hosts */
37350 for (i = 0; i < host->n_ports; i++) {
37351- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
37352+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
37353 host->ports[i]->local_port_no = i + 1;
37354 }
37355
37356diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
37357index 0586f66..1a8f74a 100644
37358--- a/drivers/ata/libata-scsi.c
37359+++ b/drivers/ata/libata-scsi.c
37360@@ -4151,7 +4151,7 @@ int ata_sas_port_init(struct ata_port *ap)
37361
37362 if (rc)
37363 return rc;
37364- ap->print_id = atomic_inc_return(&ata_print_id);
37365+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
37366 return 0;
37367 }
37368 EXPORT_SYMBOL_GPL(ata_sas_port_init);
37369diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
37370index 5f4e0cc..ff2c347 100644
37371--- a/drivers/ata/libata.h
37372+++ b/drivers/ata/libata.h
37373@@ -53,7 +53,7 @@ enum {
37374 ATA_DNXFER_QUIET = (1 << 31),
37375 };
37376
37377-extern atomic_t ata_print_id;
37378+extern atomic_unchecked_t ata_print_id;
37379 extern int atapi_passthru16;
37380 extern int libata_fua;
37381 extern int libata_noacpi;
37382diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
37383index 4edb1a8..84e1658 100644
37384--- a/drivers/ata/pata_arasan_cf.c
37385+++ b/drivers/ata/pata_arasan_cf.c
37386@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
37387 /* Handle platform specific quirks */
37388 if (quirk) {
37389 if (quirk & CF_BROKEN_PIO) {
37390- ap->ops->set_piomode = NULL;
37391+ pax_open_kernel();
37392+ *(void **)&ap->ops->set_piomode = NULL;
37393+ pax_close_kernel();
37394 ap->pio_mask = 0;
37395 }
37396 if (quirk & CF_BROKEN_MWDMA)
37397diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
37398index f9b983a..887b9d8 100644
37399--- a/drivers/atm/adummy.c
37400+++ b/drivers/atm/adummy.c
37401@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
37402 vcc->pop(vcc, skb);
37403 else
37404 dev_kfree_skb_any(skb);
37405- atomic_inc(&vcc->stats->tx);
37406+ atomic_inc_unchecked(&vcc->stats->tx);
37407
37408 return 0;
37409 }
37410diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
37411index f1a9198..f466a4a 100644
37412--- a/drivers/atm/ambassador.c
37413+++ b/drivers/atm/ambassador.c
37414@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
37415 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
37416
37417 // VC layer stats
37418- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37419+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37420
37421 // free the descriptor
37422 kfree (tx_descr);
37423@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
37424 dump_skb ("<<<", vc, skb);
37425
37426 // VC layer stats
37427- atomic_inc(&atm_vcc->stats->rx);
37428+ atomic_inc_unchecked(&atm_vcc->stats->rx);
37429 __net_timestamp(skb);
37430 // end of our responsibility
37431 atm_vcc->push (atm_vcc, skb);
37432@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
37433 } else {
37434 PRINTK (KERN_INFO, "dropped over-size frame");
37435 // should we count this?
37436- atomic_inc(&atm_vcc->stats->rx_drop);
37437+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37438 }
37439
37440 } else {
37441@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
37442 }
37443
37444 if (check_area (skb->data, skb->len)) {
37445- atomic_inc(&atm_vcc->stats->tx_err);
37446+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
37447 return -ENOMEM; // ?
37448 }
37449
37450diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
37451index 480fa6f..947067c 100644
37452--- a/drivers/atm/atmtcp.c
37453+++ b/drivers/atm/atmtcp.c
37454@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
37455 if (vcc->pop) vcc->pop(vcc,skb);
37456 else dev_kfree_skb(skb);
37457 if (dev_data) return 0;
37458- atomic_inc(&vcc->stats->tx_err);
37459+ atomic_inc_unchecked(&vcc->stats->tx_err);
37460 return -ENOLINK;
37461 }
37462 size = skb->len+sizeof(struct atmtcp_hdr);
37463@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
37464 if (!new_skb) {
37465 if (vcc->pop) vcc->pop(vcc,skb);
37466 else dev_kfree_skb(skb);
37467- atomic_inc(&vcc->stats->tx_err);
37468+ atomic_inc_unchecked(&vcc->stats->tx_err);
37469 return -ENOBUFS;
37470 }
37471 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
37472@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
37473 if (vcc->pop) vcc->pop(vcc,skb);
37474 else dev_kfree_skb(skb);
37475 out_vcc->push(out_vcc,new_skb);
37476- atomic_inc(&vcc->stats->tx);
37477- atomic_inc(&out_vcc->stats->rx);
37478+ atomic_inc_unchecked(&vcc->stats->tx);
37479+ atomic_inc_unchecked(&out_vcc->stats->rx);
37480 return 0;
37481 }
37482
37483@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
37484 read_unlock(&vcc_sklist_lock);
37485 if (!out_vcc) {
37486 result = -EUNATCH;
37487- atomic_inc(&vcc->stats->tx_err);
37488+ atomic_inc_unchecked(&vcc->stats->tx_err);
37489 goto done;
37490 }
37491 skb_pull(skb,sizeof(struct atmtcp_hdr));
37492@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
37493 __net_timestamp(new_skb);
37494 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
37495 out_vcc->push(out_vcc,new_skb);
37496- atomic_inc(&vcc->stats->tx);
37497- atomic_inc(&out_vcc->stats->rx);
37498+ atomic_inc_unchecked(&vcc->stats->tx);
37499+ atomic_inc_unchecked(&out_vcc->stats->rx);
37500 done:
37501 if (vcc->pop) vcc->pop(vcc,skb);
37502 else dev_kfree_skb(skb);
37503diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
37504index d65975a..0b87e20 100644
37505--- a/drivers/atm/eni.c
37506+++ b/drivers/atm/eni.c
37507@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
37508 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
37509 vcc->dev->number);
37510 length = 0;
37511- atomic_inc(&vcc->stats->rx_err);
37512+ atomic_inc_unchecked(&vcc->stats->rx_err);
37513 }
37514 else {
37515 length = ATM_CELL_SIZE-1; /* no HEC */
37516@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
37517 size);
37518 }
37519 eff = length = 0;
37520- atomic_inc(&vcc->stats->rx_err);
37521+ atomic_inc_unchecked(&vcc->stats->rx_err);
37522 }
37523 else {
37524 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
37525@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
37526 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
37527 vcc->dev->number,vcc->vci,length,size << 2,descr);
37528 length = eff = 0;
37529- atomic_inc(&vcc->stats->rx_err);
37530+ atomic_inc_unchecked(&vcc->stats->rx_err);
37531 }
37532 }
37533 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
37534@@ -767,7 +767,7 @@ rx_dequeued++;
37535 vcc->push(vcc,skb);
37536 pushed++;
37537 }
37538- atomic_inc(&vcc->stats->rx);
37539+ atomic_inc_unchecked(&vcc->stats->rx);
37540 }
37541 wake_up(&eni_dev->rx_wait);
37542 }
37543@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
37544 PCI_DMA_TODEVICE);
37545 if (vcc->pop) vcc->pop(vcc,skb);
37546 else dev_kfree_skb_irq(skb);
37547- atomic_inc(&vcc->stats->tx);
37548+ atomic_inc_unchecked(&vcc->stats->tx);
37549 wake_up(&eni_dev->tx_wait);
37550 dma_complete++;
37551 }
37552diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
37553index 82f2ae0..f205c02 100644
37554--- a/drivers/atm/firestream.c
37555+++ b/drivers/atm/firestream.c
37556@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
37557 }
37558 }
37559
37560- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37561+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37562
37563 fs_dprintk (FS_DEBUG_TXMEM, "i");
37564 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
37565@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
37566 #endif
37567 skb_put (skb, qe->p1 & 0xffff);
37568 ATM_SKB(skb)->vcc = atm_vcc;
37569- atomic_inc(&atm_vcc->stats->rx);
37570+ atomic_inc_unchecked(&atm_vcc->stats->rx);
37571 __net_timestamp(skb);
37572 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
37573 atm_vcc->push (atm_vcc, skb);
37574@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
37575 kfree (pe);
37576 }
37577 if (atm_vcc)
37578- atomic_inc(&atm_vcc->stats->rx_drop);
37579+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37580 break;
37581 case 0x1f: /* Reassembly abort: no buffers. */
37582 /* Silently increment error counter. */
37583 if (atm_vcc)
37584- atomic_inc(&atm_vcc->stats->rx_drop);
37585+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37586 break;
37587 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
37588 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
37589diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
37590index d4725fc..2d4ea65 100644
37591--- a/drivers/atm/fore200e.c
37592+++ b/drivers/atm/fore200e.c
37593@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
37594 #endif
37595 /* check error condition */
37596 if (*entry->status & STATUS_ERROR)
37597- atomic_inc(&vcc->stats->tx_err);
37598+ atomic_inc_unchecked(&vcc->stats->tx_err);
37599 else
37600- atomic_inc(&vcc->stats->tx);
37601+ atomic_inc_unchecked(&vcc->stats->tx);
37602 }
37603 }
37604
37605@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37606 if (skb == NULL) {
37607 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
37608
37609- atomic_inc(&vcc->stats->rx_drop);
37610+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37611 return -ENOMEM;
37612 }
37613
37614@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37615
37616 dev_kfree_skb_any(skb);
37617
37618- atomic_inc(&vcc->stats->rx_drop);
37619+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37620 return -ENOMEM;
37621 }
37622
37623 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37624
37625 vcc->push(vcc, skb);
37626- atomic_inc(&vcc->stats->rx);
37627+ atomic_inc_unchecked(&vcc->stats->rx);
37628
37629 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37630
37631@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
37632 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
37633 fore200e->atm_dev->number,
37634 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
37635- atomic_inc(&vcc->stats->rx_err);
37636+ atomic_inc_unchecked(&vcc->stats->rx_err);
37637 }
37638 }
37639
37640@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
37641 goto retry_here;
37642 }
37643
37644- atomic_inc(&vcc->stats->tx_err);
37645+ atomic_inc_unchecked(&vcc->stats->tx_err);
37646
37647 fore200e->tx_sat++;
37648 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
37649diff --git a/drivers/atm/he.c b/drivers/atm/he.c
37650index c39702b..785b73b 100644
37651--- a/drivers/atm/he.c
37652+++ b/drivers/atm/he.c
37653@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37654
37655 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
37656 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
37657- atomic_inc(&vcc->stats->rx_drop);
37658+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37659 goto return_host_buffers;
37660 }
37661
37662@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37663 RBRQ_LEN_ERR(he_dev->rbrq_head)
37664 ? "LEN_ERR" : "",
37665 vcc->vpi, vcc->vci);
37666- atomic_inc(&vcc->stats->rx_err);
37667+ atomic_inc_unchecked(&vcc->stats->rx_err);
37668 goto return_host_buffers;
37669 }
37670
37671@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37672 vcc->push(vcc, skb);
37673 spin_lock(&he_dev->global_lock);
37674
37675- atomic_inc(&vcc->stats->rx);
37676+ atomic_inc_unchecked(&vcc->stats->rx);
37677
37678 return_host_buffers:
37679 ++pdus_assembled;
37680@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
37681 tpd->vcc->pop(tpd->vcc, tpd->skb);
37682 else
37683 dev_kfree_skb_any(tpd->skb);
37684- atomic_inc(&tpd->vcc->stats->tx_err);
37685+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
37686 }
37687 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
37688 return;
37689@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37690 vcc->pop(vcc, skb);
37691 else
37692 dev_kfree_skb_any(skb);
37693- atomic_inc(&vcc->stats->tx_err);
37694+ atomic_inc_unchecked(&vcc->stats->tx_err);
37695 return -EINVAL;
37696 }
37697
37698@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37699 vcc->pop(vcc, skb);
37700 else
37701 dev_kfree_skb_any(skb);
37702- atomic_inc(&vcc->stats->tx_err);
37703+ atomic_inc_unchecked(&vcc->stats->tx_err);
37704 return -EINVAL;
37705 }
37706 #endif
37707@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37708 vcc->pop(vcc, skb);
37709 else
37710 dev_kfree_skb_any(skb);
37711- atomic_inc(&vcc->stats->tx_err);
37712+ atomic_inc_unchecked(&vcc->stats->tx_err);
37713 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37714 return -ENOMEM;
37715 }
37716@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37717 vcc->pop(vcc, skb);
37718 else
37719 dev_kfree_skb_any(skb);
37720- atomic_inc(&vcc->stats->tx_err);
37721+ atomic_inc_unchecked(&vcc->stats->tx_err);
37722 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37723 return -ENOMEM;
37724 }
37725@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37726 __enqueue_tpd(he_dev, tpd, cid);
37727 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37728
37729- atomic_inc(&vcc->stats->tx);
37730+ atomic_inc_unchecked(&vcc->stats->tx);
37731
37732 return 0;
37733 }
37734diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37735index 1dc0519..1aadaf7 100644
37736--- a/drivers/atm/horizon.c
37737+++ b/drivers/atm/horizon.c
37738@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37739 {
37740 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37741 // VC layer stats
37742- atomic_inc(&vcc->stats->rx);
37743+ atomic_inc_unchecked(&vcc->stats->rx);
37744 __net_timestamp(skb);
37745 // end of our responsibility
37746 vcc->push (vcc, skb);
37747@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37748 dev->tx_iovec = NULL;
37749
37750 // VC layer stats
37751- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37752+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37753
37754 // free the skb
37755 hrz_kfree_skb (skb);
37756diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37757index 2b24ed0..b3d6acc 100644
37758--- a/drivers/atm/idt77252.c
37759+++ b/drivers/atm/idt77252.c
37760@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37761 else
37762 dev_kfree_skb(skb);
37763
37764- atomic_inc(&vcc->stats->tx);
37765+ atomic_inc_unchecked(&vcc->stats->tx);
37766 }
37767
37768 atomic_dec(&scq->used);
37769@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37770 if ((sb = dev_alloc_skb(64)) == NULL) {
37771 printk("%s: Can't allocate buffers for aal0.\n",
37772 card->name);
37773- atomic_add(i, &vcc->stats->rx_drop);
37774+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37775 break;
37776 }
37777 if (!atm_charge(vcc, sb->truesize)) {
37778 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37779 card->name);
37780- atomic_add(i - 1, &vcc->stats->rx_drop);
37781+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37782 dev_kfree_skb(sb);
37783 break;
37784 }
37785@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37786 ATM_SKB(sb)->vcc = vcc;
37787 __net_timestamp(sb);
37788 vcc->push(vcc, sb);
37789- atomic_inc(&vcc->stats->rx);
37790+ atomic_inc_unchecked(&vcc->stats->rx);
37791
37792 cell += ATM_CELL_PAYLOAD;
37793 }
37794@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37795 "(CDC: %08x)\n",
37796 card->name, len, rpp->len, readl(SAR_REG_CDC));
37797 recycle_rx_pool_skb(card, rpp);
37798- atomic_inc(&vcc->stats->rx_err);
37799+ atomic_inc_unchecked(&vcc->stats->rx_err);
37800 return;
37801 }
37802 if (stat & SAR_RSQE_CRC) {
37803 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37804 recycle_rx_pool_skb(card, rpp);
37805- atomic_inc(&vcc->stats->rx_err);
37806+ atomic_inc_unchecked(&vcc->stats->rx_err);
37807 return;
37808 }
37809 if (skb_queue_len(&rpp->queue) > 1) {
37810@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37811 RXPRINTK("%s: Can't alloc RX skb.\n",
37812 card->name);
37813 recycle_rx_pool_skb(card, rpp);
37814- atomic_inc(&vcc->stats->rx_err);
37815+ atomic_inc_unchecked(&vcc->stats->rx_err);
37816 return;
37817 }
37818 if (!atm_charge(vcc, skb->truesize)) {
37819@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37820 __net_timestamp(skb);
37821
37822 vcc->push(vcc, skb);
37823- atomic_inc(&vcc->stats->rx);
37824+ atomic_inc_unchecked(&vcc->stats->rx);
37825
37826 return;
37827 }
37828@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37829 __net_timestamp(skb);
37830
37831 vcc->push(vcc, skb);
37832- atomic_inc(&vcc->stats->rx);
37833+ atomic_inc_unchecked(&vcc->stats->rx);
37834
37835 if (skb->truesize > SAR_FB_SIZE_3)
37836 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37837@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37838 if (vcc->qos.aal != ATM_AAL0) {
37839 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37840 card->name, vpi, vci);
37841- atomic_inc(&vcc->stats->rx_drop);
37842+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37843 goto drop;
37844 }
37845
37846 if ((sb = dev_alloc_skb(64)) == NULL) {
37847 printk("%s: Can't allocate buffers for AAL0.\n",
37848 card->name);
37849- atomic_inc(&vcc->stats->rx_err);
37850+ atomic_inc_unchecked(&vcc->stats->rx_err);
37851 goto drop;
37852 }
37853
37854@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37855 ATM_SKB(sb)->vcc = vcc;
37856 __net_timestamp(sb);
37857 vcc->push(vcc, sb);
37858- atomic_inc(&vcc->stats->rx);
37859+ atomic_inc_unchecked(&vcc->stats->rx);
37860
37861 drop:
37862 skb_pull(queue, 64);
37863@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37864
37865 if (vc == NULL) {
37866 printk("%s: NULL connection in send().\n", card->name);
37867- atomic_inc(&vcc->stats->tx_err);
37868+ atomic_inc_unchecked(&vcc->stats->tx_err);
37869 dev_kfree_skb(skb);
37870 return -EINVAL;
37871 }
37872 if (!test_bit(VCF_TX, &vc->flags)) {
37873 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37874- atomic_inc(&vcc->stats->tx_err);
37875+ atomic_inc_unchecked(&vcc->stats->tx_err);
37876 dev_kfree_skb(skb);
37877 return -EINVAL;
37878 }
37879@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37880 break;
37881 default:
37882 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37883- atomic_inc(&vcc->stats->tx_err);
37884+ atomic_inc_unchecked(&vcc->stats->tx_err);
37885 dev_kfree_skb(skb);
37886 return -EINVAL;
37887 }
37888
37889 if (skb_shinfo(skb)->nr_frags != 0) {
37890 printk("%s: No scatter-gather yet.\n", card->name);
37891- atomic_inc(&vcc->stats->tx_err);
37892+ atomic_inc_unchecked(&vcc->stats->tx_err);
37893 dev_kfree_skb(skb);
37894 return -EINVAL;
37895 }
37896@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37897
37898 err = queue_skb(card, vc, skb, oam);
37899 if (err) {
37900- atomic_inc(&vcc->stats->tx_err);
37901+ atomic_inc_unchecked(&vcc->stats->tx_err);
37902 dev_kfree_skb(skb);
37903 return err;
37904 }
37905@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37906 skb = dev_alloc_skb(64);
37907 if (!skb) {
37908 printk("%s: Out of memory in send_oam().\n", card->name);
37909- atomic_inc(&vcc->stats->tx_err);
37910+ atomic_inc_unchecked(&vcc->stats->tx_err);
37911 return -ENOMEM;
37912 }
37913 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37914diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37915index 4217f29..88f547a 100644
37916--- a/drivers/atm/iphase.c
37917+++ b/drivers/atm/iphase.c
37918@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37919 status = (u_short) (buf_desc_ptr->desc_mode);
37920 if (status & (RX_CER | RX_PTE | RX_OFL))
37921 {
37922- atomic_inc(&vcc->stats->rx_err);
37923+ atomic_inc_unchecked(&vcc->stats->rx_err);
37924 IF_ERR(printk("IA: bad packet, dropping it");)
37925 if (status & RX_CER) {
37926 IF_ERR(printk(" cause: packet CRC error\n");)
37927@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37928 len = dma_addr - buf_addr;
37929 if (len > iadev->rx_buf_sz) {
37930 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37931- atomic_inc(&vcc->stats->rx_err);
37932+ atomic_inc_unchecked(&vcc->stats->rx_err);
37933 goto out_free_desc;
37934 }
37935
37936@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37937 ia_vcc = INPH_IA_VCC(vcc);
37938 if (ia_vcc == NULL)
37939 {
37940- atomic_inc(&vcc->stats->rx_err);
37941+ atomic_inc_unchecked(&vcc->stats->rx_err);
37942 atm_return(vcc, skb->truesize);
37943 dev_kfree_skb_any(skb);
37944 goto INCR_DLE;
37945@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37946 if ((length > iadev->rx_buf_sz) || (length >
37947 (skb->len - sizeof(struct cpcs_trailer))))
37948 {
37949- atomic_inc(&vcc->stats->rx_err);
37950+ atomic_inc_unchecked(&vcc->stats->rx_err);
37951 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37952 length, skb->len);)
37953 atm_return(vcc, skb->truesize);
37954@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37955
37956 IF_RX(printk("rx_dle_intr: skb push");)
37957 vcc->push(vcc,skb);
37958- atomic_inc(&vcc->stats->rx);
37959+ atomic_inc_unchecked(&vcc->stats->rx);
37960 iadev->rx_pkt_cnt++;
37961 }
37962 INCR_DLE:
37963@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37964 {
37965 struct k_sonet_stats *stats;
37966 stats = &PRIV(_ia_dev[board])->sonet_stats;
37967- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37968- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37969- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37970- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37971- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37972- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37973- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37974- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37975- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37976+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37977+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37978+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37979+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37980+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37981+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37982+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37983+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37984+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37985 }
37986 ia_cmds.status = 0;
37987 break;
37988@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37989 if ((desc == 0) || (desc > iadev->num_tx_desc))
37990 {
37991 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
37992- atomic_inc(&vcc->stats->tx);
37993+ atomic_inc_unchecked(&vcc->stats->tx);
37994 if (vcc->pop)
37995 vcc->pop(vcc, skb);
37996 else
37997@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37998 ATM_DESC(skb) = vcc->vci;
37999 skb_queue_tail(&iadev->tx_dma_q, skb);
38000
38001- atomic_inc(&vcc->stats->tx);
38002+ atomic_inc_unchecked(&vcc->stats->tx);
38003 iadev->tx_pkt_cnt++;
38004 /* Increment transaction counter */
38005 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
38006
38007 #if 0
38008 /* add flow control logic */
38009- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
38010+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
38011 if (iavcc->vc_desc_cnt > 10) {
38012 vcc->tx_quota = vcc->tx_quota * 3 / 4;
38013 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
38014diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
38015index fa7d7019..1e404c7 100644
38016--- a/drivers/atm/lanai.c
38017+++ b/drivers/atm/lanai.c
38018@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
38019 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
38020 lanai_endtx(lanai, lvcc);
38021 lanai_free_skb(lvcc->tx.atmvcc, skb);
38022- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
38023+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
38024 }
38025
38026 /* Try to fill the buffer - don't call unless there is backlog */
38027@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
38028 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
38029 __net_timestamp(skb);
38030 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
38031- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
38032+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
38033 out:
38034 lvcc->rx.buf.ptr = end;
38035 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
38036@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38037 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
38038 "vcc %d\n", lanai->number, (unsigned int) s, vci);
38039 lanai->stats.service_rxnotaal5++;
38040- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38041+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38042 return 0;
38043 }
38044 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
38045@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38046 int bytes;
38047 read_unlock(&vcc_sklist_lock);
38048 DPRINTK("got trashed rx pdu on vci %d\n", vci);
38049- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38050+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38051 lvcc->stats.x.aal5.service_trash++;
38052 bytes = (SERVICE_GET_END(s) * 16) -
38053 (((unsigned long) lvcc->rx.buf.ptr) -
38054@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38055 }
38056 if (s & SERVICE_STREAM) {
38057 read_unlock(&vcc_sklist_lock);
38058- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38059+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38060 lvcc->stats.x.aal5.service_stream++;
38061 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
38062 "PDU on VCI %d!\n", lanai->number, vci);
38063@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38064 return 0;
38065 }
38066 DPRINTK("got rx crc error on vci %d\n", vci);
38067- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38068+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38069 lvcc->stats.x.aal5.service_rxcrc++;
38070 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
38071 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
38072diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
38073index 9988ac9..7c52585 100644
38074--- a/drivers/atm/nicstar.c
38075+++ b/drivers/atm/nicstar.c
38076@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38077 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
38078 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
38079 card->index);
38080- atomic_inc(&vcc->stats->tx_err);
38081+ atomic_inc_unchecked(&vcc->stats->tx_err);
38082 dev_kfree_skb_any(skb);
38083 return -EINVAL;
38084 }
38085@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38086 if (!vc->tx) {
38087 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
38088 card->index);
38089- atomic_inc(&vcc->stats->tx_err);
38090+ atomic_inc_unchecked(&vcc->stats->tx_err);
38091 dev_kfree_skb_any(skb);
38092 return -EINVAL;
38093 }
38094@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38095 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
38096 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
38097 card->index);
38098- atomic_inc(&vcc->stats->tx_err);
38099+ atomic_inc_unchecked(&vcc->stats->tx_err);
38100 dev_kfree_skb_any(skb);
38101 return -EINVAL;
38102 }
38103
38104 if (skb_shinfo(skb)->nr_frags != 0) {
38105 printk("nicstar%d: No scatter-gather yet.\n", card->index);
38106- atomic_inc(&vcc->stats->tx_err);
38107+ atomic_inc_unchecked(&vcc->stats->tx_err);
38108 dev_kfree_skb_any(skb);
38109 return -EINVAL;
38110 }
38111@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38112 }
38113
38114 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
38115- atomic_inc(&vcc->stats->tx_err);
38116+ atomic_inc_unchecked(&vcc->stats->tx_err);
38117 dev_kfree_skb_any(skb);
38118 return -EIO;
38119 }
38120- atomic_inc(&vcc->stats->tx);
38121+ atomic_inc_unchecked(&vcc->stats->tx);
38122
38123 return 0;
38124 }
38125@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38126 printk
38127 ("nicstar%d: Can't allocate buffers for aal0.\n",
38128 card->index);
38129- atomic_add(i, &vcc->stats->rx_drop);
38130+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
38131 break;
38132 }
38133 if (!atm_charge(vcc, sb->truesize)) {
38134 RXPRINTK
38135 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
38136 card->index);
38137- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
38138+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
38139 dev_kfree_skb_any(sb);
38140 break;
38141 }
38142@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38143 ATM_SKB(sb)->vcc = vcc;
38144 __net_timestamp(sb);
38145 vcc->push(vcc, sb);
38146- atomic_inc(&vcc->stats->rx);
38147+ atomic_inc_unchecked(&vcc->stats->rx);
38148 cell += ATM_CELL_PAYLOAD;
38149 }
38150
38151@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38152 if (iovb == NULL) {
38153 printk("nicstar%d: Out of iovec buffers.\n",
38154 card->index);
38155- atomic_inc(&vcc->stats->rx_drop);
38156+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38157 recycle_rx_buf(card, skb);
38158 return;
38159 }
38160@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38161 small or large buffer itself. */
38162 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
38163 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
38164- atomic_inc(&vcc->stats->rx_err);
38165+ atomic_inc_unchecked(&vcc->stats->rx_err);
38166 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38167 NS_MAX_IOVECS);
38168 NS_PRV_IOVCNT(iovb) = 0;
38169@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38170 ("nicstar%d: Expected a small buffer, and this is not one.\n",
38171 card->index);
38172 which_list(card, skb);
38173- atomic_inc(&vcc->stats->rx_err);
38174+ atomic_inc_unchecked(&vcc->stats->rx_err);
38175 recycle_rx_buf(card, skb);
38176 vc->rx_iov = NULL;
38177 recycle_iov_buf(card, iovb);
38178@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38179 ("nicstar%d: Expected a large buffer, and this is not one.\n",
38180 card->index);
38181 which_list(card, skb);
38182- atomic_inc(&vcc->stats->rx_err);
38183+ atomic_inc_unchecked(&vcc->stats->rx_err);
38184 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38185 NS_PRV_IOVCNT(iovb));
38186 vc->rx_iov = NULL;
38187@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38188 printk(" - PDU size mismatch.\n");
38189 else
38190 printk(".\n");
38191- atomic_inc(&vcc->stats->rx_err);
38192+ atomic_inc_unchecked(&vcc->stats->rx_err);
38193 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38194 NS_PRV_IOVCNT(iovb));
38195 vc->rx_iov = NULL;
38196@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38197 /* skb points to a small buffer */
38198 if (!atm_charge(vcc, skb->truesize)) {
38199 push_rxbufs(card, skb);
38200- atomic_inc(&vcc->stats->rx_drop);
38201+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38202 } else {
38203 skb_put(skb, len);
38204 dequeue_sm_buf(card, skb);
38205@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38206 ATM_SKB(skb)->vcc = vcc;
38207 __net_timestamp(skb);
38208 vcc->push(vcc, skb);
38209- atomic_inc(&vcc->stats->rx);
38210+ atomic_inc_unchecked(&vcc->stats->rx);
38211 }
38212 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
38213 struct sk_buff *sb;
38214@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38215 if (len <= NS_SMBUFSIZE) {
38216 if (!atm_charge(vcc, sb->truesize)) {
38217 push_rxbufs(card, sb);
38218- atomic_inc(&vcc->stats->rx_drop);
38219+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38220 } else {
38221 skb_put(sb, len);
38222 dequeue_sm_buf(card, sb);
38223@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38224 ATM_SKB(sb)->vcc = vcc;
38225 __net_timestamp(sb);
38226 vcc->push(vcc, sb);
38227- atomic_inc(&vcc->stats->rx);
38228+ atomic_inc_unchecked(&vcc->stats->rx);
38229 }
38230
38231 push_rxbufs(card, skb);
38232@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38233
38234 if (!atm_charge(vcc, skb->truesize)) {
38235 push_rxbufs(card, skb);
38236- atomic_inc(&vcc->stats->rx_drop);
38237+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38238 } else {
38239 dequeue_lg_buf(card, skb);
38240 #ifdef NS_USE_DESTRUCTORS
38241@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38242 ATM_SKB(skb)->vcc = vcc;
38243 __net_timestamp(skb);
38244 vcc->push(vcc, skb);
38245- atomic_inc(&vcc->stats->rx);
38246+ atomic_inc_unchecked(&vcc->stats->rx);
38247 }
38248
38249 push_rxbufs(card, sb);
38250@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38251 printk
38252 ("nicstar%d: Out of huge buffers.\n",
38253 card->index);
38254- atomic_inc(&vcc->stats->rx_drop);
38255+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38256 recycle_iovec_rx_bufs(card,
38257 (struct iovec *)
38258 iovb->data,
38259@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38260 card->hbpool.count++;
38261 } else
38262 dev_kfree_skb_any(hb);
38263- atomic_inc(&vcc->stats->rx_drop);
38264+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38265 } else {
38266 /* Copy the small buffer to the huge buffer */
38267 sb = (struct sk_buff *)iov->iov_base;
38268@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38269 #endif /* NS_USE_DESTRUCTORS */
38270 __net_timestamp(hb);
38271 vcc->push(vcc, hb);
38272- atomic_inc(&vcc->stats->rx);
38273+ atomic_inc_unchecked(&vcc->stats->rx);
38274 }
38275 }
38276
38277diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
38278index 7652e8d..db45069 100644
38279--- a/drivers/atm/solos-pci.c
38280+++ b/drivers/atm/solos-pci.c
38281@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
38282 }
38283 atm_charge(vcc, skb->truesize);
38284 vcc->push(vcc, skb);
38285- atomic_inc(&vcc->stats->rx);
38286+ atomic_inc_unchecked(&vcc->stats->rx);
38287 break;
38288
38289 case PKT_STATUS:
38290@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
38291 vcc = SKB_CB(oldskb)->vcc;
38292
38293 if (vcc) {
38294- atomic_inc(&vcc->stats->tx);
38295+ atomic_inc_unchecked(&vcc->stats->tx);
38296 solos_pop(vcc, oldskb);
38297 } else {
38298 dev_kfree_skb_irq(oldskb);
38299diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
38300index 0215934..ce9f5b1 100644
38301--- a/drivers/atm/suni.c
38302+++ b/drivers/atm/suni.c
38303@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
38304
38305
38306 #define ADD_LIMITED(s,v) \
38307- atomic_add((v),&stats->s); \
38308- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
38309+ atomic_add_unchecked((v),&stats->s); \
38310+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
38311
38312
38313 static void suni_hz(unsigned long from_timer)
38314diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
38315index 5120a96..e2572bd 100644
38316--- a/drivers/atm/uPD98402.c
38317+++ b/drivers/atm/uPD98402.c
38318@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
38319 struct sonet_stats tmp;
38320 int error = 0;
38321
38322- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
38323+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
38324 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
38325 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
38326 if (zero && !error) {
38327@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
38328
38329
38330 #define ADD_LIMITED(s,v) \
38331- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
38332- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
38333- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
38334+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
38335+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
38336+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
38337
38338
38339 static void stat_event(struct atm_dev *dev)
38340@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
38341 if (reason & uPD98402_INT_PFM) stat_event(dev);
38342 if (reason & uPD98402_INT_PCO) {
38343 (void) GET(PCOCR); /* clear interrupt cause */
38344- atomic_add(GET(HECCT),
38345+ atomic_add_unchecked(GET(HECCT),
38346 &PRIV(dev)->sonet_stats.uncorr_hcs);
38347 }
38348 if ((reason & uPD98402_INT_RFO) &&
38349@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
38350 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
38351 uPD98402_INT_LOS),PIMR); /* enable them */
38352 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
38353- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
38354- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
38355- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
38356+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
38357+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
38358+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
38359 return 0;
38360 }
38361
38362diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
38363index 969c3c2..9b72956 100644
38364--- a/drivers/atm/zatm.c
38365+++ b/drivers/atm/zatm.c
38366@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
38367 }
38368 if (!size) {
38369 dev_kfree_skb_irq(skb);
38370- if (vcc) atomic_inc(&vcc->stats->rx_err);
38371+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
38372 continue;
38373 }
38374 if (!atm_charge(vcc,skb->truesize)) {
38375@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
38376 skb->len = size;
38377 ATM_SKB(skb)->vcc = vcc;
38378 vcc->push(vcc,skb);
38379- atomic_inc(&vcc->stats->rx);
38380+ atomic_inc_unchecked(&vcc->stats->rx);
38381 }
38382 zout(pos & 0xffff,MTA(mbx));
38383 #if 0 /* probably a stupid idea */
38384@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
38385 skb_queue_head(&zatm_vcc->backlog,skb);
38386 break;
38387 }
38388- atomic_inc(&vcc->stats->tx);
38389+ atomic_inc_unchecked(&vcc->stats->tx);
38390 wake_up(&zatm_vcc->tx_wait);
38391 }
38392
38393diff --git a/drivers/base/bus.c b/drivers/base/bus.c
38394index 83e910a..b224a73 100644
38395--- a/drivers/base/bus.c
38396+++ b/drivers/base/bus.c
38397@@ -1124,7 +1124,7 @@ int subsys_interface_register(struct subsys_interface *sif)
38398 return -EINVAL;
38399
38400 mutex_lock(&subsys->p->mutex);
38401- list_add_tail(&sif->node, &subsys->p->interfaces);
38402+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
38403 if (sif->add_dev) {
38404 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
38405 while ((dev = subsys_dev_iter_next(&iter)))
38406@@ -1149,7 +1149,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
38407 subsys = sif->subsys;
38408
38409 mutex_lock(&subsys->p->mutex);
38410- list_del_init(&sif->node);
38411+ pax_list_del_init((struct list_head *)&sif->node);
38412 if (sif->remove_dev) {
38413 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
38414 while ((dev = subsys_dev_iter_next(&iter)))
38415diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
38416index 25798db..15f130e 100644
38417--- a/drivers/base/devtmpfs.c
38418+++ b/drivers/base/devtmpfs.c
38419@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
38420 if (!thread)
38421 return 0;
38422
38423- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
38424+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
38425 if (err)
38426 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
38427 else
38428@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
38429 *err = sys_unshare(CLONE_NEWNS);
38430 if (*err)
38431 goto out;
38432- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
38433+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
38434 if (*err)
38435 goto out;
38436- sys_chdir("/.."); /* will traverse into overmounted root */
38437- sys_chroot(".");
38438+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
38439+ sys_chroot((char __force_user *)".");
38440 complete(&setup_done);
38441 while (1) {
38442 spin_lock(&req_lock);
38443diff --git a/drivers/base/node.c b/drivers/base/node.c
38444index d51c49c..28908df 100644
38445--- a/drivers/base/node.c
38446+++ b/drivers/base/node.c
38447@@ -623,7 +623,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
38448 struct node_attr {
38449 struct device_attribute attr;
38450 enum node_states state;
38451-};
38452+} __do_const;
38453
38454 static ssize_t show_node_state(struct device *dev,
38455 struct device_attribute *attr, char *buf)
38456diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
38457index eee55c1..b8c9393 100644
38458--- a/drivers/base/power/domain.c
38459+++ b/drivers/base/power/domain.c
38460@@ -1821,9 +1821,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
38461
38462 if (dev->power.subsys_data->domain_data) {
38463 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
38464- gpd_data->ops = (struct gpd_dev_ops){ NULL };
38465+ memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
38466 if (clear_td)
38467- gpd_data->td = (struct gpd_timing_data){ 0 };
38468+ memset(&gpd_data->td, 0, sizeof(gpd_data->td));
38469
38470 if (--gpd_data->refcount == 0) {
38471 dev->power.subsys_data->domain_data = NULL;
38472@@ -1862,7 +1862,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
38473 {
38474 struct cpuidle_driver *cpuidle_drv;
38475 struct gpd_cpu_data *cpu_data;
38476- struct cpuidle_state *idle_state;
38477+ cpuidle_state_no_const *idle_state;
38478 int ret = 0;
38479
38480 if (IS_ERR_OR_NULL(genpd) || state < 0)
38481@@ -1930,7 +1930,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
38482 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
38483 {
38484 struct gpd_cpu_data *cpu_data;
38485- struct cpuidle_state *idle_state;
38486+ cpuidle_state_no_const *idle_state;
38487 int ret = 0;
38488
38489 if (IS_ERR_OR_NULL(genpd))
38490diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
38491index 95b181d1..c4f0e19 100644
38492--- a/drivers/base/power/sysfs.c
38493+++ b/drivers/base/power/sysfs.c
38494@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
38495 return -EIO;
38496 }
38497 }
38498- return sprintf(buf, p);
38499+ return sprintf(buf, "%s", p);
38500 }
38501
38502 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
38503diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
38504index eb1bd2e..2667d3a 100644
38505--- a/drivers/base/power/wakeup.c
38506+++ b/drivers/base/power/wakeup.c
38507@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
38508 * They need to be modified together atomically, so it's better to use one
38509 * atomic variable to hold them both.
38510 */
38511-static atomic_t combined_event_count = ATOMIC_INIT(0);
38512+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
38513
38514 #define IN_PROGRESS_BITS (sizeof(int) * 4)
38515 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
38516
38517 static void split_counters(unsigned int *cnt, unsigned int *inpr)
38518 {
38519- unsigned int comb = atomic_read(&combined_event_count);
38520+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
38521
38522 *cnt = (comb >> IN_PROGRESS_BITS);
38523 *inpr = comb & MAX_IN_PROGRESS;
38524@@ -401,7 +401,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
38525 ws->start_prevent_time = ws->last_time;
38526
38527 /* Increment the counter of events in progress. */
38528- cec = atomic_inc_return(&combined_event_count);
38529+ cec = atomic_inc_return_unchecked(&combined_event_count);
38530
38531 trace_wakeup_source_activate(ws->name, cec);
38532 }
38533@@ -527,7 +527,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
38534 * Increment the counter of registered wakeup events and decrement the
38535 * couter of wakeup events in progress simultaneously.
38536 */
38537- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
38538+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
38539 trace_wakeup_source_deactivate(ws->name, cec);
38540
38541 split_counters(&cnt, &inpr);
38542diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
38543index dbb8350..4762f4c 100644
38544--- a/drivers/base/syscore.c
38545+++ b/drivers/base/syscore.c
38546@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
38547 void register_syscore_ops(struct syscore_ops *ops)
38548 {
38549 mutex_lock(&syscore_ops_lock);
38550- list_add_tail(&ops->node, &syscore_ops_list);
38551+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
38552 mutex_unlock(&syscore_ops_lock);
38553 }
38554 EXPORT_SYMBOL_GPL(register_syscore_ops);
38555@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
38556 void unregister_syscore_ops(struct syscore_ops *ops)
38557 {
38558 mutex_lock(&syscore_ops_lock);
38559- list_del(&ops->node);
38560+ pax_list_del((struct list_head *)&ops->node);
38561 mutex_unlock(&syscore_ops_lock);
38562 }
38563 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
38564diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
38565index ff20f19..018f1da 100644
38566--- a/drivers/block/cciss.c
38567+++ b/drivers/block/cciss.c
38568@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
38569 while (!list_empty(&h->reqQ)) {
38570 c = list_entry(h->reqQ.next, CommandList_struct, list);
38571 /* can't do anything if fifo is full */
38572- if ((h->access.fifo_full(h))) {
38573+ if ((h->access->fifo_full(h))) {
38574 dev_warn(&h->pdev->dev, "fifo full\n");
38575 break;
38576 }
38577@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
38578 h->Qdepth--;
38579
38580 /* Tell the controller execute command */
38581- h->access.submit_command(h, c);
38582+ h->access->submit_command(h, c);
38583
38584 /* Put job onto the completed Q */
38585 addQ(&h->cmpQ, c);
38586@@ -3444,17 +3444,17 @@ startio:
38587
38588 static inline unsigned long get_next_completion(ctlr_info_t *h)
38589 {
38590- return h->access.command_completed(h);
38591+ return h->access->command_completed(h);
38592 }
38593
38594 static inline int interrupt_pending(ctlr_info_t *h)
38595 {
38596- return h->access.intr_pending(h);
38597+ return h->access->intr_pending(h);
38598 }
38599
38600 static inline long interrupt_not_for_us(ctlr_info_t *h)
38601 {
38602- return ((h->access.intr_pending(h) == 0) ||
38603+ return ((h->access->intr_pending(h) == 0) ||
38604 (h->interrupts_enabled == 0));
38605 }
38606
38607@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
38608 u32 a;
38609
38610 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
38611- return h->access.command_completed(h);
38612+ return h->access->command_completed(h);
38613
38614 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
38615 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
38616@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
38617 trans_support & CFGTBL_Trans_use_short_tags);
38618
38619 /* Change the access methods to the performant access methods */
38620- h->access = SA5_performant_access;
38621+ h->access = &SA5_performant_access;
38622 h->transMethod = CFGTBL_Trans_Performant;
38623
38624 return;
38625@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
38626 if (prod_index < 0)
38627 return -ENODEV;
38628 h->product_name = products[prod_index].product_name;
38629- h->access = *(products[prod_index].access);
38630+ h->access = products[prod_index].access;
38631
38632 if (cciss_board_disabled(h)) {
38633 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
38634@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
38635 }
38636
38637 /* make sure the board interrupts are off */
38638- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38639+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38640 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
38641 if (rc)
38642 goto clean2;
38643@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
38644 * fake ones to scoop up any residual completions.
38645 */
38646 spin_lock_irqsave(&h->lock, flags);
38647- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38648+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38649 spin_unlock_irqrestore(&h->lock, flags);
38650 free_irq(h->intr[h->intr_mode], h);
38651 rc = cciss_request_irq(h, cciss_msix_discard_completions,
38652@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
38653 dev_info(&h->pdev->dev, "Board READY.\n");
38654 dev_info(&h->pdev->dev,
38655 "Waiting for stale completions to drain.\n");
38656- h->access.set_intr_mask(h, CCISS_INTR_ON);
38657+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38658 msleep(10000);
38659- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38660+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38661
38662 rc = controller_reset_failed(h->cfgtable);
38663 if (rc)
38664@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
38665 cciss_scsi_setup(h);
38666
38667 /* Turn the interrupts on so we can service requests */
38668- h->access.set_intr_mask(h, CCISS_INTR_ON);
38669+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38670
38671 /* Get the firmware version */
38672 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
38673@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
38674 kfree(flush_buf);
38675 if (return_code != IO_OK)
38676 dev_warn(&h->pdev->dev, "Error flushing cache\n");
38677- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38678+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38679 free_irq(h->intr[h->intr_mode], h);
38680 }
38681
38682diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
38683index 7fda30e..2f27946 100644
38684--- a/drivers/block/cciss.h
38685+++ b/drivers/block/cciss.h
38686@@ -101,7 +101,7 @@ struct ctlr_info
38687 /* information about each logical volume */
38688 drive_info_struct *drv[CISS_MAX_LUN];
38689
38690- struct access_method access;
38691+ struct access_method *access;
38692
38693 /* queue and queue Info */
38694 struct list_head reqQ;
38695@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38696 }
38697
38698 static struct access_method SA5_access = {
38699- SA5_submit_command,
38700- SA5_intr_mask,
38701- SA5_fifo_full,
38702- SA5_intr_pending,
38703- SA5_completed,
38704+ .submit_command = SA5_submit_command,
38705+ .set_intr_mask = SA5_intr_mask,
38706+ .fifo_full = SA5_fifo_full,
38707+ .intr_pending = SA5_intr_pending,
38708+ .command_completed = SA5_completed,
38709 };
38710
38711 static struct access_method SA5B_access = {
38712- SA5_submit_command,
38713- SA5B_intr_mask,
38714- SA5_fifo_full,
38715- SA5B_intr_pending,
38716- SA5_completed,
38717+ .submit_command = SA5_submit_command,
38718+ .set_intr_mask = SA5B_intr_mask,
38719+ .fifo_full = SA5_fifo_full,
38720+ .intr_pending = SA5B_intr_pending,
38721+ .command_completed = SA5_completed,
38722 };
38723
38724 static struct access_method SA5_performant_access = {
38725- SA5_submit_command,
38726- SA5_performant_intr_mask,
38727- SA5_fifo_full,
38728- SA5_performant_intr_pending,
38729- SA5_performant_completed,
38730+ .submit_command = SA5_submit_command,
38731+ .set_intr_mask = SA5_performant_intr_mask,
38732+ .fifo_full = SA5_fifo_full,
38733+ .intr_pending = SA5_performant_intr_pending,
38734+ .command_completed = SA5_performant_completed,
38735 };
38736
38737 struct board_type {
38738diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38739index 2b94403..fd6ad1f 100644
38740--- a/drivers/block/cpqarray.c
38741+++ b/drivers/block/cpqarray.c
38742@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38743 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38744 goto Enomem4;
38745 }
38746- hba[i]->access.set_intr_mask(hba[i], 0);
38747+ hba[i]->access->set_intr_mask(hba[i], 0);
38748 if (request_irq(hba[i]->intr, do_ida_intr,
38749 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38750 {
38751@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38752 add_timer(&hba[i]->timer);
38753
38754 /* Enable IRQ now that spinlock and rate limit timer are set up */
38755- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38756+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38757
38758 for(j=0; j<NWD; j++) {
38759 struct gendisk *disk = ida_gendisk[i][j];
38760@@ -694,7 +694,7 @@ DBGINFO(
38761 for(i=0; i<NR_PRODUCTS; i++) {
38762 if (board_id == products[i].board_id) {
38763 c->product_name = products[i].product_name;
38764- c->access = *(products[i].access);
38765+ c->access = products[i].access;
38766 break;
38767 }
38768 }
38769@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38770 hba[ctlr]->intr = intr;
38771 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38772 hba[ctlr]->product_name = products[j].product_name;
38773- hba[ctlr]->access = *(products[j].access);
38774+ hba[ctlr]->access = products[j].access;
38775 hba[ctlr]->ctlr = ctlr;
38776 hba[ctlr]->board_id = board_id;
38777 hba[ctlr]->pci_dev = NULL; /* not PCI */
38778@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38779
38780 while((c = h->reqQ) != NULL) {
38781 /* Can't do anything if we're busy */
38782- if (h->access.fifo_full(h) == 0)
38783+ if (h->access->fifo_full(h) == 0)
38784 return;
38785
38786 /* Get the first entry from the request Q */
38787@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38788 h->Qdepth--;
38789
38790 /* Tell the controller to do our bidding */
38791- h->access.submit_command(h, c);
38792+ h->access->submit_command(h, c);
38793
38794 /* Get onto the completion Q */
38795 addQ(&h->cmpQ, c);
38796@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38797 unsigned long flags;
38798 __u32 a,a1;
38799
38800- istat = h->access.intr_pending(h);
38801+ istat = h->access->intr_pending(h);
38802 /* Is this interrupt for us? */
38803 if (istat == 0)
38804 return IRQ_NONE;
38805@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38806 */
38807 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38808 if (istat & FIFO_NOT_EMPTY) {
38809- while((a = h->access.command_completed(h))) {
38810+ while((a = h->access->command_completed(h))) {
38811 a1 = a; a &= ~3;
38812 if ((c = h->cmpQ) == NULL)
38813 {
38814@@ -1448,11 +1448,11 @@ static int sendcmd(
38815 /*
38816 * Disable interrupt
38817 */
38818- info_p->access.set_intr_mask(info_p, 0);
38819+ info_p->access->set_intr_mask(info_p, 0);
38820 /* Make sure there is room in the command FIFO */
38821 /* Actually it should be completely empty at this time. */
38822 for (i = 200000; i > 0; i--) {
38823- temp = info_p->access.fifo_full(info_p);
38824+ temp = info_p->access->fifo_full(info_p);
38825 if (temp != 0) {
38826 break;
38827 }
38828@@ -1465,7 +1465,7 @@ DBG(
38829 /*
38830 * Send the cmd
38831 */
38832- info_p->access.submit_command(info_p, c);
38833+ info_p->access->submit_command(info_p, c);
38834 complete = pollcomplete(ctlr);
38835
38836 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38837@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38838 * we check the new geometry. Then turn interrupts back on when
38839 * we're done.
38840 */
38841- host->access.set_intr_mask(host, 0);
38842+ host->access->set_intr_mask(host, 0);
38843 getgeometry(ctlr);
38844- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38845+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38846
38847 for(i=0; i<NWD; i++) {
38848 struct gendisk *disk = ida_gendisk[ctlr][i];
38849@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38850 /* Wait (up to 2 seconds) for a command to complete */
38851
38852 for (i = 200000; i > 0; i--) {
38853- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38854+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38855 if (done == 0) {
38856 udelay(10); /* a short fixed delay */
38857 } else
38858diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38859index be73e9d..7fbf140 100644
38860--- a/drivers/block/cpqarray.h
38861+++ b/drivers/block/cpqarray.h
38862@@ -99,7 +99,7 @@ struct ctlr_info {
38863 drv_info_t drv[NWD];
38864 struct proc_dir_entry *proc;
38865
38866- struct access_method access;
38867+ struct access_method *access;
38868
38869 cmdlist_t *reqQ;
38870 cmdlist_t *cmpQ;
38871diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38872index 426c97a..8c58607 100644
38873--- a/drivers/block/drbd/drbd_bitmap.c
38874+++ b/drivers/block/drbd/drbd_bitmap.c
38875@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38876 submit_bio(rw, bio);
38877 /* this should not count as user activity and cause the
38878 * resync to throttle -- see drbd_rs_should_slow_down(). */
38879- atomic_add(len >> 9, &device->rs_sect_ev);
38880+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38881 }
38882 }
38883
38884diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38885index 1a00001..c0d4253 100644
38886--- a/drivers/block/drbd/drbd_int.h
38887+++ b/drivers/block/drbd/drbd_int.h
38888@@ -387,7 +387,7 @@ struct drbd_epoch {
38889 struct drbd_connection *connection;
38890 struct list_head list;
38891 unsigned int barrier_nr;
38892- atomic_t epoch_size; /* increased on every request added. */
38893+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38894 atomic_t active; /* increased on every req. added, and dec on every finished. */
38895 unsigned long flags;
38896 };
38897@@ -948,7 +948,7 @@ struct drbd_device {
38898 unsigned int al_tr_number;
38899 int al_tr_cycle;
38900 wait_queue_head_t seq_wait;
38901- atomic_t packet_seq;
38902+ atomic_unchecked_t packet_seq;
38903 unsigned int peer_seq;
38904 spinlock_t peer_seq_lock;
38905 unsigned long comm_bm_set; /* communicated number of set bits. */
38906@@ -957,8 +957,8 @@ struct drbd_device {
38907 struct mutex own_state_mutex;
38908 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38909 char congestion_reason; /* Why we where congested... */
38910- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38911- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38912+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38913+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38914 int rs_last_sect_ev; /* counter to compare with */
38915 int rs_last_events; /* counter of read or write "events" (unit sectors)
38916 * on the lower level device when we last looked. */
38917@@ -1569,7 +1569,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
38918 char __user *uoptval;
38919 int err;
38920
38921- uoptval = (char __user __force *)optval;
38922+ uoptval = (char __force_user *)optval;
38923
38924 set_fs(KERNEL_DS);
38925 if (level == SOL_SOCKET)
38926diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
38927index 04a14e0..5b8f0aa 100644
38928--- a/drivers/block/drbd/drbd_interval.c
38929+++ b/drivers/block/drbd/drbd_interval.c
38930@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
38931 }
38932
38933 static const struct rb_augment_callbacks augment_callbacks = {
38934- augment_propagate,
38935- augment_copy,
38936- augment_rotate,
38937+ .propagate = augment_propagate,
38938+ .copy = augment_copy,
38939+ .rotate = augment_rotate,
38940 };
38941
38942 /**
38943diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38944index 9b465bb..00034ecf 100644
38945--- a/drivers/block/drbd/drbd_main.c
38946+++ b/drivers/block/drbd/drbd_main.c
38947@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38948 p->sector = sector;
38949 p->block_id = block_id;
38950 p->blksize = blksize;
38951- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38952+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38953 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38954 }
38955
38956@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38957 return -EIO;
38958 p->sector = cpu_to_be64(req->i.sector);
38959 p->block_id = (unsigned long)req;
38960- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38961+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38962 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38963 if (device->state.conn >= C_SYNC_SOURCE &&
38964 device->state.conn <= C_PAUSED_SYNC_T)
38965@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38966 atomic_set(&device->unacked_cnt, 0);
38967 atomic_set(&device->local_cnt, 0);
38968 atomic_set(&device->pp_in_use_by_net, 0);
38969- atomic_set(&device->rs_sect_in, 0);
38970- atomic_set(&device->rs_sect_ev, 0);
38971+ atomic_set_unchecked(&device->rs_sect_in, 0);
38972+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38973 atomic_set(&device->ap_in_flight, 0);
38974 atomic_set(&device->md_io.in_use, 0);
38975
38976@@ -2688,8 +2688,8 @@ void drbd_destroy_connection(struct kref *kref)
38977 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38978 struct drbd_resource *resource = connection->resource;
38979
38980- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38981- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38982+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38983+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38984 kfree(connection->current_epoch);
38985
38986 idr_destroy(&connection->peer_devices);
38987diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38988index 1cd47df..57c53c0 100644
38989--- a/drivers/block/drbd/drbd_nl.c
38990+++ b/drivers/block/drbd/drbd_nl.c
38991@@ -3645,13 +3645,13 @@ finish:
38992
38993 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
38994 {
38995- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38996+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
38997 struct sk_buff *msg;
38998 struct drbd_genlmsghdr *d_out;
38999 unsigned seq;
39000 int err = -ENOMEM;
39001
39002- seq = atomic_inc_return(&drbd_genl_seq);
39003+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
39004 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
39005 if (!msg)
39006 goto failed;
39007diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
39008index 9342b8d..b6a6825 100644
39009--- a/drivers/block/drbd/drbd_receiver.c
39010+++ b/drivers/block/drbd/drbd_receiver.c
39011@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
39012 struct drbd_device *device = peer_device->device;
39013 int err;
39014
39015- atomic_set(&device->packet_seq, 0);
39016+ atomic_set_unchecked(&device->packet_seq, 0);
39017 device->peer_seq = 0;
39018
39019 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
39020@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39021 do {
39022 next_epoch = NULL;
39023
39024- epoch_size = atomic_read(&epoch->epoch_size);
39025+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
39026
39027 switch (ev & ~EV_CLEANUP) {
39028 case EV_PUT:
39029@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39030 rv = FE_DESTROYED;
39031 } else {
39032 epoch->flags = 0;
39033- atomic_set(&epoch->epoch_size, 0);
39034+ atomic_set_unchecked(&epoch->epoch_size, 0);
39035 /* atomic_set(&epoch->active, 0); is already zero */
39036 if (rv == FE_STILL_LIVE)
39037 rv = FE_RECYCLED;
39038@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39039 conn_wait_active_ee_empty(connection);
39040 drbd_flush(connection);
39041
39042- if (atomic_read(&connection->current_epoch->epoch_size)) {
39043+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39044 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
39045 if (epoch)
39046 break;
39047@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39048 }
39049
39050 epoch->flags = 0;
39051- atomic_set(&epoch->epoch_size, 0);
39052+ atomic_set_unchecked(&epoch->epoch_size, 0);
39053 atomic_set(&epoch->active, 0);
39054
39055 spin_lock(&connection->epoch_lock);
39056- if (atomic_read(&connection->current_epoch->epoch_size)) {
39057+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39058 list_add(&epoch->list, &connection->current_epoch->list);
39059 connection->current_epoch = epoch;
39060 connection->epochs++;
39061@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
39062 list_add_tail(&peer_req->w.list, &device->sync_ee);
39063 spin_unlock_irq(&device->resource->req_lock);
39064
39065- atomic_add(pi->size >> 9, &device->rs_sect_ev);
39066+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
39067 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
39068 return 0;
39069
39070@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
39071 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
39072 }
39073
39074- atomic_add(pi->size >> 9, &device->rs_sect_in);
39075+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
39076
39077 return err;
39078 }
39079@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39080
39081 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
39082 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
39083- atomic_inc(&connection->current_epoch->epoch_size);
39084+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
39085 err2 = drbd_drain_block(peer_device, pi->size);
39086 if (!err)
39087 err = err2;
39088@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39089
39090 spin_lock(&connection->epoch_lock);
39091 peer_req->epoch = connection->current_epoch;
39092- atomic_inc(&peer_req->epoch->epoch_size);
39093+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
39094 atomic_inc(&peer_req->epoch->active);
39095 spin_unlock(&connection->epoch_lock);
39096
39097@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
39098
39099 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
39100 (int)part_stat_read(&disk->part0, sectors[1]) -
39101- atomic_read(&device->rs_sect_ev);
39102+ atomic_read_unchecked(&device->rs_sect_ev);
39103
39104 if (atomic_read(&device->ap_actlog_cnt)
39105 || !device->rs_last_events || curr_events - device->rs_last_events > 64) {
39106@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
39107 device->use_csums = true;
39108 } else if (pi->cmd == P_OV_REPLY) {
39109 /* track progress, we may need to throttle */
39110- atomic_add(size >> 9, &device->rs_sect_in);
39111+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
39112 peer_req->w.cb = w_e_end_ov_reply;
39113 dec_rs_pending(device);
39114 /* drbd_rs_begin_io done when we sent this request,
39115@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
39116 goto out_free_e;
39117
39118 submit_for_resync:
39119- atomic_add(size >> 9, &device->rs_sect_ev);
39120+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
39121
39122 submit:
39123 update_receiver_timing_details(connection, drbd_submit_peer_request);
39124@@ -4564,7 +4564,7 @@ struct data_cmd {
39125 int expect_payload;
39126 size_t pkt_size;
39127 int (*fn)(struct drbd_connection *, struct packet_info *);
39128-};
39129+} __do_const;
39130
39131 static struct data_cmd drbd_cmd_handler[] = {
39132 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
39133@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
39134 if (!list_empty(&connection->current_epoch->list))
39135 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
39136 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
39137- atomic_set(&connection->current_epoch->epoch_size, 0);
39138+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
39139 connection->send.seen_any_write_yet = false;
39140
39141 drbd_info(connection, "Connection closed\n");
39142@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
39143 put_ldev(device);
39144 }
39145 dec_rs_pending(device);
39146- atomic_add(blksize >> 9, &device->rs_sect_in);
39147+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
39148
39149 return 0;
39150 }
39151@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
39152 struct asender_cmd {
39153 size_t pkt_size;
39154 int (*fn)(struct drbd_connection *connection, struct packet_info *);
39155-};
39156+} __do_const;
39157
39158 static struct asender_cmd asender_tbl[] = {
39159 [P_PING] = { 0, got_Ping },
39160diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
39161index 50776b3..1477c3f 100644
39162--- a/drivers/block/drbd/drbd_worker.c
39163+++ b/drivers/block/drbd/drbd_worker.c
39164@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
39165 list_add_tail(&peer_req->w.list, &device->read_ee);
39166 spin_unlock_irq(&device->resource->req_lock);
39167
39168- atomic_add(size >> 9, &device->rs_sect_ev);
39169+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
39170 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
39171 return 0;
39172
39173@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
39174 unsigned int sect_in; /* Number of sectors that came in since the last turn */
39175 int number, mxb;
39176
39177- sect_in = atomic_xchg(&device->rs_sect_in, 0);
39178+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
39179 device->rs_in_flight -= sect_in;
39180
39181 rcu_read_lock();
39182@@ -1594,8 +1594,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
39183 {
39184 struct fifo_buffer *plan;
39185
39186- atomic_set(&device->rs_sect_in, 0);
39187- atomic_set(&device->rs_sect_ev, 0);
39188+ atomic_set_unchecked(&device->rs_sect_in, 0);
39189+ atomic_set_unchecked(&device->rs_sect_ev, 0);
39190 device->rs_in_flight = 0;
39191
39192 /* Updating the RCU protected object in place is necessary since
39193diff --git a/drivers/block/loop.c b/drivers/block/loop.c
39194index 6cb1beb..bf490f7 100644
39195--- a/drivers/block/loop.c
39196+++ b/drivers/block/loop.c
39197@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
39198
39199 file_start_write(file);
39200 set_fs(get_ds());
39201- bw = file->f_op->write(file, buf, len, &pos);
39202+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
39203 set_fs(old_fs);
39204 file_end_write(file);
39205 if (likely(bw == len))
39206diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
39207index 02351e2..a9ea617 100644
39208--- a/drivers/block/nvme-core.c
39209+++ b/drivers/block/nvme-core.c
39210@@ -73,7 +73,6 @@ static LIST_HEAD(dev_list);
39211 static struct task_struct *nvme_thread;
39212 static struct workqueue_struct *nvme_workq;
39213 static wait_queue_head_t nvme_kthread_wait;
39214-static struct notifier_block nvme_nb;
39215
39216 static void nvme_reset_failed_dev(struct work_struct *ws);
39217
39218@@ -2925,6 +2924,10 @@ static struct pci_driver nvme_driver = {
39219 .err_handler = &nvme_err_handler,
39220 };
39221
39222+static struct notifier_block nvme_nb = {
39223+ .notifier_call = &nvme_cpu_notify,
39224+};
39225+
39226 static int __init nvme_init(void)
39227 {
39228 int result;
39229@@ -2941,7 +2944,6 @@ static int __init nvme_init(void)
39230 else if (result > 0)
39231 nvme_major = result;
39232
39233- nvme_nb.notifier_call = &nvme_cpu_notify;
39234 result = register_hotcpu_notifier(&nvme_nb);
39235 if (result)
39236 goto unregister_blkdev;
39237diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
39238index 758ac44..58087fd 100644
39239--- a/drivers/block/pktcdvd.c
39240+++ b/drivers/block/pktcdvd.c
39241@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
39242
39243 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
39244 {
39245- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
39246+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
39247 }
39248
39249 /*
39250@@ -1888,7 +1888,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
39251 return -EROFS;
39252 }
39253 pd->settings.fp = ti.fp;
39254- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
39255+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
39256
39257 if (ti.nwa_v) {
39258 pd->nwa = be32_to_cpu(ti.next_writable);
39259diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
39260index e5565fb..71be10b4 100644
39261--- a/drivers/block/smart1,2.h
39262+++ b/drivers/block/smart1,2.h
39263@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
39264 }
39265
39266 static struct access_method smart4_access = {
39267- smart4_submit_command,
39268- smart4_intr_mask,
39269- smart4_fifo_full,
39270- smart4_intr_pending,
39271- smart4_completed,
39272+ .submit_command = smart4_submit_command,
39273+ .set_intr_mask = smart4_intr_mask,
39274+ .fifo_full = smart4_fifo_full,
39275+ .intr_pending = smart4_intr_pending,
39276+ .command_completed = smart4_completed,
39277 };
39278
39279 /*
39280@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
39281 }
39282
39283 static struct access_method smart2_access = {
39284- smart2_submit_command,
39285- smart2_intr_mask,
39286- smart2_fifo_full,
39287- smart2_intr_pending,
39288- smart2_completed,
39289+ .submit_command = smart2_submit_command,
39290+ .set_intr_mask = smart2_intr_mask,
39291+ .fifo_full = smart2_fifo_full,
39292+ .intr_pending = smart2_intr_pending,
39293+ .command_completed = smart2_completed,
39294 };
39295
39296 /*
39297@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
39298 }
39299
39300 static struct access_method smart2e_access = {
39301- smart2e_submit_command,
39302- smart2e_intr_mask,
39303- smart2e_fifo_full,
39304- smart2e_intr_pending,
39305- smart2e_completed,
39306+ .submit_command = smart2e_submit_command,
39307+ .set_intr_mask = smart2e_intr_mask,
39308+ .fifo_full = smart2e_fifo_full,
39309+ .intr_pending = smart2e_intr_pending,
39310+ .command_completed = smart2e_completed,
39311 };
39312
39313 /*
39314@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
39315 }
39316
39317 static struct access_method smart1_access = {
39318- smart1_submit_command,
39319- smart1_intr_mask,
39320- smart1_fifo_full,
39321- smart1_intr_pending,
39322- smart1_completed,
39323+ .submit_command = smart1_submit_command,
39324+ .set_intr_mask = smart1_intr_mask,
39325+ .fifo_full = smart1_fifo_full,
39326+ .intr_pending = smart1_intr_pending,
39327+ .command_completed = smart1_completed,
39328 };
39329diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
39330index f038dba..bb74c08 100644
39331--- a/drivers/bluetooth/btwilink.c
39332+++ b/drivers/bluetooth/btwilink.c
39333@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
39334
39335 static int bt_ti_probe(struct platform_device *pdev)
39336 {
39337- static struct ti_st *hst;
39338+ struct ti_st *hst;
39339 struct hci_dev *hdev;
39340 int err;
39341
39342diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
39343index 898b84b..86f74b9 100644
39344--- a/drivers/cdrom/cdrom.c
39345+++ b/drivers/cdrom/cdrom.c
39346@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
39347 ENSURE(reset, CDC_RESET);
39348 ENSURE(generic_packet, CDC_GENERIC_PACKET);
39349 cdi->mc_flags = 0;
39350- cdo->n_minors = 0;
39351 cdi->options = CDO_USE_FFLAGS;
39352
39353 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
39354@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
39355 else
39356 cdi->cdda_method = CDDA_OLD;
39357
39358- if (!cdo->generic_packet)
39359- cdo->generic_packet = cdrom_dummy_generic_packet;
39360+ if (!cdo->generic_packet) {
39361+ pax_open_kernel();
39362+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
39363+ pax_close_kernel();
39364+ }
39365
39366 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
39367 mutex_lock(&cdrom_mutex);
39368@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
39369 if (cdi->exit)
39370 cdi->exit(cdi);
39371
39372- cdi->ops->n_minors--;
39373 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
39374 }
39375
39376@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
39377 */
39378 nr = nframes;
39379 do {
39380- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
39381+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
39382 if (cgc.buffer)
39383 break;
39384
39385@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
39386 struct cdrom_device_info *cdi;
39387 int ret;
39388
39389- ret = scnprintf(info + *pos, max_size - *pos, header);
39390+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
39391 if (!ret)
39392 return 1;
39393
39394diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
39395index 584bc31..e64a12c 100644
39396--- a/drivers/cdrom/gdrom.c
39397+++ b/drivers/cdrom/gdrom.c
39398@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
39399 .audio_ioctl = gdrom_audio_ioctl,
39400 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
39401 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
39402- .n_minors = 1,
39403 };
39404
39405 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
39406diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
39407index 6e9f74a..50c7cea 100644
39408--- a/drivers/char/Kconfig
39409+++ b/drivers/char/Kconfig
39410@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
39411
39412 config DEVKMEM
39413 bool "/dev/kmem virtual device support"
39414- default y
39415+ default n
39416+ depends on !GRKERNSEC_KMEM
39417 help
39418 Say Y here if you want to support the /dev/kmem device. The
39419 /dev/kmem device is rarely used, but can be used for certain
39420@@ -577,6 +578,7 @@ config DEVPORT
39421 bool
39422 depends on !M68K
39423 depends on ISA || PCI
39424+ depends on !GRKERNSEC_KMEM
39425 default y
39426
39427 source "drivers/s390/char/Kconfig"
39428diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
39429index a48e05b..6bac831 100644
39430--- a/drivers/char/agp/compat_ioctl.c
39431+++ b/drivers/char/agp/compat_ioctl.c
39432@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
39433 return -ENOMEM;
39434 }
39435
39436- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
39437+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
39438 sizeof(*usegment) * ureserve.seg_count)) {
39439 kfree(usegment);
39440 kfree(ksegment);
39441diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
39442index 09f17eb..8531d2f 100644
39443--- a/drivers/char/agp/frontend.c
39444+++ b/drivers/char/agp/frontend.c
39445@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
39446 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
39447 return -EFAULT;
39448
39449- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
39450+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
39451 return -EFAULT;
39452
39453 client = agp_find_client_by_pid(reserve.pid);
39454@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
39455 if (segment == NULL)
39456 return -ENOMEM;
39457
39458- if (copy_from_user(segment, (void __user *) reserve.seg_list,
39459+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
39460 sizeof(struct agp_segment) * reserve.seg_count)) {
39461 kfree(segment);
39462 return -EFAULT;
39463diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
39464index 4f94375..413694e 100644
39465--- a/drivers/char/genrtc.c
39466+++ b/drivers/char/genrtc.c
39467@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
39468 switch (cmd) {
39469
39470 case RTC_PLL_GET:
39471+ memset(&pll, 0, sizeof(pll));
39472 if (get_rtc_pll(&pll))
39473 return -EINVAL;
39474 else
39475diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
39476index d5d4cd8..22d561d 100644
39477--- a/drivers/char/hpet.c
39478+++ b/drivers/char/hpet.c
39479@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
39480 }
39481
39482 static int
39483-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
39484+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
39485 struct hpet_info *info)
39486 {
39487 struct hpet_timer __iomem *timer;
39488diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
39489index 86fe45c..c0ea948 100644
39490--- a/drivers/char/hw_random/intel-rng.c
39491+++ b/drivers/char/hw_random/intel-rng.c
39492@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
39493
39494 if (no_fwh_detect)
39495 return -ENODEV;
39496- printk(warning);
39497+ printk("%s", warning);
39498 return -EBUSY;
39499 }
39500
39501diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
39502index e6db938..835e3a2 100644
39503--- a/drivers/char/ipmi/ipmi_msghandler.c
39504+++ b/drivers/char/ipmi/ipmi_msghandler.c
39505@@ -438,7 +438,7 @@ struct ipmi_smi {
39506 struct proc_dir_entry *proc_dir;
39507 char proc_dir_name[10];
39508
39509- atomic_t stats[IPMI_NUM_STATS];
39510+ atomic_unchecked_t stats[IPMI_NUM_STATS];
39511
39512 /*
39513 * run_to_completion duplicate of smb_info, smi_info
39514@@ -470,9 +470,9 @@ static LIST_HEAD(smi_watchers);
39515 static DEFINE_MUTEX(smi_watchers_mutex);
39516
39517 #define ipmi_inc_stat(intf, stat) \
39518- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
39519+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
39520 #define ipmi_get_stat(intf, stat) \
39521- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
39522+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
39523
39524 static int is_lan_addr(struct ipmi_addr *addr)
39525 {
39526@@ -2926,7 +2926,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
39527 INIT_LIST_HEAD(&intf->cmd_rcvrs);
39528 init_waitqueue_head(&intf->waitq);
39529 for (i = 0; i < IPMI_NUM_STATS; i++)
39530- atomic_set(&intf->stats[i], 0);
39531+ atomic_set_unchecked(&intf->stats[i], 0);
39532
39533 intf->proc_dir = NULL;
39534
39535diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
39536index 5d66568..c9d93c3 100644
39537--- a/drivers/char/ipmi/ipmi_si_intf.c
39538+++ b/drivers/char/ipmi/ipmi_si_intf.c
39539@@ -285,7 +285,7 @@ struct smi_info {
39540 unsigned char slave_addr;
39541
39542 /* Counters and things for the proc filesystem. */
39543- atomic_t stats[SI_NUM_STATS];
39544+ atomic_unchecked_t stats[SI_NUM_STATS];
39545
39546 struct task_struct *thread;
39547
39548@@ -294,9 +294,9 @@ struct smi_info {
39549 };
39550
39551 #define smi_inc_stat(smi, stat) \
39552- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
39553+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
39554 #define smi_get_stat(smi, stat) \
39555- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
39556+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
39557
39558 #define SI_MAX_PARMS 4
39559
39560@@ -3374,7 +3374,7 @@ static int try_smi_init(struct smi_info *new_smi)
39561 atomic_set(&new_smi->req_events, 0);
39562 new_smi->run_to_completion = false;
39563 for (i = 0; i < SI_NUM_STATS; i++)
39564- atomic_set(&new_smi->stats[i], 0);
39565+ atomic_set_unchecked(&new_smi->stats[i], 0);
39566
39567 new_smi->interrupt_disabled = true;
39568 atomic_set(&new_smi->stop_operation, 0);
39569diff --git a/drivers/char/mem.c b/drivers/char/mem.c
39570index 917403f..dddd899 100644
39571--- a/drivers/char/mem.c
39572+++ b/drivers/char/mem.c
39573@@ -18,6 +18,7 @@
39574 #include <linux/raw.h>
39575 #include <linux/tty.h>
39576 #include <linux/capability.h>
39577+#include <linux/security.h>
39578 #include <linux/ptrace.h>
39579 #include <linux/device.h>
39580 #include <linux/highmem.h>
39581@@ -36,6 +37,10 @@
39582
39583 #define DEVPORT_MINOR 4
39584
39585+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39586+extern const struct file_operations grsec_fops;
39587+#endif
39588+
39589 static inline unsigned long size_inside_page(unsigned long start,
39590 unsigned long size)
39591 {
39592@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39593
39594 while (cursor < to) {
39595 if (!devmem_is_allowed(pfn)) {
39596+#ifdef CONFIG_GRKERNSEC_KMEM
39597+ gr_handle_mem_readwrite(from, to);
39598+#else
39599 printk(KERN_INFO
39600 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
39601 current->comm, from, to);
39602+#endif
39603 return 0;
39604 }
39605 cursor += PAGE_SIZE;
39606@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39607 }
39608 return 1;
39609 }
39610+#elif defined(CONFIG_GRKERNSEC_KMEM)
39611+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39612+{
39613+ return 0;
39614+}
39615 #else
39616 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39617 {
39618@@ -122,6 +136,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
39619
39620 while (count > 0) {
39621 unsigned long remaining;
39622+ char *temp;
39623
39624 sz = size_inside_page(p, count);
39625
39626@@ -137,7 +152,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
39627 if (!ptr)
39628 return -EFAULT;
39629
39630- remaining = copy_to_user(buf, ptr, sz);
39631+#ifdef CONFIG_PAX_USERCOPY
39632+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39633+ if (!temp) {
39634+ unxlate_dev_mem_ptr(p, ptr);
39635+ return -ENOMEM;
39636+ }
39637+ memcpy(temp, ptr, sz);
39638+#else
39639+ temp = ptr;
39640+#endif
39641+
39642+ remaining = copy_to_user(buf, temp, sz);
39643+
39644+#ifdef CONFIG_PAX_USERCOPY
39645+ kfree(temp);
39646+#endif
39647+
39648 unxlate_dev_mem_ptr(p, ptr);
39649 if (remaining)
39650 return -EFAULT;
39651@@ -369,9 +400,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39652 size_t count, loff_t *ppos)
39653 {
39654 unsigned long p = *ppos;
39655- ssize_t low_count, read, sz;
39656+ ssize_t low_count, read, sz, err = 0;
39657 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
39658- int err = 0;
39659
39660 read = 0;
39661 if (p < (unsigned long) high_memory) {
39662@@ -393,6 +423,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39663 }
39664 #endif
39665 while (low_count > 0) {
39666+ char *temp;
39667+
39668 sz = size_inside_page(p, low_count);
39669
39670 /*
39671@@ -402,7 +434,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39672 */
39673 kbuf = xlate_dev_kmem_ptr((char *)p);
39674
39675- if (copy_to_user(buf, kbuf, sz))
39676+#ifdef CONFIG_PAX_USERCOPY
39677+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39678+ if (!temp)
39679+ return -ENOMEM;
39680+ memcpy(temp, kbuf, sz);
39681+#else
39682+ temp = kbuf;
39683+#endif
39684+
39685+ err = copy_to_user(buf, temp, sz);
39686+
39687+#ifdef CONFIG_PAX_USERCOPY
39688+ kfree(temp);
39689+#endif
39690+
39691+ if (err)
39692 return -EFAULT;
39693 buf += sz;
39694 p += sz;
39695@@ -827,6 +874,9 @@ static const struct memdev {
39696 #ifdef CONFIG_PRINTK
39697 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
39698 #endif
39699+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39700+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
39701+#endif
39702 };
39703
39704 static int memory_open(struct inode *inode, struct file *filp)
39705@@ -898,7 +948,7 @@ static int __init chr_dev_init(void)
39706 continue;
39707
39708 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
39709- NULL, devlist[minor].name);
39710+ NULL, "%s", devlist[minor].name);
39711 }
39712
39713 return tty_init();
39714diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
39715index 9df78e2..01ba9ae 100644
39716--- a/drivers/char/nvram.c
39717+++ b/drivers/char/nvram.c
39718@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
39719
39720 spin_unlock_irq(&rtc_lock);
39721
39722- if (copy_to_user(buf, contents, tmp - contents))
39723+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
39724 return -EFAULT;
39725
39726 *ppos = i;
39727diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39728index 0ea9986..e7b07e4 100644
39729--- a/drivers/char/pcmcia/synclink_cs.c
39730+++ b/drivers/char/pcmcia/synclink_cs.c
39731@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39732
39733 if (debug_level >= DEBUG_LEVEL_INFO)
39734 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39735- __FILE__, __LINE__, info->device_name, port->count);
39736+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39737
39738 if (tty_port_close_start(port, tty, filp) == 0)
39739 goto cleanup;
39740@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39741 cleanup:
39742 if (debug_level >= DEBUG_LEVEL_INFO)
39743 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39744- tty->driver->name, port->count);
39745+ tty->driver->name, atomic_read(&port->count));
39746 }
39747
39748 /* Wait until the transmitter is empty.
39749@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39750
39751 if (debug_level >= DEBUG_LEVEL_INFO)
39752 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39753- __FILE__, __LINE__, tty->driver->name, port->count);
39754+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39755
39756 /* If port is closing, signal caller to try again */
39757 if (port->flags & ASYNC_CLOSING){
39758@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39759 goto cleanup;
39760 }
39761 spin_lock(&port->lock);
39762- port->count++;
39763+ atomic_inc(&port->count);
39764 spin_unlock(&port->lock);
39765 spin_unlock_irqrestore(&info->netlock, flags);
39766
39767- if (port->count == 1) {
39768+ if (atomic_read(&port->count) == 1) {
39769 /* 1st open on this device, init hardware */
39770 retval = startup(info, tty);
39771 if (retval < 0)
39772@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39773 unsigned short new_crctype;
39774
39775 /* return error if TTY interface open */
39776- if (info->port.count)
39777+ if (atomic_read(&info->port.count))
39778 return -EBUSY;
39779
39780 switch (encoding)
39781@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39782
39783 /* arbitrate between network and tty opens */
39784 spin_lock_irqsave(&info->netlock, flags);
39785- if (info->port.count != 0 || info->netcount != 0) {
39786+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39787 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39788 spin_unlock_irqrestore(&info->netlock, flags);
39789 return -EBUSY;
39790@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39791 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39792
39793 /* return error if TTY interface open */
39794- if (info->port.count)
39795+ if (atomic_read(&info->port.count))
39796 return -EBUSY;
39797
39798 if (cmd != SIOCWANDEV)
39799diff --git a/drivers/char/random.c b/drivers/char/random.c
39800index 8c86a95..7c499f3 100644
39801--- a/drivers/char/random.c
39802+++ b/drivers/char/random.c
39803@@ -289,9 +289,6 @@
39804 /*
39805 * To allow fractional bits to be tracked, the entropy_count field is
39806 * denominated in units of 1/8th bits.
39807- *
39808- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39809- * credit_entropy_bits() needs to be 64 bits wide.
39810 */
39811 #define ENTROPY_SHIFT 3
39812 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39813@@ -439,9 +436,9 @@ struct entropy_store {
39814 };
39815
39816 static void push_to_pool(struct work_struct *work);
39817-static __u32 input_pool_data[INPUT_POOL_WORDS];
39818-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39819-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39820+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39821+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39822+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39823
39824 static struct entropy_store input_pool = {
39825 .poolinfo = &poolinfo_table[0],
39826@@ -635,7 +632,7 @@ retry:
39827 /* The +2 corresponds to the /4 in the denominator */
39828
39829 do {
39830- unsigned int anfrac = min(pnfrac, pool_size/2);
39831+ u64 anfrac = min(pnfrac, pool_size/2);
39832 unsigned int add =
39833 ((pool_size - entropy_count)*anfrac*3) >> s;
39834
39835@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39836
39837 extract_buf(r, tmp);
39838 i = min_t(int, nbytes, EXTRACT_SIZE);
39839- if (copy_to_user(buf, tmp, i)) {
39840+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39841 ret = -EFAULT;
39842 break;
39843 }
39844@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39845 static int proc_do_uuid(struct ctl_table *table, int write,
39846 void __user *buffer, size_t *lenp, loff_t *ppos)
39847 {
39848- struct ctl_table fake_table;
39849+ ctl_table_no_const fake_table;
39850 unsigned char buf[64], tmp_uuid[16], *uuid;
39851
39852 uuid = table->data;
39853@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39854 static int proc_do_entropy(struct ctl_table *table, int write,
39855 void __user *buffer, size_t *lenp, loff_t *ppos)
39856 {
39857- struct ctl_table fake_table;
39858+ ctl_table_no_const fake_table;
39859 int entropy_count;
39860
39861 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
39862diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39863index 7cc1fe22..b602d6b 100644
39864--- a/drivers/char/sonypi.c
39865+++ b/drivers/char/sonypi.c
39866@@ -54,6 +54,7 @@
39867
39868 #include <asm/uaccess.h>
39869 #include <asm/io.h>
39870+#include <asm/local.h>
39871
39872 #include <linux/sonypi.h>
39873
39874@@ -490,7 +491,7 @@ static struct sonypi_device {
39875 spinlock_t fifo_lock;
39876 wait_queue_head_t fifo_proc_list;
39877 struct fasync_struct *fifo_async;
39878- int open_count;
39879+ local_t open_count;
39880 int model;
39881 struct input_dev *input_jog_dev;
39882 struct input_dev *input_key_dev;
39883@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39884 static int sonypi_misc_release(struct inode *inode, struct file *file)
39885 {
39886 mutex_lock(&sonypi_device.lock);
39887- sonypi_device.open_count--;
39888+ local_dec(&sonypi_device.open_count);
39889 mutex_unlock(&sonypi_device.lock);
39890 return 0;
39891 }
39892@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39893 {
39894 mutex_lock(&sonypi_device.lock);
39895 /* Flush input queue on first open */
39896- if (!sonypi_device.open_count)
39897+ if (!local_read(&sonypi_device.open_count))
39898 kfifo_reset(&sonypi_device.fifo);
39899- sonypi_device.open_count++;
39900+ local_inc(&sonypi_device.open_count);
39901 mutex_unlock(&sonypi_device.lock);
39902
39903 return 0;
39904diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39905index 565a947..dcdc06e 100644
39906--- a/drivers/char/tpm/tpm_acpi.c
39907+++ b/drivers/char/tpm/tpm_acpi.c
39908@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39909 virt = acpi_os_map_iomem(start, len);
39910 if (!virt) {
39911 kfree(log->bios_event_log);
39912+ log->bios_event_log = NULL;
39913 printk("%s: ERROR - Unable to map memory\n", __func__);
39914 return -EIO;
39915 }
39916
39917- memcpy_fromio(log->bios_event_log, virt, len);
39918+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39919
39920 acpi_os_unmap_iomem(virt, len);
39921 return 0;
39922diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39923index 3a56a13..f8cbd25 100644
39924--- a/drivers/char/tpm/tpm_eventlog.c
39925+++ b/drivers/char/tpm/tpm_eventlog.c
39926@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39927 event = addr;
39928
39929 if ((event->event_type == 0 && event->event_size == 0) ||
39930- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39931+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39932 return NULL;
39933
39934 return addr;
39935@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39936 return NULL;
39937
39938 if ((event->event_type == 0 && event->event_size == 0) ||
39939- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39940+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39941 return NULL;
39942
39943 (*pos)++;
39944@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39945 int i;
39946
39947 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39948- seq_putc(m, data[i]);
39949+ if (!seq_putc(m, data[i]))
39950+ return -EFAULT;
39951
39952 return 0;
39953 }
39954diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39955index b585b47..488f43e 100644
39956--- a/drivers/char/virtio_console.c
39957+++ b/drivers/char/virtio_console.c
39958@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
39959 if (to_user) {
39960 ssize_t ret;
39961
39962- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39963+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39964 if (ret)
39965 return -EFAULT;
39966 } else {
39967@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39968 if (!port_has_data(port) && !port->host_connected)
39969 return 0;
39970
39971- return fill_readbuf(port, ubuf, count, true);
39972+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39973 }
39974
39975 static int wait_port_writable(struct port *port, bool nonblock)
39976diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39977index b9355da..9611f4e 100644
39978--- a/drivers/clk/clk-composite.c
39979+++ b/drivers/clk/clk-composite.c
39980@@ -191,7 +191,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39981 struct clk *clk;
39982 struct clk_init_data init;
39983 struct clk_composite *composite;
39984- struct clk_ops *clk_composite_ops;
39985+ clk_ops_no_const *clk_composite_ops;
39986
39987 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39988 if (!composite) {
39989diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39990index dd3a78c..386d49c 100644
39991--- a/drivers/clk/socfpga/clk-gate.c
39992+++ b/drivers/clk/socfpga/clk-gate.c
39993@@ -22,6 +22,7 @@
39994 #include <linux/mfd/syscon.h>
39995 #include <linux/of.h>
39996 #include <linux/regmap.h>
39997+#include <asm/pgtable.h>
39998
39999 #include "clk.h"
40000
40001@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
40002 return 0;
40003 }
40004
40005-static struct clk_ops gateclk_ops = {
40006+static clk_ops_no_const gateclk_ops __read_only = {
40007 .prepare = socfpga_clk_prepare,
40008 .recalc_rate = socfpga_clk_recalc_rate,
40009 .get_parent = socfpga_clk_get_parent,
40010@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
40011 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
40012 socfpga_clk->hw.bit_idx = clk_gate[1];
40013
40014- gateclk_ops.enable = clk_gate_ops.enable;
40015- gateclk_ops.disable = clk_gate_ops.disable;
40016+ pax_open_kernel();
40017+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
40018+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
40019+ pax_close_kernel();
40020 }
40021
40022 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
40023diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
40024index de6da95..c98278b 100644
40025--- a/drivers/clk/socfpga/clk-pll.c
40026+++ b/drivers/clk/socfpga/clk-pll.c
40027@@ -21,6 +21,7 @@
40028 #include <linux/io.h>
40029 #include <linux/of.h>
40030 #include <linux/of_address.h>
40031+#include <asm/pgtable.h>
40032
40033 #include "clk.h"
40034
40035@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
40036 CLK_MGR_PLL_CLK_SRC_MASK;
40037 }
40038
40039-static struct clk_ops clk_pll_ops = {
40040+static clk_ops_no_const clk_pll_ops __read_only = {
40041 .recalc_rate = clk_pll_recalc_rate,
40042 .get_parent = clk_pll_get_parent,
40043 };
40044@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
40045 pll_clk->hw.hw.init = &init;
40046
40047 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
40048- clk_pll_ops.enable = clk_gate_ops.enable;
40049- clk_pll_ops.disable = clk_gate_ops.disable;
40050+ pax_open_kernel();
40051+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
40052+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
40053+ pax_close_kernel();
40054
40055 clk = clk_register(NULL, &pll_clk->hw.hw);
40056 if (WARN_ON(IS_ERR(clk))) {
40057diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
40058index b0c18ed..1713a80 100644
40059--- a/drivers/cpufreq/acpi-cpufreq.c
40060+++ b/drivers/cpufreq/acpi-cpufreq.c
40061@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40062 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
40063 per_cpu(acfreq_data, cpu) = data;
40064
40065- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
40066- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40067+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
40068+ pax_open_kernel();
40069+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40070+ pax_close_kernel();
40071+ }
40072
40073 result = acpi_processor_register_performance(data->acpi_data, cpu);
40074 if (result)
40075@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40076 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
40077 break;
40078 case ACPI_ADR_SPACE_FIXED_HARDWARE:
40079- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40080+ pax_open_kernel();
40081+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40082+ pax_close_kernel();
40083 break;
40084 default:
40085 break;
40086@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
40087 if (!msrs)
40088 return;
40089
40090- acpi_cpufreq_driver.boost_supported = true;
40091- acpi_cpufreq_driver.boost_enabled = boost_state(0);
40092+ pax_open_kernel();
40093+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
40094+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
40095+ pax_close_kernel();
40096
40097 cpu_notifier_register_begin();
40098
40099diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
40100index 07c8276..38bd07c 100644
40101--- a/drivers/cpufreq/cpufreq.c
40102+++ b/drivers/cpufreq/cpufreq.c
40103@@ -2107,7 +2107,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
40104 }
40105
40106 mutex_lock(&cpufreq_governor_mutex);
40107- list_del(&governor->governor_list);
40108+ pax_list_del(&governor->governor_list);
40109 mutex_unlock(&cpufreq_governor_mutex);
40110 return;
40111 }
40112@@ -2323,7 +2323,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
40113 return NOTIFY_OK;
40114 }
40115
40116-static struct notifier_block __refdata cpufreq_cpu_notifier = {
40117+static struct notifier_block cpufreq_cpu_notifier = {
40118 .notifier_call = cpufreq_cpu_callback,
40119 };
40120
40121@@ -2363,13 +2363,17 @@ int cpufreq_boost_trigger_state(int state)
40122 return 0;
40123
40124 write_lock_irqsave(&cpufreq_driver_lock, flags);
40125- cpufreq_driver->boost_enabled = state;
40126+ pax_open_kernel();
40127+ *(bool *)&cpufreq_driver->boost_enabled = state;
40128+ pax_close_kernel();
40129 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40130
40131 ret = cpufreq_driver->set_boost(state);
40132 if (ret) {
40133 write_lock_irqsave(&cpufreq_driver_lock, flags);
40134- cpufreq_driver->boost_enabled = !state;
40135+ pax_open_kernel();
40136+ *(bool *)&cpufreq_driver->boost_enabled = !state;
40137+ pax_close_kernel();
40138 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40139
40140 pr_err("%s: Cannot %s BOOST\n",
40141@@ -2426,8 +2430,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40142
40143 pr_debug("trying to register driver %s\n", driver_data->name);
40144
40145- if (driver_data->setpolicy)
40146- driver_data->flags |= CPUFREQ_CONST_LOOPS;
40147+ if (driver_data->setpolicy) {
40148+ pax_open_kernel();
40149+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
40150+ pax_close_kernel();
40151+ }
40152
40153 write_lock_irqsave(&cpufreq_driver_lock, flags);
40154 if (cpufreq_driver) {
40155@@ -2442,8 +2449,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40156 * Check if driver provides function to enable boost -
40157 * if not, use cpufreq_boost_set_sw as default
40158 */
40159- if (!cpufreq_driver->set_boost)
40160- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40161+ if (!cpufreq_driver->set_boost) {
40162+ pax_open_kernel();
40163+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40164+ pax_close_kernel();
40165+ }
40166
40167 ret = cpufreq_sysfs_create_file(&boost.attr);
40168 if (ret) {
40169diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
40170index 1b44496..b80ff5e 100644
40171--- a/drivers/cpufreq/cpufreq_governor.c
40172+++ b/drivers/cpufreq/cpufreq_governor.c
40173@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40174 struct dbs_data *dbs_data;
40175 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
40176 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
40177- struct od_ops *od_ops = NULL;
40178+ const struct od_ops *od_ops = NULL;
40179 struct od_dbs_tuners *od_tuners = NULL;
40180 struct cs_dbs_tuners *cs_tuners = NULL;
40181 struct cpu_dbs_common_info *cpu_cdbs;
40182@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40183
40184 if ((cdata->governor == GOV_CONSERVATIVE) &&
40185 (!policy->governor->initialized)) {
40186- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40187+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40188
40189 cpufreq_register_notifier(cs_ops->notifier_block,
40190 CPUFREQ_TRANSITION_NOTIFIER);
40191@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40192
40193 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
40194 (policy->governor->initialized == 1)) {
40195- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40196+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40197
40198 cpufreq_unregister_notifier(cs_ops->notifier_block,
40199 CPUFREQ_TRANSITION_NOTIFIER);
40200diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
40201index cc401d1..8197340 100644
40202--- a/drivers/cpufreq/cpufreq_governor.h
40203+++ b/drivers/cpufreq/cpufreq_governor.h
40204@@ -212,7 +212,7 @@ struct common_dbs_data {
40205 void (*exit)(struct dbs_data *dbs_data);
40206
40207 /* Governor specific ops, see below */
40208- void *gov_ops;
40209+ const void *gov_ops;
40210 };
40211
40212 /* Governor Per policy data */
40213@@ -232,7 +232,7 @@ struct od_ops {
40214 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
40215 unsigned int freq_next, unsigned int relation);
40216 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
40217-};
40218+} __no_const;
40219
40220 struct cs_ops {
40221 struct notifier_block *notifier_block;
40222diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
40223index ad3f38f..8f086cd 100644
40224--- a/drivers/cpufreq/cpufreq_ondemand.c
40225+++ b/drivers/cpufreq/cpufreq_ondemand.c
40226@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
40227
40228 define_get_cpu_dbs_routines(od_cpu_dbs_info);
40229
40230-static struct od_ops od_ops = {
40231+static struct od_ops od_ops __read_only = {
40232 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
40233 .powersave_bias_target = generic_powersave_bias_target,
40234 .freq_increase = dbs_freq_increase,
40235@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
40236 (struct cpufreq_policy *, unsigned int, unsigned int),
40237 unsigned int powersave_bias)
40238 {
40239- od_ops.powersave_bias_target = f;
40240+ pax_open_kernel();
40241+ *(void **)&od_ops.powersave_bias_target = f;
40242+ pax_close_kernel();
40243 od_set_powersave_bias(powersave_bias);
40244 }
40245 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
40246
40247 void od_unregister_powersave_bias_handler(void)
40248 {
40249- od_ops.powersave_bias_target = generic_powersave_bias_target;
40250+ pax_open_kernel();
40251+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
40252+ pax_close_kernel();
40253 od_set_powersave_bias(0);
40254 }
40255 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
40256diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
40257index 27bb6d3..4cf595c 100644
40258--- a/drivers/cpufreq/intel_pstate.c
40259+++ b/drivers/cpufreq/intel_pstate.c
40260@@ -133,10 +133,10 @@ struct pstate_funcs {
40261 struct cpu_defaults {
40262 struct pstate_adjust_policy pid_policy;
40263 struct pstate_funcs funcs;
40264-};
40265+} __do_const;
40266
40267 static struct pstate_adjust_policy pid_params;
40268-static struct pstate_funcs pstate_funcs;
40269+static struct pstate_funcs *pstate_funcs;
40270
40271 struct perf_limits {
40272 int no_turbo;
40273@@ -594,18 +594,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
40274
40275 cpu->pstate.current_pstate = pstate;
40276
40277- pstate_funcs.set(cpu, pstate);
40278+ pstate_funcs->set(cpu, pstate);
40279 }
40280
40281 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
40282 {
40283- cpu->pstate.min_pstate = pstate_funcs.get_min();
40284- cpu->pstate.max_pstate = pstate_funcs.get_max();
40285- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
40286- cpu->pstate.scaling = pstate_funcs.get_scaling();
40287+ cpu->pstate.min_pstate = pstate_funcs->get_min();
40288+ cpu->pstate.max_pstate = pstate_funcs->get_max();
40289+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
40290+ cpu->pstate.scaling = pstate_funcs->get_scaling();
40291
40292- if (pstate_funcs.get_vid)
40293- pstate_funcs.get_vid(cpu);
40294+ if (pstate_funcs->get_vid)
40295+ pstate_funcs->get_vid(cpu);
40296 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
40297 }
40298
40299@@ -875,9 +875,9 @@ static int intel_pstate_msrs_not_valid(void)
40300 rdmsrl(MSR_IA32_APERF, aperf);
40301 rdmsrl(MSR_IA32_MPERF, mperf);
40302
40303- if (!pstate_funcs.get_max() ||
40304- !pstate_funcs.get_min() ||
40305- !pstate_funcs.get_turbo())
40306+ if (!pstate_funcs->get_max() ||
40307+ !pstate_funcs->get_min() ||
40308+ !pstate_funcs->get_turbo())
40309 return -ENODEV;
40310
40311 rdmsrl(MSR_IA32_APERF, tmp);
40312@@ -891,7 +891,7 @@ static int intel_pstate_msrs_not_valid(void)
40313 return 0;
40314 }
40315
40316-static void copy_pid_params(struct pstate_adjust_policy *policy)
40317+static void copy_pid_params(const struct pstate_adjust_policy *policy)
40318 {
40319 pid_params.sample_rate_ms = policy->sample_rate_ms;
40320 pid_params.p_gain_pct = policy->p_gain_pct;
40321@@ -903,12 +903,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
40322
40323 static void copy_cpu_funcs(struct pstate_funcs *funcs)
40324 {
40325- pstate_funcs.get_max = funcs->get_max;
40326- pstate_funcs.get_min = funcs->get_min;
40327- pstate_funcs.get_turbo = funcs->get_turbo;
40328- pstate_funcs.get_scaling = funcs->get_scaling;
40329- pstate_funcs.set = funcs->set;
40330- pstate_funcs.get_vid = funcs->get_vid;
40331+ pstate_funcs = funcs;
40332 }
40333
40334 #if IS_ENABLED(CONFIG_ACPI)
40335diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
40336index 529cfd9..0e28fff 100644
40337--- a/drivers/cpufreq/p4-clockmod.c
40338+++ b/drivers/cpufreq/p4-clockmod.c
40339@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
40340 case 0x0F: /* Core Duo */
40341 case 0x16: /* Celeron Core */
40342 case 0x1C: /* Atom */
40343- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40344+ pax_open_kernel();
40345+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40346+ pax_close_kernel();
40347 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
40348 case 0x0D: /* Pentium M (Dothan) */
40349- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40350+ pax_open_kernel();
40351+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40352+ pax_close_kernel();
40353 /* fall through */
40354 case 0x09: /* Pentium M (Banias) */
40355 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
40356@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
40357
40358 /* on P-4s, the TSC runs with constant frequency independent whether
40359 * throttling is active or not. */
40360- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40361+ pax_open_kernel();
40362+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40363+ pax_close_kernel();
40364
40365 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
40366 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
40367diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
40368index 9bb42ba..b01b4a2 100644
40369--- a/drivers/cpufreq/sparc-us3-cpufreq.c
40370+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
40371@@ -18,14 +18,12 @@
40372 #include <asm/head.h>
40373 #include <asm/timer.h>
40374
40375-static struct cpufreq_driver *cpufreq_us3_driver;
40376-
40377 struct us3_freq_percpu_info {
40378 struct cpufreq_frequency_table table[4];
40379 };
40380
40381 /* Indexed by cpu number. */
40382-static struct us3_freq_percpu_info *us3_freq_table;
40383+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
40384
40385 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
40386 * in the Safari config register.
40387@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
40388
40389 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
40390 {
40391- if (cpufreq_us3_driver)
40392- us3_freq_target(policy, 0);
40393+ us3_freq_target(policy, 0);
40394
40395 return 0;
40396 }
40397
40398+static int __init us3_freq_init(void);
40399+static void __exit us3_freq_exit(void);
40400+
40401+static struct cpufreq_driver cpufreq_us3_driver = {
40402+ .init = us3_freq_cpu_init,
40403+ .verify = cpufreq_generic_frequency_table_verify,
40404+ .target_index = us3_freq_target,
40405+ .get = us3_freq_get,
40406+ .exit = us3_freq_cpu_exit,
40407+ .name = "UltraSPARC-III",
40408+
40409+};
40410+
40411 static int __init us3_freq_init(void)
40412 {
40413 unsigned long manuf, impl, ver;
40414- int ret;
40415
40416 if (tlb_type != cheetah && tlb_type != cheetah_plus)
40417 return -ENODEV;
40418@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
40419 (impl == CHEETAH_IMPL ||
40420 impl == CHEETAH_PLUS_IMPL ||
40421 impl == JAGUAR_IMPL ||
40422- impl == PANTHER_IMPL)) {
40423- struct cpufreq_driver *driver;
40424-
40425- ret = -ENOMEM;
40426- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
40427- if (!driver)
40428- goto err_out;
40429-
40430- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
40431- GFP_KERNEL);
40432- if (!us3_freq_table)
40433- goto err_out;
40434-
40435- driver->init = us3_freq_cpu_init;
40436- driver->verify = cpufreq_generic_frequency_table_verify;
40437- driver->target_index = us3_freq_target;
40438- driver->get = us3_freq_get;
40439- driver->exit = us3_freq_cpu_exit;
40440- strcpy(driver->name, "UltraSPARC-III");
40441-
40442- cpufreq_us3_driver = driver;
40443- ret = cpufreq_register_driver(driver);
40444- if (ret)
40445- goto err_out;
40446-
40447- return 0;
40448-
40449-err_out:
40450- if (driver) {
40451- kfree(driver);
40452- cpufreq_us3_driver = NULL;
40453- }
40454- kfree(us3_freq_table);
40455- us3_freq_table = NULL;
40456- return ret;
40457- }
40458+ impl == PANTHER_IMPL))
40459+ return cpufreq_register_driver(&cpufreq_us3_driver);
40460
40461 return -ENODEV;
40462 }
40463
40464 static void __exit us3_freq_exit(void)
40465 {
40466- if (cpufreq_us3_driver) {
40467- cpufreq_unregister_driver(cpufreq_us3_driver);
40468- kfree(cpufreq_us3_driver);
40469- cpufreq_us3_driver = NULL;
40470- kfree(us3_freq_table);
40471- us3_freq_table = NULL;
40472- }
40473+ cpufreq_unregister_driver(&cpufreq_us3_driver);
40474 }
40475
40476 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
40477diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
40478index 7d4a315..21bb886 100644
40479--- a/drivers/cpufreq/speedstep-centrino.c
40480+++ b/drivers/cpufreq/speedstep-centrino.c
40481@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
40482 !cpu_has(cpu, X86_FEATURE_EST))
40483 return -ENODEV;
40484
40485- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
40486- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
40487+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
40488+ pax_open_kernel();
40489+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
40490+ pax_close_kernel();
40491+ }
40492
40493 if (policy->cpu != 0)
40494 return -ENODEV;
40495diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
40496index e431d11..d0b997e 100644
40497--- a/drivers/cpuidle/driver.c
40498+++ b/drivers/cpuidle/driver.c
40499@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
40500
40501 static void poll_idle_init(struct cpuidle_driver *drv)
40502 {
40503- struct cpuidle_state *state = &drv->states[0];
40504+ cpuidle_state_no_const *state = &drv->states[0];
40505
40506 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
40507 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
40508diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
40509index ca89412..a7b9c49 100644
40510--- a/drivers/cpuidle/governor.c
40511+++ b/drivers/cpuidle/governor.c
40512@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
40513 mutex_lock(&cpuidle_lock);
40514 if (__cpuidle_find_governor(gov->name) == NULL) {
40515 ret = 0;
40516- list_add_tail(&gov->governor_list, &cpuidle_governors);
40517+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
40518 if (!cpuidle_curr_governor ||
40519 cpuidle_curr_governor->rating < gov->rating)
40520 cpuidle_switch_governor(gov);
40521diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
40522index 97c5903..023ad23 100644
40523--- a/drivers/cpuidle/sysfs.c
40524+++ b/drivers/cpuidle/sysfs.c
40525@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
40526 NULL
40527 };
40528
40529-static struct attribute_group cpuidle_attr_group = {
40530+static attribute_group_no_const cpuidle_attr_group = {
40531 .attrs = cpuidle_default_attrs,
40532 .name = "cpuidle",
40533 };
40534diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
40535index 8d2a772..33826c9 100644
40536--- a/drivers/crypto/hifn_795x.c
40537+++ b/drivers/crypto/hifn_795x.c
40538@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
40539 MODULE_PARM_DESC(hifn_pll_ref,
40540 "PLL reference clock (pci[freq] or ext[freq], default ext)");
40541
40542-static atomic_t hifn_dev_number;
40543+static atomic_unchecked_t hifn_dev_number;
40544
40545 #define ACRYPTO_OP_DECRYPT 0
40546 #define ACRYPTO_OP_ENCRYPT 1
40547@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
40548 goto err_out_disable_pci_device;
40549
40550 snprintf(name, sizeof(name), "hifn%d",
40551- atomic_inc_return(&hifn_dev_number)-1);
40552+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
40553
40554 err = pci_request_regions(pdev, name);
40555 if (err)
40556diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
40557index 9f90369..bfcacdb 100644
40558--- a/drivers/devfreq/devfreq.c
40559+++ b/drivers/devfreq/devfreq.c
40560@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
40561 goto err_out;
40562 }
40563
40564- list_add(&governor->node, &devfreq_governor_list);
40565+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
40566
40567 list_for_each_entry(devfreq, &devfreq_list, node) {
40568 int ret = 0;
40569@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
40570 }
40571 }
40572
40573- list_del(&governor->node);
40574+ pax_list_del((struct list_head *)&governor->node);
40575 err_out:
40576 mutex_unlock(&devfreq_list_lock);
40577
40578diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
40579index 42d4974..2714f36 100644
40580--- a/drivers/dma/sh/shdma-base.c
40581+++ b/drivers/dma/sh/shdma-base.c
40582@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
40583 schan->slave_id = -EINVAL;
40584 }
40585
40586- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
40587- sdev->desc_size, GFP_KERNEL);
40588+ schan->desc = kcalloc(sdev->desc_size,
40589+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
40590 if (!schan->desc) {
40591 ret = -ENOMEM;
40592 goto edescalloc;
40593diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
40594index 58eb857..d7e42c8 100644
40595--- a/drivers/dma/sh/shdmac.c
40596+++ b/drivers/dma/sh/shdmac.c
40597@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
40598 return ret;
40599 }
40600
40601-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
40602+static struct notifier_block sh_dmae_nmi_notifier = {
40603 .notifier_call = sh_dmae_nmi_handler,
40604
40605 /* Run before NMI debug handler and KGDB */
40606diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
40607index 592af5f..bb1d583 100644
40608--- a/drivers/edac/edac_device.c
40609+++ b/drivers/edac/edac_device.c
40610@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
40611 */
40612 int edac_device_alloc_index(void)
40613 {
40614- static atomic_t device_indexes = ATOMIC_INIT(0);
40615+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
40616
40617- return atomic_inc_return(&device_indexes) - 1;
40618+ return atomic_inc_return_unchecked(&device_indexes) - 1;
40619 }
40620 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
40621
40622diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
40623index a6cd361..7bdbf53 100644
40624--- a/drivers/edac/edac_mc_sysfs.c
40625+++ b/drivers/edac/edac_mc_sysfs.c
40626@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
40627 struct dev_ch_attribute {
40628 struct device_attribute attr;
40629 int channel;
40630-};
40631+} __do_const;
40632
40633 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
40634 struct dev_ch_attribute dev_attr_legacy_##_name = \
40635@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
40636 }
40637
40638 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
40639+ pax_open_kernel();
40640 if (mci->get_sdram_scrub_rate) {
40641- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40642- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40643+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40644+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40645 }
40646 if (mci->set_sdram_scrub_rate) {
40647- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40648- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40649+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40650+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40651 }
40652+ pax_close_kernel();
40653 err = device_create_file(&mci->dev,
40654 &dev_attr_sdram_scrub_rate);
40655 if (err) {
40656diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
40657index 2cf44b4d..6dd2dc7 100644
40658--- a/drivers/edac/edac_pci.c
40659+++ b/drivers/edac/edac_pci.c
40660@@ -29,7 +29,7 @@
40661
40662 static DEFINE_MUTEX(edac_pci_ctls_mutex);
40663 static LIST_HEAD(edac_pci_list);
40664-static atomic_t pci_indexes = ATOMIC_INIT(0);
40665+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
40666
40667 /*
40668 * edac_pci_alloc_ctl_info
40669@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
40670 */
40671 int edac_pci_alloc_index(void)
40672 {
40673- return atomic_inc_return(&pci_indexes) - 1;
40674+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
40675 }
40676 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
40677
40678diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
40679index e8658e4..22746d6 100644
40680--- a/drivers/edac/edac_pci_sysfs.c
40681+++ b/drivers/edac/edac_pci_sysfs.c
40682@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
40683 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
40684 static int edac_pci_poll_msec = 1000; /* one second workq period */
40685
40686-static atomic_t pci_parity_count = ATOMIC_INIT(0);
40687-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
40688+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
40689+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
40690
40691 static struct kobject *edac_pci_top_main_kobj;
40692 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
40693@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
40694 void *value;
40695 ssize_t(*show) (void *, char *);
40696 ssize_t(*store) (void *, const char *, size_t);
40697-};
40698+} __do_const;
40699
40700 /* Set of show/store abstract level functions for PCI Parity object */
40701 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
40702@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40703 edac_printk(KERN_CRIT, EDAC_PCI,
40704 "Signaled System Error on %s\n",
40705 pci_name(dev));
40706- atomic_inc(&pci_nonparity_count);
40707+ atomic_inc_unchecked(&pci_nonparity_count);
40708 }
40709
40710 if (status & (PCI_STATUS_PARITY)) {
40711@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40712 "Master Data Parity Error on %s\n",
40713 pci_name(dev));
40714
40715- atomic_inc(&pci_parity_count);
40716+ atomic_inc_unchecked(&pci_parity_count);
40717 }
40718
40719 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40720@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40721 "Detected Parity Error on %s\n",
40722 pci_name(dev));
40723
40724- atomic_inc(&pci_parity_count);
40725+ atomic_inc_unchecked(&pci_parity_count);
40726 }
40727 }
40728
40729@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40730 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40731 "Signaled System Error on %s\n",
40732 pci_name(dev));
40733- atomic_inc(&pci_nonparity_count);
40734+ atomic_inc_unchecked(&pci_nonparity_count);
40735 }
40736
40737 if (status & (PCI_STATUS_PARITY)) {
40738@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40739 "Master Data Parity Error on "
40740 "%s\n", pci_name(dev));
40741
40742- atomic_inc(&pci_parity_count);
40743+ atomic_inc_unchecked(&pci_parity_count);
40744 }
40745
40746 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40747@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40748 "Detected Parity Error on %s\n",
40749 pci_name(dev));
40750
40751- atomic_inc(&pci_parity_count);
40752+ atomic_inc_unchecked(&pci_parity_count);
40753 }
40754 }
40755 }
40756@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
40757 if (!check_pci_errors)
40758 return;
40759
40760- before_count = atomic_read(&pci_parity_count);
40761+ before_count = atomic_read_unchecked(&pci_parity_count);
40762
40763 /* scan all PCI devices looking for a Parity Error on devices and
40764 * bridges.
40765@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
40766 /* Only if operator has selected panic on PCI Error */
40767 if (edac_pci_get_panic_on_pe()) {
40768 /* If the count is different 'after' from 'before' */
40769- if (before_count != atomic_read(&pci_parity_count))
40770+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40771 panic("EDAC: PCI Parity Error");
40772 }
40773 }
40774diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40775index 51b7e3a..aa8a3e8 100644
40776--- a/drivers/edac/mce_amd.h
40777+++ b/drivers/edac/mce_amd.h
40778@@ -77,7 +77,7 @@ struct amd_decoder_ops {
40779 bool (*mc0_mce)(u16, u8);
40780 bool (*mc1_mce)(u16, u8);
40781 bool (*mc2_mce)(u16, u8);
40782-};
40783+} __no_const;
40784
40785 void amd_report_gart_errors(bool);
40786 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
40787diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40788index 57ea7f4..af06b76 100644
40789--- a/drivers/firewire/core-card.c
40790+++ b/drivers/firewire/core-card.c
40791@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40792 const struct fw_card_driver *driver,
40793 struct device *device)
40794 {
40795- static atomic_t index = ATOMIC_INIT(-1);
40796+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40797
40798- card->index = atomic_inc_return(&index);
40799+ card->index = atomic_inc_return_unchecked(&index);
40800 card->driver = driver;
40801 card->device = device;
40802 card->current_tlabel = 0;
40803@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40804
40805 void fw_core_remove_card(struct fw_card *card)
40806 {
40807- struct fw_card_driver dummy_driver = dummy_driver_template;
40808+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40809
40810 card->driver->update_phy_reg(card, 4,
40811 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40812diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40813index 2c6d5e1..a2cca6b 100644
40814--- a/drivers/firewire/core-device.c
40815+++ b/drivers/firewire/core-device.c
40816@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40817 struct config_rom_attribute {
40818 struct device_attribute attr;
40819 u32 key;
40820-};
40821+} __do_const;
40822
40823 static ssize_t show_immediate(struct device *dev,
40824 struct device_attribute *dattr, char *buf)
40825diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40826index eb6935c..3cc2bfa 100644
40827--- a/drivers/firewire/core-transaction.c
40828+++ b/drivers/firewire/core-transaction.c
40829@@ -38,6 +38,7 @@
40830 #include <linux/timer.h>
40831 #include <linux/types.h>
40832 #include <linux/workqueue.h>
40833+#include <linux/sched.h>
40834
40835 #include <asm/byteorder.h>
40836
40837diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40838index e1480ff6..1a429bd 100644
40839--- a/drivers/firewire/core.h
40840+++ b/drivers/firewire/core.h
40841@@ -111,6 +111,7 @@ struct fw_card_driver {
40842
40843 int (*stop_iso)(struct fw_iso_context *ctx);
40844 };
40845+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40846
40847 void fw_card_initialize(struct fw_card *card,
40848 const struct fw_card_driver *driver, struct device *device);
40849diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40850index a66a321..f6caf20 100644
40851--- a/drivers/firewire/ohci.c
40852+++ b/drivers/firewire/ohci.c
40853@@ -2056,10 +2056,12 @@ static void bus_reset_work(struct work_struct *work)
40854 be32_to_cpu(ohci->next_header));
40855 }
40856
40857+#ifndef CONFIG_GRKERNSEC
40858 if (param_remote_dma) {
40859 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40860 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40861 }
40862+#endif
40863
40864 spin_unlock_irq(&ohci->lock);
40865
40866@@ -2591,8 +2593,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40867 unsigned long flags;
40868 int n, ret = 0;
40869
40870+#ifndef CONFIG_GRKERNSEC
40871 if (param_remote_dma)
40872 return 0;
40873+#endif
40874
40875 /*
40876 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40877diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40878index 94a58a0..f5eba42 100644
40879--- a/drivers/firmware/dmi-id.c
40880+++ b/drivers/firmware/dmi-id.c
40881@@ -16,7 +16,7 @@
40882 struct dmi_device_attribute{
40883 struct device_attribute dev_attr;
40884 int field;
40885-};
40886+} __do_const;
40887 #define to_dmi_dev_attr(_dev_attr) \
40888 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40889
40890diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40891index 17afc51..0ef90cd 100644
40892--- a/drivers/firmware/dmi_scan.c
40893+++ b/drivers/firmware/dmi_scan.c
40894@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40895 if (buf == NULL)
40896 return -1;
40897
40898- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40899+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40900
40901 dmi_unmap(buf);
40902 return 0;
40903diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40904index 5b53d61..72cee96 100644
40905--- a/drivers/firmware/efi/cper.c
40906+++ b/drivers/firmware/efi/cper.c
40907@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40908 */
40909 u64 cper_next_record_id(void)
40910 {
40911- static atomic64_t seq;
40912+ static atomic64_unchecked_t seq;
40913
40914- if (!atomic64_read(&seq))
40915- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40916+ if (!atomic64_read_unchecked(&seq))
40917+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40918
40919- return atomic64_inc_return(&seq);
40920+ return atomic64_inc_return_unchecked(&seq);
40921 }
40922 EXPORT_SYMBOL_GPL(cper_next_record_id);
40923
40924diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40925index 64ecbb5..d921eb3 100644
40926--- a/drivers/firmware/efi/efi.c
40927+++ b/drivers/firmware/efi/efi.c
40928@@ -126,14 +126,16 @@ static struct attribute_group efi_subsys_attr_group = {
40929 };
40930
40931 static struct efivars generic_efivars;
40932-static struct efivar_operations generic_ops;
40933+static efivar_operations_no_const generic_ops __read_only;
40934
40935 static int generic_ops_register(void)
40936 {
40937- generic_ops.get_variable = efi.get_variable;
40938- generic_ops.set_variable = efi.set_variable;
40939- generic_ops.get_next_variable = efi.get_next_variable;
40940- generic_ops.query_variable_store = efi_query_variable_store;
40941+ pax_open_kernel();
40942+ *(void **)&generic_ops.get_variable = efi.get_variable;
40943+ *(void **)&generic_ops.set_variable = efi.set_variable;
40944+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40945+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40946+ pax_close_kernel();
40947
40948 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40949 }
40950diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40951index f256ecd..387dcb1 100644
40952--- a/drivers/firmware/efi/efivars.c
40953+++ b/drivers/firmware/efi/efivars.c
40954@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40955 static int
40956 create_efivars_bin_attributes(void)
40957 {
40958- struct bin_attribute *attr;
40959+ bin_attribute_no_const *attr;
40960 int error;
40961
40962 /* new_var */
40963diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40964index 2f569aa..c95f4fb 100644
40965--- a/drivers/firmware/google/memconsole.c
40966+++ b/drivers/firmware/google/memconsole.c
40967@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40968 if (!found_memconsole())
40969 return -ENODEV;
40970
40971- memconsole_bin_attr.size = memconsole_length;
40972+ pax_open_kernel();
40973+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40974+ pax_close_kernel();
40975+
40976 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40977 }
40978
40979diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40980index fe49ec3..1ade794 100644
40981--- a/drivers/gpio/gpio-em.c
40982+++ b/drivers/gpio/gpio-em.c
40983@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40984 struct em_gio_priv *p;
40985 struct resource *io[2], *irq[2];
40986 struct gpio_chip *gpio_chip;
40987- struct irq_chip *irq_chip;
40988+ irq_chip_no_const *irq_chip;
40989 const char *name = dev_name(&pdev->dev);
40990 int ret;
40991
40992diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
40993index 3784e81..73637b5 100644
40994--- a/drivers/gpio/gpio-ich.c
40995+++ b/drivers/gpio/gpio-ich.c
40996@@ -94,7 +94,7 @@ struct ichx_desc {
40997 * this option allows driver caching written output values
40998 */
40999 bool use_outlvl_cache;
41000-};
41001+} __do_const;
41002
41003 static struct {
41004 spinlock_t lock;
41005diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
41006index bf6c094..6573caf 100644
41007--- a/drivers/gpio/gpio-rcar.c
41008+++ b/drivers/gpio/gpio-rcar.c
41009@@ -357,7 +357,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
41010 struct gpio_rcar_priv *p;
41011 struct resource *io, *irq;
41012 struct gpio_chip *gpio_chip;
41013- struct irq_chip *irq_chip;
41014+ irq_chip_no_const *irq_chip;
41015 struct device *dev = &pdev->dev;
41016 const char *name = dev_name(dev);
41017 int ret;
41018diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
41019index dbf28fa..04dad4e 100644
41020--- a/drivers/gpio/gpio-vr41xx.c
41021+++ b/drivers/gpio/gpio-vr41xx.c
41022@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
41023 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
41024 maskl, pendl, maskh, pendh);
41025
41026- atomic_inc(&irq_err_count);
41027+ atomic_inc_unchecked(&irq_err_count);
41028
41029 return -EINVAL;
41030 }
41031diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
41032index c68d037..2f4f9a9 100644
41033--- a/drivers/gpio/gpiolib.c
41034+++ b/drivers/gpio/gpiolib.c
41035@@ -529,8 +529,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
41036 }
41037
41038 if (gpiochip->irqchip) {
41039- gpiochip->irqchip->irq_request_resources = NULL;
41040- gpiochip->irqchip->irq_release_resources = NULL;
41041+ pax_open_kernel();
41042+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
41043+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
41044+ pax_close_kernel();
41045 gpiochip->irqchip = NULL;
41046 }
41047 }
41048@@ -596,8 +598,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
41049 gpiochip->irqchip = NULL;
41050 return -EINVAL;
41051 }
41052- irqchip->irq_request_resources = gpiochip_irq_reqres;
41053- irqchip->irq_release_resources = gpiochip_irq_relres;
41054+
41055+ pax_open_kernel();
41056+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
41057+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
41058+ pax_close_kernel();
41059
41060 /*
41061 * Prepare the mapping since the irqchip shall be orthogonal to
41062diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
41063index 90e7730..3b41807 100644
41064--- a/drivers/gpu/drm/drm_crtc.c
41065+++ b/drivers/gpu/drm/drm_crtc.c
41066@@ -3861,7 +3861,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
41067 goto done;
41068 }
41069
41070- if (copy_to_user(&enum_ptr[copied].name,
41071+ if (copy_to_user(enum_ptr[copied].name,
41072 &prop_enum->name, DRM_PROP_NAME_LEN)) {
41073 ret = -EFAULT;
41074 goto done;
41075diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
41076index 3242e20..7e4f621 100644
41077--- a/drivers/gpu/drm/drm_drv.c
41078+++ b/drivers/gpu/drm/drm_drv.c
41079@@ -463,7 +463,7 @@ void drm_unplug_dev(struct drm_device *dev)
41080
41081 drm_device_set_unplugged(dev);
41082
41083- if (dev->open_count == 0) {
41084+ if (local_read(&dev->open_count) == 0) {
41085 drm_put_dev(dev);
41086 }
41087 mutex_unlock(&drm_global_mutex);
41088diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
41089index 79d5221..7ff73496 100644
41090--- a/drivers/gpu/drm/drm_fops.c
41091+++ b/drivers/gpu/drm/drm_fops.c
41092@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
41093 return PTR_ERR(minor);
41094
41095 dev = minor->dev;
41096- if (!dev->open_count++)
41097+ if (local_inc_return(&dev->open_count) == 1)
41098 need_setup = 1;
41099
41100 /* share address_space across all char-devs of a single device */
41101@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
41102 return 0;
41103
41104 err_undo:
41105- dev->open_count--;
41106+ local_dec(&dev->open_count);
41107 drm_minor_release(minor);
41108 return retcode;
41109 }
41110@@ -384,7 +384,7 @@ int drm_release(struct inode *inode, struct file *filp)
41111
41112 mutex_lock(&drm_global_mutex);
41113
41114- DRM_DEBUG("open_count = %d\n", dev->open_count);
41115+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
41116
41117 mutex_lock(&dev->struct_mutex);
41118 list_del(&file_priv->lhead);
41119@@ -397,10 +397,10 @@ int drm_release(struct inode *inode, struct file *filp)
41120 * Begin inline drm_release
41121 */
41122
41123- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
41124+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
41125 task_pid_nr(current),
41126 (long)old_encode_dev(file_priv->minor->kdev->devt),
41127- dev->open_count);
41128+ local_read(&dev->open_count));
41129
41130 /* Release any auth tokens that might point to this file_priv,
41131 (do that under the drm_global_mutex) */
41132@@ -471,7 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)
41133 * End inline drm_release
41134 */
41135
41136- if (!--dev->open_count) {
41137+ if (local_dec_and_test(&dev->open_count)) {
41138 retcode = drm_lastclose(dev);
41139 if (drm_device_is_unplugged(dev))
41140 drm_put_dev(dev);
41141diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
41142index 3d2e91c..d31c4c9 100644
41143--- a/drivers/gpu/drm/drm_global.c
41144+++ b/drivers/gpu/drm/drm_global.c
41145@@ -36,7 +36,7 @@
41146 struct drm_global_item {
41147 struct mutex mutex;
41148 void *object;
41149- int refcount;
41150+ atomic_t refcount;
41151 };
41152
41153 static struct drm_global_item glob[DRM_GLOBAL_NUM];
41154@@ -49,7 +49,7 @@ void drm_global_init(void)
41155 struct drm_global_item *item = &glob[i];
41156 mutex_init(&item->mutex);
41157 item->object = NULL;
41158- item->refcount = 0;
41159+ atomic_set(&item->refcount, 0);
41160 }
41161 }
41162
41163@@ -59,7 +59,7 @@ void drm_global_release(void)
41164 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
41165 struct drm_global_item *item = &glob[i];
41166 BUG_ON(item->object != NULL);
41167- BUG_ON(item->refcount != 0);
41168+ BUG_ON(atomic_read(&item->refcount) != 0);
41169 }
41170 }
41171
41172@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41173 struct drm_global_item *item = &glob[ref->global_type];
41174
41175 mutex_lock(&item->mutex);
41176- if (item->refcount == 0) {
41177+ if (atomic_read(&item->refcount) == 0) {
41178 item->object = kzalloc(ref->size, GFP_KERNEL);
41179 if (unlikely(item->object == NULL)) {
41180 ret = -ENOMEM;
41181@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41182 goto out_err;
41183
41184 }
41185- ++item->refcount;
41186+ atomic_inc(&item->refcount);
41187 ref->object = item->object;
41188 mutex_unlock(&item->mutex);
41189 return 0;
41190@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
41191 struct drm_global_item *item = &glob[ref->global_type];
41192
41193 mutex_lock(&item->mutex);
41194- BUG_ON(item->refcount == 0);
41195+ BUG_ON(atomic_read(&item->refcount) == 0);
41196 BUG_ON(ref->object != item->object);
41197- if (--item->refcount == 0) {
41198+ if (atomic_dec_and_test(&item->refcount)) {
41199 ref->release(ref);
41200 item->object = NULL;
41201 }
41202diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
41203index ecaf0fa..a49cee9 100644
41204--- a/drivers/gpu/drm/drm_info.c
41205+++ b/drivers/gpu/drm/drm_info.c
41206@@ -73,10 +73,13 @@ int drm_vm_info(struct seq_file *m, void *data)
41207 struct drm_local_map *map;
41208 struct drm_map_list *r_list;
41209
41210- /* Hardcoded from _DRM_FRAME_BUFFER,
41211- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
41212- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
41213- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
41214+ static const char * const types[] = {
41215+ [_DRM_FRAME_BUFFER] = "FB",
41216+ [_DRM_REGISTERS] = "REG",
41217+ [_DRM_SHM] = "SHM",
41218+ [_DRM_AGP] = "AGP",
41219+ [_DRM_SCATTER_GATHER] = "SG",
41220+ [_DRM_CONSISTENT] = "PCI"};
41221 const char *type;
41222 int i;
41223
41224@@ -87,7 +90,7 @@ int drm_vm_info(struct seq_file *m, void *data)
41225 map = r_list->map;
41226 if (!map)
41227 continue;
41228- if (map->type < 0 || map->type > 5)
41229+ if (map->type >= ARRAY_SIZE(types))
41230 type = "??";
41231 else
41232 type = types[map->type];
41233@@ -259,7 +262,11 @@ int drm_vma_info(struct seq_file *m, void *data)
41234 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
41235 vma->vm_flags & VM_LOCKED ? 'l' : '-',
41236 vma->vm_flags & VM_IO ? 'i' : '-',
41237+#ifdef CONFIG_GRKERNSEC_HIDESYM
41238+ 0);
41239+#else
41240 vma->vm_pgoff);
41241+#endif
41242
41243 #if defined(__i386__)
41244 pgprot = pgprot_val(vma->vm_page_prot);
41245diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
41246index 2f4c4343..dd12cd2 100644
41247--- a/drivers/gpu/drm/drm_ioc32.c
41248+++ b/drivers/gpu/drm/drm_ioc32.c
41249@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
41250 request = compat_alloc_user_space(nbytes);
41251 if (!access_ok(VERIFY_WRITE, request, nbytes))
41252 return -EFAULT;
41253- list = (struct drm_buf_desc *) (request + 1);
41254+ list = (struct drm_buf_desc __user *) (request + 1);
41255
41256 if (__put_user(count, &request->count)
41257 || __put_user(list, &request->list))
41258@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
41259 request = compat_alloc_user_space(nbytes);
41260 if (!access_ok(VERIFY_WRITE, request, nbytes))
41261 return -EFAULT;
41262- list = (struct drm_buf_pub *) (request + 1);
41263+ list = (struct drm_buf_pub __user *) (request + 1);
41264
41265 if (__put_user(count, &request->count)
41266 || __put_user(list, &request->list))
41267@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
41268 return 0;
41269 }
41270
41271-drm_ioctl_compat_t *drm_compat_ioctls[] = {
41272+drm_ioctl_compat_t drm_compat_ioctls[] = {
41273 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
41274 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
41275 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
41276@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
41277 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41278 {
41279 unsigned int nr = DRM_IOCTL_NR(cmd);
41280- drm_ioctl_compat_t *fn;
41281 int ret;
41282
41283 /* Assume that ioctls without an explicit compat routine will just
41284@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41285 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
41286 return drm_ioctl(filp, cmd, arg);
41287
41288- fn = drm_compat_ioctls[nr];
41289-
41290- if (fn != NULL)
41291- ret = (*fn) (filp, cmd, arg);
41292+ if (drm_compat_ioctls[nr] != NULL)
41293+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
41294 else
41295 ret = drm_ioctl(filp, cmd, arg);
41296
41297diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
41298index 40be746..fd78faf 100644
41299--- a/drivers/gpu/drm/drm_ioctl.c
41300+++ b/drivers/gpu/drm/drm_ioctl.c
41301@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
41302 struct drm_file *file_priv = filp->private_data;
41303 struct drm_device *dev;
41304 const struct drm_ioctl_desc *ioctl = NULL;
41305- drm_ioctl_t *func;
41306+ drm_ioctl_no_const_t func;
41307 unsigned int nr = DRM_IOCTL_NR(cmd);
41308 int retcode = -EINVAL;
41309 char stack_kdata[128];
41310diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
41311index d4d16ed..8fb0b51 100644
41312--- a/drivers/gpu/drm/i810/i810_drv.h
41313+++ b/drivers/gpu/drm/i810/i810_drv.h
41314@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
41315 int page_flipping;
41316
41317 wait_queue_head_t irq_queue;
41318- atomic_t irq_received;
41319- atomic_t irq_emitted;
41320+ atomic_unchecked_t irq_received;
41321+ atomic_unchecked_t irq_emitted;
41322
41323 int front_offset;
41324 } drm_i810_private_t;
41325diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
41326index 2d23e57..1c61d41 100644
41327--- a/drivers/gpu/drm/i915/i915_dma.c
41328+++ b/drivers/gpu/drm/i915/i915_dma.c
41329@@ -1292,7 +1292,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
41330 * locking inversion with the driver load path. And the access here is
41331 * completely racy anyway. So don't bother with locking for now.
41332 */
41333- return dev->open_count == 0;
41334+ return local_read(&dev->open_count) == 0;
41335 }
41336
41337 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
41338diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41339index 60998fc..3b244bc 100644
41340--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41341+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41342@@ -891,9 +891,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
41343
41344 static int
41345 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
41346- int count)
41347+ unsigned int count)
41348 {
41349- int i;
41350+ unsigned int i;
41351 unsigned relocs_total = 0;
41352 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
41353
41354diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
41355index 2e0613e..a8b94d9 100644
41356--- a/drivers/gpu/drm/i915/i915_ioc32.c
41357+++ b/drivers/gpu/drm/i915/i915_ioc32.c
41358@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
41359 (unsigned long)request);
41360 }
41361
41362-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41363+static drm_ioctl_compat_t i915_compat_ioctls[] = {
41364 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
41365 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
41366 [DRM_I915_GETPARAM] = compat_i915_getparam,
41367@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41368 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41369 {
41370 unsigned int nr = DRM_IOCTL_NR(cmd);
41371- drm_ioctl_compat_t *fn = NULL;
41372 int ret;
41373
41374 if (nr < DRM_COMMAND_BASE)
41375 return drm_compat_ioctl(filp, cmd, arg);
41376
41377- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
41378- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
41379-
41380- if (fn != NULL)
41381+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
41382+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
41383 ret = (*fn) (filp, cmd, arg);
41384- else
41385+ } else
41386 ret = drm_ioctl(filp, cmd, arg);
41387
41388 return ret;
41389diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
41390index 7bd17b3..ffa0a11 100644
41391--- a/drivers/gpu/drm/i915/intel_display.c
41392+++ b/drivers/gpu/drm/i915/intel_display.c
41393@@ -12441,13 +12441,13 @@ struct intel_quirk {
41394 int subsystem_vendor;
41395 int subsystem_device;
41396 void (*hook)(struct drm_device *dev);
41397-};
41398+} __do_const;
41399
41400 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
41401 struct intel_dmi_quirk {
41402 void (*hook)(struct drm_device *dev);
41403 const struct dmi_system_id (*dmi_id_list)[];
41404-};
41405+} __do_const;
41406
41407 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41408 {
41409@@ -12455,18 +12455,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41410 return 1;
41411 }
41412
41413-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41414+static const struct dmi_system_id intel_dmi_quirks_table[] = {
41415 {
41416- .dmi_id_list = &(const struct dmi_system_id[]) {
41417- {
41418- .callback = intel_dmi_reverse_brightness,
41419- .ident = "NCR Corporation",
41420- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41421- DMI_MATCH(DMI_PRODUCT_NAME, ""),
41422- },
41423- },
41424- { } /* terminating entry */
41425+ .callback = intel_dmi_reverse_brightness,
41426+ .ident = "NCR Corporation",
41427+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41428+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
41429 },
41430+ },
41431+ { } /* terminating entry */
41432+};
41433+
41434+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41435+ {
41436+ .dmi_id_list = &intel_dmi_quirks_table,
41437 .hook = quirk_invert_brightness,
41438 },
41439 };
41440diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
41441index fe45321..836fdca 100644
41442--- a/drivers/gpu/drm/mga/mga_drv.h
41443+++ b/drivers/gpu/drm/mga/mga_drv.h
41444@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
41445 u32 clear_cmd;
41446 u32 maccess;
41447
41448- atomic_t vbl_received; /**< Number of vblanks received. */
41449+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
41450 wait_queue_head_t fence_queue;
41451- atomic_t last_fence_retired;
41452+ atomic_unchecked_t last_fence_retired;
41453 u32 next_fence_to_post;
41454
41455 unsigned int fb_cpp;
41456diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
41457index 729bfd5..ead8823 100644
41458--- a/drivers/gpu/drm/mga/mga_ioc32.c
41459+++ b/drivers/gpu/drm/mga/mga_ioc32.c
41460@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
41461 return 0;
41462 }
41463
41464-drm_ioctl_compat_t *mga_compat_ioctls[] = {
41465+drm_ioctl_compat_t mga_compat_ioctls[] = {
41466 [DRM_MGA_INIT] = compat_mga_init,
41467 [DRM_MGA_GETPARAM] = compat_mga_getparam,
41468 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
41469@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
41470 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41471 {
41472 unsigned int nr = DRM_IOCTL_NR(cmd);
41473- drm_ioctl_compat_t *fn = NULL;
41474 int ret;
41475
41476 if (nr < DRM_COMMAND_BASE)
41477 return drm_compat_ioctl(filp, cmd, arg);
41478
41479- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
41480- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
41481-
41482- if (fn != NULL)
41483+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
41484+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
41485 ret = (*fn) (filp, cmd, arg);
41486- else
41487+ } else
41488 ret = drm_ioctl(filp, cmd, arg);
41489
41490 return ret;
41491diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
41492index 1b071b8..de8601a 100644
41493--- a/drivers/gpu/drm/mga/mga_irq.c
41494+++ b/drivers/gpu/drm/mga/mga_irq.c
41495@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
41496 if (crtc != 0)
41497 return 0;
41498
41499- return atomic_read(&dev_priv->vbl_received);
41500+ return atomic_read_unchecked(&dev_priv->vbl_received);
41501 }
41502
41503
41504@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41505 /* VBLANK interrupt */
41506 if (status & MGA_VLINEPEN) {
41507 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
41508- atomic_inc(&dev_priv->vbl_received);
41509+ atomic_inc_unchecked(&dev_priv->vbl_received);
41510 drm_handle_vblank(dev, 0);
41511 handled = 1;
41512 }
41513@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41514 if ((prim_start & ~0x03) != (prim_end & ~0x03))
41515 MGA_WRITE(MGA_PRIMEND, prim_end);
41516
41517- atomic_inc(&dev_priv->last_fence_retired);
41518+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
41519 wake_up(&dev_priv->fence_queue);
41520 handled = 1;
41521 }
41522@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
41523 * using fences.
41524 */
41525 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
41526- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
41527+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
41528 - *sequence) <= (1 << 23)));
41529
41530 *sequence = cur_fence;
41531diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
41532index dae2c96..324dbe4 100644
41533--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
41534+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
41535@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
41536 struct bit_table {
41537 const char id;
41538 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
41539-};
41540+} __no_const;
41541
41542 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
41543
41544diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
41545index b02b024..aed7bad 100644
41546--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
41547+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
41548@@ -119,7 +119,6 @@ struct nouveau_drm {
41549 struct drm_global_reference mem_global_ref;
41550 struct ttm_bo_global_ref bo_global_ref;
41551 struct ttm_bo_device bdev;
41552- atomic_t validate_sequence;
41553 int (*move)(struct nouveau_channel *,
41554 struct ttm_buffer_object *,
41555 struct ttm_mem_reg *, struct ttm_mem_reg *);
41556diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41557index 462679a..88e32a7 100644
41558--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41559+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41560@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
41561 unsigned long arg)
41562 {
41563 unsigned int nr = DRM_IOCTL_NR(cmd);
41564- drm_ioctl_compat_t *fn = NULL;
41565+ drm_ioctl_compat_t fn = NULL;
41566 int ret;
41567
41568 if (nr < DRM_COMMAND_BASE)
41569diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41570index 53874b7..1db0a68 100644
41571--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
41572+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41573@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41574 }
41575
41576 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
41577- nouveau_vram_manager_init,
41578- nouveau_vram_manager_fini,
41579- nouveau_vram_manager_new,
41580- nouveau_vram_manager_del,
41581- nouveau_vram_manager_debug
41582+ .init = nouveau_vram_manager_init,
41583+ .takedown = nouveau_vram_manager_fini,
41584+ .get_node = nouveau_vram_manager_new,
41585+ .put_node = nouveau_vram_manager_del,
41586+ .debug = nouveau_vram_manager_debug
41587 };
41588
41589 static int
41590@@ -196,11 +196,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41591 }
41592
41593 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
41594- nouveau_gart_manager_init,
41595- nouveau_gart_manager_fini,
41596- nouveau_gart_manager_new,
41597- nouveau_gart_manager_del,
41598- nouveau_gart_manager_debug
41599+ .init = nouveau_gart_manager_init,
41600+ .takedown = nouveau_gart_manager_fini,
41601+ .get_node = nouveau_gart_manager_new,
41602+ .put_node = nouveau_gart_manager_del,
41603+ .debug = nouveau_gart_manager_debug
41604 };
41605
41606 /*XXX*/
41607@@ -270,11 +270,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41608 }
41609
41610 const struct ttm_mem_type_manager_func nv04_gart_manager = {
41611- nv04_gart_manager_init,
41612- nv04_gart_manager_fini,
41613- nv04_gart_manager_new,
41614- nv04_gart_manager_del,
41615- nv04_gart_manager_debug
41616+ .init = nv04_gart_manager_init,
41617+ .takedown = nv04_gart_manager_fini,
41618+ .get_node = nv04_gart_manager_new,
41619+ .put_node = nv04_gart_manager_del,
41620+ .debug = nv04_gart_manager_debug
41621 };
41622
41623 int
41624diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
41625index c7592ec..dd45ebc 100644
41626--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
41627+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
41628@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
41629 * locking inversion with the driver load path. And the access here is
41630 * completely racy anyway. So don't bother with locking for now.
41631 */
41632- return dev->open_count == 0;
41633+ return local_read(&dev->open_count) == 0;
41634 }
41635
41636 static const struct vga_switcheroo_client_ops
41637diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
41638index eb89653..613cf71 100644
41639--- a/drivers/gpu/drm/qxl/qxl_cmd.c
41640+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
41641@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
41642 int ret;
41643
41644 mutex_lock(&qdev->async_io_mutex);
41645- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41646+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41647 if (qdev->last_sent_io_cmd > irq_num) {
41648 if (intr)
41649 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41650- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41651+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41652 else
41653 ret = wait_event_timeout(qdev->io_cmd_event,
41654- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41655+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41656 /* 0 is timeout, just bail the "hw" has gone away */
41657 if (ret <= 0)
41658 goto out;
41659- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41660+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41661 }
41662 outb(val, addr);
41663 qdev->last_sent_io_cmd = irq_num + 1;
41664 if (intr)
41665 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41666- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41667+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41668 else
41669 ret = wait_event_timeout(qdev->io_cmd_event,
41670- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41671+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41672 out:
41673 if (ret > 0)
41674 ret = 0;
41675diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
41676index c3c2bbd..bc3c0fb 100644
41677--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
41678+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
41679@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
41680 struct drm_info_node *node = (struct drm_info_node *) m->private;
41681 struct qxl_device *qdev = node->minor->dev->dev_private;
41682
41683- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
41684- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
41685- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
41686- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
41687+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
41688+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
41689+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
41690+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
41691 seq_printf(m, "%d\n", qdev->irq_received_error);
41692 return 0;
41693 }
41694diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
41695index 36ed40b..0397633 100644
41696--- a/drivers/gpu/drm/qxl/qxl_drv.h
41697+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41698@@ -290,10 +290,10 @@ struct qxl_device {
41699 unsigned int last_sent_io_cmd;
41700
41701 /* interrupt handling */
41702- atomic_t irq_received;
41703- atomic_t irq_received_display;
41704- atomic_t irq_received_cursor;
41705- atomic_t irq_received_io_cmd;
41706+ atomic_unchecked_t irq_received;
41707+ atomic_unchecked_t irq_received_display;
41708+ atomic_unchecked_t irq_received_cursor;
41709+ atomic_unchecked_t irq_received_io_cmd;
41710 unsigned irq_received_error;
41711 wait_queue_head_t display_event;
41712 wait_queue_head_t cursor_event;
41713diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41714index b110883..dd06418 100644
41715--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41716+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41717@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41718
41719 /* TODO copy slow path code from i915 */
41720 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41721- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41722+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41723
41724 {
41725 struct qxl_drawable *draw = fb_cmd;
41726@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41727 struct drm_qxl_reloc reloc;
41728
41729 if (copy_from_user(&reloc,
41730- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41731+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41732 sizeof(reloc))) {
41733 ret = -EFAULT;
41734 goto out_free_bos;
41735@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41736
41737 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41738
41739- struct drm_qxl_command *commands =
41740- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41741+ struct drm_qxl_command __user *commands =
41742+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41743
41744- if (copy_from_user(&user_cmd, &commands[cmd_num],
41745+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41746 sizeof(user_cmd)))
41747 return -EFAULT;
41748
41749diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41750index 0bf1e20..42a7310 100644
41751--- a/drivers/gpu/drm/qxl/qxl_irq.c
41752+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41753@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41754 if (!pending)
41755 return IRQ_NONE;
41756
41757- atomic_inc(&qdev->irq_received);
41758+ atomic_inc_unchecked(&qdev->irq_received);
41759
41760 if (pending & QXL_INTERRUPT_DISPLAY) {
41761- atomic_inc(&qdev->irq_received_display);
41762+ atomic_inc_unchecked(&qdev->irq_received_display);
41763 wake_up_all(&qdev->display_event);
41764 qxl_queue_garbage_collect(qdev, false);
41765 }
41766 if (pending & QXL_INTERRUPT_CURSOR) {
41767- atomic_inc(&qdev->irq_received_cursor);
41768+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41769 wake_up_all(&qdev->cursor_event);
41770 }
41771 if (pending & QXL_INTERRUPT_IO_CMD) {
41772- atomic_inc(&qdev->irq_received_io_cmd);
41773+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41774 wake_up_all(&qdev->io_cmd_event);
41775 }
41776 if (pending & QXL_INTERRUPT_ERROR) {
41777@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41778 init_waitqueue_head(&qdev->io_cmd_event);
41779 INIT_WORK(&qdev->client_monitors_config_work,
41780 qxl_client_monitors_config_work_func);
41781- atomic_set(&qdev->irq_received, 0);
41782- atomic_set(&qdev->irq_received_display, 0);
41783- atomic_set(&qdev->irq_received_cursor, 0);
41784- atomic_set(&qdev->irq_received_io_cmd, 0);
41785+ atomic_set_unchecked(&qdev->irq_received, 0);
41786+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41787+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41788+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41789 qdev->irq_received_error = 0;
41790 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41791 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41792diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41793index 71a1bae..cb1f103 100644
41794--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41795+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41796@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41797 }
41798 }
41799
41800-static struct vm_operations_struct qxl_ttm_vm_ops;
41801+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41802 static const struct vm_operations_struct *ttm_vm_ops;
41803
41804 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41805@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41806 return r;
41807 if (unlikely(ttm_vm_ops == NULL)) {
41808 ttm_vm_ops = vma->vm_ops;
41809+ pax_open_kernel();
41810 qxl_ttm_vm_ops = *ttm_vm_ops;
41811 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41812+ pax_close_kernel();
41813 }
41814 vma->vm_ops = &qxl_ttm_vm_ops;
41815 return 0;
41816@@ -555,25 +557,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41817 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41818 {
41819 #if defined(CONFIG_DEBUG_FS)
41820- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41821- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41822- unsigned i;
41823+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41824+ {
41825+ .name = "qxl_mem_mm",
41826+ .show = &qxl_mm_dump_table,
41827+ },
41828+ {
41829+ .name = "qxl_surf_mm",
41830+ .show = &qxl_mm_dump_table,
41831+ }
41832+ };
41833
41834- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41835- if (i == 0)
41836- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41837- else
41838- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41839- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41840- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41841- qxl_mem_types_list[i].driver_features = 0;
41842- if (i == 0)
41843- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41844- else
41845- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41846+ pax_open_kernel();
41847+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41848+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41849+ pax_close_kernel();
41850
41851- }
41852- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41853+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41854 #else
41855 return 0;
41856 #endif
41857diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41858index 59459fe..be26b31 100644
41859--- a/drivers/gpu/drm/r128/r128_cce.c
41860+++ b/drivers/gpu/drm/r128/r128_cce.c
41861@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41862
41863 /* GH: Simple idle check.
41864 */
41865- atomic_set(&dev_priv->idle_count, 0);
41866+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41867
41868 /* We don't support anything other than bus-mastering ring mode,
41869 * but the ring can be in either AGP or PCI space for the ring
41870diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41871index 5bf3f5f..7000661 100644
41872--- a/drivers/gpu/drm/r128/r128_drv.h
41873+++ b/drivers/gpu/drm/r128/r128_drv.h
41874@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
41875 int is_pci;
41876 unsigned long cce_buffers_offset;
41877
41878- atomic_t idle_count;
41879+ atomic_unchecked_t idle_count;
41880
41881 int page_flipping;
41882 int current_page;
41883 u32 crtc_offset;
41884 u32 crtc_offset_cntl;
41885
41886- atomic_t vbl_received;
41887+ atomic_unchecked_t vbl_received;
41888
41889 u32 color_fmt;
41890 unsigned int front_offset;
41891diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41892index 663f38c..c689495 100644
41893--- a/drivers/gpu/drm/r128/r128_ioc32.c
41894+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41895@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41896 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41897 }
41898
41899-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41900+drm_ioctl_compat_t r128_compat_ioctls[] = {
41901 [DRM_R128_INIT] = compat_r128_init,
41902 [DRM_R128_DEPTH] = compat_r128_depth,
41903 [DRM_R128_STIPPLE] = compat_r128_stipple,
41904@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41905 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41906 {
41907 unsigned int nr = DRM_IOCTL_NR(cmd);
41908- drm_ioctl_compat_t *fn = NULL;
41909 int ret;
41910
41911 if (nr < DRM_COMMAND_BASE)
41912 return drm_compat_ioctl(filp, cmd, arg);
41913
41914- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41915- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41916-
41917- if (fn != NULL)
41918+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
41919+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41920 ret = (*fn) (filp, cmd, arg);
41921- else
41922+ } else
41923 ret = drm_ioctl(filp, cmd, arg);
41924
41925 return ret;
41926diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41927index c2ae496..30b5993 100644
41928--- a/drivers/gpu/drm/r128/r128_irq.c
41929+++ b/drivers/gpu/drm/r128/r128_irq.c
41930@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41931 if (crtc != 0)
41932 return 0;
41933
41934- return atomic_read(&dev_priv->vbl_received);
41935+ return atomic_read_unchecked(&dev_priv->vbl_received);
41936 }
41937
41938 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41939@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41940 /* VBLANK interrupt */
41941 if (status & R128_CRTC_VBLANK_INT) {
41942 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41943- atomic_inc(&dev_priv->vbl_received);
41944+ atomic_inc_unchecked(&dev_priv->vbl_received);
41945 drm_handle_vblank(dev, 0);
41946 return IRQ_HANDLED;
41947 }
41948diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41949index 575e986..66e62ca 100644
41950--- a/drivers/gpu/drm/r128/r128_state.c
41951+++ b/drivers/gpu/drm/r128/r128_state.c
41952@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41953
41954 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41955 {
41956- if (atomic_read(&dev_priv->idle_count) == 0)
41957+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41958 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41959 else
41960- atomic_set(&dev_priv->idle_count, 0);
41961+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41962 }
41963
41964 #endif
41965diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41966index 4a85bb6..aaea819 100644
41967--- a/drivers/gpu/drm/radeon/mkregtable.c
41968+++ b/drivers/gpu/drm/radeon/mkregtable.c
41969@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41970 regex_t mask_rex;
41971 regmatch_t match[4];
41972 char buf[1024];
41973- size_t end;
41974+ long end;
41975 int len;
41976 int done = 0;
41977 int r;
41978 unsigned o;
41979 struct offset *offset;
41980 char last_reg_s[10];
41981- int last_reg;
41982+ unsigned long last_reg;
41983
41984 if (regcomp
41985 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41986diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41987index 5d4416f..80b7fc4 100644
41988--- a/drivers/gpu/drm/radeon/radeon_device.c
41989+++ b/drivers/gpu/drm/radeon/radeon_device.c
41990@@ -1214,7 +1214,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41991 * locking inversion with the driver load path. And the access here is
41992 * completely racy anyway. So don't bother with locking for now.
41993 */
41994- return dev->open_count == 0;
41995+ return local_read(&dev->open_count) == 0;
41996 }
41997
41998 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
41999diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
42000index dafd812..1bf20c7 100644
42001--- a/drivers/gpu/drm/radeon/radeon_drv.h
42002+++ b/drivers/gpu/drm/radeon/radeon_drv.h
42003@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
42004
42005 /* SW interrupt */
42006 wait_queue_head_t swi_queue;
42007- atomic_t swi_emitted;
42008+ atomic_unchecked_t swi_emitted;
42009 int vblank_crtc;
42010 uint32_t irq_enable_reg;
42011 uint32_t r500_disp_irq_reg;
42012diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
42013index 0b98ea1..0881827 100644
42014--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
42015+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
42016@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42017 request = compat_alloc_user_space(sizeof(*request));
42018 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
42019 || __put_user(req32.param, &request->param)
42020- || __put_user((void __user *)(unsigned long)req32.value,
42021+ || __put_user((unsigned long)req32.value,
42022 &request->value))
42023 return -EFAULT;
42024
42025@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42026 #define compat_radeon_cp_setparam NULL
42027 #endif /* X86_64 || IA64 */
42028
42029-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42030+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
42031 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
42032 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
42033 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
42034@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42035 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42036 {
42037 unsigned int nr = DRM_IOCTL_NR(cmd);
42038- drm_ioctl_compat_t *fn = NULL;
42039 int ret;
42040
42041 if (nr < DRM_COMMAND_BASE)
42042 return drm_compat_ioctl(filp, cmd, arg);
42043
42044- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
42045- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42046-
42047- if (fn != NULL)
42048+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
42049+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42050 ret = (*fn) (filp, cmd, arg);
42051- else
42052+ } else
42053 ret = drm_ioctl(filp, cmd, arg);
42054
42055 return ret;
42056diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
42057index 244b19b..c19226d 100644
42058--- a/drivers/gpu/drm/radeon/radeon_irq.c
42059+++ b/drivers/gpu/drm/radeon/radeon_irq.c
42060@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
42061 unsigned int ret;
42062 RING_LOCALS;
42063
42064- atomic_inc(&dev_priv->swi_emitted);
42065- ret = atomic_read(&dev_priv->swi_emitted);
42066+ atomic_inc_unchecked(&dev_priv->swi_emitted);
42067+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
42068
42069 BEGIN_RING(4);
42070 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
42071@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
42072 drm_radeon_private_t *dev_priv =
42073 (drm_radeon_private_t *) dev->dev_private;
42074
42075- atomic_set(&dev_priv->swi_emitted, 0);
42076+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
42077 init_waitqueue_head(&dev_priv->swi_queue);
42078
42079 dev->max_vblank_count = 0x001fffff;
42080diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
42081index 23bb64f..69d7234 100644
42082--- a/drivers/gpu/drm/radeon/radeon_state.c
42083+++ b/drivers/gpu/drm/radeon/radeon_state.c
42084@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
42085 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
42086 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
42087
42088- if (copy_from_user(&depth_boxes, clear->depth_boxes,
42089+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
42090 sarea_priv->nbox * sizeof(depth_boxes[0])))
42091 return -EFAULT;
42092
42093@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
42094 {
42095 drm_radeon_private_t *dev_priv = dev->dev_private;
42096 drm_radeon_getparam_t *param = data;
42097- int value;
42098+ int value = 0;
42099
42100 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
42101
42102diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
42103index 72afe82..056a57a 100644
42104--- a/drivers/gpu/drm/radeon/radeon_ttm.c
42105+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
42106@@ -801,7 +801,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
42107 man->size = size >> PAGE_SHIFT;
42108 }
42109
42110-static struct vm_operations_struct radeon_ttm_vm_ops;
42111+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
42112 static const struct vm_operations_struct *ttm_vm_ops = NULL;
42113
42114 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42115@@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
42116 }
42117 if (unlikely(ttm_vm_ops == NULL)) {
42118 ttm_vm_ops = vma->vm_ops;
42119+ pax_open_kernel();
42120 radeon_ttm_vm_ops = *ttm_vm_ops;
42121 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
42122+ pax_close_kernel();
42123 }
42124 vma->vm_ops = &radeon_ttm_vm_ops;
42125 return 0;
42126diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
42127index 6553fd2..aecd29c 100644
42128--- a/drivers/gpu/drm/tegra/dc.c
42129+++ b/drivers/gpu/drm/tegra/dc.c
42130@@ -1243,7 +1243,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
42131 }
42132
42133 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
42134- dc->debugfs_files[i].data = dc;
42135+ *(void **)&dc->debugfs_files[i].data = dc;
42136
42137 err = drm_debugfs_create_files(dc->debugfs_files,
42138 ARRAY_SIZE(debugfs_files),
42139diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
42140index f787445..2df2c65 100644
42141--- a/drivers/gpu/drm/tegra/dsi.c
42142+++ b/drivers/gpu/drm/tegra/dsi.c
42143@@ -41,7 +41,7 @@ struct tegra_dsi {
42144 struct clk *clk_lp;
42145 struct clk *clk;
42146
42147- struct drm_info_list *debugfs_files;
42148+ drm_info_list_no_const *debugfs_files;
42149 struct drm_minor *minor;
42150 struct dentry *debugfs;
42151
42152diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
42153index ffe2654..03c7b1c 100644
42154--- a/drivers/gpu/drm/tegra/hdmi.c
42155+++ b/drivers/gpu/drm/tegra/hdmi.c
42156@@ -60,7 +60,7 @@ struct tegra_hdmi {
42157 bool stereo;
42158 bool dvi;
42159
42160- struct drm_info_list *debugfs_files;
42161+ drm_info_list_no_const *debugfs_files;
42162 struct drm_minor *minor;
42163 struct dentry *debugfs;
42164 };
42165diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42166index 9e103a48..0e117f3 100644
42167--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
42168+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42169@@ -147,10 +147,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
42170 }
42171
42172 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
42173- ttm_bo_man_init,
42174- ttm_bo_man_takedown,
42175- ttm_bo_man_get_node,
42176- ttm_bo_man_put_node,
42177- ttm_bo_man_debug
42178+ .init = ttm_bo_man_init,
42179+ .takedown = ttm_bo_man_takedown,
42180+ .get_node = ttm_bo_man_get_node,
42181+ .put_node = ttm_bo_man_put_node,
42182+ .debug = ttm_bo_man_debug
42183 };
42184 EXPORT_SYMBOL(ttm_bo_manager_func);
42185diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
42186index dbc2def..0a9f710 100644
42187--- a/drivers/gpu/drm/ttm/ttm_memory.c
42188+++ b/drivers/gpu/drm/ttm/ttm_memory.c
42189@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
42190 zone->glob = glob;
42191 glob->zone_kernel = zone;
42192 ret = kobject_init_and_add(
42193- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42194+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42195 if (unlikely(ret != 0)) {
42196 kobject_put(&zone->kobj);
42197 return ret;
42198@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
42199 zone->glob = glob;
42200 glob->zone_dma32 = zone;
42201 ret = kobject_init_and_add(
42202- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42203+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42204 if (unlikely(ret != 0)) {
42205 kobject_put(&zone->kobj);
42206 return ret;
42207diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
42208index d1da339..829235e 100644
42209--- a/drivers/gpu/drm/udl/udl_fb.c
42210+++ b/drivers/gpu/drm/udl/udl_fb.c
42211@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
42212 fb_deferred_io_cleanup(info);
42213 kfree(info->fbdefio);
42214 info->fbdefio = NULL;
42215- info->fbops->fb_mmap = udl_fb_mmap;
42216 }
42217
42218 pr_warn("released /dev/fb%d user=%d count=%d\n",
42219diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
42220index ad02732..144f5ed 100644
42221--- a/drivers/gpu/drm/via/via_drv.h
42222+++ b/drivers/gpu/drm/via/via_drv.h
42223@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
42224 typedef uint32_t maskarray_t[5];
42225
42226 typedef struct drm_via_irq {
42227- atomic_t irq_received;
42228+ atomic_unchecked_t irq_received;
42229 uint32_t pending_mask;
42230 uint32_t enable_mask;
42231 wait_queue_head_t irq_queue;
42232@@ -75,7 +75,7 @@ typedef struct drm_via_private {
42233 struct timeval last_vblank;
42234 int last_vblank_valid;
42235 unsigned usec_per_vblank;
42236- atomic_t vbl_received;
42237+ atomic_unchecked_t vbl_received;
42238 drm_via_state_t hc_state;
42239 char pci_buf[VIA_PCI_BUF_SIZE];
42240 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
42241diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
42242index 1319433..a993b0c 100644
42243--- a/drivers/gpu/drm/via/via_irq.c
42244+++ b/drivers/gpu/drm/via/via_irq.c
42245@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
42246 if (crtc != 0)
42247 return 0;
42248
42249- return atomic_read(&dev_priv->vbl_received);
42250+ return atomic_read_unchecked(&dev_priv->vbl_received);
42251 }
42252
42253 irqreturn_t via_driver_irq_handler(int irq, void *arg)
42254@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42255
42256 status = VIA_READ(VIA_REG_INTERRUPT);
42257 if (status & VIA_IRQ_VBLANK_PENDING) {
42258- atomic_inc(&dev_priv->vbl_received);
42259- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
42260+ atomic_inc_unchecked(&dev_priv->vbl_received);
42261+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
42262 do_gettimeofday(&cur_vblank);
42263 if (dev_priv->last_vblank_valid) {
42264 dev_priv->usec_per_vblank =
42265@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42266 dev_priv->last_vblank = cur_vblank;
42267 dev_priv->last_vblank_valid = 1;
42268 }
42269- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
42270+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
42271 DRM_DEBUG("US per vblank is: %u\n",
42272 dev_priv->usec_per_vblank);
42273 }
42274@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42275
42276 for (i = 0; i < dev_priv->num_irqs; ++i) {
42277 if (status & cur_irq->pending_mask) {
42278- atomic_inc(&cur_irq->irq_received);
42279+ atomic_inc_unchecked(&cur_irq->irq_received);
42280 wake_up(&cur_irq->irq_queue);
42281 handled = 1;
42282 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
42283@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
42284 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42285 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
42286 masks[irq][4]));
42287- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
42288+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
42289 } else {
42290 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42291 (((cur_irq_sequence =
42292- atomic_read(&cur_irq->irq_received)) -
42293+ atomic_read_unchecked(&cur_irq->irq_received)) -
42294 *sequence) <= (1 << 23)));
42295 }
42296 *sequence = cur_irq_sequence;
42297@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
42298 }
42299
42300 for (i = 0; i < dev_priv->num_irqs; ++i) {
42301- atomic_set(&cur_irq->irq_received, 0);
42302+ atomic_set_unchecked(&cur_irq->irq_received, 0);
42303 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
42304 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
42305 init_waitqueue_head(&cur_irq->irq_queue);
42306@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
42307 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
42308 case VIA_IRQ_RELATIVE:
42309 irqwait->request.sequence +=
42310- atomic_read(&cur_irq->irq_received);
42311+ atomic_read_unchecked(&cur_irq->irq_received);
42312 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
42313 case VIA_IRQ_ABSOLUTE:
42314 break;
42315diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42316index 99f7317..33a835b 100644
42317--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42318+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42319@@ -447,7 +447,7 @@ struct vmw_private {
42320 * Fencing and IRQs.
42321 */
42322
42323- atomic_t marker_seq;
42324+ atomic_unchecked_t marker_seq;
42325 wait_queue_head_t fence_queue;
42326 wait_queue_head_t fifo_queue;
42327 int fence_queue_waiters; /* Protected by hw_mutex */
42328diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42329index 6eae14d..aa311b3 100644
42330--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42331+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42332@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
42333 (unsigned int) min,
42334 (unsigned int) fifo->capabilities);
42335
42336- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42337+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42338 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
42339 vmw_marker_queue_init(&fifo->marker_queue);
42340 return vmw_fifo_send_fence(dev_priv, &dummy);
42341@@ -373,7 +373,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
42342 if (reserveable)
42343 iowrite32(bytes, fifo_mem +
42344 SVGA_FIFO_RESERVED);
42345- return fifo_mem + (next_cmd >> 2);
42346+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
42347 } else {
42348 need_bounce = true;
42349 }
42350@@ -493,7 +493,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42351
42352 fm = vmw_fifo_reserve(dev_priv, bytes);
42353 if (unlikely(fm == NULL)) {
42354- *seqno = atomic_read(&dev_priv->marker_seq);
42355+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42356 ret = -ENOMEM;
42357 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
42358 false, 3*HZ);
42359@@ -501,7 +501,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42360 }
42361
42362 do {
42363- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
42364+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
42365 } while (*seqno == 0);
42366
42367 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
42368diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42369index 26f8bdd..90a0008 100644
42370--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42371+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42372@@ -165,9 +165,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
42373 }
42374
42375 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
42376- vmw_gmrid_man_init,
42377- vmw_gmrid_man_takedown,
42378- vmw_gmrid_man_get_node,
42379- vmw_gmrid_man_put_node,
42380- vmw_gmrid_man_debug
42381+ .init = vmw_gmrid_man_init,
42382+ .takedown = vmw_gmrid_man_takedown,
42383+ .get_node = vmw_gmrid_man_get_node,
42384+ .put_node = vmw_gmrid_man_put_node,
42385+ .debug = vmw_gmrid_man_debug
42386 };
42387diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42388index 37881ec..319065d 100644
42389--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42390+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42391@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
42392 int ret;
42393
42394 num_clips = arg->num_clips;
42395- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42396+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42397
42398 if (unlikely(num_clips == 0))
42399 return 0;
42400@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
42401 int ret;
42402
42403 num_clips = arg->num_clips;
42404- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42405+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42406
42407 if (unlikely(num_clips == 0))
42408 return 0;
42409diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42410index 0c42376..6febe77 100644
42411--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42412+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42413@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
42414 * emitted. Then the fence is stale and signaled.
42415 */
42416
42417- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
42418+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
42419 > VMW_FENCE_WRAP);
42420
42421 return ret;
42422@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
42423
42424 if (fifo_idle)
42425 down_read(&fifo_state->rwsem);
42426- signal_seq = atomic_read(&dev_priv->marker_seq);
42427+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
42428 ret = 0;
42429
42430 for (;;) {
42431diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42432index efd1ffd..0ae13ca 100644
42433--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42434+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42435@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
42436 while (!vmw_lag_lt(queue, us)) {
42437 spin_lock(&queue->lock);
42438 if (list_empty(&queue->head))
42439- seqno = atomic_read(&dev_priv->marker_seq);
42440+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42441 else {
42442 marker = list_first_entry(&queue->head,
42443 struct vmw_marker, head);
42444diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
42445index 37ac7b5..d52a5c9 100644
42446--- a/drivers/gpu/vga/vga_switcheroo.c
42447+++ b/drivers/gpu/vga/vga_switcheroo.c
42448@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
42449
42450 /* this version is for the case where the power switch is separate
42451 to the device being powered down. */
42452-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
42453+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
42454 {
42455 /* copy over all the bus versions */
42456 if (dev->bus && dev->bus->pm) {
42457@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
42458 return ret;
42459 }
42460
42461-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
42462+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
42463 {
42464 /* copy over all the bus versions */
42465 if (dev->bus && dev->bus->pm) {
42466diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
42467index 12b6e67..ddd983c 100644
42468--- a/drivers/hid/hid-core.c
42469+++ b/drivers/hid/hid-core.c
42470@@ -2500,7 +2500,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
42471
42472 int hid_add_device(struct hid_device *hdev)
42473 {
42474- static atomic_t id = ATOMIC_INIT(0);
42475+ static atomic_unchecked_t id = ATOMIC_INIT(0);
42476 int ret;
42477
42478 if (WARN_ON(hdev->status & HID_STAT_ADDED))
42479@@ -2542,7 +2542,7 @@ int hid_add_device(struct hid_device *hdev)
42480 /* XXX hack, any other cleaner solution after the driver core
42481 * is converted to allow more than 20 bytes as the device name? */
42482 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
42483- hdev->vendor, hdev->product, atomic_inc_return(&id));
42484+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
42485
42486 hid_debug_register(hdev, dev_name(&hdev->dev));
42487 ret = device_add(&hdev->dev);
42488diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
42489index 9bf8637..f462416 100644
42490--- a/drivers/hid/hid-logitech-dj.c
42491+++ b/drivers/hid/hid-logitech-dj.c
42492@@ -682,6 +682,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
42493 * device (via hid_input_report() ) and return 1 so hid-core does not do
42494 * anything else with it.
42495 */
42496+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
42497+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
42498+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
42499+ __func__, dj_report->device_index);
42500+ return false;
42501+ }
42502
42503 /* case 1) */
42504 if (data[0] != REPORT_ID_DJ_SHORT)
42505diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
42506index c13fb5b..55a3802 100644
42507--- a/drivers/hid/hid-wiimote-debug.c
42508+++ b/drivers/hid/hid-wiimote-debug.c
42509@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
42510 else if (size == 0)
42511 return -EIO;
42512
42513- if (copy_to_user(u, buf, size))
42514+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
42515 return -EFAULT;
42516
42517 *off += size;
42518diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
42519index 0cb92e3..c7d453d 100644
42520--- a/drivers/hid/uhid.c
42521+++ b/drivers/hid/uhid.c
42522@@ -47,7 +47,7 @@ struct uhid_device {
42523 struct mutex report_lock;
42524 wait_queue_head_t report_wait;
42525 atomic_t report_done;
42526- atomic_t report_id;
42527+ atomic_unchecked_t report_id;
42528 struct uhid_event report_buf;
42529 };
42530
42531@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
42532
42533 spin_lock_irqsave(&uhid->qlock, flags);
42534 ev->type = UHID_FEATURE;
42535- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
42536+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
42537 ev->u.feature.rnum = rnum;
42538 ev->u.feature.rtype = report_type;
42539
42540@@ -538,7 +538,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
42541 spin_lock_irqsave(&uhid->qlock, flags);
42542
42543 /* id for old report; drop it silently */
42544- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
42545+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
42546 goto unlock;
42547 if (atomic_read(&uhid->report_done))
42548 goto unlock;
42549diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
42550index 19bad59..ca24eaf 100644
42551--- a/drivers/hv/channel.c
42552+++ b/drivers/hv/channel.c
42553@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
42554 unsigned long flags;
42555 int ret = 0;
42556
42557- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
42558- atomic_inc(&vmbus_connection.next_gpadl_handle);
42559+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
42560+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
42561
42562 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
42563 if (ret)
42564diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
42565index 3e4235c..877d0e5 100644
42566--- a/drivers/hv/hv.c
42567+++ b/drivers/hv/hv.c
42568@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
42569 u64 output_address = (output) ? virt_to_phys(output) : 0;
42570 u32 output_address_hi = output_address >> 32;
42571 u32 output_address_lo = output_address & 0xFFFFFFFF;
42572- void *hypercall_page = hv_context.hypercall_page;
42573+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42574
42575 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42576 "=a"(hv_status_lo) : "d" (control_hi),
42577@@ -156,7 +156,7 @@ int hv_init(void)
42578 /* See if the hypercall page is already set */
42579 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42580
42581- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42582+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42583
42584 if (!virtaddr)
42585 goto cleanup;
42586diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42587index 5e90c5d..d8fcefb 100644
42588--- a/drivers/hv/hv_balloon.c
42589+++ b/drivers/hv/hv_balloon.c
42590@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42591
42592 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42593 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42594-static atomic_t trans_id = ATOMIC_INIT(0);
42595+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42596
42597 static int dm_ring_size = (5 * PAGE_SIZE);
42598
42599@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
42600 pr_info("Memory hot add failed\n");
42601
42602 dm->state = DM_INITIALIZED;
42603- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42604+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42605 vmbus_sendpacket(dm->dev->channel, &resp,
42606 sizeof(struct dm_hot_add_response),
42607 (unsigned long)NULL,
42608@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
42609 memset(&status, 0, sizeof(struct dm_status));
42610 status.hdr.type = DM_STATUS_REPORT;
42611 status.hdr.size = sizeof(struct dm_status);
42612- status.hdr.trans_id = atomic_inc_return(&trans_id);
42613+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42614
42615 /*
42616 * The host expects the guest to report free memory.
42617@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
42618 * send the status. This can happen if we were interrupted
42619 * after we picked our transaction ID.
42620 */
42621- if (status.hdr.trans_id != atomic_read(&trans_id))
42622+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42623 return;
42624
42625 /*
42626@@ -1129,7 +1129,7 @@ static void balloon_up(struct work_struct *dummy)
42627 */
42628
42629 do {
42630- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42631+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42632 ret = vmbus_sendpacket(dm_device.dev->channel,
42633 bl_resp,
42634 bl_resp->hdr.size,
42635@@ -1175,7 +1175,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42636
42637 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42638 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42639- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42640+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42641 resp.hdr.size = sizeof(struct dm_unballoon_response);
42642
42643 vmbus_sendpacket(dm_device.dev->channel, &resp,
42644@@ -1239,7 +1239,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42645 memset(&version_req, 0, sizeof(struct dm_version_request));
42646 version_req.hdr.type = DM_VERSION_REQUEST;
42647 version_req.hdr.size = sizeof(struct dm_version_request);
42648- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42649+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42650 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42651 version_req.is_last_attempt = 1;
42652
42653@@ -1409,7 +1409,7 @@ static int balloon_probe(struct hv_device *dev,
42654 memset(&version_req, 0, sizeof(struct dm_version_request));
42655 version_req.hdr.type = DM_VERSION_REQUEST;
42656 version_req.hdr.size = sizeof(struct dm_version_request);
42657- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42658+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42659 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42660 version_req.is_last_attempt = 0;
42661
42662@@ -1440,7 +1440,7 @@ static int balloon_probe(struct hv_device *dev,
42663 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42664 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42665 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42666- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42667+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42668
42669 cap_msg.caps.cap_bits.balloon = 1;
42670 cap_msg.caps.cap_bits.hot_add = 1;
42671diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42672index c386d8d..d6004c4 100644
42673--- a/drivers/hv/hyperv_vmbus.h
42674+++ b/drivers/hv/hyperv_vmbus.h
42675@@ -611,7 +611,7 @@ enum vmbus_connect_state {
42676 struct vmbus_connection {
42677 enum vmbus_connect_state conn_state;
42678
42679- atomic_t next_gpadl_handle;
42680+ atomic_unchecked_t next_gpadl_handle;
42681
42682 /*
42683 * Represents channel interrupts. Each bit position represents a
42684diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42685index 4d6b269..2e23b86 100644
42686--- a/drivers/hv/vmbus_drv.c
42687+++ b/drivers/hv/vmbus_drv.c
42688@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42689 {
42690 int ret = 0;
42691
42692- static atomic_t device_num = ATOMIC_INIT(0);
42693+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42694
42695 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42696- atomic_inc_return(&device_num));
42697+ atomic_inc_return_unchecked(&device_num));
42698
42699 child_device_obj->device.bus = &hv_bus;
42700 child_device_obj->device.parent = &hv_acpi_dev->dev;
42701diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42702index 579bdf9..75118b5 100644
42703--- a/drivers/hwmon/acpi_power_meter.c
42704+++ b/drivers/hwmon/acpi_power_meter.c
42705@@ -116,7 +116,7 @@ struct sensor_template {
42706 struct device_attribute *devattr,
42707 const char *buf, size_t count);
42708 int index;
42709-};
42710+} __do_const;
42711
42712 /* Averaging interval */
42713 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42714@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42715 struct sensor_template *attrs)
42716 {
42717 struct device *dev = &resource->acpi_dev->dev;
42718- struct sensor_device_attribute *sensors =
42719+ sensor_device_attribute_no_const *sensors =
42720 &resource->sensors[resource->num_sensors];
42721 int res = 0;
42722
42723diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42724index 3288f13..71cfb4e 100644
42725--- a/drivers/hwmon/applesmc.c
42726+++ b/drivers/hwmon/applesmc.c
42727@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42728 {
42729 struct applesmc_node_group *grp;
42730 struct applesmc_dev_attr *node;
42731- struct attribute *attr;
42732+ attribute_no_const *attr;
42733 int ret, i;
42734
42735 for (grp = groups; grp->format; grp++) {
42736diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42737index cccef87..06ce8ec 100644
42738--- a/drivers/hwmon/asus_atk0110.c
42739+++ b/drivers/hwmon/asus_atk0110.c
42740@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42741 struct atk_sensor_data {
42742 struct list_head list;
42743 struct atk_data *data;
42744- struct device_attribute label_attr;
42745- struct device_attribute input_attr;
42746- struct device_attribute limit1_attr;
42747- struct device_attribute limit2_attr;
42748+ device_attribute_no_const label_attr;
42749+ device_attribute_no_const input_attr;
42750+ device_attribute_no_const limit1_attr;
42751+ device_attribute_no_const limit2_attr;
42752 char label_attr_name[ATTR_NAME_SIZE];
42753 char input_attr_name[ATTR_NAME_SIZE];
42754 char limit1_attr_name[ATTR_NAME_SIZE];
42755@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42756 static struct device_attribute atk_name_attr =
42757 __ATTR(name, 0444, atk_name_show, NULL);
42758
42759-static void atk_init_attribute(struct device_attribute *attr, char *name,
42760+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42761 sysfs_show_func show)
42762 {
42763 sysfs_attr_init(&attr->attr);
42764diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42765index d76f0b7..55ae976 100644
42766--- a/drivers/hwmon/coretemp.c
42767+++ b/drivers/hwmon/coretemp.c
42768@@ -784,7 +784,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42769 return NOTIFY_OK;
42770 }
42771
42772-static struct notifier_block coretemp_cpu_notifier __refdata = {
42773+static struct notifier_block coretemp_cpu_notifier = {
42774 .notifier_call = coretemp_cpu_callback,
42775 };
42776
42777diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42778index 7a8a6fb..015c1fd 100644
42779--- a/drivers/hwmon/ibmaem.c
42780+++ b/drivers/hwmon/ibmaem.c
42781@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42782 struct aem_rw_sensor_template *rw)
42783 {
42784 struct device *dev = &data->pdev->dev;
42785- struct sensor_device_attribute *sensors = data->sensors;
42786+ sensor_device_attribute_no_const *sensors = data->sensors;
42787 int err;
42788
42789 /* Set up read-only sensors */
42790diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42791index 14c82da..09b25d7 100644
42792--- a/drivers/hwmon/iio_hwmon.c
42793+++ b/drivers/hwmon/iio_hwmon.c
42794@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42795 {
42796 struct device *dev = &pdev->dev;
42797 struct iio_hwmon_state *st;
42798- struct sensor_device_attribute *a;
42799+ sensor_device_attribute_no_const *a;
42800 int ret, i;
42801 int in_i = 1, temp_i = 1, curr_i = 1;
42802 enum iio_chan_type type;
42803diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42804index 7710f46..427a28d 100644
42805--- a/drivers/hwmon/nct6683.c
42806+++ b/drivers/hwmon/nct6683.c
42807@@ -397,11 +397,11 @@ static struct attribute_group *
42808 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42809 int repeat)
42810 {
42811- struct sensor_device_attribute_2 *a2;
42812- struct sensor_device_attribute *a;
42813+ sensor_device_attribute_2_no_const *a2;
42814+ sensor_device_attribute_no_const *a;
42815 struct sensor_device_template **t;
42816 struct sensor_device_attr_u *su;
42817- struct attribute_group *group;
42818+ attribute_group_no_const *group;
42819 struct attribute **attrs;
42820 int i, j, count;
42821
42822diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42823index 504cbdd..35d6f25 100644
42824--- a/drivers/hwmon/nct6775.c
42825+++ b/drivers/hwmon/nct6775.c
42826@@ -943,10 +943,10 @@ static struct attribute_group *
42827 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42828 int repeat)
42829 {
42830- struct attribute_group *group;
42831+ attribute_group_no_const *group;
42832 struct sensor_device_attr_u *su;
42833- struct sensor_device_attribute *a;
42834- struct sensor_device_attribute_2 *a2;
42835+ sensor_device_attribute_no_const *a;
42836+ sensor_device_attribute_2_no_const *a2;
42837 struct attribute **attrs;
42838 struct sensor_device_template **t;
42839 int i, count;
42840diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42841index 291d11f..3f0dbbd 100644
42842--- a/drivers/hwmon/pmbus/pmbus_core.c
42843+++ b/drivers/hwmon/pmbus/pmbus_core.c
42844@@ -783,7 +783,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42845 return 0;
42846 }
42847
42848-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42849+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42850 const char *name,
42851 umode_t mode,
42852 ssize_t (*show)(struct device *dev,
42853@@ -800,7 +800,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42854 dev_attr->store = store;
42855 }
42856
42857-static void pmbus_attr_init(struct sensor_device_attribute *a,
42858+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42859 const char *name,
42860 umode_t mode,
42861 ssize_t (*show)(struct device *dev,
42862@@ -822,7 +822,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42863 u16 reg, u8 mask)
42864 {
42865 struct pmbus_boolean *boolean;
42866- struct sensor_device_attribute *a;
42867+ sensor_device_attribute_no_const *a;
42868
42869 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42870 if (!boolean)
42871@@ -847,7 +847,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42872 bool update, bool readonly)
42873 {
42874 struct pmbus_sensor *sensor;
42875- struct device_attribute *a;
42876+ device_attribute_no_const *a;
42877
42878 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42879 if (!sensor)
42880@@ -878,7 +878,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42881 const char *lstring, int index)
42882 {
42883 struct pmbus_label *label;
42884- struct device_attribute *a;
42885+ device_attribute_no_const *a;
42886
42887 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42888 if (!label)
42889diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42890index 97cd45a..ac54d8b 100644
42891--- a/drivers/hwmon/sht15.c
42892+++ b/drivers/hwmon/sht15.c
42893@@ -169,7 +169,7 @@ struct sht15_data {
42894 int supply_uv;
42895 bool supply_uv_valid;
42896 struct work_struct update_supply_work;
42897- atomic_t interrupt_handled;
42898+ atomic_unchecked_t interrupt_handled;
42899 };
42900
42901 /**
42902@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42903 ret = gpio_direction_input(data->pdata->gpio_data);
42904 if (ret)
42905 return ret;
42906- atomic_set(&data->interrupt_handled, 0);
42907+ atomic_set_unchecked(&data->interrupt_handled, 0);
42908
42909 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42910 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42911 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42912 /* Only relevant if the interrupt hasn't occurred. */
42913- if (!atomic_read(&data->interrupt_handled))
42914+ if (!atomic_read_unchecked(&data->interrupt_handled))
42915 schedule_work(&data->read_work);
42916 }
42917 ret = wait_event_timeout(data->wait_queue,
42918@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42919
42920 /* First disable the interrupt */
42921 disable_irq_nosync(irq);
42922- atomic_inc(&data->interrupt_handled);
42923+ atomic_inc_unchecked(&data->interrupt_handled);
42924 /* Then schedule a reading work struct */
42925 if (data->state != SHT15_READING_NOTHING)
42926 schedule_work(&data->read_work);
42927@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42928 * If not, then start the interrupt again - care here as could
42929 * have gone low in meantime so verify it hasn't!
42930 */
42931- atomic_set(&data->interrupt_handled, 0);
42932+ atomic_set_unchecked(&data->interrupt_handled, 0);
42933 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42934 /* If still not occurred or another handler was scheduled */
42935 if (gpio_get_value(data->pdata->gpio_data)
42936- || atomic_read(&data->interrupt_handled))
42937+ || atomic_read_unchecked(&data->interrupt_handled))
42938 return;
42939 }
42940
42941diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42942index 8df43c5..b07b91d 100644
42943--- a/drivers/hwmon/via-cputemp.c
42944+++ b/drivers/hwmon/via-cputemp.c
42945@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42946 return NOTIFY_OK;
42947 }
42948
42949-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42950+static struct notifier_block via_cputemp_cpu_notifier = {
42951 .notifier_call = via_cputemp_cpu_callback,
42952 };
42953
42954diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42955index 41fc683..a39cfea 100644
42956--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42957+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42958@@ -43,7 +43,7 @@
42959 extern struct i2c_adapter amd756_smbus;
42960
42961 static struct i2c_adapter *s4882_adapter;
42962-static struct i2c_algorithm *s4882_algo;
42963+static i2c_algorithm_no_const *s4882_algo;
42964
42965 /* Wrapper access functions for multiplexed SMBus */
42966 static DEFINE_MUTEX(amd756_lock);
42967diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42968index b19a310..d6eece0 100644
42969--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42970+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42971@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42972 /* usb layer */
42973
42974 /* Send command to device, and get response. */
42975-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42976+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42977 {
42978 int ret = 0;
42979 int actual;
42980diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42981index b170bdf..3c76427 100644
42982--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42983+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42984@@ -41,7 +41,7 @@
42985 extern struct i2c_adapter *nforce2_smbus;
42986
42987 static struct i2c_adapter *s4985_adapter;
42988-static struct i2c_algorithm *s4985_algo;
42989+static i2c_algorithm_no_const *s4985_algo;
42990
42991 /* Wrapper access functions for multiplexed SMBus */
42992 static DEFINE_MUTEX(nforce2_lock);
42993diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
42994index 80b47e8..1a6040d9 100644
42995--- a/drivers/i2c/i2c-dev.c
42996+++ b/drivers/i2c/i2c-dev.c
42997@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
42998 break;
42999 }
43000
43001- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
43002+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
43003 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
43004 if (IS_ERR(rdwr_pa[i].buf)) {
43005 res = PTR_ERR(rdwr_pa[i].buf);
43006diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
43007index 0b510ba..4fbb5085 100644
43008--- a/drivers/ide/ide-cd.c
43009+++ b/drivers/ide/ide-cd.c
43010@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
43011 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
43012 if ((unsigned long)buf & alignment
43013 || blk_rq_bytes(rq) & q->dma_pad_mask
43014- || object_is_on_stack(buf))
43015+ || object_starts_on_stack(buf))
43016 drive->dma = 0;
43017 }
43018 }
43019diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
43020index af3e76d..96dfe5e 100644
43021--- a/drivers/iio/industrialio-core.c
43022+++ b/drivers/iio/industrialio-core.c
43023@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
43024 }
43025
43026 static
43027-int __iio_device_attr_init(struct device_attribute *dev_attr,
43028+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
43029 const char *postfix,
43030 struct iio_chan_spec const *chan,
43031 ssize_t (*readfunc)(struct device *dev,
43032diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
43033index e28a494..f7c2671 100644
43034--- a/drivers/infiniband/core/cm.c
43035+++ b/drivers/infiniband/core/cm.c
43036@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
43037
43038 struct cm_counter_group {
43039 struct kobject obj;
43040- atomic_long_t counter[CM_ATTR_COUNT];
43041+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
43042 };
43043
43044 struct cm_counter_attribute {
43045@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
43046 struct ib_mad_send_buf *msg = NULL;
43047 int ret;
43048
43049- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43050+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43051 counter[CM_REQ_COUNTER]);
43052
43053 /* Quick state check to discard duplicate REQs. */
43054@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
43055 if (!cm_id_priv)
43056 return;
43057
43058- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43059+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43060 counter[CM_REP_COUNTER]);
43061 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
43062 if (ret)
43063@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
43064 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
43065 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
43066 spin_unlock_irq(&cm_id_priv->lock);
43067- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43068+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43069 counter[CM_RTU_COUNTER]);
43070 goto out;
43071 }
43072@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
43073 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
43074 dreq_msg->local_comm_id);
43075 if (!cm_id_priv) {
43076- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43077+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43078 counter[CM_DREQ_COUNTER]);
43079 cm_issue_drep(work->port, work->mad_recv_wc);
43080 return -EINVAL;
43081@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
43082 case IB_CM_MRA_REP_RCVD:
43083 break;
43084 case IB_CM_TIMEWAIT:
43085- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43086+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43087 counter[CM_DREQ_COUNTER]);
43088 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43089 goto unlock;
43090@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
43091 cm_free_msg(msg);
43092 goto deref;
43093 case IB_CM_DREQ_RCVD:
43094- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43095+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43096 counter[CM_DREQ_COUNTER]);
43097 goto unlock;
43098 default:
43099@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
43100 ib_modify_mad(cm_id_priv->av.port->mad_agent,
43101 cm_id_priv->msg, timeout)) {
43102 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
43103- atomic_long_inc(&work->port->
43104+ atomic_long_inc_unchecked(&work->port->
43105 counter_group[CM_RECV_DUPLICATES].
43106 counter[CM_MRA_COUNTER]);
43107 goto out;
43108@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
43109 break;
43110 case IB_CM_MRA_REQ_RCVD:
43111 case IB_CM_MRA_REP_RCVD:
43112- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43113+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43114 counter[CM_MRA_COUNTER]);
43115 /* fall through */
43116 default:
43117@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
43118 case IB_CM_LAP_IDLE:
43119 break;
43120 case IB_CM_MRA_LAP_SENT:
43121- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43122+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43123 counter[CM_LAP_COUNTER]);
43124 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43125 goto unlock;
43126@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
43127 cm_free_msg(msg);
43128 goto deref;
43129 case IB_CM_LAP_RCVD:
43130- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43131+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43132 counter[CM_LAP_COUNTER]);
43133 goto unlock;
43134 default:
43135@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
43136 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
43137 if (cur_cm_id_priv) {
43138 spin_unlock_irq(&cm.lock);
43139- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43140+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43141 counter[CM_SIDR_REQ_COUNTER]);
43142 goto out; /* Duplicate message. */
43143 }
43144@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
43145 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
43146 msg->retries = 1;
43147
43148- atomic_long_add(1 + msg->retries,
43149+ atomic_long_add_unchecked(1 + msg->retries,
43150 &port->counter_group[CM_XMIT].counter[attr_index]);
43151 if (msg->retries)
43152- atomic_long_add(msg->retries,
43153+ atomic_long_add_unchecked(msg->retries,
43154 &port->counter_group[CM_XMIT_RETRIES].
43155 counter[attr_index]);
43156
43157@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
43158 }
43159
43160 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
43161- atomic_long_inc(&port->counter_group[CM_RECV].
43162+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
43163 counter[attr_id - CM_ATTR_ID_OFFSET]);
43164
43165 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
43166@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
43167 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
43168
43169 return sprintf(buf, "%ld\n",
43170- atomic_long_read(&group->counter[cm_attr->index]));
43171+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
43172 }
43173
43174 static const struct sysfs_ops cm_counter_ops = {
43175diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
43176index 9f5ad7c..588cd84 100644
43177--- a/drivers/infiniband/core/fmr_pool.c
43178+++ b/drivers/infiniband/core/fmr_pool.c
43179@@ -98,8 +98,8 @@ struct ib_fmr_pool {
43180
43181 struct task_struct *thread;
43182
43183- atomic_t req_ser;
43184- atomic_t flush_ser;
43185+ atomic_unchecked_t req_ser;
43186+ atomic_unchecked_t flush_ser;
43187
43188 wait_queue_head_t force_wait;
43189 };
43190@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43191 struct ib_fmr_pool *pool = pool_ptr;
43192
43193 do {
43194- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
43195+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
43196 ib_fmr_batch_release(pool);
43197
43198- atomic_inc(&pool->flush_ser);
43199+ atomic_inc_unchecked(&pool->flush_ser);
43200 wake_up_interruptible(&pool->force_wait);
43201
43202 if (pool->flush_function)
43203@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43204 }
43205
43206 set_current_state(TASK_INTERRUPTIBLE);
43207- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
43208+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
43209 !kthread_should_stop())
43210 schedule();
43211 __set_current_state(TASK_RUNNING);
43212@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
43213 pool->dirty_watermark = params->dirty_watermark;
43214 pool->dirty_len = 0;
43215 spin_lock_init(&pool->pool_lock);
43216- atomic_set(&pool->req_ser, 0);
43217- atomic_set(&pool->flush_ser, 0);
43218+ atomic_set_unchecked(&pool->req_ser, 0);
43219+ atomic_set_unchecked(&pool->flush_ser, 0);
43220 init_waitqueue_head(&pool->force_wait);
43221
43222 pool->thread = kthread_run(ib_fmr_cleanup_thread,
43223@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
43224 }
43225 spin_unlock_irq(&pool->pool_lock);
43226
43227- serial = atomic_inc_return(&pool->req_ser);
43228+ serial = atomic_inc_return_unchecked(&pool->req_ser);
43229 wake_up_process(pool->thread);
43230
43231 if (wait_event_interruptible(pool->force_wait,
43232- atomic_read(&pool->flush_ser) - serial >= 0))
43233+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
43234 return -EINTR;
43235
43236 return 0;
43237@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
43238 } else {
43239 list_add_tail(&fmr->list, &pool->dirty_list);
43240 if (++pool->dirty_len >= pool->dirty_watermark) {
43241- atomic_inc(&pool->req_ser);
43242+ atomic_inc_unchecked(&pool->req_ser);
43243 wake_up_process(pool->thread);
43244 }
43245 }
43246diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
43247index ec7a298..8742e59 100644
43248--- a/drivers/infiniband/hw/cxgb4/mem.c
43249+++ b/drivers/infiniband/hw/cxgb4/mem.c
43250@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43251 int err;
43252 struct fw_ri_tpte tpt;
43253 u32 stag_idx;
43254- static atomic_t key;
43255+ static atomic_unchecked_t key;
43256
43257 if (c4iw_fatal_error(rdev))
43258 return -EIO;
43259@@ -270,7 +270,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43260 if (rdev->stats.stag.cur > rdev->stats.stag.max)
43261 rdev->stats.stag.max = rdev->stats.stag.cur;
43262 mutex_unlock(&rdev->stats.lock);
43263- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
43264+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
43265 }
43266 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
43267 __func__, stag_state, type, pdid, stag_idx);
43268diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
43269index 79b3dbc..96e5fcc 100644
43270--- a/drivers/infiniband/hw/ipath/ipath_rc.c
43271+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
43272@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43273 struct ib_atomic_eth *ateth;
43274 struct ipath_ack_entry *e;
43275 u64 vaddr;
43276- atomic64_t *maddr;
43277+ atomic64_unchecked_t *maddr;
43278 u64 sdata;
43279 u32 rkey;
43280 u8 next;
43281@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43282 IB_ACCESS_REMOTE_ATOMIC)))
43283 goto nack_acc_unlck;
43284 /* Perform atomic OP and save result. */
43285- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43286+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43287 sdata = be64_to_cpu(ateth->swap_data);
43288 e = &qp->s_ack_queue[qp->r_head_ack_queue];
43289 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
43290- (u64) atomic64_add_return(sdata, maddr) - sdata :
43291+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43292 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43293 be64_to_cpu(ateth->compare_data),
43294 sdata);
43295diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
43296index 1f95bba..9530f87 100644
43297--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
43298+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
43299@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
43300 unsigned long flags;
43301 struct ib_wc wc;
43302 u64 sdata;
43303- atomic64_t *maddr;
43304+ atomic64_unchecked_t *maddr;
43305 enum ib_wc_status send_status;
43306
43307 /*
43308@@ -382,11 +382,11 @@ again:
43309 IB_ACCESS_REMOTE_ATOMIC)))
43310 goto acc_err;
43311 /* Perform atomic OP and save result. */
43312- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43313+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43314 sdata = wqe->wr.wr.atomic.compare_add;
43315 *(u64 *) sqp->s_sge.sge.vaddr =
43316 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
43317- (u64) atomic64_add_return(sdata, maddr) - sdata :
43318+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43319 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43320 sdata, wqe->wr.wr.atomic.swap);
43321 goto send_comp;
43322diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
43323index 82a7dd8..8fb6ba6 100644
43324--- a/drivers/infiniband/hw/mlx4/mad.c
43325+++ b/drivers/infiniband/hw/mlx4/mad.c
43326@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
43327
43328 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
43329 {
43330- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
43331+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
43332 cpu_to_be64(0xff00000000000000LL);
43333 }
43334
43335diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
43336index ed327e6..ca1739e0 100644
43337--- a/drivers/infiniband/hw/mlx4/mcg.c
43338+++ b/drivers/infiniband/hw/mlx4/mcg.c
43339@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
43340 {
43341 char name[20];
43342
43343- atomic_set(&ctx->tid, 0);
43344+ atomic_set_unchecked(&ctx->tid, 0);
43345 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
43346 ctx->mcg_wq = create_singlethread_workqueue(name);
43347 if (!ctx->mcg_wq)
43348diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43349index 6eb743f..a7b0f6d 100644
43350--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
43351+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43352@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
43353 struct list_head mcg_mgid0_list;
43354 struct workqueue_struct *mcg_wq;
43355 struct mlx4_ib_demux_pv_ctx **tun;
43356- atomic_t tid;
43357+ atomic_unchecked_t tid;
43358 int flushing; /* flushing the work queue */
43359 };
43360
43361diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
43362index 9d3e5c1..6f166df 100644
43363--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
43364+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
43365@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
43366 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
43367 }
43368
43369-int mthca_QUERY_FW(struct mthca_dev *dev)
43370+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
43371 {
43372 struct mthca_mailbox *mailbox;
43373 u32 *outbox;
43374@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43375 CMD_TIME_CLASS_B);
43376 }
43377
43378-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43379+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43380 int num_mtt)
43381 {
43382 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
43383@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
43384 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
43385 }
43386
43387-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43388+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43389 int eq_num)
43390 {
43391 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
43392@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
43393 CMD_TIME_CLASS_B);
43394 }
43395
43396-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43397+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43398 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
43399 void *in_mad, void *response_mad)
43400 {
43401diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
43402index ded76c1..0cf0a08 100644
43403--- a/drivers/infiniband/hw/mthca/mthca_main.c
43404+++ b/drivers/infiniband/hw/mthca/mthca_main.c
43405@@ -692,7 +692,7 @@ err_close:
43406 return err;
43407 }
43408
43409-static int mthca_setup_hca(struct mthca_dev *dev)
43410+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
43411 {
43412 int err;
43413
43414diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
43415index ed9a989..6aa5dc2 100644
43416--- a/drivers/infiniband/hw/mthca/mthca_mr.c
43417+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
43418@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
43419 * through the bitmaps)
43420 */
43421
43422-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43423+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43424 {
43425 int o;
43426 int m;
43427@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
43428 return key;
43429 }
43430
43431-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43432+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43433 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
43434 {
43435 struct mthca_mailbox *mailbox;
43436@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
43437 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
43438 }
43439
43440-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43441+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43442 u64 *buffer_list, int buffer_size_shift,
43443 int list_len, u64 iova, u64 total_size,
43444 u32 access, struct mthca_mr *mr)
43445diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
43446index 415f8e1..e34214e 100644
43447--- a/drivers/infiniband/hw/mthca/mthca_provider.c
43448+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
43449@@ -764,7 +764,7 @@ unlock:
43450 return 0;
43451 }
43452
43453-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43454+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43455 {
43456 struct mthca_dev *dev = to_mdev(ibcq->device);
43457 struct mthca_cq *cq = to_mcq(ibcq);
43458diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
43459index 3b2a6dc..bce26ff 100644
43460--- a/drivers/infiniband/hw/nes/nes.c
43461+++ b/drivers/infiniband/hw/nes/nes.c
43462@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
43463 LIST_HEAD(nes_adapter_list);
43464 static LIST_HEAD(nes_dev_list);
43465
43466-atomic_t qps_destroyed;
43467+atomic_unchecked_t qps_destroyed;
43468
43469 static unsigned int ee_flsh_adapter;
43470 static unsigned int sysfs_nonidx_addr;
43471@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
43472 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
43473 struct nes_adapter *nesadapter = nesdev->nesadapter;
43474
43475- atomic_inc(&qps_destroyed);
43476+ atomic_inc_unchecked(&qps_destroyed);
43477
43478 /* Free the control structures */
43479
43480diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
43481index bd9d132..70d84f4 100644
43482--- a/drivers/infiniband/hw/nes/nes.h
43483+++ b/drivers/infiniband/hw/nes/nes.h
43484@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
43485 extern unsigned int wqm_quanta;
43486 extern struct list_head nes_adapter_list;
43487
43488-extern atomic_t cm_connects;
43489-extern atomic_t cm_accepts;
43490-extern atomic_t cm_disconnects;
43491-extern atomic_t cm_closes;
43492-extern atomic_t cm_connecteds;
43493-extern atomic_t cm_connect_reqs;
43494-extern atomic_t cm_rejects;
43495-extern atomic_t mod_qp_timouts;
43496-extern atomic_t qps_created;
43497-extern atomic_t qps_destroyed;
43498-extern atomic_t sw_qps_destroyed;
43499+extern atomic_unchecked_t cm_connects;
43500+extern atomic_unchecked_t cm_accepts;
43501+extern atomic_unchecked_t cm_disconnects;
43502+extern atomic_unchecked_t cm_closes;
43503+extern atomic_unchecked_t cm_connecteds;
43504+extern atomic_unchecked_t cm_connect_reqs;
43505+extern atomic_unchecked_t cm_rejects;
43506+extern atomic_unchecked_t mod_qp_timouts;
43507+extern atomic_unchecked_t qps_created;
43508+extern atomic_unchecked_t qps_destroyed;
43509+extern atomic_unchecked_t sw_qps_destroyed;
43510 extern u32 mh_detected;
43511 extern u32 mh_pauses_sent;
43512 extern u32 cm_packets_sent;
43513@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
43514 extern u32 cm_packets_received;
43515 extern u32 cm_packets_dropped;
43516 extern u32 cm_packets_retrans;
43517-extern atomic_t cm_listens_created;
43518-extern atomic_t cm_listens_destroyed;
43519+extern atomic_unchecked_t cm_listens_created;
43520+extern atomic_unchecked_t cm_listens_destroyed;
43521 extern u32 cm_backlog_drops;
43522-extern atomic_t cm_loopbacks;
43523-extern atomic_t cm_nodes_created;
43524-extern atomic_t cm_nodes_destroyed;
43525-extern atomic_t cm_accel_dropped_pkts;
43526-extern atomic_t cm_resets_recvd;
43527-extern atomic_t pau_qps_created;
43528-extern atomic_t pau_qps_destroyed;
43529+extern atomic_unchecked_t cm_loopbacks;
43530+extern atomic_unchecked_t cm_nodes_created;
43531+extern atomic_unchecked_t cm_nodes_destroyed;
43532+extern atomic_unchecked_t cm_accel_dropped_pkts;
43533+extern atomic_unchecked_t cm_resets_recvd;
43534+extern atomic_unchecked_t pau_qps_created;
43535+extern atomic_unchecked_t pau_qps_destroyed;
43536
43537 extern u32 int_mod_timer_init;
43538 extern u32 int_mod_cq_depth_256;
43539diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
43540index 6f09a72..cf4399d 100644
43541--- a/drivers/infiniband/hw/nes/nes_cm.c
43542+++ b/drivers/infiniband/hw/nes/nes_cm.c
43543@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
43544 u32 cm_packets_retrans;
43545 u32 cm_packets_created;
43546 u32 cm_packets_received;
43547-atomic_t cm_listens_created;
43548-atomic_t cm_listens_destroyed;
43549+atomic_unchecked_t cm_listens_created;
43550+atomic_unchecked_t cm_listens_destroyed;
43551 u32 cm_backlog_drops;
43552-atomic_t cm_loopbacks;
43553-atomic_t cm_nodes_created;
43554-atomic_t cm_nodes_destroyed;
43555-atomic_t cm_accel_dropped_pkts;
43556-atomic_t cm_resets_recvd;
43557+atomic_unchecked_t cm_loopbacks;
43558+atomic_unchecked_t cm_nodes_created;
43559+atomic_unchecked_t cm_nodes_destroyed;
43560+atomic_unchecked_t cm_accel_dropped_pkts;
43561+atomic_unchecked_t cm_resets_recvd;
43562
43563 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43564 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43565@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43566 /* instance of function pointers for client API */
43567 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43568 static struct nes_cm_ops nes_cm_api = {
43569- mini_cm_accelerated,
43570- mini_cm_listen,
43571- mini_cm_del_listen,
43572- mini_cm_connect,
43573- mini_cm_close,
43574- mini_cm_accept,
43575- mini_cm_reject,
43576- mini_cm_recv_pkt,
43577- mini_cm_dealloc_core,
43578- mini_cm_get,
43579- mini_cm_set
43580+ .accelerated = mini_cm_accelerated,
43581+ .listen = mini_cm_listen,
43582+ .stop_listener = mini_cm_del_listen,
43583+ .connect = mini_cm_connect,
43584+ .close = mini_cm_close,
43585+ .accept = mini_cm_accept,
43586+ .reject = mini_cm_reject,
43587+ .recv_pkt = mini_cm_recv_pkt,
43588+ .destroy_cm_core = mini_cm_dealloc_core,
43589+ .get = mini_cm_get,
43590+ .set = mini_cm_set
43591 };
43592
43593 static struct nes_cm_core *g_cm_core;
43594
43595-atomic_t cm_connects;
43596-atomic_t cm_accepts;
43597-atomic_t cm_disconnects;
43598-atomic_t cm_closes;
43599-atomic_t cm_connecteds;
43600-atomic_t cm_connect_reqs;
43601-atomic_t cm_rejects;
43602+atomic_unchecked_t cm_connects;
43603+atomic_unchecked_t cm_accepts;
43604+atomic_unchecked_t cm_disconnects;
43605+atomic_unchecked_t cm_closes;
43606+atomic_unchecked_t cm_connecteds;
43607+atomic_unchecked_t cm_connect_reqs;
43608+atomic_unchecked_t cm_rejects;
43609
43610 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43611 {
43612@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43613 kfree(listener);
43614 listener = NULL;
43615 ret = 0;
43616- atomic_inc(&cm_listens_destroyed);
43617+ atomic_inc_unchecked(&cm_listens_destroyed);
43618 } else {
43619 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43620 }
43621@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43622 cm_node->rem_mac);
43623
43624 add_hte_node(cm_core, cm_node);
43625- atomic_inc(&cm_nodes_created);
43626+ atomic_inc_unchecked(&cm_nodes_created);
43627
43628 return cm_node;
43629 }
43630@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43631 }
43632
43633 atomic_dec(&cm_core->node_cnt);
43634- atomic_inc(&cm_nodes_destroyed);
43635+ atomic_inc_unchecked(&cm_nodes_destroyed);
43636 nesqp = cm_node->nesqp;
43637 if (nesqp) {
43638 nesqp->cm_node = NULL;
43639@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43640
43641 static void drop_packet(struct sk_buff *skb)
43642 {
43643- atomic_inc(&cm_accel_dropped_pkts);
43644+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43645 dev_kfree_skb_any(skb);
43646 }
43647
43648@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43649 {
43650
43651 int reset = 0; /* whether to send reset in case of err.. */
43652- atomic_inc(&cm_resets_recvd);
43653+ atomic_inc_unchecked(&cm_resets_recvd);
43654 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43655 " refcnt=%d\n", cm_node, cm_node->state,
43656 atomic_read(&cm_node->ref_count));
43657@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43658 rem_ref_cm_node(cm_node->cm_core, cm_node);
43659 return NULL;
43660 }
43661- atomic_inc(&cm_loopbacks);
43662+ atomic_inc_unchecked(&cm_loopbacks);
43663 loopbackremotenode->loopbackpartner = cm_node;
43664 loopbackremotenode->tcp_cntxt.rcv_wscale =
43665 NES_CM_DEFAULT_RCV_WND_SCALE;
43666@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43667 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43668 else {
43669 rem_ref_cm_node(cm_core, cm_node);
43670- atomic_inc(&cm_accel_dropped_pkts);
43671+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43672 dev_kfree_skb_any(skb);
43673 }
43674 break;
43675@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43676
43677 if ((cm_id) && (cm_id->event_handler)) {
43678 if (issue_disconn) {
43679- atomic_inc(&cm_disconnects);
43680+ atomic_inc_unchecked(&cm_disconnects);
43681 cm_event.event = IW_CM_EVENT_DISCONNECT;
43682 cm_event.status = disconn_status;
43683 cm_event.local_addr = cm_id->local_addr;
43684@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43685 }
43686
43687 if (issue_close) {
43688- atomic_inc(&cm_closes);
43689+ atomic_inc_unchecked(&cm_closes);
43690 nes_disconnect(nesqp, 1);
43691
43692 cm_id->provider_data = nesqp;
43693@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43694
43695 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43696 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43697- atomic_inc(&cm_accepts);
43698+ atomic_inc_unchecked(&cm_accepts);
43699
43700 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43701 netdev_refcnt_read(nesvnic->netdev));
43702@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43703 struct nes_cm_core *cm_core;
43704 u8 *start_buff;
43705
43706- atomic_inc(&cm_rejects);
43707+ atomic_inc_unchecked(&cm_rejects);
43708 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43709 loopback = cm_node->loopbackpartner;
43710 cm_core = cm_node->cm_core;
43711@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43712 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43713 ntohs(laddr->sin_port));
43714
43715- atomic_inc(&cm_connects);
43716+ atomic_inc_unchecked(&cm_connects);
43717 nesqp->active_conn = 1;
43718
43719 /* cache the cm_id in the qp */
43720@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43721 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43722 return err;
43723 }
43724- atomic_inc(&cm_listens_created);
43725+ atomic_inc_unchecked(&cm_listens_created);
43726 }
43727
43728 cm_id->add_ref(cm_id);
43729@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43730
43731 if (nesqp->destroyed)
43732 return;
43733- atomic_inc(&cm_connecteds);
43734+ atomic_inc_unchecked(&cm_connecteds);
43735 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43736 " local port 0x%04X. jiffies = %lu.\n",
43737 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43738@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43739
43740 cm_id->add_ref(cm_id);
43741 ret = cm_id->event_handler(cm_id, &cm_event);
43742- atomic_inc(&cm_closes);
43743+ atomic_inc_unchecked(&cm_closes);
43744 cm_event.event = IW_CM_EVENT_CLOSE;
43745 cm_event.status = 0;
43746 cm_event.provider_data = cm_id->provider_data;
43747@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43748 return;
43749 cm_id = cm_node->cm_id;
43750
43751- atomic_inc(&cm_connect_reqs);
43752+ atomic_inc_unchecked(&cm_connect_reqs);
43753 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43754 cm_node, cm_id, jiffies);
43755
43756@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43757 return;
43758 cm_id = cm_node->cm_id;
43759
43760- atomic_inc(&cm_connect_reqs);
43761+ atomic_inc_unchecked(&cm_connect_reqs);
43762 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43763 cm_node, cm_id, jiffies);
43764
43765diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43766index 4166452..fc952c3 100644
43767--- a/drivers/infiniband/hw/nes/nes_mgt.c
43768+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43769@@ -40,8 +40,8 @@
43770 #include "nes.h"
43771 #include "nes_mgt.h"
43772
43773-atomic_t pau_qps_created;
43774-atomic_t pau_qps_destroyed;
43775+atomic_unchecked_t pau_qps_created;
43776+atomic_unchecked_t pau_qps_destroyed;
43777
43778 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43779 {
43780@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43781 {
43782 struct sk_buff *skb;
43783 unsigned long flags;
43784- atomic_inc(&pau_qps_destroyed);
43785+ atomic_inc_unchecked(&pau_qps_destroyed);
43786
43787 /* Free packets that have not yet been forwarded */
43788 /* Lock is acquired by skb_dequeue when removing the skb */
43789@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43790 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43791 skb_queue_head_init(&nesqp->pau_list);
43792 spin_lock_init(&nesqp->pau_lock);
43793- atomic_inc(&pau_qps_created);
43794+ atomic_inc_unchecked(&pau_qps_created);
43795 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43796 }
43797
43798diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43799index 49eb511..a774366 100644
43800--- a/drivers/infiniband/hw/nes/nes_nic.c
43801+++ b/drivers/infiniband/hw/nes/nes_nic.c
43802@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43803 target_stat_values[++index] = mh_detected;
43804 target_stat_values[++index] = mh_pauses_sent;
43805 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43806- target_stat_values[++index] = atomic_read(&cm_connects);
43807- target_stat_values[++index] = atomic_read(&cm_accepts);
43808- target_stat_values[++index] = atomic_read(&cm_disconnects);
43809- target_stat_values[++index] = atomic_read(&cm_connecteds);
43810- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43811- target_stat_values[++index] = atomic_read(&cm_rejects);
43812- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43813- target_stat_values[++index] = atomic_read(&qps_created);
43814- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43815- target_stat_values[++index] = atomic_read(&qps_destroyed);
43816- target_stat_values[++index] = atomic_read(&cm_closes);
43817+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43818+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43819+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43820+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43821+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43822+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43823+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43824+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43825+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43826+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43827+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43828 target_stat_values[++index] = cm_packets_sent;
43829 target_stat_values[++index] = cm_packets_bounced;
43830 target_stat_values[++index] = cm_packets_created;
43831 target_stat_values[++index] = cm_packets_received;
43832 target_stat_values[++index] = cm_packets_dropped;
43833 target_stat_values[++index] = cm_packets_retrans;
43834- target_stat_values[++index] = atomic_read(&cm_listens_created);
43835- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43836+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43837+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43838 target_stat_values[++index] = cm_backlog_drops;
43839- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43840- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43841- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43842- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43843- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43844+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43845+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43846+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43847+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43848+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43849 target_stat_values[++index] = nesadapter->free_4kpbl;
43850 target_stat_values[++index] = nesadapter->free_256pbl;
43851 target_stat_values[++index] = int_mod_timer_init;
43852 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43853 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43854 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43855- target_stat_values[++index] = atomic_read(&pau_qps_created);
43856- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43857+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43858+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43859 }
43860
43861 /**
43862diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43863index fef067c..6a25ccd 100644
43864--- a/drivers/infiniband/hw/nes/nes_verbs.c
43865+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43866@@ -46,9 +46,9 @@
43867
43868 #include <rdma/ib_umem.h>
43869
43870-atomic_t mod_qp_timouts;
43871-atomic_t qps_created;
43872-atomic_t sw_qps_destroyed;
43873+atomic_unchecked_t mod_qp_timouts;
43874+atomic_unchecked_t qps_created;
43875+atomic_unchecked_t sw_qps_destroyed;
43876
43877 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43878
43879@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43880 if (init_attr->create_flags)
43881 return ERR_PTR(-EINVAL);
43882
43883- atomic_inc(&qps_created);
43884+ atomic_inc_unchecked(&qps_created);
43885 switch (init_attr->qp_type) {
43886 case IB_QPT_RC:
43887 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43888@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43889 struct iw_cm_event cm_event;
43890 int ret = 0;
43891
43892- atomic_inc(&sw_qps_destroyed);
43893+ atomic_inc_unchecked(&sw_qps_destroyed);
43894 nesqp->destroyed = 1;
43895
43896 /* Blow away the connection if it exists. */
43897diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43898index c00ae09..04e91be 100644
43899--- a/drivers/infiniband/hw/qib/qib.h
43900+++ b/drivers/infiniband/hw/qib/qib.h
43901@@ -52,6 +52,7 @@
43902 #include <linux/kref.h>
43903 #include <linux/sched.h>
43904 #include <linux/kthread.h>
43905+#include <linux/slab.h>
43906
43907 #include "qib_common.h"
43908 #include "qib_verbs.h"
43909diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
43910index de05545..b535322 100644
43911--- a/drivers/input/evdev.c
43912+++ b/drivers/input/evdev.c
43913@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file)
43914
43915 err_free_client:
43916 evdev_detach_client(evdev, client);
43917- kfree(client);
43918+ kvfree(client);
43919 return error;
43920 }
43921
43922diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43923index 24c41ba..102d71f 100644
43924--- a/drivers/input/gameport/gameport.c
43925+++ b/drivers/input/gameport/gameport.c
43926@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43927 */
43928 static void gameport_init_port(struct gameport *gameport)
43929 {
43930- static atomic_t gameport_no = ATOMIC_INIT(0);
43931+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
43932
43933 __module_get(THIS_MODULE);
43934
43935 mutex_init(&gameport->drv_mutex);
43936 device_initialize(&gameport->dev);
43937 dev_set_name(&gameport->dev, "gameport%lu",
43938- (unsigned long)atomic_inc_return(&gameport_no) - 1);
43939+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
43940 gameport->dev.bus = &gameport_bus;
43941 gameport->dev.release = gameport_release_port;
43942 if (gameport->parent)
43943diff --git a/drivers/input/input.c b/drivers/input/input.c
43944index 29ca0bb..f4bc2e3 100644
43945--- a/drivers/input/input.c
43946+++ b/drivers/input/input.c
43947@@ -1774,7 +1774,7 @@ EXPORT_SYMBOL_GPL(input_class);
43948 */
43949 struct input_dev *input_allocate_device(void)
43950 {
43951- static atomic_t input_no = ATOMIC_INIT(0);
43952+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
43953 struct input_dev *dev;
43954
43955 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43956@@ -1789,7 +1789,7 @@ struct input_dev *input_allocate_device(void)
43957 INIT_LIST_HEAD(&dev->node);
43958
43959 dev_set_name(&dev->dev, "input%ld",
43960- (unsigned long) atomic_inc_return(&input_no) - 1);
43961+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
43962
43963 __module_get(THIS_MODULE);
43964 }
43965diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43966index 4a95b22..874c182 100644
43967--- a/drivers/input/joystick/sidewinder.c
43968+++ b/drivers/input/joystick/sidewinder.c
43969@@ -30,6 +30,7 @@
43970 #include <linux/kernel.h>
43971 #include <linux/module.h>
43972 #include <linux/slab.h>
43973+#include <linux/sched.h>
43974 #include <linux/input.h>
43975 #include <linux/gameport.h>
43976 #include <linux/jiffies.h>
43977diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43978index e65d9c0..ad3942e 100644
43979--- a/drivers/input/joystick/xpad.c
43980+++ b/drivers/input/joystick/xpad.c
43981@@ -850,7 +850,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43982
43983 static int xpad_led_probe(struct usb_xpad *xpad)
43984 {
43985- static atomic_t led_seq = ATOMIC_INIT(0);
43986+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
43987 long led_no;
43988 struct xpad_led *led;
43989 struct led_classdev *led_cdev;
43990@@ -863,7 +863,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43991 if (!led)
43992 return -ENOMEM;
43993
43994- led_no = (long)atomic_inc_return(&led_seq) - 1;
43995+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
43996
43997 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
43998 led->xpad = xpad;
43999diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
44000index 719410f..1896169 100644
44001--- a/drivers/input/misc/ims-pcu.c
44002+++ b/drivers/input/misc/ims-pcu.c
44003@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
44004
44005 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44006 {
44007- static atomic_t device_no = ATOMIC_INIT(0);
44008+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
44009
44010 const struct ims_pcu_device_info *info;
44011 int error;
44012@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44013 }
44014
44015 /* Device appears to be operable, complete initialization */
44016- pcu->device_no = atomic_inc_return(&device_no) - 1;
44017+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
44018
44019 /*
44020 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
44021diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
44022index 2f0b39d..7370f13 100644
44023--- a/drivers/input/mouse/psmouse.h
44024+++ b/drivers/input/mouse/psmouse.h
44025@@ -116,7 +116,7 @@ struct psmouse_attribute {
44026 ssize_t (*set)(struct psmouse *psmouse, void *data,
44027 const char *buf, size_t count);
44028 bool protect;
44029-};
44030+} __do_const;
44031 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
44032
44033 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
44034diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
44035index b604564..3f14ae4 100644
44036--- a/drivers/input/mousedev.c
44037+++ b/drivers/input/mousedev.c
44038@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
44039
44040 spin_unlock_irq(&client->packet_lock);
44041
44042- if (copy_to_user(buffer, data, count))
44043+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
44044 return -EFAULT;
44045
44046 return count;
44047diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
44048index b29134d..394deb0 100644
44049--- a/drivers/input/serio/serio.c
44050+++ b/drivers/input/serio/serio.c
44051@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
44052 */
44053 static void serio_init_port(struct serio *serio)
44054 {
44055- static atomic_t serio_no = ATOMIC_INIT(0);
44056+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
44057
44058 __module_get(THIS_MODULE);
44059
44060@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
44061 mutex_init(&serio->drv_mutex);
44062 device_initialize(&serio->dev);
44063 dev_set_name(&serio->dev, "serio%ld",
44064- (long)atomic_inc_return(&serio_no) - 1);
44065+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
44066 serio->dev.bus = &serio_bus;
44067 serio->dev.release = serio_release_port;
44068 serio->dev.groups = serio_device_attr_groups;
44069diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
44070index c9a02fe..0debc75 100644
44071--- a/drivers/input/serio/serio_raw.c
44072+++ b/drivers/input/serio/serio_raw.c
44073@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
44074
44075 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44076 {
44077- static atomic_t serio_raw_no = ATOMIC_INIT(0);
44078+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
44079 struct serio_raw *serio_raw;
44080 int err;
44081
44082@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44083 }
44084
44085 snprintf(serio_raw->name, sizeof(serio_raw->name),
44086- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
44087+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
44088 kref_init(&serio_raw->kref);
44089 INIT_LIST_HEAD(&serio_raw->client_list);
44090 init_waitqueue_head(&serio_raw->wait);
44091diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
44092index a83cc2a..64462e6 100644
44093--- a/drivers/iommu/arm-smmu.c
44094+++ b/drivers/iommu/arm-smmu.c
44095@@ -921,7 +921,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44096 cfg->irptndx = cfg->cbndx;
44097 }
44098
44099- ACCESS_ONCE(smmu_domain->smmu) = smmu;
44100+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
44101 arm_smmu_init_context_bank(smmu_domain);
44102 spin_unlock_irqrestore(&smmu_domain->lock, flags);
44103
44104diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44105index 33c4395..e06447e 100644
44106--- a/drivers/iommu/irq_remapping.c
44107+++ b/drivers/iommu/irq_remapping.c
44108@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44109 void panic_if_irq_remap(const char *msg)
44110 {
44111 if (irq_remapping_enabled)
44112- panic(msg);
44113+ panic("%s", msg);
44114 }
44115
44116 static void ir_ack_apic_edge(struct irq_data *data)
44117@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
44118
44119 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
44120 {
44121- chip->irq_print_chip = ir_print_prefix;
44122- chip->irq_ack = ir_ack_apic_edge;
44123- chip->irq_eoi = ir_ack_apic_level;
44124- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44125+ pax_open_kernel();
44126+ *(void **)&chip->irq_print_chip = ir_print_prefix;
44127+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
44128+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
44129+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44130+ pax_close_kernel();
44131 }
44132
44133 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
44134diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
44135index dda6dbc..f9adebb 100644
44136--- a/drivers/irqchip/irq-gic.c
44137+++ b/drivers/irqchip/irq-gic.c
44138@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
44139 * Supported arch specific GIC irq extension.
44140 * Default make them NULL.
44141 */
44142-struct irq_chip gic_arch_extn = {
44143+irq_chip_no_const gic_arch_extn = {
44144 .irq_eoi = NULL,
44145 .irq_mask = NULL,
44146 .irq_unmask = NULL,
44147@@ -312,7 +312,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
44148 chained_irq_exit(chip, desc);
44149 }
44150
44151-static struct irq_chip gic_chip = {
44152+static irq_chip_no_const gic_chip __read_only = {
44153 .name = "GIC",
44154 .irq_mask = gic_mask_irq,
44155 .irq_unmask = gic_unmask_irq,
44156diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
44157index 8777065..a4a9967 100644
44158--- a/drivers/irqchip/irq-renesas-irqc.c
44159+++ b/drivers/irqchip/irq-renesas-irqc.c
44160@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
44161 struct irqc_priv *p;
44162 struct resource *io;
44163 struct resource *irq;
44164- struct irq_chip *irq_chip;
44165+ irq_chip_no_const *irq_chip;
44166 const char *name = dev_name(&pdev->dev);
44167 int ret;
44168 int k;
44169diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
44170index 6a2df32..dc962f1 100644
44171--- a/drivers/isdn/capi/capi.c
44172+++ b/drivers/isdn/capi/capi.c
44173@@ -81,8 +81,8 @@ struct capiminor {
44174
44175 struct capi20_appl *ap;
44176 u32 ncci;
44177- atomic_t datahandle;
44178- atomic_t msgid;
44179+ atomic_unchecked_t datahandle;
44180+ atomic_unchecked_t msgid;
44181
44182 struct tty_port port;
44183 int ttyinstop;
44184@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
44185 capimsg_setu16(s, 2, mp->ap->applid);
44186 capimsg_setu8 (s, 4, CAPI_DATA_B3);
44187 capimsg_setu8 (s, 5, CAPI_RESP);
44188- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
44189+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
44190 capimsg_setu32(s, 8, mp->ncci);
44191 capimsg_setu16(s, 12, datahandle);
44192 }
44193@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
44194 mp->outbytes -= len;
44195 spin_unlock_bh(&mp->outlock);
44196
44197- datahandle = atomic_inc_return(&mp->datahandle);
44198+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
44199 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
44200 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44201 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44202 capimsg_setu16(skb->data, 2, mp->ap->applid);
44203 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
44204 capimsg_setu8 (skb->data, 5, CAPI_REQ);
44205- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
44206+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
44207 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
44208 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
44209 capimsg_setu16(skb->data, 16, len); /* Data length */
44210diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
44211index b7ae0a0..04590fa 100644
44212--- a/drivers/isdn/gigaset/bas-gigaset.c
44213+++ b/drivers/isdn/gigaset/bas-gigaset.c
44214@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
44215
44216
44217 static const struct gigaset_ops gigops = {
44218- gigaset_write_cmd,
44219- gigaset_write_room,
44220- gigaset_chars_in_buffer,
44221- gigaset_brkchars,
44222- gigaset_init_bchannel,
44223- gigaset_close_bchannel,
44224- gigaset_initbcshw,
44225- gigaset_freebcshw,
44226- gigaset_reinitbcshw,
44227- gigaset_initcshw,
44228- gigaset_freecshw,
44229- gigaset_set_modem_ctrl,
44230- gigaset_baud_rate,
44231- gigaset_set_line_ctrl,
44232- gigaset_isoc_send_skb,
44233- gigaset_isoc_input,
44234+ .write_cmd = gigaset_write_cmd,
44235+ .write_room = gigaset_write_room,
44236+ .chars_in_buffer = gigaset_chars_in_buffer,
44237+ .brkchars = gigaset_brkchars,
44238+ .init_bchannel = gigaset_init_bchannel,
44239+ .close_bchannel = gigaset_close_bchannel,
44240+ .initbcshw = gigaset_initbcshw,
44241+ .freebcshw = gigaset_freebcshw,
44242+ .reinitbcshw = gigaset_reinitbcshw,
44243+ .initcshw = gigaset_initcshw,
44244+ .freecshw = gigaset_freecshw,
44245+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44246+ .baud_rate = gigaset_baud_rate,
44247+ .set_line_ctrl = gigaset_set_line_ctrl,
44248+ .send_skb = gigaset_isoc_send_skb,
44249+ .handle_input = gigaset_isoc_input,
44250 };
44251
44252 /* bas_gigaset_init
44253diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
44254index 600c79b..3752bab 100644
44255--- a/drivers/isdn/gigaset/interface.c
44256+++ b/drivers/isdn/gigaset/interface.c
44257@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
44258 }
44259 tty->driver_data = cs;
44260
44261- ++cs->port.count;
44262+ atomic_inc(&cs->port.count);
44263
44264- if (cs->port.count == 1) {
44265+ if (atomic_read(&cs->port.count) == 1) {
44266 tty_port_tty_set(&cs->port, tty);
44267 cs->port.low_latency = 1;
44268 }
44269@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
44270
44271 if (!cs->connected)
44272 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
44273- else if (!cs->port.count)
44274+ else if (!atomic_read(&cs->port.count))
44275 dev_warn(cs->dev, "%s: device not opened\n", __func__);
44276- else if (!--cs->port.count)
44277+ else if (!atomic_dec_return(&cs->port.count))
44278 tty_port_tty_set(&cs->port, NULL);
44279
44280 mutex_unlock(&cs->mutex);
44281diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
44282index 8c91fd5..14f13ce 100644
44283--- a/drivers/isdn/gigaset/ser-gigaset.c
44284+++ b/drivers/isdn/gigaset/ser-gigaset.c
44285@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
44286 }
44287
44288 static const struct gigaset_ops ops = {
44289- gigaset_write_cmd,
44290- gigaset_write_room,
44291- gigaset_chars_in_buffer,
44292- gigaset_brkchars,
44293- gigaset_init_bchannel,
44294- gigaset_close_bchannel,
44295- gigaset_initbcshw,
44296- gigaset_freebcshw,
44297- gigaset_reinitbcshw,
44298- gigaset_initcshw,
44299- gigaset_freecshw,
44300- gigaset_set_modem_ctrl,
44301- gigaset_baud_rate,
44302- gigaset_set_line_ctrl,
44303- gigaset_m10x_send_skb, /* asyncdata.c */
44304- gigaset_m10x_input, /* asyncdata.c */
44305+ .write_cmd = gigaset_write_cmd,
44306+ .write_room = gigaset_write_room,
44307+ .chars_in_buffer = gigaset_chars_in_buffer,
44308+ .brkchars = gigaset_brkchars,
44309+ .init_bchannel = gigaset_init_bchannel,
44310+ .close_bchannel = gigaset_close_bchannel,
44311+ .initbcshw = gigaset_initbcshw,
44312+ .freebcshw = gigaset_freebcshw,
44313+ .reinitbcshw = gigaset_reinitbcshw,
44314+ .initcshw = gigaset_initcshw,
44315+ .freecshw = gigaset_freecshw,
44316+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44317+ .baud_rate = gigaset_baud_rate,
44318+ .set_line_ctrl = gigaset_set_line_ctrl,
44319+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
44320+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
44321 };
44322
44323
44324diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
44325index d0a41cb..b953e50 100644
44326--- a/drivers/isdn/gigaset/usb-gigaset.c
44327+++ b/drivers/isdn/gigaset/usb-gigaset.c
44328@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
44329 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
44330 memcpy(cs->hw.usb->bchars, buf, 6);
44331 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
44332- 0, 0, &buf, 6, 2000);
44333+ 0, 0, buf, 6, 2000);
44334 }
44335
44336 static void gigaset_freebcshw(struct bc_state *bcs)
44337@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
44338 }
44339
44340 static const struct gigaset_ops ops = {
44341- gigaset_write_cmd,
44342- gigaset_write_room,
44343- gigaset_chars_in_buffer,
44344- gigaset_brkchars,
44345- gigaset_init_bchannel,
44346- gigaset_close_bchannel,
44347- gigaset_initbcshw,
44348- gigaset_freebcshw,
44349- gigaset_reinitbcshw,
44350- gigaset_initcshw,
44351- gigaset_freecshw,
44352- gigaset_set_modem_ctrl,
44353- gigaset_baud_rate,
44354- gigaset_set_line_ctrl,
44355- gigaset_m10x_send_skb,
44356- gigaset_m10x_input,
44357+ .write_cmd = gigaset_write_cmd,
44358+ .write_room = gigaset_write_room,
44359+ .chars_in_buffer = gigaset_chars_in_buffer,
44360+ .brkchars = gigaset_brkchars,
44361+ .init_bchannel = gigaset_init_bchannel,
44362+ .close_bchannel = gigaset_close_bchannel,
44363+ .initbcshw = gigaset_initbcshw,
44364+ .freebcshw = gigaset_freebcshw,
44365+ .reinitbcshw = gigaset_reinitbcshw,
44366+ .initcshw = gigaset_initcshw,
44367+ .freecshw = gigaset_freecshw,
44368+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44369+ .baud_rate = gigaset_baud_rate,
44370+ .set_line_ctrl = gigaset_set_line_ctrl,
44371+ .send_skb = gigaset_m10x_send_skb,
44372+ .handle_input = gigaset_m10x_input,
44373 };
44374
44375 /*
44376diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
44377index 4d9b195..455075c 100644
44378--- a/drivers/isdn/hardware/avm/b1.c
44379+++ b/drivers/isdn/hardware/avm/b1.c
44380@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
44381 }
44382 if (left) {
44383 if (t4file->user) {
44384- if (copy_from_user(buf, dp, left))
44385+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44386 return -EFAULT;
44387 } else {
44388 memcpy(buf, dp, left);
44389@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
44390 }
44391 if (left) {
44392 if (config->user) {
44393- if (copy_from_user(buf, dp, left))
44394+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44395 return -EFAULT;
44396 } else {
44397 memcpy(buf, dp, left);
44398diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
44399index 9b856e1..fa03c92 100644
44400--- a/drivers/isdn/i4l/isdn_common.c
44401+++ b/drivers/isdn/i4l/isdn_common.c
44402@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
44403 } else
44404 return -EINVAL;
44405 case IIOCDBGVAR:
44406+ if (!capable(CAP_SYS_RAWIO))
44407+ return -EPERM;
44408 if (arg) {
44409 if (copy_to_user(argp, &dev, sizeof(ulong)))
44410 return -EFAULT;
44411diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
44412index 91d5730..336523e 100644
44413--- a/drivers/isdn/i4l/isdn_concap.c
44414+++ b/drivers/isdn/i4l/isdn_concap.c
44415@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
44416 }
44417
44418 struct concap_device_ops isdn_concap_reliable_dl_dops = {
44419- &isdn_concap_dl_data_req,
44420- &isdn_concap_dl_connect_req,
44421- &isdn_concap_dl_disconn_req
44422+ .data_req = &isdn_concap_dl_data_req,
44423+ .connect_req = &isdn_concap_dl_connect_req,
44424+ .disconn_req = &isdn_concap_dl_disconn_req
44425 };
44426
44427 /* The following should better go into a dedicated source file such that
44428diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
44429index 3c5f249..5fac4d0 100644
44430--- a/drivers/isdn/i4l/isdn_tty.c
44431+++ b/drivers/isdn/i4l/isdn_tty.c
44432@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
44433
44434 #ifdef ISDN_DEBUG_MODEM_OPEN
44435 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
44436- port->count);
44437+ atomic_read(&port->count));
44438 #endif
44439- port->count++;
44440+ atomic_inc(&port->count);
44441 port->tty = tty;
44442 /*
44443 * Start up serial port
44444@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44445 #endif
44446 return;
44447 }
44448- if ((tty->count == 1) && (port->count != 1)) {
44449+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
44450 /*
44451 * Uh, oh. tty->count is 1, which means that the tty
44452 * structure will be freed. Info->count should always
44453@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44454 * serial port won't be shutdown.
44455 */
44456 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
44457- "info->count is %d\n", port->count);
44458- port->count = 1;
44459+ "info->count is %d\n", atomic_read(&port->count));
44460+ atomic_set(&port->count, 1);
44461 }
44462- if (--port->count < 0) {
44463+ if (atomic_dec_return(&port->count) < 0) {
44464 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
44465- info->line, port->count);
44466- port->count = 0;
44467+ info->line, atomic_read(&port->count));
44468+ atomic_set(&port->count, 0);
44469 }
44470- if (port->count) {
44471+ if (atomic_read(&port->count)) {
44472 #ifdef ISDN_DEBUG_MODEM_OPEN
44473 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
44474 #endif
44475@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
44476 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
44477 return;
44478 isdn_tty_shutdown(info);
44479- port->count = 0;
44480+ atomic_set(&port->count, 0);
44481 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44482 port->tty = NULL;
44483 wake_up_interruptible(&port->open_wait);
44484@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
44485 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
44486 modem_info *info = &dev->mdm.info[i];
44487
44488- if (info->port.count == 0)
44489+ if (atomic_read(&info->port.count) == 0)
44490 continue;
44491 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
44492 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
44493diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
44494index e2d4e58..40cd045 100644
44495--- a/drivers/isdn/i4l/isdn_x25iface.c
44496+++ b/drivers/isdn/i4l/isdn_x25iface.c
44497@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
44498
44499
44500 static struct concap_proto_ops ix25_pops = {
44501- &isdn_x25iface_proto_new,
44502- &isdn_x25iface_proto_del,
44503- &isdn_x25iface_proto_restart,
44504- &isdn_x25iface_proto_close,
44505- &isdn_x25iface_xmit,
44506- &isdn_x25iface_receive,
44507- &isdn_x25iface_connect_ind,
44508- &isdn_x25iface_disconn_ind
44509+ .proto_new = &isdn_x25iface_proto_new,
44510+ .proto_del = &isdn_x25iface_proto_del,
44511+ .restart = &isdn_x25iface_proto_restart,
44512+ .close = &isdn_x25iface_proto_close,
44513+ .encap_and_xmit = &isdn_x25iface_xmit,
44514+ .data_ind = &isdn_x25iface_receive,
44515+ .connect_ind = &isdn_x25iface_connect_ind,
44516+ .disconn_ind = &isdn_x25iface_disconn_ind
44517 };
44518
44519 /* error message helper function */
44520diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
44521index 6a7447c..cae33fe 100644
44522--- a/drivers/isdn/icn/icn.c
44523+++ b/drivers/isdn/icn/icn.c
44524@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
44525 if (count > len)
44526 count = len;
44527 if (user) {
44528- if (copy_from_user(msg, buf, count))
44529+ if (count > sizeof msg || copy_from_user(msg, buf, count))
44530 return -EFAULT;
44531 } else
44532 memcpy(msg, buf, count);
44533diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
44534index a4f05c5..1433bc5 100644
44535--- a/drivers/isdn/mISDN/dsp_cmx.c
44536+++ b/drivers/isdn/mISDN/dsp_cmx.c
44537@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
44538 static u16 dsp_count; /* last sample count */
44539 static int dsp_count_valid; /* if we have last sample count */
44540
44541-void
44542+void __intentional_overflow(-1)
44543 dsp_cmx_send(void *arg)
44544 {
44545 struct dsp_conf *conf;
44546diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
44547index f58a354..fbae176 100644
44548--- a/drivers/leds/leds-clevo-mail.c
44549+++ b/drivers/leds/leds-clevo-mail.c
44550@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
44551 * detected as working, but in reality it is not) as low as
44552 * possible.
44553 */
44554-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
44555+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
44556 {
44557 .callback = clevo_mail_led_dmi_callback,
44558 .ident = "Clevo D410J",
44559diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
44560index 046cb70..6b20d39 100644
44561--- a/drivers/leds/leds-ss4200.c
44562+++ b/drivers/leds/leds-ss4200.c
44563@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
44564 * detected as working, but in reality it is not) as low as
44565 * possible.
44566 */
44567-static struct dmi_system_id nas_led_whitelist[] __initdata = {
44568+static struct dmi_system_id nas_led_whitelist[] __initconst = {
44569 {
44570 .callback = ss4200_led_dmi_callback,
44571 .ident = "Intel SS4200-E",
44572diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
44573index 6590558..a74c5dd 100644
44574--- a/drivers/lguest/core.c
44575+++ b/drivers/lguest/core.c
44576@@ -96,9 +96,17 @@ static __init int map_switcher(void)
44577 * The end address needs +1 because __get_vm_area allocates an
44578 * extra guard page, so we need space for that.
44579 */
44580+
44581+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
44582+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44583+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
44584+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44585+#else
44586 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44587 VM_ALLOC, switcher_addr, switcher_addr
44588 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44589+#endif
44590+
44591 if (!switcher_vma) {
44592 err = -ENOMEM;
44593 printk("lguest: could not map switcher pages high\n");
44594@@ -121,7 +129,7 @@ static __init int map_switcher(void)
44595 * Now the Switcher is mapped at the right address, we can't fail!
44596 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
44597 */
44598- memcpy(switcher_vma->addr, start_switcher_text,
44599+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
44600 end_switcher_text - start_switcher_text);
44601
44602 printk(KERN_INFO "lguest: mapped switcher at %p\n",
44603diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
44604index e8b55c3..3514c37 100644
44605--- a/drivers/lguest/page_tables.c
44606+++ b/drivers/lguest/page_tables.c
44607@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
44608 /*:*/
44609
44610 #ifdef CONFIG_X86_PAE
44611-static void release_pmd(pmd_t *spmd)
44612+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
44613 {
44614 /* If the entry's not present, there's nothing to release. */
44615 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
44616diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
44617index 922a1ac..9dd0c2a 100644
44618--- a/drivers/lguest/x86/core.c
44619+++ b/drivers/lguest/x86/core.c
44620@@ -59,7 +59,7 @@ static struct {
44621 /* Offset from where switcher.S was compiled to where we've copied it */
44622 static unsigned long switcher_offset(void)
44623 {
44624- return switcher_addr - (unsigned long)start_switcher_text;
44625+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
44626 }
44627
44628 /* This cpu's struct lguest_pages (after the Switcher text page) */
44629@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
44630 * These copies are pretty cheap, so we do them unconditionally: */
44631 /* Save the current Host top-level page directory.
44632 */
44633+
44634+#ifdef CONFIG_PAX_PER_CPU_PGD
44635+ pages->state.host_cr3 = read_cr3();
44636+#else
44637 pages->state.host_cr3 = __pa(current->mm->pgd);
44638+#endif
44639+
44640 /*
44641 * Set up the Guest's page tables to see this CPU's pages (and no
44642 * other CPU's pages).
44643@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
44644 * compiled-in switcher code and the high-mapped copy we just made.
44645 */
44646 for (i = 0; i < IDT_ENTRIES; i++)
44647- default_idt_entries[i] += switcher_offset();
44648+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
44649
44650 /*
44651 * Set up the Switcher's per-cpu areas.
44652@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
44653 * it will be undisturbed when we switch. To change %cs and jump we
44654 * need this structure to feed to Intel's "lcall" instruction.
44655 */
44656- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
44657+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
44658 lguest_entry.segment = LGUEST_CS;
44659
44660 /*
44661diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
44662index 40634b0..4f5855e 100644
44663--- a/drivers/lguest/x86/switcher_32.S
44664+++ b/drivers/lguest/x86/switcher_32.S
44665@@ -87,6 +87,7 @@
44666 #include <asm/page.h>
44667 #include <asm/segment.h>
44668 #include <asm/lguest.h>
44669+#include <asm/processor-flags.h>
44670
44671 // We mark the start of the code to copy
44672 // It's placed in .text tho it's never run here
44673@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44674 // Changes type when we load it: damn Intel!
44675 // For after we switch over our page tables
44676 // That entry will be read-only: we'd crash.
44677+
44678+#ifdef CONFIG_PAX_KERNEXEC
44679+ mov %cr0, %edx
44680+ xor $X86_CR0_WP, %edx
44681+ mov %edx, %cr0
44682+#endif
44683+
44684 movl $(GDT_ENTRY_TSS*8), %edx
44685 ltr %dx
44686
44687@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44688 // Let's clear it again for our return.
44689 // The GDT descriptor of the Host
44690 // Points to the table after two "size" bytes
44691- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44692+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44693 // Clear "used" from type field (byte 5, bit 2)
44694- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44695+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44696+
44697+#ifdef CONFIG_PAX_KERNEXEC
44698+ mov %cr0, %eax
44699+ xor $X86_CR0_WP, %eax
44700+ mov %eax, %cr0
44701+#endif
44702
44703 // Once our page table's switched, the Guest is live!
44704 // The Host fades as we run this final step.
44705@@ -295,13 +309,12 @@ deliver_to_host:
44706 // I consulted gcc, and it gave
44707 // These instructions, which I gladly credit:
44708 leal (%edx,%ebx,8), %eax
44709- movzwl (%eax),%edx
44710- movl 4(%eax), %eax
44711- xorw %ax, %ax
44712- orl %eax, %edx
44713+ movl 4(%eax), %edx
44714+ movw (%eax), %dx
44715 // Now the address of the handler's in %edx
44716 // We call it now: its "iret" drops us home.
44717- jmp *%edx
44718+ ljmp $__KERNEL_CS, $1f
44719+1: jmp *%edx
44720
44721 // Every interrupt can come to us here
44722 // But we must truly tell each apart.
44723diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44724index a08e3ee..df8ade2 100644
44725--- a/drivers/md/bcache/closure.h
44726+++ b/drivers/md/bcache/closure.h
44727@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44728 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44729 struct workqueue_struct *wq)
44730 {
44731- BUG_ON(object_is_on_stack(cl));
44732+ BUG_ON(object_starts_on_stack(cl));
44733 closure_set_ip(cl);
44734 cl->fn = fn;
44735 cl->wq = wq;
44736diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44737index 67f8b31..9418f2b 100644
44738--- a/drivers/md/bitmap.c
44739+++ b/drivers/md/bitmap.c
44740@@ -1775,7 +1775,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44741 chunk_kb ? "KB" : "B");
44742 if (bitmap->storage.file) {
44743 seq_printf(seq, ", file: ");
44744- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44745+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44746 }
44747
44748 seq_printf(seq, "\n");
44749diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44750index 5152142..623d141 100644
44751--- a/drivers/md/dm-ioctl.c
44752+++ b/drivers/md/dm-ioctl.c
44753@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44754 cmd == DM_LIST_VERSIONS_CMD)
44755 return 0;
44756
44757- if ((cmd == DM_DEV_CREATE_CMD)) {
44758+ if (cmd == DM_DEV_CREATE_CMD) {
44759 if (!*param->name) {
44760 DMWARN("name not supplied when creating device");
44761 return -EINVAL;
44762diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44763index 7dfdb5c..4caada6 100644
44764--- a/drivers/md/dm-raid1.c
44765+++ b/drivers/md/dm-raid1.c
44766@@ -40,7 +40,7 @@ enum dm_raid1_error {
44767
44768 struct mirror {
44769 struct mirror_set *ms;
44770- atomic_t error_count;
44771+ atomic_unchecked_t error_count;
44772 unsigned long error_type;
44773 struct dm_dev *dev;
44774 sector_t offset;
44775@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44776 struct mirror *m;
44777
44778 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44779- if (!atomic_read(&m->error_count))
44780+ if (!atomic_read_unchecked(&m->error_count))
44781 return m;
44782
44783 return NULL;
44784@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44785 * simple way to tell if a device has encountered
44786 * errors.
44787 */
44788- atomic_inc(&m->error_count);
44789+ atomic_inc_unchecked(&m->error_count);
44790
44791 if (test_and_set_bit(error_type, &m->error_type))
44792 return;
44793@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44794 struct mirror *m = get_default_mirror(ms);
44795
44796 do {
44797- if (likely(!atomic_read(&m->error_count)))
44798+ if (likely(!atomic_read_unchecked(&m->error_count)))
44799 return m;
44800
44801 if (m-- == ms->mirror)
44802@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44803 {
44804 struct mirror *default_mirror = get_default_mirror(m->ms);
44805
44806- return !atomic_read(&default_mirror->error_count);
44807+ return !atomic_read_unchecked(&default_mirror->error_count);
44808 }
44809
44810 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44811@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44812 */
44813 if (likely(region_in_sync(ms, region, 1)))
44814 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44815- else if (m && atomic_read(&m->error_count))
44816+ else if (m && atomic_read_unchecked(&m->error_count))
44817 m = NULL;
44818
44819 if (likely(m))
44820@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44821 }
44822
44823 ms->mirror[mirror].ms = ms;
44824- atomic_set(&(ms->mirror[mirror].error_count), 0);
44825+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44826 ms->mirror[mirror].error_type = 0;
44827 ms->mirror[mirror].offset = offset;
44828
44829@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti)
44830 */
44831 static char device_status_char(struct mirror *m)
44832 {
44833- if (!atomic_read(&(m->error_count)))
44834+ if (!atomic_read_unchecked(&(m->error_count)))
44835 return 'A';
44836
44837 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
44838diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44839index 28a9012..9c0f6a5 100644
44840--- a/drivers/md/dm-stats.c
44841+++ b/drivers/md/dm-stats.c
44842@@ -382,7 +382,7 @@ do_sync_free:
44843 synchronize_rcu_expedited();
44844 dm_stat_free(&s->rcu_head);
44845 } else {
44846- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44847+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44848 call_rcu(&s->rcu_head, dm_stat_free);
44849 }
44850 return 0;
44851@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44852 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44853 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44854 ));
44855- ACCESS_ONCE(last->last_sector) = end_sector;
44856- ACCESS_ONCE(last->last_rw) = bi_rw;
44857+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44858+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44859 }
44860
44861 rcu_read_lock();
44862diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44863index d1600d2..4c3af3a 100644
44864--- a/drivers/md/dm-stripe.c
44865+++ b/drivers/md/dm-stripe.c
44866@@ -21,7 +21,7 @@ struct stripe {
44867 struct dm_dev *dev;
44868 sector_t physical_start;
44869
44870- atomic_t error_count;
44871+ atomic_unchecked_t error_count;
44872 };
44873
44874 struct stripe_c {
44875@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44876 kfree(sc);
44877 return r;
44878 }
44879- atomic_set(&(sc->stripe[i].error_count), 0);
44880+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44881 }
44882
44883 ti->private = sc;
44884@@ -330,7 +330,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44885 DMEMIT("%d ", sc->stripes);
44886 for (i = 0; i < sc->stripes; i++) {
44887 DMEMIT("%s ", sc->stripe[i].dev->name);
44888- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44889+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44890 'D' : 'A';
44891 }
44892 buffer[i] = '\0';
44893@@ -375,8 +375,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44894 */
44895 for (i = 0; i < sc->stripes; i++)
44896 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44897- atomic_inc(&(sc->stripe[i].error_count));
44898- if (atomic_read(&(sc->stripe[i].error_count)) <
44899+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44900+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44901 DM_IO_ERROR_THRESHOLD)
44902 schedule_work(&sc->trigger_event);
44903 }
44904diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44905index f9c6cb8..e272df6 100644
44906--- a/drivers/md/dm-table.c
44907+++ b/drivers/md/dm-table.c
44908@@ -274,7 +274,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
44909 static int open_dev(struct dm_dev_internal *d, dev_t dev,
44910 struct mapped_device *md)
44911 {
44912- static char *_claim_ptr = "I belong to device-mapper";
44913+ static char _claim_ptr[] = "I belong to device-mapper";
44914 struct block_device *bdev;
44915
44916 int r;
44917@@ -342,7 +342,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44918 if (!dev_size)
44919 return 0;
44920
44921- if ((start >= dev_size) || (start + len > dev_size)) {
44922+ if ((start >= dev_size) || (len > dev_size - start)) {
44923 DMWARN("%s: %s too small for target: "
44924 "start=%llu, len=%llu, dev_size=%llu",
44925 dm_device_name(ti->table->md), bdevname(bdev, b),
44926diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44927index e9d33ad..dae9880d 100644
44928--- a/drivers/md/dm-thin-metadata.c
44929+++ b/drivers/md/dm-thin-metadata.c
44930@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44931 {
44932 pmd->info.tm = pmd->tm;
44933 pmd->info.levels = 2;
44934- pmd->info.value_type.context = pmd->data_sm;
44935+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44936 pmd->info.value_type.size = sizeof(__le64);
44937 pmd->info.value_type.inc = data_block_inc;
44938 pmd->info.value_type.dec = data_block_dec;
44939@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44940
44941 pmd->bl_info.tm = pmd->tm;
44942 pmd->bl_info.levels = 1;
44943- pmd->bl_info.value_type.context = pmd->data_sm;
44944+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44945 pmd->bl_info.value_type.size = sizeof(__le64);
44946 pmd->bl_info.value_type.inc = data_block_inc;
44947 pmd->bl_info.value_type.dec = data_block_dec;
44948diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44949index 32b958d..34011e8 100644
44950--- a/drivers/md/dm.c
44951+++ b/drivers/md/dm.c
44952@@ -180,9 +180,9 @@ struct mapped_device {
44953 /*
44954 * Event handling.
44955 */
44956- atomic_t event_nr;
44957+ atomic_unchecked_t event_nr;
44958 wait_queue_head_t eventq;
44959- atomic_t uevent_seq;
44960+ atomic_unchecked_t uevent_seq;
44961 struct list_head uevent_list;
44962 spinlock_t uevent_lock; /* Protect access to uevent_list */
44963
44964@@ -1952,8 +1952,8 @@ static struct mapped_device *alloc_dev(int minor)
44965 spin_lock_init(&md->deferred_lock);
44966 atomic_set(&md->holders, 1);
44967 atomic_set(&md->open_count, 0);
44968- atomic_set(&md->event_nr, 0);
44969- atomic_set(&md->uevent_seq, 0);
44970+ atomic_set_unchecked(&md->event_nr, 0);
44971+ atomic_set_unchecked(&md->uevent_seq, 0);
44972 INIT_LIST_HEAD(&md->uevent_list);
44973 spin_lock_init(&md->uevent_lock);
44974
44975@@ -2107,7 +2107,7 @@ static void event_callback(void *context)
44976
44977 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44978
44979- atomic_inc(&md->event_nr);
44980+ atomic_inc_unchecked(&md->event_nr);
44981 wake_up(&md->eventq);
44982 }
44983
44984@@ -2800,18 +2800,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44985
44986 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44987 {
44988- return atomic_add_return(1, &md->uevent_seq);
44989+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44990 }
44991
44992 uint32_t dm_get_event_nr(struct mapped_device *md)
44993 {
44994- return atomic_read(&md->event_nr);
44995+ return atomic_read_unchecked(&md->event_nr);
44996 }
44997
44998 int dm_wait_event(struct mapped_device *md, int event_nr)
44999 {
45000 return wait_event_interruptible(md->eventq,
45001- (event_nr != atomic_read(&md->event_nr)));
45002+ (event_nr != atomic_read_unchecked(&md->event_nr)));
45003 }
45004
45005 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
45006diff --git a/drivers/md/md.c b/drivers/md/md.c
45007index b7f603c..723d2bd 100644
45008--- a/drivers/md/md.c
45009+++ b/drivers/md/md.c
45010@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
45011 * start build, activate spare
45012 */
45013 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
45014-static atomic_t md_event_count;
45015+static atomic_unchecked_t md_event_count;
45016 void md_new_event(struct mddev *mddev)
45017 {
45018- atomic_inc(&md_event_count);
45019+ atomic_inc_unchecked(&md_event_count);
45020 wake_up(&md_event_waiters);
45021 }
45022 EXPORT_SYMBOL_GPL(md_new_event);
45023@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
45024 */
45025 static void md_new_event_inintr(struct mddev *mddev)
45026 {
45027- atomic_inc(&md_event_count);
45028+ atomic_inc_unchecked(&md_event_count);
45029 wake_up(&md_event_waiters);
45030 }
45031
45032@@ -1462,7 +1462,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
45033 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
45034 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
45035 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
45036- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45037+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45038
45039 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
45040 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
45041@@ -1713,7 +1713,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
45042 else
45043 sb->resync_offset = cpu_to_le64(0);
45044
45045- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
45046+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
45047
45048 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
45049 sb->size = cpu_to_le64(mddev->dev_sectors);
45050@@ -2725,7 +2725,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
45051 static ssize_t
45052 errors_show(struct md_rdev *rdev, char *page)
45053 {
45054- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
45055+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
45056 }
45057
45058 static ssize_t
45059@@ -2734,7 +2734,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
45060 char *e;
45061 unsigned long n = simple_strtoul(buf, &e, 10);
45062 if (*buf && (*e == 0 || *e == '\n')) {
45063- atomic_set(&rdev->corrected_errors, n);
45064+ atomic_set_unchecked(&rdev->corrected_errors, n);
45065 return len;
45066 }
45067 return -EINVAL;
45068@@ -3183,8 +3183,8 @@ int md_rdev_init(struct md_rdev *rdev)
45069 rdev->sb_loaded = 0;
45070 rdev->bb_page = NULL;
45071 atomic_set(&rdev->nr_pending, 0);
45072- atomic_set(&rdev->read_errors, 0);
45073- atomic_set(&rdev->corrected_errors, 0);
45074+ atomic_set_unchecked(&rdev->read_errors, 0);
45075+ atomic_set_unchecked(&rdev->corrected_errors, 0);
45076
45077 INIT_LIST_HEAD(&rdev->same_set);
45078 init_waitqueue_head(&rdev->blocked_wait);
45079@@ -7072,7 +7072,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
45080
45081 spin_unlock(&pers_lock);
45082 seq_printf(seq, "\n");
45083- seq->poll_event = atomic_read(&md_event_count);
45084+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45085 return 0;
45086 }
45087 if (v == (void*)2) {
45088@@ -7175,7 +7175,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
45089 return error;
45090
45091 seq = file->private_data;
45092- seq->poll_event = atomic_read(&md_event_count);
45093+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45094 return error;
45095 }
45096
45097@@ -7192,7 +7192,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
45098 /* always allow read */
45099 mask = POLLIN | POLLRDNORM;
45100
45101- if (seq->poll_event != atomic_read(&md_event_count))
45102+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
45103 mask |= POLLERR | POLLPRI;
45104 return mask;
45105 }
45106@@ -7236,7 +7236,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
45107 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
45108 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
45109 (int)part_stat_read(&disk->part0, sectors[1]) -
45110- atomic_read(&disk->sync_io);
45111+ atomic_read_unchecked(&disk->sync_io);
45112 /* sync IO will cause sync_io to increase before the disk_stats
45113 * as sync_io is counted when a request starts, and
45114 * disk_stats is counted when it completes.
45115diff --git a/drivers/md/md.h b/drivers/md/md.h
45116index a49d991..3582bb7 100644
45117--- a/drivers/md/md.h
45118+++ b/drivers/md/md.h
45119@@ -94,13 +94,13 @@ struct md_rdev {
45120 * only maintained for arrays that
45121 * support hot removal
45122 */
45123- atomic_t read_errors; /* number of consecutive read errors that
45124+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
45125 * we have tried to ignore.
45126 */
45127 struct timespec last_read_error; /* monotonic time since our
45128 * last read error
45129 */
45130- atomic_t corrected_errors; /* number of corrected read errors,
45131+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
45132 * for reporting to userspace and storing
45133 * in superblock.
45134 */
45135@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
45136
45137 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
45138 {
45139- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45140+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45141 }
45142
45143 struct md_personality
45144diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
45145index 786b689..ea8c956 100644
45146--- a/drivers/md/persistent-data/dm-space-map-metadata.c
45147+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
45148@@ -679,7 +679,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
45149 * Flick into a mode where all blocks get allocated in the new area.
45150 */
45151 smm->begin = old_len;
45152- memcpy(sm, &bootstrap_ops, sizeof(*sm));
45153+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
45154
45155 /*
45156 * Extend.
45157@@ -710,7 +710,7 @@ out:
45158 /*
45159 * Switch back to normal behaviour.
45160 */
45161- memcpy(sm, &ops, sizeof(*sm));
45162+ memcpy((void *)sm, &ops, sizeof(*sm));
45163 return r;
45164 }
45165
45166diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
45167index 3e6d115..ffecdeb 100644
45168--- a/drivers/md/persistent-data/dm-space-map.h
45169+++ b/drivers/md/persistent-data/dm-space-map.h
45170@@ -71,6 +71,7 @@ struct dm_space_map {
45171 dm_sm_threshold_fn fn,
45172 void *context);
45173 };
45174+typedef struct dm_space_map __no_const dm_space_map_no_const;
45175
45176 /*----------------------------------------------------------------*/
45177
45178diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
45179index 55de4f6..b1c57fe 100644
45180--- a/drivers/md/raid1.c
45181+++ b/drivers/md/raid1.c
45182@@ -1936,7 +1936,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
45183 if (r1_sync_page_io(rdev, sect, s,
45184 bio->bi_io_vec[idx].bv_page,
45185 READ) != 0)
45186- atomic_add(s, &rdev->corrected_errors);
45187+ atomic_add_unchecked(s, &rdev->corrected_errors);
45188 }
45189 sectors -= s;
45190 sect += s;
45191@@ -2170,7 +2170,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
45192 !test_bit(Faulty, &rdev->flags)) {
45193 if (r1_sync_page_io(rdev, sect, s,
45194 conf->tmppage, READ)) {
45195- atomic_add(s, &rdev->corrected_errors);
45196+ atomic_add_unchecked(s, &rdev->corrected_errors);
45197 printk(KERN_INFO
45198 "md/raid1:%s: read error corrected "
45199 "(%d sectors at %llu on %s)\n",
45200diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
45201index 6703751..187af1e 100644
45202--- a/drivers/md/raid10.c
45203+++ b/drivers/md/raid10.c
45204@@ -1948,7 +1948,7 @@ static void end_sync_read(struct bio *bio, int error)
45205 /* The write handler will notice the lack of
45206 * R10BIO_Uptodate and record any errors etc
45207 */
45208- atomic_add(r10_bio->sectors,
45209+ atomic_add_unchecked(r10_bio->sectors,
45210 &conf->mirrors[d].rdev->corrected_errors);
45211
45212 /* for reconstruct, we always reschedule after a read.
45213@@ -2306,7 +2306,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45214 {
45215 struct timespec cur_time_mon;
45216 unsigned long hours_since_last;
45217- unsigned int read_errors = atomic_read(&rdev->read_errors);
45218+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
45219
45220 ktime_get_ts(&cur_time_mon);
45221
45222@@ -2328,9 +2328,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45223 * overflowing the shift of read_errors by hours_since_last.
45224 */
45225 if (hours_since_last >= 8 * sizeof(read_errors))
45226- atomic_set(&rdev->read_errors, 0);
45227+ atomic_set_unchecked(&rdev->read_errors, 0);
45228 else
45229- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
45230+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
45231 }
45232
45233 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
45234@@ -2384,8 +2384,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45235 return;
45236
45237 check_decay_read_errors(mddev, rdev);
45238- atomic_inc(&rdev->read_errors);
45239- if (atomic_read(&rdev->read_errors) > max_read_errors) {
45240+ atomic_inc_unchecked(&rdev->read_errors);
45241+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
45242 char b[BDEVNAME_SIZE];
45243 bdevname(rdev->bdev, b);
45244
45245@@ -2393,7 +2393,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45246 "md/raid10:%s: %s: Raid device exceeded "
45247 "read_error threshold [cur %d:max %d]\n",
45248 mdname(mddev), b,
45249- atomic_read(&rdev->read_errors), max_read_errors);
45250+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
45251 printk(KERN_NOTICE
45252 "md/raid10:%s: %s: Failing raid device\n",
45253 mdname(mddev), b);
45254@@ -2548,7 +2548,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45255 sect +
45256 choose_data_offset(r10_bio, rdev)),
45257 bdevname(rdev->bdev, b));
45258- atomic_add(s, &rdev->corrected_errors);
45259+ atomic_add_unchecked(s, &rdev->corrected_errors);
45260 }
45261
45262 rdev_dec_pending(rdev, mddev);
45263diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
45264index 9f0fbec..991e7a1 100644
45265--- a/drivers/md/raid5.c
45266+++ b/drivers/md/raid5.c
45267@@ -1735,6 +1735,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
45268 return 1;
45269 }
45270
45271+#ifdef CONFIG_GRKERNSEC_HIDESYM
45272+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
45273+#endif
45274+
45275 static int grow_stripes(struct r5conf *conf, int num)
45276 {
45277 struct kmem_cache *sc;
45278@@ -1746,7 +1750,11 @@ static int grow_stripes(struct r5conf *conf, int num)
45279 "raid%d-%s", conf->level, mdname(conf->mddev));
45280 else
45281 sprintf(conf->cache_name[0],
45282+#ifdef CONFIG_GRKERNSEC_HIDESYM
45283+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
45284+#else
45285 "raid%d-%p", conf->level, conf->mddev);
45286+#endif
45287 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
45288
45289 conf->active_name = 0;
45290@@ -2022,21 +2030,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
45291 mdname(conf->mddev), STRIPE_SECTORS,
45292 (unsigned long long)s,
45293 bdevname(rdev->bdev, b));
45294- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
45295+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
45296 clear_bit(R5_ReadError, &sh->dev[i].flags);
45297 clear_bit(R5_ReWrite, &sh->dev[i].flags);
45298 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
45299 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
45300
45301- if (atomic_read(&rdev->read_errors))
45302- atomic_set(&rdev->read_errors, 0);
45303+ if (atomic_read_unchecked(&rdev->read_errors))
45304+ atomic_set_unchecked(&rdev->read_errors, 0);
45305 } else {
45306 const char *bdn = bdevname(rdev->bdev, b);
45307 int retry = 0;
45308 int set_bad = 0;
45309
45310 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
45311- atomic_inc(&rdev->read_errors);
45312+ atomic_inc_unchecked(&rdev->read_errors);
45313 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
45314 printk_ratelimited(
45315 KERN_WARNING
45316@@ -2064,7 +2072,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
45317 mdname(conf->mddev),
45318 (unsigned long long)s,
45319 bdn);
45320- } else if (atomic_read(&rdev->read_errors)
45321+ } else if (atomic_read_unchecked(&rdev->read_errors)
45322 > conf->max_nr_stripes)
45323 printk(KERN_WARNING
45324 "md/raid:%s: Too many read errors, failing device %s.\n",
45325diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
45326index 983db75..ef9248c 100644
45327--- a/drivers/media/dvb-core/dvbdev.c
45328+++ b/drivers/media/dvb-core/dvbdev.c
45329@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
45330 const struct dvb_device *template, void *priv, int type)
45331 {
45332 struct dvb_device *dvbdev;
45333- struct file_operations *dvbdevfops;
45334+ file_operations_no_const *dvbdevfops;
45335 struct device *clsdev;
45336 int minor;
45337 int id;
45338diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
45339index 539f4db..cdd403b 100644
45340--- a/drivers/media/dvb-frontends/af9033.h
45341+++ b/drivers/media/dvb-frontends/af9033.h
45342@@ -82,7 +82,7 @@ struct af9033_ops {
45343 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
45344 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
45345 int onoff);
45346-};
45347+} __no_const;
45348
45349
45350 #if IS_ENABLED(CONFIG_DVB_AF9033)
45351diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
45352index 9b6c3bb..baeb5c7 100644
45353--- a/drivers/media/dvb-frontends/dib3000.h
45354+++ b/drivers/media/dvb-frontends/dib3000.h
45355@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
45356 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
45357 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
45358 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
45359-};
45360+} __no_const;
45361
45362 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
45363 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
45364diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
45365index 1fea0e9..321ce8f 100644
45366--- a/drivers/media/dvb-frontends/dib7000p.h
45367+++ b/drivers/media/dvb-frontends/dib7000p.h
45368@@ -64,7 +64,7 @@ struct dib7000p_ops {
45369 int (*get_adc_power)(struct dvb_frontend *fe);
45370 int (*slave_reset)(struct dvb_frontend *fe);
45371 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
45372-};
45373+} __no_const;
45374
45375 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
45376 void *dib7000p_attach(struct dib7000p_ops *ops);
45377diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
45378index 84cc103..5780c54 100644
45379--- a/drivers/media/dvb-frontends/dib8000.h
45380+++ b/drivers/media/dvb-frontends/dib8000.h
45381@@ -61,7 +61,7 @@ struct dib8000_ops {
45382 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
45383 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
45384 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
45385-};
45386+} __no_const;
45387
45388 #if IS_ENABLED(CONFIG_DVB_DIB8000)
45389 void *dib8000_attach(struct dib8000_ops *ops);
45390diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
45391index ed8cb90..5ef7f79 100644
45392--- a/drivers/media/pci/cx88/cx88-video.c
45393+++ b/drivers/media/pci/cx88/cx88-video.c
45394@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
45395
45396 /* ------------------------------------------------------------------ */
45397
45398-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45399-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45400-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45401+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45402+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45403+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45404
45405 module_param_array(video_nr, int, NULL, 0444);
45406 module_param_array(vbi_nr, int, NULL, 0444);
45407diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
45408index 802642d..5534900 100644
45409--- a/drivers/media/pci/ivtv/ivtv-driver.c
45410+++ b/drivers/media/pci/ivtv/ivtv-driver.c
45411@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
45412 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
45413
45414 /* ivtv instance counter */
45415-static atomic_t ivtv_instance = ATOMIC_INIT(0);
45416+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
45417
45418 /* Parameter declarations */
45419 static int cardtype[IVTV_MAX_CARDS];
45420diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
45421index 172583d..0f806f4 100644
45422--- a/drivers/media/pci/solo6x10/solo6x10-core.c
45423+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
45424@@ -430,7 +430,7 @@ static void solo_device_release(struct device *dev)
45425
45426 static int solo_sysfs_init(struct solo_dev *solo_dev)
45427 {
45428- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
45429+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
45430 struct device *dev = &solo_dev->dev;
45431 const char *driver;
45432 int i;
45433diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
45434index c7141f2..5301fec 100644
45435--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
45436+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
45437@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
45438
45439 int solo_g723_init(struct solo_dev *solo_dev)
45440 {
45441- static struct snd_device_ops ops = { NULL };
45442+ static struct snd_device_ops ops = { };
45443 struct snd_card *card;
45444 struct snd_kcontrol_new kctl;
45445 char name[32];
45446diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45447index 8c84846..27b4f83 100644
45448--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
45449+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45450@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
45451
45452 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
45453 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
45454- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
45455+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
45456 if (p2m_id < 0)
45457 p2m_id = -p2m_id;
45458 }
45459diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
45460index c6154b0..73e4ae9 100644
45461--- a/drivers/media/pci/solo6x10/solo6x10.h
45462+++ b/drivers/media/pci/solo6x10/solo6x10.h
45463@@ -219,7 +219,7 @@ struct solo_dev {
45464
45465 /* P2M DMA Engine */
45466 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
45467- atomic_t p2m_count;
45468+ atomic_unchecked_t p2m_count;
45469 int p2m_jiffies;
45470 unsigned int p2m_timeouts;
45471
45472diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
45473index 2d177fa..5b925a1 100644
45474--- a/drivers/media/platform/omap/omap_vout.c
45475+++ b/drivers/media/platform/omap/omap_vout.c
45476@@ -63,7 +63,6 @@ enum omap_vout_channels {
45477 OMAP_VIDEO2,
45478 };
45479
45480-static struct videobuf_queue_ops video_vbq_ops;
45481 /* Variables configurable through module params*/
45482 static u32 video1_numbuffers = 3;
45483 static u32 video2_numbuffers = 3;
45484@@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file)
45485 {
45486 struct videobuf_queue *q;
45487 struct omap_vout_device *vout = NULL;
45488+ static struct videobuf_queue_ops video_vbq_ops = {
45489+ .buf_setup = omap_vout_buffer_setup,
45490+ .buf_prepare = omap_vout_buffer_prepare,
45491+ .buf_release = omap_vout_buffer_release,
45492+ .buf_queue = omap_vout_buffer_queue,
45493+ };
45494
45495 vout = video_drvdata(file);
45496 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
45497@@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file)
45498 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
45499
45500 q = &vout->vbq;
45501- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
45502- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
45503- video_vbq_ops.buf_release = omap_vout_buffer_release;
45504- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
45505 spin_lock_init(&vout->vbq_lock);
45506
45507 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
45508diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
45509index fb2acc5..a2fcbdc4 100644
45510--- a/drivers/media/platform/s5p-tv/mixer.h
45511+++ b/drivers/media/platform/s5p-tv/mixer.h
45512@@ -156,7 +156,7 @@ struct mxr_layer {
45513 /** layer index (unique identifier) */
45514 int idx;
45515 /** callbacks for layer methods */
45516- struct mxr_layer_ops ops;
45517+ struct mxr_layer_ops *ops;
45518 /** format array */
45519 const struct mxr_format **fmt_array;
45520 /** size of format array */
45521diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45522index 74344c7..a39e70e 100644
45523--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45524+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45525@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
45526 {
45527 struct mxr_layer *layer;
45528 int ret;
45529- struct mxr_layer_ops ops = {
45530+ static struct mxr_layer_ops ops = {
45531 .release = mxr_graph_layer_release,
45532 .buffer_set = mxr_graph_buffer_set,
45533 .stream_set = mxr_graph_stream_set,
45534diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
45535index b713403..53cb5ad 100644
45536--- a/drivers/media/platform/s5p-tv/mixer_reg.c
45537+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
45538@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
45539 layer->update_buf = next;
45540 }
45541
45542- layer->ops.buffer_set(layer, layer->update_buf);
45543+ layer->ops->buffer_set(layer, layer->update_buf);
45544
45545 if (done && done != layer->shadow_buf)
45546 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
45547diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
45548index b4d2696..91df48e 100644
45549--- a/drivers/media/platform/s5p-tv/mixer_video.c
45550+++ b/drivers/media/platform/s5p-tv/mixer_video.c
45551@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
45552 layer->geo.src.height = layer->geo.src.full_height;
45553
45554 mxr_geometry_dump(mdev, &layer->geo);
45555- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45556+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45557 mxr_geometry_dump(mdev, &layer->geo);
45558 }
45559
45560@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
45561 layer->geo.dst.full_width = mbus_fmt.width;
45562 layer->geo.dst.full_height = mbus_fmt.height;
45563 layer->geo.dst.field = mbus_fmt.field;
45564- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45565+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45566
45567 mxr_geometry_dump(mdev, &layer->geo);
45568 }
45569@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
45570 /* set source size to highest accepted value */
45571 geo->src.full_width = max(geo->dst.full_width, pix->width);
45572 geo->src.full_height = max(geo->dst.full_height, pix->height);
45573- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45574+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45575 mxr_geometry_dump(mdev, &layer->geo);
45576 /* set cropping to total visible screen */
45577 geo->src.width = pix->width;
45578@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
45579 geo->src.x_offset = 0;
45580 geo->src.y_offset = 0;
45581 /* assure consistency of geometry */
45582- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45583+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45584 mxr_geometry_dump(mdev, &layer->geo);
45585 /* set full size to lowest possible value */
45586 geo->src.full_width = 0;
45587 geo->src.full_height = 0;
45588- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45589+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45590 mxr_geometry_dump(mdev, &layer->geo);
45591
45592 /* returning results */
45593@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
45594 target->width = s->r.width;
45595 target->height = s->r.height;
45596
45597- layer->ops.fix_geometry(layer, stage, s->flags);
45598+ layer->ops->fix_geometry(layer, stage, s->flags);
45599
45600 /* retrieve update selection rectangle */
45601 res.left = target->x_offset;
45602@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
45603 mxr_output_get(mdev);
45604
45605 mxr_layer_update_output(layer);
45606- layer->ops.format_set(layer);
45607+ layer->ops->format_set(layer);
45608 /* enabling layer in hardware */
45609 spin_lock_irqsave(&layer->enq_slock, flags);
45610 layer->state = MXR_LAYER_STREAMING;
45611 spin_unlock_irqrestore(&layer->enq_slock, flags);
45612
45613- layer->ops.stream_set(layer, MXR_ENABLE);
45614+ layer->ops->stream_set(layer, MXR_ENABLE);
45615 mxr_streamer_get(mdev);
45616
45617 return 0;
45618@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
45619 spin_unlock_irqrestore(&layer->enq_slock, flags);
45620
45621 /* disabling layer in hardware */
45622- layer->ops.stream_set(layer, MXR_DISABLE);
45623+ layer->ops->stream_set(layer, MXR_DISABLE);
45624 /* remove one streamer */
45625 mxr_streamer_put(mdev);
45626 /* allow changes in output configuration */
45627@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
45628
45629 void mxr_layer_release(struct mxr_layer *layer)
45630 {
45631- if (layer->ops.release)
45632- layer->ops.release(layer);
45633+ if (layer->ops->release)
45634+ layer->ops->release(layer);
45635 }
45636
45637 void mxr_base_layer_release(struct mxr_layer *layer)
45638@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
45639
45640 layer->mdev = mdev;
45641 layer->idx = idx;
45642- layer->ops = *ops;
45643+ layer->ops = ops;
45644
45645 spin_lock_init(&layer->enq_slock);
45646 INIT_LIST_HEAD(&layer->enq_list);
45647diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45648index c9388c4..ce71ece 100644
45649--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45650+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45651@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
45652 {
45653 struct mxr_layer *layer;
45654 int ret;
45655- struct mxr_layer_ops ops = {
45656+ static struct mxr_layer_ops ops = {
45657 .release = mxr_vp_layer_release,
45658 .buffer_set = mxr_vp_buffer_set,
45659 .stream_set = mxr_vp_stream_set,
45660diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
45661index 8033371..de5bca0 100644
45662--- a/drivers/media/platform/vivi.c
45663+++ b/drivers/media/platform/vivi.c
45664@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
45665 MODULE_LICENSE("Dual BSD/GPL");
45666 MODULE_VERSION(VIVI_VERSION);
45667
45668-static unsigned video_nr = -1;
45669-module_param(video_nr, uint, 0644);
45670+static int video_nr = -1;
45671+module_param(video_nr, int, 0644);
45672 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
45673
45674 static unsigned n_devs = 1;
45675diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45676index 82affae..42833ec 100644
45677--- a/drivers/media/radio/radio-cadet.c
45678+++ b/drivers/media/radio/radio-cadet.c
45679@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45680 unsigned char readbuf[RDS_BUFFER];
45681 int i = 0;
45682
45683+ if (count > RDS_BUFFER)
45684+ return -EFAULT;
45685 mutex_lock(&dev->lock);
45686 if (dev->rdsstat == 0)
45687 cadet_start_rds(dev);
45688@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45689 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45690 mutex_unlock(&dev->lock);
45691
45692- if (i && copy_to_user(data, readbuf, i))
45693- return -EFAULT;
45694+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
45695+ i = -EFAULT;
45696+
45697 return i;
45698 }
45699
45700diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45701index 5236035..c622c74 100644
45702--- a/drivers/media/radio/radio-maxiradio.c
45703+++ b/drivers/media/radio/radio-maxiradio.c
45704@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45705 /* TEA5757 pin mappings */
45706 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45707
45708-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45709+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45710
45711 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45712 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45713diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45714index 050b3bb..79f62b9 100644
45715--- a/drivers/media/radio/radio-shark.c
45716+++ b/drivers/media/radio/radio-shark.c
45717@@ -79,7 +79,7 @@ struct shark_device {
45718 u32 last_val;
45719 };
45720
45721-static atomic_t shark_instance = ATOMIC_INIT(0);
45722+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45723
45724 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45725 {
45726diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45727index 8654e0d..0608a64 100644
45728--- a/drivers/media/radio/radio-shark2.c
45729+++ b/drivers/media/radio/radio-shark2.c
45730@@ -74,7 +74,7 @@ struct shark_device {
45731 u8 *transfer_buffer;
45732 };
45733
45734-static atomic_t shark_instance = ATOMIC_INIT(0);
45735+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45736
45737 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45738 {
45739diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45740index 633022b..7f10754 100644
45741--- a/drivers/media/radio/radio-si476x.c
45742+++ b/drivers/media/radio/radio-si476x.c
45743@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45744 struct si476x_radio *radio;
45745 struct v4l2_ctrl *ctrl;
45746
45747- static atomic_t instance = ATOMIC_INIT(0);
45748+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45749
45750 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45751 if (!radio)
45752diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45753index 9fd1527..8927230 100644
45754--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45755+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45756@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45757
45758 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45759 {
45760- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45761- char result[64];
45762- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45763- sizeof(result), 0);
45764+ char *buf;
45765+ char *result;
45766+ int retval;
45767+
45768+ buf = kmalloc(2, GFP_KERNEL);
45769+ if (buf == NULL)
45770+ return -ENOMEM;
45771+ result = kmalloc(64, GFP_KERNEL);
45772+ if (result == NULL) {
45773+ kfree(buf);
45774+ return -ENOMEM;
45775+ }
45776+
45777+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45778+ buf[1] = enable ? 1 : 0;
45779+
45780+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45781+
45782+ kfree(buf);
45783+ kfree(result);
45784+ return retval;
45785 }
45786
45787 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45788 {
45789- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45790- char state[3];
45791- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45792+ char *buf;
45793+ char *state;
45794+ int retval;
45795+
45796+ buf = kmalloc(2, GFP_KERNEL);
45797+ if (buf == NULL)
45798+ return -ENOMEM;
45799+ state = kmalloc(3, GFP_KERNEL);
45800+ if (state == NULL) {
45801+ kfree(buf);
45802+ return -ENOMEM;
45803+ }
45804+
45805+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
45806+ buf[1] = enable ? 1 : 0;
45807+
45808+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45809+
45810+ kfree(buf);
45811+ kfree(state);
45812+ return retval;
45813 }
45814
45815 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45816 {
45817- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45818- char state[3];
45819+ char *query;
45820+ char *state;
45821 int ret;
45822+ query = kmalloc(1, GFP_KERNEL);
45823+ if (query == NULL)
45824+ return -ENOMEM;
45825+ state = kmalloc(3, GFP_KERNEL);
45826+ if (state == NULL) {
45827+ kfree(query);
45828+ return -ENOMEM;
45829+ }
45830+
45831+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45832
45833 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45834
45835- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45836- sizeof(state), 0);
45837+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45838 if (ret < 0) {
45839 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45840 "state info\n");
45841@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45842
45843 /* Copy this pointer as we are gonna need it in the release phase */
45844 cinergyt2_usb_device = adap->dev;
45845-
45846+ kfree(query);
45847+ kfree(state);
45848 return 0;
45849 }
45850
45851@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45852 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45853 {
45854 struct cinergyt2_state *st = d->priv;
45855- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45856+ u8 *key, *cmd;
45857 int i;
45858
45859+ cmd = kmalloc(1, GFP_KERNEL);
45860+ if (cmd == NULL)
45861+ return -EINVAL;
45862+ key = kzalloc(5, GFP_KERNEL);
45863+ if (key == NULL) {
45864+ kfree(cmd);
45865+ return -EINVAL;
45866+ }
45867+
45868+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45869+
45870 *state = REMOTE_NO_KEY_PRESSED;
45871
45872- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45873+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45874 if (key[4] == 0xff) {
45875 /* key repeat */
45876 st->rc_counter++;
45877@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45878 *event = d->last_event;
45879 deb_rc("repeat key, event %x\n",
45880 *event);
45881- return 0;
45882+ goto out;
45883 }
45884 }
45885 deb_rc("repeated key (non repeatable)\n");
45886 }
45887- return 0;
45888+ goto out;
45889 }
45890
45891 /* hack to pass checksum on the custom field */
45892@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45893
45894 deb_rc("key: %*ph\n", 5, key);
45895 }
45896+out:
45897+ kfree(cmd);
45898+ kfree(key);
45899 return 0;
45900 }
45901
45902diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45903index c890fe4..f9b2ae6 100644
45904--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45905+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45906@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45907 fe_status_t *status)
45908 {
45909 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45910- struct dvbt_get_status_msg result;
45911- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45912+ struct dvbt_get_status_msg *result;
45913+ u8 *cmd;
45914 int ret;
45915
45916- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45917- sizeof(result), 0);
45918+ cmd = kmalloc(1, GFP_KERNEL);
45919+ if (cmd == NULL)
45920+ return -ENOMEM;
45921+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45922+ if (result == NULL) {
45923+ kfree(cmd);
45924+ return -ENOMEM;
45925+ }
45926+
45927+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45928+
45929+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45930+ sizeof(*result), 0);
45931 if (ret < 0)
45932- return ret;
45933+ goto out;
45934
45935 *status = 0;
45936
45937- if (0xffff - le16_to_cpu(result.gain) > 30)
45938+ if (0xffff - le16_to_cpu(result->gain) > 30)
45939 *status |= FE_HAS_SIGNAL;
45940- if (result.lock_bits & (1 << 6))
45941+ if (result->lock_bits & (1 << 6))
45942 *status |= FE_HAS_LOCK;
45943- if (result.lock_bits & (1 << 5))
45944+ if (result->lock_bits & (1 << 5))
45945 *status |= FE_HAS_SYNC;
45946- if (result.lock_bits & (1 << 4))
45947+ if (result->lock_bits & (1 << 4))
45948 *status |= FE_HAS_CARRIER;
45949- if (result.lock_bits & (1 << 1))
45950+ if (result->lock_bits & (1 << 1))
45951 *status |= FE_HAS_VITERBI;
45952
45953 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45954 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45955 *status &= ~FE_HAS_LOCK;
45956
45957- return 0;
45958+out:
45959+ kfree(cmd);
45960+ kfree(result);
45961+ return ret;
45962 }
45963
45964 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45965 {
45966 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45967- struct dvbt_get_status_msg status;
45968- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45969+ struct dvbt_get_status_msg *status;
45970+ char *cmd;
45971 int ret;
45972
45973- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45974- sizeof(status), 0);
45975+ cmd = kmalloc(1, GFP_KERNEL);
45976+ if (cmd == NULL)
45977+ return -ENOMEM;
45978+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45979+ if (status == NULL) {
45980+ kfree(cmd);
45981+ return -ENOMEM;
45982+ }
45983+
45984+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45985+
45986+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45987+ sizeof(*status), 0);
45988 if (ret < 0)
45989- return ret;
45990+ goto out;
45991
45992- *ber = le32_to_cpu(status.viterbi_error_rate);
45993+ *ber = le32_to_cpu(status->viterbi_error_rate);
45994+out:
45995+ kfree(cmd);
45996+ kfree(status);
45997 return 0;
45998 }
45999
46000 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46001 {
46002 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46003- struct dvbt_get_status_msg status;
46004- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46005+ struct dvbt_get_status_msg *status;
46006+ u8 *cmd;
46007 int ret;
46008
46009- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
46010- sizeof(status), 0);
46011+ cmd = kmalloc(1, GFP_KERNEL);
46012+ if (cmd == NULL)
46013+ return -ENOMEM;
46014+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46015+ if (status == NULL) {
46016+ kfree(cmd);
46017+ return -ENOMEM;
46018+ }
46019+
46020+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46021+
46022+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
46023+ sizeof(*status), 0);
46024 if (ret < 0) {
46025 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
46026 ret);
46027- return ret;
46028+ goto out;
46029 }
46030- *unc = le32_to_cpu(status.uncorrected_block_count);
46031- return 0;
46032+ *unc = le32_to_cpu(status->uncorrected_block_count);
46033+
46034+out:
46035+ kfree(cmd);
46036+ kfree(status);
46037+ return ret;
46038 }
46039
46040 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
46041 u16 *strength)
46042 {
46043 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46044- struct dvbt_get_status_msg status;
46045- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46046+ struct dvbt_get_status_msg *status;
46047+ char *cmd;
46048 int ret;
46049
46050- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46051- sizeof(status), 0);
46052+ cmd = kmalloc(1, GFP_KERNEL);
46053+ if (cmd == NULL)
46054+ return -ENOMEM;
46055+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46056+ if (status == NULL) {
46057+ kfree(cmd);
46058+ return -ENOMEM;
46059+ }
46060+
46061+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46062+
46063+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46064+ sizeof(*status), 0);
46065 if (ret < 0) {
46066 err("cinergyt2_fe_read_signal_strength() Failed!"
46067 " (Error=%d)\n", ret);
46068- return ret;
46069+ goto out;
46070 }
46071- *strength = (0xffff - le16_to_cpu(status.gain));
46072+ *strength = (0xffff - le16_to_cpu(status->gain));
46073+
46074+out:
46075+ kfree(cmd);
46076+ kfree(status);
46077 return 0;
46078 }
46079
46080 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
46081 {
46082 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46083- struct dvbt_get_status_msg status;
46084- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46085+ struct dvbt_get_status_msg *status;
46086+ char *cmd;
46087 int ret;
46088
46089- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46090- sizeof(status), 0);
46091+ cmd = kmalloc(1, GFP_KERNEL);
46092+ if (cmd == NULL)
46093+ return -ENOMEM;
46094+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46095+ if (status == NULL) {
46096+ kfree(cmd);
46097+ return -ENOMEM;
46098+ }
46099+
46100+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46101+
46102+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46103+ sizeof(*status), 0);
46104 if (ret < 0) {
46105 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
46106- return ret;
46107+ goto out;
46108 }
46109- *snr = (status.snr << 8) | status.snr;
46110- return 0;
46111+ *snr = (status->snr << 8) | status->snr;
46112+
46113+out:
46114+ kfree(cmd);
46115+ kfree(status);
46116+ return ret;
46117 }
46118
46119 static int cinergyt2_fe_init(struct dvb_frontend *fe)
46120@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
46121 {
46122 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
46123 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46124- struct dvbt_set_parameters_msg param;
46125- char result[2];
46126+ struct dvbt_set_parameters_msg *param;
46127+ char *result;
46128 int err;
46129
46130- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46131- param.tps = cpu_to_le16(compute_tps(fep));
46132- param.freq = cpu_to_le32(fep->frequency / 1000);
46133- param.flags = 0;
46134+ result = kmalloc(2, GFP_KERNEL);
46135+ if (result == NULL)
46136+ return -ENOMEM;
46137+ param = kmalloc(sizeof(*param), GFP_KERNEL);
46138+ if (param == NULL) {
46139+ kfree(result);
46140+ return -ENOMEM;
46141+ }
46142+
46143+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46144+ param->tps = cpu_to_le16(compute_tps(fep));
46145+ param->freq = cpu_to_le32(fep->frequency / 1000);
46146+ param->flags = 0;
46147
46148 switch (fep->bandwidth_hz) {
46149 default:
46150 case 8000000:
46151- param.bandwidth = 8;
46152+ param->bandwidth = 8;
46153 break;
46154 case 7000000:
46155- param.bandwidth = 7;
46156+ param->bandwidth = 7;
46157 break;
46158 case 6000000:
46159- param.bandwidth = 6;
46160+ param->bandwidth = 6;
46161 break;
46162 }
46163
46164 err = dvb_usb_generic_rw(state->d,
46165- (char *)&param, sizeof(param),
46166- result, sizeof(result), 0);
46167+ (char *)param, sizeof(*param),
46168+ result, 2, 0);
46169 if (err < 0)
46170 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
46171
46172- return (err < 0) ? err : 0;
46173+ kfree(result);
46174+ kfree(param);
46175+ return err;
46176 }
46177
46178 static void cinergyt2_fe_release(struct dvb_frontend *fe)
46179diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46180index 733a7ff..f8b52e3 100644
46181--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46182+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46183@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
46184
46185 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
46186 {
46187- struct hexline hx;
46188- u8 reset;
46189+ struct hexline *hx;
46190+ u8 *reset;
46191 int ret,pos=0;
46192
46193+ reset = kmalloc(1, GFP_KERNEL);
46194+ if (reset == NULL)
46195+ return -ENOMEM;
46196+
46197+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
46198+ if (hx == NULL) {
46199+ kfree(reset);
46200+ return -ENOMEM;
46201+ }
46202+
46203 /* stop the CPU */
46204- reset = 1;
46205- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
46206+ reset[0] = 1;
46207+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
46208 err("could not stop the USB controller CPU.");
46209
46210- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
46211- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
46212- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
46213+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
46214+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
46215+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
46216
46217- if (ret != hx.len) {
46218+ if (ret != hx->len) {
46219 err("error while transferring firmware "
46220 "(transferred size: %d, block size: %d)",
46221- ret,hx.len);
46222+ ret,hx->len);
46223 ret = -EINVAL;
46224 break;
46225 }
46226 }
46227 if (ret < 0) {
46228 err("firmware download failed at %d with %d",pos,ret);
46229+ kfree(reset);
46230+ kfree(hx);
46231 return ret;
46232 }
46233
46234 if (ret == 0) {
46235 /* restart the CPU */
46236- reset = 0;
46237- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
46238+ reset[0] = 0;
46239+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
46240 err("could not restart the USB controller CPU.");
46241 ret = -EINVAL;
46242 }
46243 } else
46244 ret = -EIO;
46245
46246+ kfree(reset);
46247+ kfree(hx);
46248+
46249 return ret;
46250 }
46251 EXPORT_SYMBOL(usb_cypress_load_firmware);
46252diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
46253index 2add8c5..c33b854 100644
46254--- a/drivers/media/usb/dvb-usb/dw2102.c
46255+++ b/drivers/media/usb/dvb-usb/dw2102.c
46256@@ -118,7 +118,7 @@ struct su3000_state {
46257
46258 struct s6x0_state {
46259 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
46260-};
46261+} __no_const;
46262
46263 /* debug */
46264 static int dvb_usb_dw2102_debug;
46265diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
46266index 6b0b8b6b..4038398 100644
46267--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
46268+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
46269@@ -87,8 +87,11 @@ struct technisat_usb2_state {
46270 static int technisat_usb2_i2c_access(struct usb_device *udev,
46271 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
46272 {
46273- u8 b[64];
46274- int ret, actual_length;
46275+ u8 *b = kmalloc(64, GFP_KERNEL);
46276+ int ret, actual_length, error = 0;
46277+
46278+ if (b == NULL)
46279+ return -ENOMEM;
46280
46281 deb_i2c("i2c-access: %02x, tx: ", device_addr);
46282 debug_dump(tx, txlen, deb_i2c);
46283@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46284
46285 if (ret < 0) {
46286 err("i2c-error: out failed %02x = %d", device_addr, ret);
46287- return -ENODEV;
46288+ error = -ENODEV;
46289+ goto out;
46290 }
46291
46292 ret = usb_bulk_msg(udev,
46293@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46294 b, 64, &actual_length, 1000);
46295 if (ret < 0) {
46296 err("i2c-error: in failed %02x = %d", device_addr, ret);
46297- return -ENODEV;
46298+ error = -ENODEV;
46299+ goto out;
46300 }
46301
46302 if (b[0] != I2C_STATUS_OK) {
46303@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46304 /* handle tuner-i2c-nak */
46305 if (!(b[0] == I2C_STATUS_NAK &&
46306 device_addr == 0x60
46307- /* && device_is_technisat_usb2 */))
46308- return -ENODEV;
46309+ /* && device_is_technisat_usb2 */)) {
46310+ error = -ENODEV;
46311+ goto out;
46312+ }
46313 }
46314
46315 deb_i2c("status: %d, ", b[0]);
46316@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46317
46318 deb_i2c("\n");
46319
46320- return 0;
46321+out:
46322+ kfree(b);
46323+ return error;
46324 }
46325
46326 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
46327@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46328 {
46329 int ret;
46330
46331- u8 led[8] = {
46332- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46333- 0
46334- };
46335+ u8 *led = kzalloc(8, GFP_KERNEL);
46336+
46337+ if (led == NULL)
46338+ return -ENOMEM;
46339
46340 if (disable_led_control && state != TECH_LED_OFF)
46341 return 0;
46342
46343+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
46344+
46345 switch (state) {
46346 case TECH_LED_ON:
46347 led[1] = 0x82;
46348@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46349 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46350 USB_TYPE_VENDOR | USB_DIR_OUT,
46351 0, 0,
46352- led, sizeof(led), 500);
46353+ led, 8, 500);
46354
46355 mutex_unlock(&d->i2c_mutex);
46356+
46357+ kfree(led);
46358+
46359 return ret;
46360 }
46361
46362 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
46363 {
46364 int ret;
46365- u8 b = 0;
46366+ u8 *b = kzalloc(1, GFP_KERNEL);
46367+
46368+ if (b == NULL)
46369+ return -ENOMEM;
46370
46371 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
46372 return -EAGAIN;
46373@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
46374 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
46375 USB_TYPE_VENDOR | USB_DIR_OUT,
46376 (red << 8) | green, 0,
46377- &b, 1, 500);
46378+ b, 1, 500);
46379
46380 mutex_unlock(&d->i2c_mutex);
46381
46382+ kfree(b);
46383+
46384 return ret;
46385 }
46386
46387@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46388 struct dvb_usb_device_description **desc, int *cold)
46389 {
46390 int ret;
46391- u8 version[3];
46392+ u8 *version = kmalloc(3, GFP_KERNEL);
46393
46394 /* first select the interface */
46395 if (usb_set_interface(udev, 0, 1) != 0)
46396@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46397
46398 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
46399
46400+ if (version == NULL)
46401+ return 0;
46402+
46403 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
46404 GET_VERSION_INFO_VENDOR_REQUEST,
46405 USB_TYPE_VENDOR | USB_DIR_IN,
46406 0, 0,
46407- version, sizeof(version), 500);
46408+ version, 3, 500);
46409
46410 if (ret < 0)
46411 *cold = 1;
46412@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46413 *cold = 0;
46414 }
46415
46416+ kfree(version);
46417+
46418 return 0;
46419 }
46420
46421@@ -591,10 +615,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
46422
46423 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46424 {
46425- u8 buf[62], *b;
46426+ u8 *buf, *b;
46427 int ret;
46428 struct ir_raw_event ev;
46429
46430+ buf = kmalloc(62, GFP_KERNEL);
46431+
46432+ if (buf == NULL)
46433+ return -ENOMEM;
46434+
46435 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
46436 buf[1] = 0x08;
46437 buf[2] = 0x8f;
46438@@ -617,16 +646,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46439 GET_IR_DATA_VENDOR_REQUEST,
46440 USB_TYPE_VENDOR | USB_DIR_IN,
46441 0x8080, 0,
46442- buf, sizeof(buf), 500);
46443+ buf, 62, 500);
46444
46445 unlock:
46446 mutex_unlock(&d->i2c_mutex);
46447
46448- if (ret < 0)
46449+ if (ret < 0) {
46450+ kfree(buf);
46451 return ret;
46452+ }
46453
46454- if (ret == 1)
46455+ if (ret == 1) {
46456+ kfree(buf);
46457 return 0; /* no key pressed */
46458+ }
46459
46460 /* decoding */
46461 b = buf+1;
46462@@ -653,6 +686,8 @@ unlock:
46463
46464 ir_raw_event_handle(d->rc_dev);
46465
46466+ kfree(buf);
46467+
46468 return 1;
46469 }
46470
46471diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46472index cca6c2f..77b9a18 100644
46473--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46474+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46475@@ -328,7 +328,7 @@ struct v4l2_buffer32 {
46476 __u32 reserved;
46477 };
46478
46479-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
46480+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
46481 enum v4l2_memory memory)
46482 {
46483 void __user *up_pln;
46484@@ -357,7 +357,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
46485 return 0;
46486 }
46487
46488-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
46489+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
46490 enum v4l2_memory memory)
46491 {
46492 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
46493@@ -427,7 +427,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46494 * by passing a very big num_planes value */
46495 uplane = compat_alloc_user_space(num_planes *
46496 sizeof(struct v4l2_plane));
46497- kp->m.planes = uplane;
46498+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
46499
46500 while (--num_planes >= 0) {
46501 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
46502@@ -498,7 +498,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46503 if (num_planes == 0)
46504 return 0;
46505
46506- uplane = kp->m.planes;
46507+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
46508 if (get_user(p, &up->m.planes))
46509 return -EFAULT;
46510 uplane32 = compat_ptr(p);
46511@@ -562,7 +562,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
46512 get_user(kp->flags, &up->flags) ||
46513 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
46514 return -EFAULT;
46515- kp->base = compat_ptr(tmp);
46516+ kp->base = (void __force_kernel *)compat_ptr(tmp);
46517 return 0;
46518 }
46519
46520@@ -667,7 +667,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46521 n * sizeof(struct v4l2_ext_control32)))
46522 return -EFAULT;
46523 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
46524- kp->controls = kcontrols;
46525+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
46526 while (--n >= 0) {
46527 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
46528 return -EFAULT;
46529@@ -689,7 +689,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46530 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
46531 {
46532 struct v4l2_ext_control32 __user *ucontrols;
46533- struct v4l2_ext_control __user *kcontrols = kp->controls;
46534+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
46535 int n = kp->count;
46536 compat_caddr_t p;
46537
46538@@ -783,7 +783,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
46539 put_user(kp->start_block, &up->start_block) ||
46540 put_user(kp->blocks, &up->blocks) ||
46541 put_user(tmp, &up->edid) ||
46542- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
46543+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
46544 return -EFAULT;
46545 return 0;
46546 }
46547diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
46548index 015f92a..59e311e 100644
46549--- a/drivers/media/v4l2-core/v4l2-device.c
46550+++ b/drivers/media/v4l2-core/v4l2-device.c
46551@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
46552 EXPORT_SYMBOL_GPL(v4l2_device_put);
46553
46554 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
46555- atomic_t *instance)
46556+ atomic_unchecked_t *instance)
46557 {
46558- int num = atomic_inc_return(instance) - 1;
46559+ int num = atomic_inc_return_unchecked(instance) - 1;
46560 int len = strlen(basename);
46561
46562 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
46563diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
46564index d15e167..337f374 100644
46565--- a/drivers/media/v4l2-core/v4l2-ioctl.c
46566+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
46567@@ -2142,7 +2142,8 @@ struct v4l2_ioctl_info {
46568 struct file *file, void *fh, void *p);
46569 } u;
46570 void (*debug)(const void *arg, bool write_only);
46571-};
46572+} __do_const;
46573+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
46574
46575 /* This control needs a priority check */
46576 #define INFO_FL_PRIO (1 << 0)
46577@@ -2326,7 +2327,7 @@ static long __video_do_ioctl(struct file *file,
46578 struct video_device *vfd = video_devdata(file);
46579 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
46580 bool write_only = false;
46581- struct v4l2_ioctl_info default_info;
46582+ v4l2_ioctl_info_no_const default_info;
46583 const struct v4l2_ioctl_info *info;
46584 void *fh = file->private_data;
46585 struct v4l2_fh *vfh = NULL;
46586@@ -2413,7 +2414,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46587 ret = -EINVAL;
46588 break;
46589 }
46590- *user_ptr = (void __user *)buf->m.planes;
46591+ *user_ptr = (void __force_user *)buf->m.planes;
46592 *kernel_ptr = (void **)&buf->m.planes;
46593 *array_size = sizeof(struct v4l2_plane) * buf->length;
46594 ret = 1;
46595@@ -2430,7 +2431,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46596 ret = -EINVAL;
46597 break;
46598 }
46599- *user_ptr = (void __user *)edid->edid;
46600+ *user_ptr = (void __force_user *)edid->edid;
46601 *kernel_ptr = (void **)&edid->edid;
46602 *array_size = edid->blocks * 128;
46603 ret = 1;
46604@@ -2448,7 +2449,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46605 ret = -EINVAL;
46606 break;
46607 }
46608- *user_ptr = (void __user *)ctrls->controls;
46609+ *user_ptr = (void __force_user *)ctrls->controls;
46610 *kernel_ptr = (void **)&ctrls->controls;
46611 *array_size = sizeof(struct v4l2_ext_control)
46612 * ctrls->count;
46613@@ -2549,7 +2550,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
46614 }
46615
46616 if (has_array_args) {
46617- *kernel_ptr = (void __force *)user_ptr;
46618+ *kernel_ptr = (void __force_kernel *)user_ptr;
46619 if (copy_to_user(user_ptr, mbuf, array_size))
46620 err = -EFAULT;
46621 goto out_array_args;
46622diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
46623index a896d94..a5d56b1 100644
46624--- a/drivers/message/fusion/mptbase.c
46625+++ b/drivers/message/fusion/mptbase.c
46626@@ -6752,8 +6752,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46627 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
46628 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
46629
46630+#ifdef CONFIG_GRKERNSEC_HIDESYM
46631+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
46632+#else
46633 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
46634 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
46635+#endif
46636+
46637 /*
46638 * Rounding UP to nearest 4-kB boundary here...
46639 */
46640@@ -6766,7 +6771,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46641 ioc->facts.GlobalCredits);
46642
46643 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
46644+#ifdef CONFIG_GRKERNSEC_HIDESYM
46645+ NULL, NULL);
46646+#else
46647 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
46648+#endif
46649 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
46650 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
46651 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
46652diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
46653index 0707fa2..70ca794 100644
46654--- a/drivers/message/fusion/mptsas.c
46655+++ b/drivers/message/fusion/mptsas.c
46656@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
46657 return 0;
46658 }
46659
46660+static inline void
46661+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46662+{
46663+ if (phy_info->port_details) {
46664+ phy_info->port_details->rphy = rphy;
46665+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46666+ ioc->name, rphy));
46667+ }
46668+
46669+ if (rphy) {
46670+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46671+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46672+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46673+ ioc->name, rphy, rphy->dev.release));
46674+ }
46675+}
46676+
46677 /* no mutex */
46678 static void
46679 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
46680@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
46681 return NULL;
46682 }
46683
46684-static inline void
46685-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46686-{
46687- if (phy_info->port_details) {
46688- phy_info->port_details->rphy = rphy;
46689- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46690- ioc->name, rphy));
46691- }
46692-
46693- if (rphy) {
46694- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46695- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46696- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46697- ioc->name, rphy, rphy->dev.release));
46698- }
46699-}
46700-
46701 static inline struct sas_port *
46702 mptsas_get_port(struct mptsas_phyinfo *phy_info)
46703 {
46704diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
46705index b7d87cd..3fb36da 100644
46706--- a/drivers/message/i2o/i2o_proc.c
46707+++ b/drivers/message/i2o/i2o_proc.c
46708@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
46709 "Array Controller Device"
46710 };
46711
46712-static char *chtostr(char *tmp, u8 *chars, int n)
46713-{
46714- tmp[0] = 0;
46715- return strncat(tmp, (char *)chars, n);
46716-}
46717-
46718 static int i2o_report_query_status(struct seq_file *seq, int block_status,
46719 char *group)
46720 {
46721@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
46722 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
46723 {
46724 struct i2o_controller *c = (struct i2o_controller *)seq->private;
46725- static u32 work32[5];
46726- static u8 *work8 = (u8 *) work32;
46727- static u16 *work16 = (u16 *) work32;
46728+ u32 work32[5];
46729+ u8 *work8 = (u8 *) work32;
46730+ u16 *work16 = (u16 *) work32;
46731 int token;
46732 u32 hwcap;
46733
46734@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46735 } *result;
46736
46737 i2o_exec_execute_ddm_table ddm_table;
46738- char tmp[28 + 1];
46739
46740 result = kmalloc(sizeof(*result), GFP_KERNEL);
46741 if (!result)
46742@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46743
46744 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
46745 seq_printf(seq, "%-#8x", ddm_table.module_id);
46746- seq_printf(seq, "%-29s",
46747- chtostr(tmp, ddm_table.module_name_version, 28));
46748+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
46749 seq_printf(seq, "%9d ", ddm_table.data_size);
46750 seq_printf(seq, "%8d", ddm_table.code_size);
46751
46752@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46753
46754 i2o_driver_result_table *result;
46755 i2o_driver_store_table *dst;
46756- char tmp[28 + 1];
46757
46758 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
46759 if (result == NULL)
46760@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46761
46762 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
46763 seq_printf(seq, "%-#8x", dst->module_id);
46764- seq_printf(seq, "%-29s",
46765- chtostr(tmp, dst->module_name_version, 28));
46766- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
46767+ seq_printf(seq, "%-.28s", dst->module_name_version);
46768+ seq_printf(seq, "%-.8s", dst->date);
46769 seq_printf(seq, "%8d ", dst->module_size);
46770 seq_printf(seq, "%8d ", dst->mpb_size);
46771 seq_printf(seq, "0x%04x", dst->module_flags);
46772@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
46773 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46774 {
46775 struct i2o_device *d = (struct i2o_device *)seq->private;
46776- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46777+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46778 // == (allow) 512d bytes (max)
46779- static u16 *work16 = (u16 *) work32;
46780+ u16 *work16 = (u16 *) work32;
46781 int token;
46782- char tmp[16 + 1];
46783
46784 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
46785
46786@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46787 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
46788 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
46789 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
46790- seq_printf(seq, "Vendor info : %s\n",
46791- chtostr(tmp, (u8 *) (work32 + 2), 16));
46792- seq_printf(seq, "Product info : %s\n",
46793- chtostr(tmp, (u8 *) (work32 + 6), 16));
46794- seq_printf(seq, "Description : %s\n",
46795- chtostr(tmp, (u8 *) (work32 + 10), 16));
46796- seq_printf(seq, "Product rev. : %s\n",
46797- chtostr(tmp, (u8 *) (work32 + 14), 8));
46798+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
46799+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
46800+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
46801+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
46802
46803 seq_printf(seq, "Serial number : ");
46804 print_serial_number(seq, (u8 *) (work32 + 16),
46805@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46806 u8 pad[256]; // allow up to 256 byte (max) serial number
46807 } result;
46808
46809- char tmp[24 + 1];
46810-
46811 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
46812
46813 if (token < 0) {
46814@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46815 }
46816
46817 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
46818- seq_printf(seq, "Module name : %s\n",
46819- chtostr(tmp, result.module_name, 24));
46820- seq_printf(seq, "Module revision : %s\n",
46821- chtostr(tmp, result.module_rev, 8));
46822+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
46823+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
46824
46825 seq_printf(seq, "Serial number : ");
46826 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
46827@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46828 u8 instance_number[4];
46829 } result;
46830
46831- char tmp[64 + 1];
46832-
46833 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
46834
46835 if (token < 0) {
46836@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46837 return 0;
46838 }
46839
46840- seq_printf(seq, "Device name : %s\n",
46841- chtostr(tmp, result.device_name, 64));
46842- seq_printf(seq, "Service name : %s\n",
46843- chtostr(tmp, result.service_name, 64));
46844- seq_printf(seq, "Physical name : %s\n",
46845- chtostr(tmp, result.physical_location, 64));
46846- seq_printf(seq, "Instance number : %s\n",
46847- chtostr(tmp, result.instance_number, 4));
46848+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
46849+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
46850+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
46851+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
46852
46853 return 0;
46854 }
46855@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46856 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
46857 {
46858 struct i2o_device *d = (struct i2o_device *)seq->private;
46859- static u32 work32[12];
46860- static u16 *work16 = (u16 *) work32;
46861- static u8 *work8 = (u8 *) work32;
46862+ u32 work32[12];
46863+ u16 *work16 = (u16 *) work32;
46864+ u8 *work8 = (u8 *) work32;
46865 int token;
46866
46867 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
46868diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
46869index 92752fb..a7494f6 100644
46870--- a/drivers/message/i2o/iop.c
46871+++ b/drivers/message/i2o/iop.c
46872@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
46873
46874 spin_lock_irqsave(&c->context_list_lock, flags);
46875
46876- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
46877- atomic_inc(&c->context_list_counter);
46878+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
46879+ atomic_inc_unchecked(&c->context_list_counter);
46880
46881- entry->context = atomic_read(&c->context_list_counter);
46882+ entry->context = atomic_read_unchecked(&c->context_list_counter);
46883
46884 list_add(&entry->list, &c->context_list);
46885
46886@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
46887
46888 #if BITS_PER_LONG == 64
46889 spin_lock_init(&c->context_list_lock);
46890- atomic_set(&c->context_list_counter, 0);
46891+ atomic_set_unchecked(&c->context_list_counter, 0);
46892 INIT_LIST_HEAD(&c->context_list);
46893 #endif
46894
46895diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46896index b2c7e3b..85aa4764 100644
46897--- a/drivers/mfd/ab8500-debugfs.c
46898+++ b/drivers/mfd/ab8500-debugfs.c
46899@@ -100,7 +100,7 @@ static int irq_last;
46900 static u32 *irq_count;
46901 static int num_irqs;
46902
46903-static struct device_attribute **dev_attr;
46904+static device_attribute_no_const **dev_attr;
46905 static char **event_name;
46906
46907 static u8 avg_sample = SAMPLE_16;
46908diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46909index ecbe78e..b2ca870 100644
46910--- a/drivers/mfd/max8925-i2c.c
46911+++ b/drivers/mfd/max8925-i2c.c
46912@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46913 const struct i2c_device_id *id)
46914 {
46915 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46916- static struct max8925_chip *chip;
46917+ struct max8925_chip *chip;
46918 struct device_node *node = client->dev.of_node;
46919
46920 if (node && !pdata) {
46921diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46922index f243e75..322176c 100644
46923--- a/drivers/mfd/tps65910.c
46924+++ b/drivers/mfd/tps65910.c
46925@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46926 struct tps65910_platform_data *pdata)
46927 {
46928 int ret = 0;
46929- static struct regmap_irq_chip *tps6591x_irqs_chip;
46930+ struct regmap_irq_chip *tps6591x_irqs_chip;
46931
46932 if (!irq) {
46933 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
46934diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46935index b1dabba..24a88f2 100644
46936--- a/drivers/mfd/twl4030-irq.c
46937+++ b/drivers/mfd/twl4030-irq.c
46938@@ -34,6 +34,7 @@
46939 #include <linux/of.h>
46940 #include <linux/irqdomain.h>
46941 #include <linux/i2c/twl.h>
46942+#include <asm/pgtable.h>
46943
46944 #include "twl-core.h"
46945
46946@@ -725,10 +726,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46947 * Install an irq handler for each of the SIH modules;
46948 * clone dummy irq_chip since PIH can't *do* anything
46949 */
46950- twl4030_irq_chip = dummy_irq_chip;
46951- twl4030_irq_chip.name = "twl4030";
46952+ pax_open_kernel();
46953+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46954+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46955
46956- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46957+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46958+ pax_close_kernel();
46959
46960 for (i = irq_base; i < irq_end; i++) {
46961 irq_set_chip_and_handler(i, &twl4030_irq_chip,
46962diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46963index 464419b..64bae8d 100644
46964--- a/drivers/misc/c2port/core.c
46965+++ b/drivers/misc/c2port/core.c
46966@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46967 goto error_idr_alloc;
46968 c2dev->id = ret;
46969
46970- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46971+ pax_open_kernel();
46972+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46973+ pax_close_kernel();
46974
46975 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46976 "c2port%d", c2dev->id);
46977diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46978index 3f2b625..945e179 100644
46979--- a/drivers/misc/eeprom/sunxi_sid.c
46980+++ b/drivers/misc/eeprom/sunxi_sid.c
46981@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46982
46983 platform_set_drvdata(pdev, sid_data);
46984
46985- sid_bin_attr.size = sid_data->keysize;
46986+ pax_open_kernel();
46987+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46988+ pax_close_kernel();
46989 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46990 return -ENODEV;
46991
46992diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
46993index 36f5d52..32311c3 100644
46994--- a/drivers/misc/kgdbts.c
46995+++ b/drivers/misc/kgdbts.c
46996@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
46997 char before[BREAK_INSTR_SIZE];
46998 char after[BREAK_INSTR_SIZE];
46999
47000- probe_kernel_read(before, (char *)kgdbts_break_test,
47001+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
47002 BREAK_INSTR_SIZE);
47003 init_simple_test();
47004 ts.tst = plant_and_detach_test;
47005@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
47006 /* Activate test with initial breakpoint */
47007 if (!is_early)
47008 kgdb_breakpoint();
47009- probe_kernel_read(after, (char *)kgdbts_break_test,
47010+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
47011 BREAK_INSTR_SIZE);
47012 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
47013 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
47014diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
47015index 3ef4627..8d00486 100644
47016--- a/drivers/misc/lis3lv02d/lis3lv02d.c
47017+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
47018@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
47019 * the lid is closed. This leads to interrupts as soon as a little move
47020 * is done.
47021 */
47022- atomic_inc(&lis3->count);
47023+ atomic_inc_unchecked(&lis3->count);
47024
47025 wake_up_interruptible(&lis3->misc_wait);
47026 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
47027@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
47028 if (lis3->pm_dev)
47029 pm_runtime_get_sync(lis3->pm_dev);
47030
47031- atomic_set(&lis3->count, 0);
47032+ atomic_set_unchecked(&lis3->count, 0);
47033 return 0;
47034 }
47035
47036@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
47037 add_wait_queue(&lis3->misc_wait, &wait);
47038 while (true) {
47039 set_current_state(TASK_INTERRUPTIBLE);
47040- data = atomic_xchg(&lis3->count, 0);
47041+ data = atomic_xchg_unchecked(&lis3->count, 0);
47042 if (data)
47043 break;
47044
47045@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
47046 struct lis3lv02d, miscdev);
47047
47048 poll_wait(file, &lis3->misc_wait, wait);
47049- if (atomic_read(&lis3->count))
47050+ if (atomic_read_unchecked(&lis3->count))
47051 return POLLIN | POLLRDNORM;
47052 return 0;
47053 }
47054diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
47055index c439c82..1f20f57 100644
47056--- a/drivers/misc/lis3lv02d/lis3lv02d.h
47057+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
47058@@ -297,7 +297,7 @@ struct lis3lv02d {
47059 struct input_polled_dev *idev; /* input device */
47060 struct platform_device *pdev; /* platform device */
47061 struct regulator_bulk_data regulators[2];
47062- atomic_t count; /* interrupt count after last read */
47063+ atomic_unchecked_t count; /* interrupt count after last read */
47064 union axis_conversion ac; /* hw -> logical axis */
47065 int mapped_btns[3];
47066
47067diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
47068index 2f30bad..c4c13d0 100644
47069--- a/drivers/misc/sgi-gru/gruhandles.c
47070+++ b/drivers/misc/sgi-gru/gruhandles.c
47071@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
47072 unsigned long nsec;
47073
47074 nsec = CLKS2NSEC(clks);
47075- atomic_long_inc(&mcs_op_statistics[op].count);
47076- atomic_long_add(nsec, &mcs_op_statistics[op].total);
47077+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
47078+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
47079 if (mcs_op_statistics[op].max < nsec)
47080 mcs_op_statistics[op].max = nsec;
47081 }
47082diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
47083index 4f76359..cdfcb2e 100644
47084--- a/drivers/misc/sgi-gru/gruprocfs.c
47085+++ b/drivers/misc/sgi-gru/gruprocfs.c
47086@@ -32,9 +32,9 @@
47087
47088 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
47089
47090-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
47091+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
47092 {
47093- unsigned long val = atomic_long_read(v);
47094+ unsigned long val = atomic_long_read_unchecked(v);
47095
47096 seq_printf(s, "%16lu %s\n", val, id);
47097 }
47098@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
47099
47100 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
47101 for (op = 0; op < mcsop_last; op++) {
47102- count = atomic_long_read(&mcs_op_statistics[op].count);
47103- total = atomic_long_read(&mcs_op_statistics[op].total);
47104+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
47105+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
47106 max = mcs_op_statistics[op].max;
47107 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
47108 count ? total / count : 0, max);
47109diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
47110index 5c3ce24..4915ccb 100644
47111--- a/drivers/misc/sgi-gru/grutables.h
47112+++ b/drivers/misc/sgi-gru/grutables.h
47113@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
47114 * GRU statistics.
47115 */
47116 struct gru_stats_s {
47117- atomic_long_t vdata_alloc;
47118- atomic_long_t vdata_free;
47119- atomic_long_t gts_alloc;
47120- atomic_long_t gts_free;
47121- atomic_long_t gms_alloc;
47122- atomic_long_t gms_free;
47123- atomic_long_t gts_double_allocate;
47124- atomic_long_t assign_context;
47125- atomic_long_t assign_context_failed;
47126- atomic_long_t free_context;
47127- atomic_long_t load_user_context;
47128- atomic_long_t load_kernel_context;
47129- atomic_long_t lock_kernel_context;
47130- atomic_long_t unlock_kernel_context;
47131- atomic_long_t steal_user_context;
47132- atomic_long_t steal_kernel_context;
47133- atomic_long_t steal_context_failed;
47134- atomic_long_t nopfn;
47135- atomic_long_t asid_new;
47136- atomic_long_t asid_next;
47137- atomic_long_t asid_wrap;
47138- atomic_long_t asid_reuse;
47139- atomic_long_t intr;
47140- atomic_long_t intr_cbr;
47141- atomic_long_t intr_tfh;
47142- atomic_long_t intr_spurious;
47143- atomic_long_t intr_mm_lock_failed;
47144- atomic_long_t call_os;
47145- atomic_long_t call_os_wait_queue;
47146- atomic_long_t user_flush_tlb;
47147- atomic_long_t user_unload_context;
47148- atomic_long_t user_exception;
47149- atomic_long_t set_context_option;
47150- atomic_long_t check_context_retarget_intr;
47151- atomic_long_t check_context_unload;
47152- atomic_long_t tlb_dropin;
47153- atomic_long_t tlb_preload_page;
47154- atomic_long_t tlb_dropin_fail_no_asid;
47155- atomic_long_t tlb_dropin_fail_upm;
47156- atomic_long_t tlb_dropin_fail_invalid;
47157- atomic_long_t tlb_dropin_fail_range_active;
47158- atomic_long_t tlb_dropin_fail_idle;
47159- atomic_long_t tlb_dropin_fail_fmm;
47160- atomic_long_t tlb_dropin_fail_no_exception;
47161- atomic_long_t tfh_stale_on_fault;
47162- atomic_long_t mmu_invalidate_range;
47163- atomic_long_t mmu_invalidate_page;
47164- atomic_long_t flush_tlb;
47165- atomic_long_t flush_tlb_gru;
47166- atomic_long_t flush_tlb_gru_tgh;
47167- atomic_long_t flush_tlb_gru_zero_asid;
47168+ atomic_long_unchecked_t vdata_alloc;
47169+ atomic_long_unchecked_t vdata_free;
47170+ atomic_long_unchecked_t gts_alloc;
47171+ atomic_long_unchecked_t gts_free;
47172+ atomic_long_unchecked_t gms_alloc;
47173+ atomic_long_unchecked_t gms_free;
47174+ atomic_long_unchecked_t gts_double_allocate;
47175+ atomic_long_unchecked_t assign_context;
47176+ atomic_long_unchecked_t assign_context_failed;
47177+ atomic_long_unchecked_t free_context;
47178+ atomic_long_unchecked_t load_user_context;
47179+ atomic_long_unchecked_t load_kernel_context;
47180+ atomic_long_unchecked_t lock_kernel_context;
47181+ atomic_long_unchecked_t unlock_kernel_context;
47182+ atomic_long_unchecked_t steal_user_context;
47183+ atomic_long_unchecked_t steal_kernel_context;
47184+ atomic_long_unchecked_t steal_context_failed;
47185+ atomic_long_unchecked_t nopfn;
47186+ atomic_long_unchecked_t asid_new;
47187+ atomic_long_unchecked_t asid_next;
47188+ atomic_long_unchecked_t asid_wrap;
47189+ atomic_long_unchecked_t asid_reuse;
47190+ atomic_long_unchecked_t intr;
47191+ atomic_long_unchecked_t intr_cbr;
47192+ atomic_long_unchecked_t intr_tfh;
47193+ atomic_long_unchecked_t intr_spurious;
47194+ atomic_long_unchecked_t intr_mm_lock_failed;
47195+ atomic_long_unchecked_t call_os;
47196+ atomic_long_unchecked_t call_os_wait_queue;
47197+ atomic_long_unchecked_t user_flush_tlb;
47198+ atomic_long_unchecked_t user_unload_context;
47199+ atomic_long_unchecked_t user_exception;
47200+ atomic_long_unchecked_t set_context_option;
47201+ atomic_long_unchecked_t check_context_retarget_intr;
47202+ atomic_long_unchecked_t check_context_unload;
47203+ atomic_long_unchecked_t tlb_dropin;
47204+ atomic_long_unchecked_t tlb_preload_page;
47205+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
47206+ atomic_long_unchecked_t tlb_dropin_fail_upm;
47207+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
47208+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
47209+ atomic_long_unchecked_t tlb_dropin_fail_idle;
47210+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
47211+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
47212+ atomic_long_unchecked_t tfh_stale_on_fault;
47213+ atomic_long_unchecked_t mmu_invalidate_range;
47214+ atomic_long_unchecked_t mmu_invalidate_page;
47215+ atomic_long_unchecked_t flush_tlb;
47216+ atomic_long_unchecked_t flush_tlb_gru;
47217+ atomic_long_unchecked_t flush_tlb_gru_tgh;
47218+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
47219
47220- atomic_long_t copy_gpa;
47221- atomic_long_t read_gpa;
47222+ atomic_long_unchecked_t copy_gpa;
47223+ atomic_long_unchecked_t read_gpa;
47224
47225- atomic_long_t mesq_receive;
47226- atomic_long_t mesq_receive_none;
47227- atomic_long_t mesq_send;
47228- atomic_long_t mesq_send_failed;
47229- atomic_long_t mesq_noop;
47230- atomic_long_t mesq_send_unexpected_error;
47231- atomic_long_t mesq_send_lb_overflow;
47232- atomic_long_t mesq_send_qlimit_reached;
47233- atomic_long_t mesq_send_amo_nacked;
47234- atomic_long_t mesq_send_put_nacked;
47235- atomic_long_t mesq_page_overflow;
47236- atomic_long_t mesq_qf_locked;
47237- atomic_long_t mesq_qf_noop_not_full;
47238- atomic_long_t mesq_qf_switch_head_failed;
47239- atomic_long_t mesq_qf_unexpected_error;
47240- atomic_long_t mesq_noop_unexpected_error;
47241- atomic_long_t mesq_noop_lb_overflow;
47242- atomic_long_t mesq_noop_qlimit_reached;
47243- atomic_long_t mesq_noop_amo_nacked;
47244- atomic_long_t mesq_noop_put_nacked;
47245- atomic_long_t mesq_noop_page_overflow;
47246+ atomic_long_unchecked_t mesq_receive;
47247+ atomic_long_unchecked_t mesq_receive_none;
47248+ atomic_long_unchecked_t mesq_send;
47249+ atomic_long_unchecked_t mesq_send_failed;
47250+ atomic_long_unchecked_t mesq_noop;
47251+ atomic_long_unchecked_t mesq_send_unexpected_error;
47252+ atomic_long_unchecked_t mesq_send_lb_overflow;
47253+ atomic_long_unchecked_t mesq_send_qlimit_reached;
47254+ atomic_long_unchecked_t mesq_send_amo_nacked;
47255+ atomic_long_unchecked_t mesq_send_put_nacked;
47256+ atomic_long_unchecked_t mesq_page_overflow;
47257+ atomic_long_unchecked_t mesq_qf_locked;
47258+ atomic_long_unchecked_t mesq_qf_noop_not_full;
47259+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
47260+ atomic_long_unchecked_t mesq_qf_unexpected_error;
47261+ atomic_long_unchecked_t mesq_noop_unexpected_error;
47262+ atomic_long_unchecked_t mesq_noop_lb_overflow;
47263+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
47264+ atomic_long_unchecked_t mesq_noop_amo_nacked;
47265+ atomic_long_unchecked_t mesq_noop_put_nacked;
47266+ atomic_long_unchecked_t mesq_noop_page_overflow;
47267
47268 };
47269
47270@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
47271 tghop_invalidate, mcsop_last};
47272
47273 struct mcs_op_statistic {
47274- atomic_long_t count;
47275- atomic_long_t total;
47276+ atomic_long_unchecked_t count;
47277+ atomic_long_unchecked_t total;
47278 unsigned long max;
47279 };
47280
47281@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
47282
47283 #define STAT(id) do { \
47284 if (gru_options & OPT_STATS) \
47285- atomic_long_inc(&gru_stats.id); \
47286+ atomic_long_inc_unchecked(&gru_stats.id); \
47287 } while (0)
47288
47289 #ifdef CONFIG_SGI_GRU_DEBUG
47290diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
47291index c862cd4..0d176fe 100644
47292--- a/drivers/misc/sgi-xp/xp.h
47293+++ b/drivers/misc/sgi-xp/xp.h
47294@@ -288,7 +288,7 @@ struct xpc_interface {
47295 xpc_notify_func, void *);
47296 void (*received) (short, int, void *);
47297 enum xp_retval (*partid_to_nasids) (short, void *);
47298-};
47299+} __no_const;
47300
47301 extern struct xpc_interface xpc_interface;
47302
47303diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
47304index 01be66d..e3a0c7e 100644
47305--- a/drivers/misc/sgi-xp/xp_main.c
47306+++ b/drivers/misc/sgi-xp/xp_main.c
47307@@ -78,13 +78,13 @@ xpc_notloaded(void)
47308 }
47309
47310 struct xpc_interface xpc_interface = {
47311- (void (*)(int))xpc_notloaded,
47312- (void (*)(int))xpc_notloaded,
47313- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47314- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47315+ .connect = (void (*)(int))xpc_notloaded,
47316+ .disconnect = (void (*)(int))xpc_notloaded,
47317+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47318+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47319 void *))xpc_notloaded,
47320- (void (*)(short, int, void *))xpc_notloaded,
47321- (enum xp_retval(*)(short, void *))xpc_notloaded
47322+ .received = (void (*)(short, int, void *))xpc_notloaded,
47323+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
47324 };
47325 EXPORT_SYMBOL_GPL(xpc_interface);
47326
47327diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
47328index b94d5f7..7f494c5 100644
47329--- a/drivers/misc/sgi-xp/xpc.h
47330+++ b/drivers/misc/sgi-xp/xpc.h
47331@@ -835,6 +835,7 @@ struct xpc_arch_operations {
47332 void (*received_payload) (struct xpc_channel *, void *);
47333 void (*notify_senders_of_disconnect) (struct xpc_channel *);
47334 };
47335+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
47336
47337 /* struct xpc_partition act_state values (for XPC HB) */
47338
47339@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
47340 /* found in xpc_main.c */
47341 extern struct device *xpc_part;
47342 extern struct device *xpc_chan;
47343-extern struct xpc_arch_operations xpc_arch_ops;
47344+extern xpc_arch_operations_no_const xpc_arch_ops;
47345 extern int xpc_disengage_timelimit;
47346 extern int xpc_disengage_timedout;
47347 extern int xpc_activate_IRQ_rcvd;
47348diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
47349index 82dc574..8539ab2 100644
47350--- a/drivers/misc/sgi-xp/xpc_main.c
47351+++ b/drivers/misc/sgi-xp/xpc_main.c
47352@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
47353 .notifier_call = xpc_system_die,
47354 };
47355
47356-struct xpc_arch_operations xpc_arch_ops;
47357+xpc_arch_operations_no_const xpc_arch_ops;
47358
47359 /*
47360 * Timer function to enforce the timelimit on the partition disengage.
47361@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
47362
47363 if (((die_args->trapnr == X86_TRAP_MF) ||
47364 (die_args->trapnr == X86_TRAP_XF)) &&
47365- !user_mode_vm(die_args->regs))
47366+ !user_mode(die_args->regs))
47367 xpc_die_deactivate();
47368
47369 break;
47370diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
47371index ede41f0..744fbd9 100644
47372--- a/drivers/mmc/card/block.c
47373+++ b/drivers/mmc/card/block.c
47374@@ -574,7 +574,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
47375 if (idata->ic.postsleep_min_us)
47376 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
47377
47378- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
47379+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
47380 err = -EFAULT;
47381 goto cmd_rel_host;
47382 }
47383diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
47384index f51b5ba..86614a7 100644
47385--- a/drivers/mmc/core/mmc_ops.c
47386+++ b/drivers/mmc/core/mmc_ops.c
47387@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
47388 void *data_buf;
47389 int is_on_stack;
47390
47391- is_on_stack = object_is_on_stack(buf);
47392+ is_on_stack = object_starts_on_stack(buf);
47393 if (is_on_stack) {
47394 /*
47395 * dma onto stack is unsafe/nonportable, but callers to this
47396diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
47397index 08fd956..370487a 100644
47398--- a/drivers/mmc/host/dw_mmc.h
47399+++ b/drivers/mmc/host/dw_mmc.h
47400@@ -262,5 +262,5 @@ struct dw_mci_drv_data {
47401 int (*parse_dt)(struct dw_mci *host);
47402 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
47403 struct dw_mci_tuning_data *tuning_data);
47404-};
47405+} __do_const;
47406 #endif /* _DW_MMC_H_ */
47407diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
47408index e4d4707..28262a3 100644
47409--- a/drivers/mmc/host/mmci.c
47410+++ b/drivers/mmc/host/mmci.c
47411@@ -1612,7 +1612,9 @@ static int mmci_probe(struct amba_device *dev,
47412 mmc->caps |= MMC_CAP_CMD23;
47413
47414 if (variant->busy_detect) {
47415- mmci_ops.card_busy = mmci_card_busy;
47416+ pax_open_kernel();
47417+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
47418+ pax_close_kernel();
47419 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
47420 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
47421 mmc->max_busy_timeout = 0;
47422diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
47423index ccec0e3..199f9ce 100644
47424--- a/drivers/mmc/host/sdhci-esdhc-imx.c
47425+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
47426@@ -1034,9 +1034,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
47427 host->mmc->caps |= MMC_CAP_1_8V_DDR;
47428 }
47429
47430- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
47431- sdhci_esdhc_ops.platform_execute_tuning =
47432+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
47433+ pax_open_kernel();
47434+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
47435 esdhc_executing_tuning;
47436+ pax_close_kernel();
47437+ }
47438
47439 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
47440 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
47441diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
47442index 1e47903..7683916 100644
47443--- a/drivers/mmc/host/sdhci-s3c.c
47444+++ b/drivers/mmc/host/sdhci-s3c.c
47445@@ -584,9 +584,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
47446 * we can use overriding functions instead of default.
47447 */
47448 if (sc->no_divider) {
47449- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47450- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47451- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47452+ pax_open_kernel();
47453+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47454+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47455+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47456+ pax_close_kernel();
47457 }
47458
47459 /* It supports additional host capabilities if needed */
47460diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
47461index 423666b..81ff5eb 100644
47462--- a/drivers/mtd/chips/cfi_cmdset_0020.c
47463+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
47464@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
47465 size_t totlen = 0, thislen;
47466 int ret = 0;
47467 size_t buflen = 0;
47468- static char *buffer;
47469+ char *buffer;
47470
47471 if (!ECCBUF_SIZE) {
47472 /* We should fall back to a general writev implementation.
47473diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
47474index 0b071a3..8ec3d5b 100644
47475--- a/drivers/mtd/nand/denali.c
47476+++ b/drivers/mtd/nand/denali.c
47477@@ -24,6 +24,7 @@
47478 #include <linux/slab.h>
47479 #include <linux/mtd/mtd.h>
47480 #include <linux/module.h>
47481+#include <linux/slab.h>
47482
47483 #include "denali.h"
47484
47485diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47486index 959cb9b..8520fe5 100644
47487--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47488+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47489@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
47490
47491 /* first try to map the upper buffer directly */
47492 if (virt_addr_valid(this->upper_buf) &&
47493- !object_is_on_stack(this->upper_buf)) {
47494+ !object_starts_on_stack(this->upper_buf)) {
47495 sg_init_one(sgl, this->upper_buf, this->upper_len);
47496 ret = dma_map_sg(this->dev, sgl, 1, dr);
47497 if (ret == 0)
47498diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
47499index 51b9d6a..52af9a7 100644
47500--- a/drivers/mtd/nftlmount.c
47501+++ b/drivers/mtd/nftlmount.c
47502@@ -24,6 +24,7 @@
47503 #include <asm/errno.h>
47504 #include <linux/delay.h>
47505 #include <linux/slab.h>
47506+#include <linux/sched.h>
47507 #include <linux/mtd/mtd.h>
47508 #include <linux/mtd/nand.h>
47509 #include <linux/mtd/nftl.h>
47510diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
47511index cf49c22..971b133 100644
47512--- a/drivers/mtd/sm_ftl.c
47513+++ b/drivers/mtd/sm_ftl.c
47514@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
47515 #define SM_CIS_VENDOR_OFFSET 0x59
47516 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
47517 {
47518- struct attribute_group *attr_group;
47519+ attribute_group_no_const *attr_group;
47520 struct attribute **attributes;
47521 struct sm_sysfs_attribute *vendor_attribute;
47522 char *vendor;
47523diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
47524index d163e11..f517018 100644
47525--- a/drivers/net/bonding/bond_netlink.c
47526+++ b/drivers/net/bonding/bond_netlink.c
47527@@ -548,7 +548,7 @@ nla_put_failure:
47528 return -EMSGSIZE;
47529 }
47530
47531-struct rtnl_link_ops bond_link_ops __read_mostly = {
47532+struct rtnl_link_ops bond_link_ops = {
47533 .kind = "bond",
47534 .priv_size = sizeof(struct bonding),
47535 .setup = bond_setup,
47536diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
47537index 4168822..f38eeddf 100644
47538--- a/drivers/net/can/Kconfig
47539+++ b/drivers/net/can/Kconfig
47540@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
47541
47542 config CAN_FLEXCAN
47543 tristate "Support for Freescale FLEXCAN based chips"
47544- depends on ARM || PPC
47545+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
47546 ---help---
47547 Say Y here if you want to support for Freescale FlexCAN.
47548
47549diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47550index 1d162cc..b546a75 100644
47551--- a/drivers/net/ethernet/8390/ax88796.c
47552+++ b/drivers/net/ethernet/8390/ax88796.c
47553@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47554 if (ax->plat->reg_offsets)
47555 ei_local->reg_offset = ax->plat->reg_offsets;
47556 else {
47557+ resource_size_t _mem_size = mem_size;
47558+ do_div(_mem_size, 0x18);
47559 ei_local->reg_offset = ax->reg_offsets;
47560 for (ret = 0; ret < 0x18; ret++)
47561- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47562+ ax->reg_offsets[ret] = _mem_size * ret;
47563 }
47564
47565 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
47566diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
47567index 7330681..7e9e463 100644
47568--- a/drivers/net/ethernet/altera/altera_tse_main.c
47569+++ b/drivers/net/ethernet/altera/altera_tse_main.c
47570@@ -1182,7 +1182,7 @@ static int tse_shutdown(struct net_device *dev)
47571 return 0;
47572 }
47573
47574-static struct net_device_ops altera_tse_netdev_ops = {
47575+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
47576 .ndo_open = tse_open,
47577 .ndo_stop = tse_shutdown,
47578 .ndo_start_xmit = tse_start_xmit,
47579@@ -1439,11 +1439,13 @@ static int altera_tse_probe(struct platform_device *pdev)
47580 ndev->netdev_ops = &altera_tse_netdev_ops;
47581 altera_tse_set_ethtool_ops(ndev);
47582
47583+ pax_open_kernel();
47584 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
47585
47586 if (priv->hash_filter)
47587 altera_tse_netdev_ops.ndo_set_rx_mode =
47588 tse_set_rx_mode_hashfilter;
47589+ pax_close_kernel();
47590
47591 /* Scatter/gather IO is not supported,
47592 * so it is turned off
47593diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47594index cc25a3a..c8d72d3 100644
47595--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47596+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47597@@ -1083,14 +1083,14 @@ do { \
47598 * operations, everything works on mask values.
47599 */
47600 #define XMDIO_READ(_pdata, _mmd, _reg) \
47601- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
47602+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
47603 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
47604
47605 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
47606 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
47607
47608 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
47609- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
47610+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
47611 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
47612
47613 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
47614diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47615index 7d6a49b..e6d403b 100644
47616--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47617+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47618@@ -188,7 +188,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
47619
47620 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
47621
47622- pdata->hw_if.config_dcb_tc(pdata);
47623+ pdata->hw_if->config_dcb_tc(pdata);
47624
47625 return 0;
47626 }
47627@@ -227,7 +227,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
47628
47629 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
47630
47631- pdata->hw_if.config_dcb_pfc(pdata);
47632+ pdata->hw_if->config_dcb_pfc(pdata);
47633
47634 return 0;
47635 }
47636diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47637index 1c5d62e..8e14d54 100644
47638--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47639+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47640@@ -236,7 +236,7 @@ err_ring:
47641
47642 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47643 {
47644- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47645+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47646 struct xgbe_channel *channel;
47647 struct xgbe_ring *ring;
47648 struct xgbe_ring_data *rdata;
47649@@ -277,7 +277,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47650
47651 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
47652 {
47653- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47654+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47655 struct xgbe_channel *channel;
47656 struct xgbe_ring *ring;
47657 struct xgbe_ring_desc *rdesc;
47658@@ -506,7 +506,7 @@ err_out:
47659 static void xgbe_realloc_skb(struct xgbe_channel *channel)
47660 {
47661 struct xgbe_prv_data *pdata = channel->pdata;
47662- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47663+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47664 struct xgbe_ring *ring = channel->rx_ring;
47665 struct xgbe_ring_data *rdata;
47666 struct sk_buff *skb = NULL;
47667@@ -550,17 +550,12 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
47668 DBGPR("<--xgbe_realloc_skb\n");
47669 }
47670
47671-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
47672-{
47673- DBGPR("-->xgbe_init_function_ptrs_desc\n");
47674-
47675- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
47676- desc_if->free_ring_resources = xgbe_free_ring_resources;
47677- desc_if->map_tx_skb = xgbe_map_tx_skb;
47678- desc_if->realloc_skb = xgbe_realloc_skb;
47679- desc_if->unmap_skb = xgbe_unmap_skb;
47680- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
47681- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
47682-
47683- DBGPR("<--xgbe_init_function_ptrs_desc\n");
47684-}
47685+const struct xgbe_desc_if default_xgbe_desc_if = {
47686+ .alloc_ring_resources = xgbe_alloc_ring_resources,
47687+ .free_ring_resources = xgbe_free_ring_resources,
47688+ .map_tx_skb = xgbe_map_tx_skb,
47689+ .realloc_skb = xgbe_realloc_skb,
47690+ .unmap_skb = xgbe_unmap_skb,
47691+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
47692+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
47693+};
47694diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47695index ea27383..faa8936 100644
47696--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47697+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47698@@ -2463,7 +2463,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
47699
47700 static int xgbe_init(struct xgbe_prv_data *pdata)
47701 {
47702- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47703+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47704 int ret;
47705
47706 DBGPR("-->xgbe_init\n");
47707@@ -2525,101 +2525,96 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
47708 return 0;
47709 }
47710
47711-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
47712-{
47713- DBGPR("-->xgbe_init_function_ptrs\n");
47714-
47715- hw_if->tx_complete = xgbe_tx_complete;
47716-
47717- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
47718- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
47719- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
47720- hw_if->set_mac_address = xgbe_set_mac_address;
47721-
47722- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
47723- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
47724-
47725- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
47726- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
47727- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
47728- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
47729- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
47730-
47731- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
47732- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
47733-
47734- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
47735- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
47736- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
47737-
47738- hw_if->enable_tx = xgbe_enable_tx;
47739- hw_if->disable_tx = xgbe_disable_tx;
47740- hw_if->enable_rx = xgbe_enable_rx;
47741- hw_if->disable_rx = xgbe_disable_rx;
47742-
47743- hw_if->powerup_tx = xgbe_powerup_tx;
47744- hw_if->powerdown_tx = xgbe_powerdown_tx;
47745- hw_if->powerup_rx = xgbe_powerup_rx;
47746- hw_if->powerdown_rx = xgbe_powerdown_rx;
47747-
47748- hw_if->pre_xmit = xgbe_pre_xmit;
47749- hw_if->dev_read = xgbe_dev_read;
47750- hw_if->enable_int = xgbe_enable_int;
47751- hw_if->disable_int = xgbe_disable_int;
47752- hw_if->init = xgbe_init;
47753- hw_if->exit = xgbe_exit;
47754+const struct xgbe_hw_if default_xgbe_hw_if = {
47755+ .tx_complete = xgbe_tx_complete,
47756+
47757+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
47758+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
47759+ .add_mac_addresses = xgbe_add_mac_addresses,
47760+ .set_mac_address = xgbe_set_mac_address,
47761+
47762+ .enable_rx_csum = xgbe_enable_rx_csum,
47763+ .disable_rx_csum = xgbe_disable_rx_csum,
47764+
47765+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
47766+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
47767+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
47768+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
47769+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
47770+
47771+ .read_mmd_regs = xgbe_read_mmd_regs,
47772+ .write_mmd_regs = xgbe_write_mmd_regs,
47773+
47774+ .set_gmii_speed = xgbe_set_gmii_speed,
47775+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
47776+ .set_xgmii_speed = xgbe_set_xgmii_speed,
47777+
47778+ .enable_tx = xgbe_enable_tx,
47779+ .disable_tx = xgbe_disable_tx,
47780+ .enable_rx = xgbe_enable_rx,
47781+ .disable_rx = xgbe_disable_rx,
47782+
47783+ .powerup_tx = xgbe_powerup_tx,
47784+ .powerdown_tx = xgbe_powerdown_tx,
47785+ .powerup_rx = xgbe_powerup_rx,
47786+ .powerdown_rx = xgbe_powerdown_rx,
47787+
47788+ .pre_xmit = xgbe_pre_xmit,
47789+ .dev_read = xgbe_dev_read,
47790+ .enable_int = xgbe_enable_int,
47791+ .disable_int = xgbe_disable_int,
47792+ .init = xgbe_init,
47793+ .exit = xgbe_exit,
47794
47795 /* Descriptor related Sequences have to be initialized here */
47796- hw_if->tx_desc_init = xgbe_tx_desc_init;
47797- hw_if->rx_desc_init = xgbe_rx_desc_init;
47798- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
47799- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
47800- hw_if->is_last_desc = xgbe_is_last_desc;
47801- hw_if->is_context_desc = xgbe_is_context_desc;
47802+ .tx_desc_init = xgbe_tx_desc_init,
47803+ .rx_desc_init = xgbe_rx_desc_init,
47804+ .tx_desc_reset = xgbe_tx_desc_reset,
47805+ .rx_desc_reset = xgbe_rx_desc_reset,
47806+ .is_last_desc = xgbe_is_last_desc,
47807+ .is_context_desc = xgbe_is_context_desc,
47808
47809 /* For FLOW ctrl */
47810- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
47811- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
47812+ .config_tx_flow_control = xgbe_config_tx_flow_control,
47813+ .config_rx_flow_control = xgbe_config_rx_flow_control,
47814
47815 /* For RX coalescing */
47816- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
47817- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
47818- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
47819- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
47820+ .config_rx_coalesce = xgbe_config_rx_coalesce,
47821+ .config_tx_coalesce = xgbe_config_tx_coalesce,
47822+ .usec_to_riwt = xgbe_usec_to_riwt,
47823+ .riwt_to_usec = xgbe_riwt_to_usec,
47824
47825 /* For RX and TX threshold config */
47826- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
47827- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
47828+ .config_rx_threshold = xgbe_config_rx_threshold,
47829+ .config_tx_threshold = xgbe_config_tx_threshold,
47830
47831 /* For RX and TX Store and Forward Mode config */
47832- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
47833- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
47834+ .config_rsf_mode = xgbe_config_rsf_mode,
47835+ .config_tsf_mode = xgbe_config_tsf_mode,
47836
47837 /* For TX DMA Operating on Second Frame config */
47838- hw_if->config_osp_mode = xgbe_config_osp_mode;
47839+ .config_osp_mode = xgbe_config_osp_mode,
47840
47841 /* For RX and TX PBL config */
47842- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
47843- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
47844- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
47845- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
47846- hw_if->config_pblx8 = xgbe_config_pblx8;
47847+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
47848+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
47849+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
47850+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
47851+ .config_pblx8 = xgbe_config_pblx8,
47852
47853 /* For MMC statistics support */
47854- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
47855- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
47856- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
47857+ .tx_mmc_int = xgbe_tx_mmc_int,
47858+ .rx_mmc_int = xgbe_rx_mmc_int,
47859+ .read_mmc_stats = xgbe_read_mmc_stats,
47860
47861 /* For PTP config */
47862- hw_if->config_tstamp = xgbe_config_tstamp;
47863- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
47864- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
47865- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
47866- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
47867+ .config_tstamp = xgbe_config_tstamp,
47868+ .update_tstamp_addend = xgbe_update_tstamp_addend,
47869+ .set_tstamp_time = xgbe_set_tstamp_time,
47870+ .get_tstamp_time = xgbe_get_tstamp_time,
47871+ .get_tx_tstamp = xgbe_get_tx_tstamp,
47872
47873 /* For Data Center Bridging config */
47874- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
47875- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
47876-
47877- DBGPR("<--xgbe_init_function_ptrs\n");
47878-}
47879+ .config_dcb_tc = xgbe_config_dcb_tc,
47880+ .config_dcb_pfc = xgbe_config_dcb_pfc,
47881+};
47882diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47883index b26d758..b0d1c3b 100644
47884--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47885+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47886@@ -155,7 +155,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
47887
47888 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47889 {
47890- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47891+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47892 struct xgbe_channel *channel;
47893 enum xgbe_int int_id;
47894 unsigned int i;
47895@@ -177,7 +177,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47896
47897 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47898 {
47899- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47900+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47901 struct xgbe_channel *channel;
47902 enum xgbe_int int_id;
47903 unsigned int i;
47904@@ -200,7 +200,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47905 static irqreturn_t xgbe_isr(int irq, void *data)
47906 {
47907 struct xgbe_prv_data *pdata = data;
47908- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47909+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47910 struct xgbe_channel *channel;
47911 unsigned int dma_isr, dma_ch_isr;
47912 unsigned int mac_isr, mac_tssr;
47913@@ -447,7 +447,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
47914
47915 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47916 {
47917- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47918+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47919
47920 DBGPR("-->xgbe_init_tx_coalesce\n");
47921
47922@@ -461,7 +461,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47923
47924 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47925 {
47926- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47927+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47928
47929 DBGPR("-->xgbe_init_rx_coalesce\n");
47930
47931@@ -475,7 +475,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47932
47933 static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
47934 {
47935- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47936+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47937 struct xgbe_channel *channel;
47938 struct xgbe_ring *ring;
47939 struct xgbe_ring_data *rdata;
47940@@ -500,7 +500,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
47941
47942 static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
47943 {
47944- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47945+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47946 struct xgbe_channel *channel;
47947 struct xgbe_ring *ring;
47948 struct xgbe_ring_data *rdata;
47949@@ -526,7 +526,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
47950 static void xgbe_adjust_link(struct net_device *netdev)
47951 {
47952 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47953- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47954+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47955 struct phy_device *phydev = pdata->phydev;
47956 int new_state = 0;
47957
47958@@ -634,7 +634,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
47959 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47960 {
47961 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47962- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47963+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47964 unsigned long flags;
47965
47966 DBGPR("-->xgbe_powerdown\n");
47967@@ -672,7 +672,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47968 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47969 {
47970 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47971- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47972+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47973 unsigned long flags;
47974
47975 DBGPR("-->xgbe_powerup\n");
47976@@ -709,7 +709,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47977
47978 static int xgbe_start(struct xgbe_prv_data *pdata)
47979 {
47980- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47981+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47982 struct net_device *netdev = pdata->netdev;
47983
47984 DBGPR("-->xgbe_start\n");
47985@@ -735,7 +735,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
47986
47987 static void xgbe_stop(struct xgbe_prv_data *pdata)
47988 {
47989- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47990+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47991 struct net_device *netdev = pdata->netdev;
47992
47993 DBGPR("-->xgbe_stop\n");
47994@@ -755,7 +755,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
47995
47996 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
47997 {
47998- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47999+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48000
48001 DBGPR("-->xgbe_restart_dev\n");
48002
48003@@ -952,7 +952,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
48004 return -ERANGE;
48005 }
48006
48007- pdata->hw_if.config_tstamp(pdata, mac_tscr);
48008+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
48009
48010 memcpy(&pdata->tstamp_config, &config, sizeof(config));
48011
48012@@ -1090,8 +1090,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
48013 static int xgbe_open(struct net_device *netdev)
48014 {
48015 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48016- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48017- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48018+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48019+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48020 int ret;
48021
48022 DBGPR("-->xgbe_open\n");
48023@@ -1171,8 +1171,8 @@ err_phy_init:
48024 static int xgbe_close(struct net_device *netdev)
48025 {
48026 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48027- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48028- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48029+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48030+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48031
48032 DBGPR("-->xgbe_close\n");
48033
48034@@ -1206,8 +1206,8 @@ static int xgbe_close(struct net_device *netdev)
48035 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
48036 {
48037 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48038- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48039- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48040+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48041+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48042 struct xgbe_channel *channel;
48043 struct xgbe_ring *ring;
48044 struct xgbe_packet_data *packet;
48045@@ -1276,7 +1276,7 @@ tx_netdev_return:
48046 static void xgbe_set_rx_mode(struct net_device *netdev)
48047 {
48048 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48049- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48050+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48051 unsigned int pr_mode, am_mode;
48052
48053 DBGPR("-->xgbe_set_rx_mode\n");
48054@@ -1295,7 +1295,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
48055 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
48056 {
48057 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48058- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48059+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48060 struct sockaddr *saddr = addr;
48061
48062 DBGPR("-->xgbe_set_mac_address\n");
48063@@ -1362,7 +1362,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
48064
48065 DBGPR("-->%s\n", __func__);
48066
48067- pdata->hw_if.read_mmc_stats(pdata);
48068+ pdata->hw_if->read_mmc_stats(pdata);
48069
48070 s->rx_packets = pstats->rxframecount_gb;
48071 s->rx_bytes = pstats->rxoctetcount_gb;
48072@@ -1389,7 +1389,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
48073 u16 vid)
48074 {
48075 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48076- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48077+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48078
48079 DBGPR("-->%s\n", __func__);
48080
48081@@ -1405,7 +1405,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
48082 u16 vid)
48083 {
48084 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48085- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48086+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48087
48088 DBGPR("-->%s\n", __func__);
48089
48090@@ -1465,7 +1465,7 @@ static int xgbe_set_features(struct net_device *netdev,
48091 netdev_features_t features)
48092 {
48093 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48094- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48095+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48096 unsigned int rxcsum, rxvlan, rxvlan_filter;
48097
48098 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
48099@@ -1521,7 +1521,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
48100 static void xgbe_rx_refresh(struct xgbe_channel *channel)
48101 {
48102 struct xgbe_prv_data *pdata = channel->pdata;
48103- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48104+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48105 struct xgbe_ring *ring = channel->rx_ring;
48106 struct xgbe_ring_data *rdata;
48107
48108@@ -1537,8 +1537,8 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
48109 static int xgbe_tx_poll(struct xgbe_channel *channel)
48110 {
48111 struct xgbe_prv_data *pdata = channel->pdata;
48112- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48113- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48114+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48115+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48116 struct xgbe_ring *ring = channel->tx_ring;
48117 struct xgbe_ring_data *rdata;
48118 struct xgbe_ring_desc *rdesc;
48119@@ -1590,7 +1590,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
48120 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
48121 {
48122 struct xgbe_prv_data *pdata = channel->pdata;
48123- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48124+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48125 struct xgbe_ring *ring = channel->rx_ring;
48126 struct xgbe_ring_data *rdata;
48127 struct xgbe_packet_data *packet;
48128diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48129index 46f6130..f37dde3 100644
48130--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48131+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48132@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
48133
48134 DBGPR("-->%s\n", __func__);
48135
48136- pdata->hw_if.read_mmc_stats(pdata);
48137+ pdata->hw_if->read_mmc_stats(pdata);
48138 for (i = 0; i < XGBE_STATS_COUNT; i++) {
48139 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
48140 *data++ = *(u64 *)stat;
48141@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
48142 struct ethtool_coalesce *ec)
48143 {
48144 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48145- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48146+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48147 unsigned int riwt;
48148
48149 DBGPR("-->xgbe_get_coalesce\n");
48150@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
48151 struct ethtool_coalesce *ec)
48152 {
48153 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48154- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48155+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48156 unsigned int rx_frames, rx_riwt, rx_usecs;
48157 unsigned int tx_frames, tx_usecs;
48158
48159diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48160index bdf9cfa..340aea1 100644
48161--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48162+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48163@@ -210,12 +210,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
48164 DBGPR("<--xgbe_default_config\n");
48165 }
48166
48167-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
48168-{
48169- xgbe_init_function_ptrs_dev(&pdata->hw_if);
48170- xgbe_init_function_ptrs_desc(&pdata->desc_if);
48171-}
48172-
48173 static int xgbe_probe(struct platform_device *pdev)
48174 {
48175 struct xgbe_prv_data *pdata;
48176@@ -328,9 +322,8 @@ static int xgbe_probe(struct platform_device *pdev)
48177 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
48178
48179 /* Set all the function pointers */
48180- xgbe_init_all_fptrs(pdata);
48181- hw_if = &pdata->hw_if;
48182- desc_if = &pdata->desc_if;
48183+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
48184+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
48185
48186 /* Issue software reset to device */
48187 hw_if->exit(pdata);
48188diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48189index 6d2221e..47d1325 100644
48190--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48191+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48192@@ -127,7 +127,7 @@
48193 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
48194 {
48195 struct xgbe_prv_data *pdata = mii->priv;
48196- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48197+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48198 int mmd_data;
48199
48200 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
48201@@ -144,7 +144,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48202 u16 mmd_val)
48203 {
48204 struct xgbe_prv_data *pdata = mii->priv;
48205- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48206+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48207 int mmd_data = mmd_val;
48208
48209 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
48210diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48211index 37e64cf..c3b61cf 100644
48212--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48213+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48214@@ -130,7 +130,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
48215 tstamp_cc);
48216 u64 nsec;
48217
48218- nsec = pdata->hw_if.get_tstamp_time(pdata);
48219+ nsec = pdata->hw_if->get_tstamp_time(pdata);
48220
48221 return nsec;
48222 }
48223@@ -159,7 +159,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
48224
48225 spin_lock_irqsave(&pdata->tstamp_lock, flags);
48226
48227- pdata->hw_if.update_tstamp_addend(pdata, addend);
48228+ pdata->hw_if->update_tstamp_addend(pdata, addend);
48229
48230 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
48231
48232diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
48233index e9fe6e6..875fbaf 100644
48234--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
48235+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
48236@@ -585,8 +585,8 @@ struct xgbe_prv_data {
48237
48238 int irq_number;
48239
48240- struct xgbe_hw_if hw_if;
48241- struct xgbe_desc_if desc_if;
48242+ const struct xgbe_hw_if *hw_if;
48243+ const struct xgbe_desc_if *desc_if;
48244
48245 /* AXI DMA settings */
48246 unsigned int axdomain;
48247@@ -699,6 +699,9 @@ struct xgbe_prv_data {
48248 #endif
48249 };
48250
48251+extern const struct xgbe_hw_if default_xgbe_hw_if;
48252+extern const struct xgbe_desc_if default_xgbe_desc_if;
48253+
48254 /* Function prototypes*/
48255
48256 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
48257diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48258index 571427c..e9fe9e7 100644
48259--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48260+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48261@@ -1058,7 +1058,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
48262 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
48263 {
48264 /* RX_MODE controlling object */
48265- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
48266+ bnx2x_init_rx_mode_obj(bp);
48267
48268 /* multicast configuration controlling object */
48269 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
48270diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48271index b193604..8873bfd 100644
48272--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48273+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48274@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
48275 return rc;
48276 }
48277
48278-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48279- struct bnx2x_rx_mode_obj *o)
48280+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
48281 {
48282 if (CHIP_IS_E1x(bp)) {
48283- o->wait_comp = bnx2x_empty_rx_mode_wait;
48284- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
48285+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
48286+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
48287 } else {
48288- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
48289- o->config_rx_mode = bnx2x_set_rx_mode_e2;
48290+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
48291+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
48292 }
48293 }
48294
48295diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48296index 718ecd2..2183b2f 100644
48297--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48298+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48299@@ -1340,8 +1340,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
48300
48301 /********************* RX MODE ****************/
48302
48303-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48304- struct bnx2x_rx_mode_obj *o);
48305+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
48306
48307 /**
48308 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
48309diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
48310index 31c9f82..e65e986 100644
48311--- a/drivers/net/ethernet/broadcom/tg3.h
48312+++ b/drivers/net/ethernet/broadcom/tg3.h
48313@@ -150,6 +150,7 @@
48314 #define CHIPREV_ID_5750_A0 0x4000
48315 #define CHIPREV_ID_5750_A1 0x4001
48316 #define CHIPREV_ID_5750_A3 0x4003
48317+#define CHIPREV_ID_5750_C1 0x4201
48318 #define CHIPREV_ID_5750_C2 0x4202
48319 #define CHIPREV_ID_5752_A0_HW 0x5000
48320 #define CHIPREV_ID_5752_A0 0x6000
48321diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
48322index 13f9636..228040f 100644
48323--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
48324+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
48325@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
48326 }
48327
48328 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
48329- bna_cb_ioceth_enable,
48330- bna_cb_ioceth_disable,
48331- bna_cb_ioceth_hbfail,
48332- bna_cb_ioceth_reset
48333+ .enable_cbfn = bna_cb_ioceth_enable,
48334+ .disable_cbfn = bna_cb_ioceth_disable,
48335+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
48336+ .reset_cbfn = bna_cb_ioceth_reset
48337 };
48338
48339 static void bna_attr_init(struct bna_ioceth *ioceth)
48340diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
48341index ffc92a4..40edc77 100644
48342--- a/drivers/net/ethernet/brocade/bna/bnad.c
48343+++ b/drivers/net/ethernet/brocade/bna/bnad.c
48344@@ -552,6 +552,7 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
48345
48346 len = (vec == nvecs) ?
48347 last_fraglen : unmap->vector.len;
48348+ skb->truesize += unmap->vector.len;
48349 totlen += len;
48350
48351 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
48352@@ -563,7 +564,6 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
48353
48354 skb->len += totlen;
48355 skb->data_len += totlen;
48356- skb->truesize += totlen;
48357 }
48358
48359 static inline void
48360diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48361index 8cffcdf..aadf043 100644
48362--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48363+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48364@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
48365 */
48366 struct l2t_skb_cb {
48367 arp_failure_handler_func arp_failure_handler;
48368-};
48369+} __no_const;
48370
48371 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
48372
48373diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48374index 9f5f3c3..86d21a6 100644
48375--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48376+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48377@@ -2359,7 +2359,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
48378
48379 int i;
48380 struct adapter *ap = netdev2adap(dev);
48381- static const unsigned int *reg_ranges;
48382+ const unsigned int *reg_ranges;
48383 int arr_size = 0, buf_size = 0;
48384
48385 if (is_t4(ap->params.chip)) {
48386diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
48387index cf8b6ff..274271e 100644
48388--- a/drivers/net/ethernet/dec/tulip/de4x5.c
48389+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
48390@@ -5387,7 +5387,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48391 for (i=0; i<ETH_ALEN; i++) {
48392 tmp.addr[i] = dev->dev_addr[i];
48393 }
48394- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48395+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48396 break;
48397
48398 case DE4X5_SET_HWADDR: /* Set the hardware address */
48399@@ -5427,7 +5427,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48400 spin_lock_irqsave(&lp->lock, flags);
48401 memcpy(&statbuf, &lp->pktStats, ioc->len);
48402 spin_unlock_irqrestore(&lp->lock, flags);
48403- if (copy_to_user(ioc->data, &statbuf, ioc->len))
48404+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
48405 return -EFAULT;
48406 break;
48407 }
48408diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
48409index 93ff8ef..39c64dd 100644
48410--- a/drivers/net/ethernet/emulex/benet/be_main.c
48411+++ b/drivers/net/ethernet/emulex/benet/be_main.c
48412@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
48413
48414 if (wrapped)
48415 newacc += 65536;
48416- ACCESS_ONCE(*acc) = newacc;
48417+ ACCESS_ONCE_RW(*acc) = newacc;
48418 }
48419
48420 static void populate_erx_stats(struct be_adapter *adapter,
48421@@ -4286,6 +4286,9 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
48422 if (nla_type(attr) != IFLA_BRIDGE_MODE)
48423 continue;
48424
48425+ if (nla_len(attr) < sizeof(mode))
48426+ return -EINVAL;
48427+
48428 mode = nla_get_u16(attr);
48429 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
48430 return -EINVAL;
48431diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
48432index c77fa4a..7fd42fc 100644
48433--- a/drivers/net/ethernet/faraday/ftgmac100.c
48434+++ b/drivers/net/ethernet/faraday/ftgmac100.c
48435@@ -30,6 +30,8 @@
48436 #include <linux/netdevice.h>
48437 #include <linux/phy.h>
48438 #include <linux/platform_device.h>
48439+#include <linux/interrupt.h>
48440+#include <linux/irqreturn.h>
48441 #include <net/ip.h>
48442
48443 #include "ftgmac100.h"
48444diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
48445index 4ff1adc..0ea6bf4 100644
48446--- a/drivers/net/ethernet/faraday/ftmac100.c
48447+++ b/drivers/net/ethernet/faraday/ftmac100.c
48448@@ -31,6 +31,8 @@
48449 #include <linux/module.h>
48450 #include <linux/netdevice.h>
48451 #include <linux/platform_device.h>
48452+#include <linux/interrupt.h>
48453+#include <linux/irqreturn.h>
48454
48455 #include "ftmac100.h"
48456
48457diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48458index 537b621..07f87ce 100644
48459--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48460+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48461@@ -401,7 +401,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
48462 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
48463
48464 /* Update the base adjustement value. */
48465- ACCESS_ONCE(pf->ptp_base_adj) = incval;
48466+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
48467 smp_mb(); /* Force the above update. */
48468 }
48469
48470diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
48471index e82821f..c7dd0af 100644
48472--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
48473+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
48474@@ -7789,6 +7789,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
48475 if (nla_type(attr) != IFLA_BRIDGE_MODE)
48476 continue;
48477
48478+ if (nla_len(attr) < sizeof(mode))
48479+ return -EINVAL;
48480+
48481 mode = nla_get_u16(attr);
48482 if (mode == BRIDGE_MODE_VEPA) {
48483 reg = 0;
48484diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48485index 5fd4b52..87aa34b 100644
48486--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48487+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48488@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
48489 }
48490
48491 /* update the base incval used to calculate frequency adjustment */
48492- ACCESS_ONCE(adapter->base_incval) = incval;
48493+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
48494 smp_mb();
48495
48496 /* need lock to prevent incorrect read while modifying cyclecounter */
48497diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
48498index c14d4d8..66da603 100644
48499--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
48500+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
48501@@ -1259,6 +1259,9 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
48502 struct ixgbe_hw *hw = &adapter->hw;
48503 u32 regval;
48504
48505+ if (vf >= adapter->num_vfs)
48506+ return -EINVAL;
48507+
48508 adapter->vfinfo[vf].spoofchk_enabled = setting;
48509
48510 regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
48511diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48512index 2bbd01f..e8baa64 100644
48513--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
48514+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48515@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48516 struct __vxge_hw_fifo *fifo;
48517 struct vxge_hw_fifo_config *config;
48518 u32 txdl_size, txdl_per_memblock;
48519- struct vxge_hw_mempool_cbs fifo_mp_callback;
48520+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
48521+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
48522+ };
48523+
48524 struct __vxge_hw_virtualpath *vpath;
48525
48526 if ((vp == NULL) || (attr == NULL)) {
48527@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48528 goto exit;
48529 }
48530
48531- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
48532-
48533 fifo->mempool =
48534 __vxge_hw_mempool_create(vpath->hldev,
48535 fifo->config->memblock_size,
48536diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48537index 3172cdf..d01ab34 100644
48538--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48539+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48540@@ -2190,7 +2190,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
48541 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
48542 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
48543 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
48544- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48545+ pax_open_kernel();
48546+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48547+ pax_close_kernel();
48548 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48549 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
48550 max_tx_rings = QLCNIC_MAX_TX_RINGS;
48551diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48552index be7d7a6..a8983f8 100644
48553--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48554+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48555@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
48556 case QLCNIC_NON_PRIV_FUNC:
48557 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
48558 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48559- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48560+ pax_open_kernel();
48561+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48562+ pax_close_kernel();
48563 break;
48564 case QLCNIC_PRIV_FUNC:
48565 ahw->op_mode = QLCNIC_PRIV_FUNC;
48566 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48567- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48568+ pax_open_kernel();
48569+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48570+ pax_close_kernel();
48571 break;
48572 case QLCNIC_MGMT_FUNC:
48573 ahw->op_mode = QLCNIC_MGMT_FUNC;
48574 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48575- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48576+ pax_open_kernel();
48577+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48578+ pax_close_kernel();
48579 break;
48580 default:
48581 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
48582diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48583index c9f57fb..208bdc1 100644
48584--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48585+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48586@@ -1285,7 +1285,7 @@ flash_temp:
48587 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48588 {
48589 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48590- static const struct qlcnic_dump_operations *fw_dump_ops;
48591+ const struct qlcnic_dump_operations *fw_dump_ops;
48592 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48593 u32 entry_offset, dump, no_entries, buf_offset = 0;
48594 int i, k, ops_cnt, ops_index, dump_size = 0;
48595diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
48596index 0921302..927f761 100644
48597--- a/drivers/net/ethernet/realtek/r8169.c
48598+++ b/drivers/net/ethernet/realtek/r8169.c
48599@@ -744,22 +744,22 @@ struct rtl8169_private {
48600 struct mdio_ops {
48601 void (*write)(struct rtl8169_private *, int, int);
48602 int (*read)(struct rtl8169_private *, int);
48603- } mdio_ops;
48604+ } __no_const mdio_ops;
48605
48606 struct pll_power_ops {
48607 void (*down)(struct rtl8169_private *);
48608 void (*up)(struct rtl8169_private *);
48609- } pll_power_ops;
48610+ } __no_const pll_power_ops;
48611
48612 struct jumbo_ops {
48613 void (*enable)(struct rtl8169_private *);
48614 void (*disable)(struct rtl8169_private *);
48615- } jumbo_ops;
48616+ } __no_const jumbo_ops;
48617
48618 struct csi_ops {
48619 void (*write)(struct rtl8169_private *, int, int);
48620 u32 (*read)(struct rtl8169_private *, int);
48621- } csi_ops;
48622+ } __no_const csi_ops;
48623
48624 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
48625 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
48626diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
48627index 6b861e3..204ac86 100644
48628--- a/drivers/net/ethernet/sfc/ptp.c
48629+++ b/drivers/net/ethernet/sfc/ptp.c
48630@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
48631 ptp->start.dma_addr);
48632
48633 /* Clear flag that signals MC ready */
48634- ACCESS_ONCE(*start) = 0;
48635+ ACCESS_ONCE_RW(*start) = 0;
48636 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
48637 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
48638 EFX_BUG_ON_PARANOID(rc);
48639diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48640index 08c483b..2c4a553 100644
48641--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48642+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48643@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
48644
48645 writel(value, ioaddr + MMC_CNTRL);
48646
48647- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48648- MMC_CNTRL, value);
48649+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48650+// MMC_CNTRL, value);
48651 }
48652
48653 /* To mask all all interrupts.*/
48654diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
48655index d5e07de..e3bf20a 100644
48656--- a/drivers/net/hyperv/hyperv_net.h
48657+++ b/drivers/net/hyperv/hyperv_net.h
48658@@ -171,7 +171,7 @@ struct rndis_device {
48659 enum rndis_device_state state;
48660 bool link_state;
48661 bool link_change;
48662- atomic_t new_req_id;
48663+ atomic_unchecked_t new_req_id;
48664
48665 spinlock_t request_lock;
48666 struct list_head req_list;
48667diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
48668index 2b86f0b..ecc996f 100644
48669--- a/drivers/net/hyperv/rndis_filter.c
48670+++ b/drivers/net/hyperv/rndis_filter.c
48671@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
48672 * template
48673 */
48674 set = &rndis_msg->msg.set_req;
48675- set->req_id = atomic_inc_return(&dev->new_req_id);
48676+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48677
48678 /* Add to the request list */
48679 spin_lock_irqsave(&dev->request_lock, flags);
48680@@ -911,7 +911,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
48681
48682 /* Setup the rndis set */
48683 halt = &request->request_msg.msg.halt_req;
48684- halt->req_id = atomic_inc_return(&dev->new_req_id);
48685+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48686
48687 /* Ignore return since this msg is optional. */
48688 rndis_filter_send_request(dev, request);
48689diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
48690index 6cbc56a..5f7e6c8 100644
48691--- a/drivers/net/ieee802154/fakehard.c
48692+++ b/drivers/net/ieee802154/fakehard.c
48693@@ -365,7 +365,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
48694 phy->transmit_power = 0xbf;
48695
48696 dev->netdev_ops = &fake_ops;
48697- dev->ml_priv = &fake_mlme;
48698+ dev->ml_priv = (void *)&fake_mlme;
48699
48700 priv = netdev_priv(dev);
48701 priv->phy = phy;
48702diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48703index 5f17ad0..e0463c8 100644
48704--- a/drivers/net/macvlan.c
48705+++ b/drivers/net/macvlan.c
48706@@ -264,7 +264,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48707 free_nskb:
48708 kfree_skb(nskb);
48709 err:
48710- atomic_long_inc(&skb->dev->rx_dropped);
48711+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48712 }
48713
48714 /* called under rcu_read_lock() from netif_receive_skb */
48715@@ -1150,13 +1150,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48716 int macvlan_link_register(struct rtnl_link_ops *ops)
48717 {
48718 /* common fields */
48719- ops->priv_size = sizeof(struct macvlan_dev);
48720- ops->validate = macvlan_validate;
48721- ops->maxtype = IFLA_MACVLAN_MAX;
48722- ops->policy = macvlan_policy;
48723- ops->changelink = macvlan_changelink;
48724- ops->get_size = macvlan_get_size;
48725- ops->fill_info = macvlan_fill_info;
48726+ pax_open_kernel();
48727+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48728+ *(void **)&ops->validate = macvlan_validate;
48729+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48730+ *(const void **)&ops->policy = macvlan_policy;
48731+ *(void **)&ops->changelink = macvlan_changelink;
48732+ *(void **)&ops->get_size = macvlan_get_size;
48733+ *(void **)&ops->fill_info = macvlan_fill_info;
48734+ pax_close_kernel();
48735
48736 return rtnl_link_register(ops);
48737 };
48738@@ -1236,7 +1238,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48739 return NOTIFY_DONE;
48740 }
48741
48742-static struct notifier_block macvlan_notifier_block __read_mostly = {
48743+static struct notifier_block macvlan_notifier_block = {
48744 .notifier_call = macvlan_device_event,
48745 };
48746
48747diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48748index 07c942b..2d8b073 100644
48749--- a/drivers/net/macvtap.c
48750+++ b/drivers/net/macvtap.c
48751@@ -1023,7 +1023,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48752 }
48753
48754 ret = 0;
48755- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48756+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48757 put_user(q->flags, &ifr->ifr_flags))
48758 ret = -EFAULT;
48759 macvtap_put_vlan(vlan);
48760@@ -1193,7 +1193,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48761 return NOTIFY_DONE;
48762 }
48763
48764-static struct notifier_block macvtap_notifier_block __read_mostly = {
48765+static struct notifier_block macvtap_notifier_block = {
48766 .notifier_call = macvtap_device_event,
48767 };
48768
48769diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48770index 17ecdd6..79ad848 100644
48771--- a/drivers/net/ppp/ppp_generic.c
48772+++ b/drivers/net/ppp/ppp_generic.c
48773@@ -1020,7 +1020,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48774 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48775 struct ppp_stats stats;
48776 struct ppp_comp_stats cstats;
48777- char *vers;
48778
48779 switch (cmd) {
48780 case SIOCGPPPSTATS:
48781@@ -1042,8 +1041,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48782 break;
48783
48784 case SIOCGPPPVER:
48785- vers = PPP_VERSION;
48786- if (copy_to_user(addr, vers, strlen(vers) + 1))
48787+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48788 break;
48789 err = 0;
48790 break;
48791diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48792index 079f7ad..b2a2bfa7 100644
48793--- a/drivers/net/slip/slhc.c
48794+++ b/drivers/net/slip/slhc.c
48795@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48796 register struct tcphdr *thp;
48797 register struct iphdr *ip;
48798 register struct cstate *cs;
48799- int len, hdrlen;
48800+ long len, hdrlen;
48801 unsigned char *cp = icp;
48802
48803 /* We've got a compressed packet; read the change byte */
48804diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48805index 1f76c2ea..9681171 100644
48806--- a/drivers/net/team/team.c
48807+++ b/drivers/net/team/team.c
48808@@ -2862,7 +2862,7 @@ static int team_device_event(struct notifier_block *unused,
48809 return NOTIFY_DONE;
48810 }
48811
48812-static struct notifier_block team_notifier_block __read_mostly = {
48813+static struct notifier_block team_notifier_block = {
48814 .notifier_call = team_device_event,
48815 };
48816
48817diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48818index d965e8a..f119e64 100644
48819--- a/drivers/net/tun.c
48820+++ b/drivers/net/tun.c
48821@@ -1861,7 +1861,7 @@ unlock:
48822 }
48823
48824 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48825- unsigned long arg, int ifreq_len)
48826+ unsigned long arg, size_t ifreq_len)
48827 {
48828 struct tun_file *tfile = file->private_data;
48829 struct tun_struct *tun;
48830@@ -1874,6 +1874,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48831 unsigned int ifindex;
48832 int ret;
48833
48834+ if (ifreq_len > sizeof ifr)
48835+ return -EFAULT;
48836+
48837 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48838 if (copy_from_user(&ifr, argp, ifreq_len))
48839 return -EFAULT;
48840diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48841index babda7d..e40c90a 100644
48842--- a/drivers/net/usb/hso.c
48843+++ b/drivers/net/usb/hso.c
48844@@ -71,7 +71,7 @@
48845 #include <asm/byteorder.h>
48846 #include <linux/serial_core.h>
48847 #include <linux/serial.h>
48848-
48849+#include <asm/local.h>
48850
48851 #define MOD_AUTHOR "Option Wireless"
48852 #define MOD_DESCRIPTION "USB High Speed Option driver"
48853@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48854 struct urb *urb;
48855
48856 urb = serial->rx_urb[0];
48857- if (serial->port.count > 0) {
48858+ if (atomic_read(&serial->port.count) > 0) {
48859 count = put_rxbuf_data(urb, serial);
48860 if (count == -1)
48861 return;
48862@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48863 DUMP1(urb->transfer_buffer, urb->actual_length);
48864
48865 /* Anyone listening? */
48866- if (serial->port.count == 0)
48867+ if (atomic_read(&serial->port.count) == 0)
48868 return;
48869
48870 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48871@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48872 tty_port_tty_set(&serial->port, tty);
48873
48874 /* check for port already opened, if not set the termios */
48875- serial->port.count++;
48876- if (serial->port.count == 1) {
48877+ if (atomic_inc_return(&serial->port.count) == 1) {
48878 serial->rx_state = RX_IDLE;
48879 /* Force default termio settings */
48880 _hso_serial_set_termios(tty, NULL);
48881@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48882 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48883 if (result) {
48884 hso_stop_serial_device(serial->parent);
48885- serial->port.count--;
48886+ atomic_dec(&serial->port.count);
48887 kref_put(&serial->parent->ref, hso_serial_ref_free);
48888 }
48889 } else {
48890@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48891
48892 /* reset the rts and dtr */
48893 /* do the actual close */
48894- serial->port.count--;
48895+ atomic_dec(&serial->port.count);
48896
48897- if (serial->port.count <= 0) {
48898- serial->port.count = 0;
48899+ if (atomic_read(&serial->port.count) <= 0) {
48900+ atomic_set(&serial->port.count, 0);
48901 tty_port_tty_set(&serial->port, NULL);
48902 if (!usb_gone)
48903 hso_stop_serial_device(serial->parent);
48904@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48905
48906 /* the actual setup */
48907 spin_lock_irqsave(&serial->serial_lock, flags);
48908- if (serial->port.count)
48909+ if (atomic_read(&serial->port.count))
48910 _hso_serial_set_termios(tty, old);
48911 else
48912 tty->termios = *old;
48913@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
48914 D1("Pending read interrupt on port %d\n", i);
48915 spin_lock(&serial->serial_lock);
48916 if (serial->rx_state == RX_IDLE &&
48917- serial->port.count > 0) {
48918+ atomic_read(&serial->port.count) > 0) {
48919 /* Setup and send a ctrl req read on
48920 * port i */
48921 if (!serial->rx_urb_filled[0]) {
48922@@ -3047,7 +3046,7 @@ static int hso_resume(struct usb_interface *iface)
48923 /* Start all serial ports */
48924 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48925 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48926- if (dev2ser(serial_table[i])->port.count) {
48927+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48928 result =
48929 hso_start_serial_device(serial_table[i], GFP_NOIO);
48930 hso_kick_transmit(dev2ser(serial_table[i]));
48931diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48932index 604ef21..d1f49a1 100644
48933--- a/drivers/net/usb/r8152.c
48934+++ b/drivers/net/usb/r8152.c
48935@@ -575,7 +575,7 @@ struct r8152 {
48936 void (*up)(struct r8152 *);
48937 void (*down)(struct r8152 *);
48938 void (*unload)(struct r8152 *);
48939- } rtl_ops;
48940+ } __no_const rtl_ops;
48941
48942 int intr_interval;
48943 u32 saved_wolopts;
48944diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48945index a2515887..6d13233 100644
48946--- a/drivers/net/usb/sierra_net.c
48947+++ b/drivers/net/usb/sierra_net.c
48948@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48949 /* atomic counter partially included in MAC address to make sure 2 devices
48950 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48951 */
48952-static atomic_t iface_counter = ATOMIC_INIT(0);
48953+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48954
48955 /*
48956 * SYNC Timer Delay definition used to set the expiry time
48957@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48958 dev->net->netdev_ops = &sierra_net_device_ops;
48959
48960 /* change MAC addr to include, ifacenum, and to be unique */
48961- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48962+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48963 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48964
48965 /* we will have to manufacture ethernet headers, prepare template */
48966diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48967index 59caa06..de191b3 100644
48968--- a/drivers/net/virtio_net.c
48969+++ b/drivers/net/virtio_net.c
48970@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48971 #define RECEIVE_AVG_WEIGHT 64
48972
48973 /* Minimum alignment for mergeable packet buffers. */
48974-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48975+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48976
48977 #define VIRTNET_DRIVER_VERSION "1.0.0"
48978
48979diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48980index 81a8a29..ae60a58 100644
48981--- a/drivers/net/vxlan.c
48982+++ b/drivers/net/vxlan.c
48983@@ -2762,7 +2762,7 @@ nla_put_failure:
48984 return -EMSGSIZE;
48985 }
48986
48987-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48988+static struct rtnl_link_ops vxlan_link_ops = {
48989 .kind = "vxlan",
48990 .maxtype = IFLA_VXLAN_MAX,
48991 .policy = vxlan_policy,
48992@@ -2809,7 +2809,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
48993 return NOTIFY_DONE;
48994 }
48995
48996-static struct notifier_block vxlan_notifier_block __read_mostly = {
48997+static struct notifier_block vxlan_notifier_block = {
48998 .notifier_call = vxlan_lowerdev_event,
48999 };
49000
49001diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
49002index 5920c99..ff2e4a5 100644
49003--- a/drivers/net/wan/lmc/lmc_media.c
49004+++ b/drivers/net/wan/lmc/lmc_media.c
49005@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
49006 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
49007
49008 lmc_media_t lmc_ds3_media = {
49009- lmc_ds3_init, /* special media init stuff */
49010- lmc_ds3_default, /* reset to default state */
49011- lmc_ds3_set_status, /* reset status to state provided */
49012- lmc_dummy_set_1, /* set clock source */
49013- lmc_dummy_set2_1, /* set line speed */
49014- lmc_ds3_set_100ft, /* set cable length */
49015- lmc_ds3_set_scram, /* set scrambler */
49016- lmc_ds3_get_link_status, /* get link status */
49017- lmc_dummy_set_1, /* set link status */
49018- lmc_ds3_set_crc_length, /* set CRC length */
49019- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49020- lmc_ds3_watchdog
49021+ .init = lmc_ds3_init, /* special media init stuff */
49022+ .defaults = lmc_ds3_default, /* reset to default state */
49023+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
49024+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
49025+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49026+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
49027+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
49028+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
49029+ .set_link_status = lmc_dummy_set_1, /* set link status */
49030+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
49031+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49032+ .watchdog = lmc_ds3_watchdog
49033 };
49034
49035 lmc_media_t lmc_hssi_media = {
49036- lmc_hssi_init, /* special media init stuff */
49037- lmc_hssi_default, /* reset to default state */
49038- lmc_hssi_set_status, /* reset status to state provided */
49039- lmc_hssi_set_clock, /* set clock source */
49040- lmc_dummy_set2_1, /* set line speed */
49041- lmc_dummy_set_1, /* set cable length */
49042- lmc_dummy_set_1, /* set scrambler */
49043- lmc_hssi_get_link_status, /* get link status */
49044- lmc_hssi_set_link_status, /* set link status */
49045- lmc_hssi_set_crc_length, /* set CRC length */
49046- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49047- lmc_hssi_watchdog
49048+ .init = lmc_hssi_init, /* special media init stuff */
49049+ .defaults = lmc_hssi_default, /* reset to default state */
49050+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
49051+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
49052+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49053+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49054+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49055+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
49056+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
49057+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
49058+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49059+ .watchdog = lmc_hssi_watchdog
49060 };
49061
49062-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
49063- lmc_ssi_default, /* reset to default state */
49064- lmc_ssi_set_status, /* reset status to state provided */
49065- lmc_ssi_set_clock, /* set clock source */
49066- lmc_ssi_set_speed, /* set line speed */
49067- lmc_dummy_set_1, /* set cable length */
49068- lmc_dummy_set_1, /* set scrambler */
49069- lmc_ssi_get_link_status, /* get link status */
49070- lmc_ssi_set_link_status, /* set link status */
49071- lmc_ssi_set_crc_length, /* set CRC length */
49072- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49073- lmc_ssi_watchdog
49074+lmc_media_t lmc_ssi_media = {
49075+ .init = lmc_ssi_init, /* special media init stuff */
49076+ .defaults = lmc_ssi_default, /* reset to default state */
49077+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
49078+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
49079+ .set_speed = lmc_ssi_set_speed, /* set line speed */
49080+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49081+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49082+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
49083+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
49084+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
49085+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49086+ .watchdog = lmc_ssi_watchdog
49087 };
49088
49089 lmc_media_t lmc_t1_media = {
49090- lmc_t1_init, /* special media init stuff */
49091- lmc_t1_default, /* reset to default state */
49092- lmc_t1_set_status, /* reset status to state provided */
49093- lmc_t1_set_clock, /* set clock source */
49094- lmc_dummy_set2_1, /* set line speed */
49095- lmc_dummy_set_1, /* set cable length */
49096- lmc_dummy_set_1, /* set scrambler */
49097- lmc_t1_get_link_status, /* get link status */
49098- lmc_dummy_set_1, /* set link status */
49099- lmc_t1_set_crc_length, /* set CRC length */
49100- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49101- lmc_t1_watchdog
49102+ .init = lmc_t1_init, /* special media init stuff */
49103+ .defaults = lmc_t1_default, /* reset to default state */
49104+ .set_status = lmc_t1_set_status, /* reset status to state provided */
49105+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
49106+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49107+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49108+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49109+ .get_link_status = lmc_t1_get_link_status, /* get link status */
49110+ .set_link_status = lmc_dummy_set_1, /* set link status */
49111+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
49112+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49113+ .watchdog = lmc_t1_watchdog
49114 };
49115
49116 static void
49117diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
49118index feacc3b..5bac0de 100644
49119--- a/drivers/net/wan/z85230.c
49120+++ b/drivers/net/wan/z85230.c
49121@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
49122
49123 struct z8530_irqhandler z8530_sync =
49124 {
49125- z8530_rx,
49126- z8530_tx,
49127- z8530_status
49128+ .rx = z8530_rx,
49129+ .tx = z8530_tx,
49130+ .status = z8530_status
49131 };
49132
49133 EXPORT_SYMBOL(z8530_sync);
49134@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
49135 }
49136
49137 static struct z8530_irqhandler z8530_dma_sync = {
49138- z8530_dma_rx,
49139- z8530_dma_tx,
49140- z8530_dma_status
49141+ .rx = z8530_dma_rx,
49142+ .tx = z8530_dma_tx,
49143+ .status = z8530_dma_status
49144 };
49145
49146 static struct z8530_irqhandler z8530_txdma_sync = {
49147- z8530_rx,
49148- z8530_dma_tx,
49149- z8530_dma_status
49150+ .rx = z8530_rx,
49151+ .tx = z8530_dma_tx,
49152+ .status = z8530_dma_status
49153 };
49154
49155 /**
49156@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
49157
49158 struct z8530_irqhandler z8530_nop=
49159 {
49160- z8530_rx_clear,
49161- z8530_tx_clear,
49162- z8530_status_clear
49163+ .rx = z8530_rx_clear,
49164+ .tx = z8530_tx_clear,
49165+ .status = z8530_status_clear
49166 };
49167
49168
49169diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
49170index 0b60295..b8bfa5b 100644
49171--- a/drivers/net/wimax/i2400m/rx.c
49172+++ b/drivers/net/wimax/i2400m/rx.c
49173@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
49174 if (i2400m->rx_roq == NULL)
49175 goto error_roq_alloc;
49176
49177- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
49178+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
49179 GFP_KERNEL);
49180 if (rd == NULL) {
49181 result = -ENOMEM;
49182diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
49183index e71a2ce..2268d61 100644
49184--- a/drivers/net/wireless/airo.c
49185+++ b/drivers/net/wireless/airo.c
49186@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
49187 struct airo_info *ai = dev->ml_priv;
49188 int ridcode;
49189 int enabled;
49190- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49191+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49192 unsigned char *iobuf;
49193
49194 /* Only super-user can write RIDs */
49195diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
49196index da92bfa..5a9001a 100644
49197--- a/drivers/net/wireless/at76c50x-usb.c
49198+++ b/drivers/net/wireless/at76c50x-usb.c
49199@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
49200 }
49201
49202 /* Convert timeout from the DFU status to jiffies */
49203-static inline unsigned long at76_get_timeout(struct dfu_status *s)
49204+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
49205 {
49206 return msecs_to_jiffies((s->poll_timeout[2] << 16)
49207 | (s->poll_timeout[1] << 8)
49208diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
49209index 5fdc40d..3975205 100644
49210--- a/drivers/net/wireless/ath/ath10k/htc.c
49211+++ b/drivers/net/wireless/ath/ath10k/htc.c
49212@@ -856,7 +856,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
49213 /* registered target arrival callback from the HIF layer */
49214 int ath10k_htc_init(struct ath10k *ar)
49215 {
49216- struct ath10k_hif_cb htc_callbacks;
49217+ static struct ath10k_hif_cb htc_callbacks = {
49218+ .rx_completion = ath10k_htc_rx_completion_handler,
49219+ .tx_completion = ath10k_htc_tx_completion_handler,
49220+ };
49221 struct ath10k_htc_ep *ep = NULL;
49222 struct ath10k_htc *htc = &ar->htc;
49223
49224@@ -866,8 +869,6 @@ int ath10k_htc_init(struct ath10k *ar)
49225 ath10k_htc_reset_endpoint_states(htc);
49226
49227 /* setup HIF layer callbacks */
49228- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
49229- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
49230 htc->ar = ar;
49231
49232 /* Get HIF default pipe for HTC message exchange */
49233diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
49234index 4716d33..a688310 100644
49235--- a/drivers/net/wireless/ath/ath10k/htc.h
49236+++ b/drivers/net/wireless/ath/ath10k/htc.h
49237@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
49238
49239 struct ath10k_htc_ops {
49240 void (*target_send_suspend_complete)(struct ath10k *ar);
49241-};
49242+} __no_const;
49243
49244 struct ath10k_htc_ep_ops {
49245 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
49246 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
49247 void (*ep_tx_credits)(struct ath10k *);
49248-};
49249+} __no_const;
49250
49251 /* service connection information */
49252 struct ath10k_htc_svc_conn_req {
49253diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49254index 59af9f9..5f3564f 100644
49255--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49256+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49257@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49258 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
49259 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
49260
49261- ACCESS_ONCE(ads->ds_link) = i->link;
49262- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
49263+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
49264+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
49265
49266 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
49267 ctl6 = SM(i->keytype, AR_EncrType);
49268@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49269
49270 if ((i->is_first || i->is_last) &&
49271 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
49272- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
49273+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
49274 | set11nTries(i->rates, 1)
49275 | set11nTries(i->rates, 2)
49276 | set11nTries(i->rates, 3)
49277 | (i->dur_update ? AR_DurUpdateEna : 0)
49278 | SM(0, AR_BurstDur);
49279
49280- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
49281+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
49282 | set11nRate(i->rates, 1)
49283 | set11nRate(i->rates, 2)
49284 | set11nRate(i->rates, 3);
49285 } else {
49286- ACCESS_ONCE(ads->ds_ctl2) = 0;
49287- ACCESS_ONCE(ads->ds_ctl3) = 0;
49288+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
49289+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
49290 }
49291
49292 if (!i->is_first) {
49293- ACCESS_ONCE(ads->ds_ctl0) = 0;
49294- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
49295- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
49296+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
49297+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
49298+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
49299 return;
49300 }
49301
49302@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49303 break;
49304 }
49305
49306- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
49307+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
49308 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49309 | SM(i->txpower, AR_XmitPower0)
49310 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49311@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49312 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
49313 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
49314
49315- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
49316- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
49317+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
49318+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
49319
49320 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
49321 return;
49322
49323- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49324+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49325 | set11nPktDurRTSCTS(i->rates, 1);
49326
49327- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49328+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49329 | set11nPktDurRTSCTS(i->rates, 3);
49330
49331- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49332+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49333 | set11nRateFlags(i->rates, 1)
49334 | set11nRateFlags(i->rates, 2)
49335 | set11nRateFlags(i->rates, 3)
49336 | SM(i->rtscts_rate, AR_RTSCTSRate);
49337
49338- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1);
49339- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2);
49340- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3);
49341+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1);
49342+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2);
49343+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3);
49344 }
49345
49346 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
49347diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49348index 71e38e8..5ac96ca 100644
49349--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49350+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49351@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49352 (i->qcu << AR_TxQcuNum_S) | desc_len;
49353
49354 checksum += val;
49355- ACCESS_ONCE(ads->info) = val;
49356+ ACCESS_ONCE_RW(ads->info) = val;
49357
49358 checksum += i->link;
49359- ACCESS_ONCE(ads->link) = i->link;
49360+ ACCESS_ONCE_RW(ads->link) = i->link;
49361
49362 checksum += i->buf_addr[0];
49363- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
49364+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
49365 checksum += i->buf_addr[1];
49366- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
49367+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
49368 checksum += i->buf_addr[2];
49369- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
49370+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
49371 checksum += i->buf_addr[3];
49372- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
49373+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
49374
49375 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
49376- ACCESS_ONCE(ads->ctl3) = val;
49377+ ACCESS_ONCE_RW(ads->ctl3) = val;
49378 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
49379- ACCESS_ONCE(ads->ctl5) = val;
49380+ ACCESS_ONCE_RW(ads->ctl5) = val;
49381 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
49382- ACCESS_ONCE(ads->ctl7) = val;
49383+ ACCESS_ONCE_RW(ads->ctl7) = val;
49384 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
49385- ACCESS_ONCE(ads->ctl9) = val;
49386+ ACCESS_ONCE_RW(ads->ctl9) = val;
49387
49388 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
49389- ACCESS_ONCE(ads->ctl10) = checksum;
49390+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
49391
49392 if (i->is_first || i->is_last) {
49393- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
49394+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
49395 | set11nTries(i->rates, 1)
49396 | set11nTries(i->rates, 2)
49397 | set11nTries(i->rates, 3)
49398 | (i->dur_update ? AR_DurUpdateEna : 0)
49399 | SM(0, AR_BurstDur);
49400
49401- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
49402+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
49403 | set11nRate(i->rates, 1)
49404 | set11nRate(i->rates, 2)
49405 | set11nRate(i->rates, 3);
49406 } else {
49407- ACCESS_ONCE(ads->ctl13) = 0;
49408- ACCESS_ONCE(ads->ctl14) = 0;
49409+ ACCESS_ONCE_RW(ads->ctl13) = 0;
49410+ ACCESS_ONCE_RW(ads->ctl14) = 0;
49411 }
49412
49413 ads->ctl20 = 0;
49414@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49415
49416 ctl17 = SM(i->keytype, AR_EncrType);
49417 if (!i->is_first) {
49418- ACCESS_ONCE(ads->ctl11) = 0;
49419- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49420- ACCESS_ONCE(ads->ctl15) = 0;
49421- ACCESS_ONCE(ads->ctl16) = 0;
49422- ACCESS_ONCE(ads->ctl17) = ctl17;
49423- ACCESS_ONCE(ads->ctl18) = 0;
49424- ACCESS_ONCE(ads->ctl19) = 0;
49425+ ACCESS_ONCE_RW(ads->ctl11) = 0;
49426+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49427+ ACCESS_ONCE_RW(ads->ctl15) = 0;
49428+ ACCESS_ONCE_RW(ads->ctl16) = 0;
49429+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49430+ ACCESS_ONCE_RW(ads->ctl18) = 0;
49431+ ACCESS_ONCE_RW(ads->ctl19) = 0;
49432 return;
49433 }
49434
49435- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49436+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49437 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49438 | SM(i->txpower, AR_XmitPower0)
49439 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49440@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49441 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
49442 ctl12 |= SM(val, AR_PAPRDChainMask);
49443
49444- ACCESS_ONCE(ads->ctl12) = ctl12;
49445- ACCESS_ONCE(ads->ctl17) = ctl17;
49446+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
49447+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49448
49449- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49450+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49451 | set11nPktDurRTSCTS(i->rates, 1);
49452
49453- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49454+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49455 | set11nPktDurRTSCTS(i->rates, 3);
49456
49457- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
49458+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
49459 | set11nRateFlags(i->rates, 1)
49460 | set11nRateFlags(i->rates, 2)
49461 | set11nRateFlags(i->rates, 3)
49462 | SM(i->rtscts_rate, AR_RTSCTSRate);
49463
49464- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
49465+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
49466
49467- ACCESS_ONCE(ads->ctl20) = SM(i->txpower, AR_XmitPower1);
49468- ACCESS_ONCE(ads->ctl21) = SM(i->txpower, AR_XmitPower2);
49469- ACCESS_ONCE(ads->ctl22) = SM(i->txpower, AR_XmitPower3);
49470+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower, AR_XmitPower1);
49471+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower, AR_XmitPower2);
49472+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower, AR_XmitPower3);
49473 }
49474
49475 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
49476diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
49477index 51b4ebe..d1929dd 100644
49478--- a/drivers/net/wireless/ath/ath9k/hw.h
49479+++ b/drivers/net/wireless/ath/ath9k/hw.h
49480@@ -629,7 +629,7 @@ struct ath_hw_private_ops {
49481
49482 /* ANI */
49483 void (*ani_cache_ini_regs)(struct ath_hw *ah);
49484-};
49485+} __no_const;
49486
49487 /**
49488 * struct ath_spec_scan - parameters for Atheros spectral scan
49489@@ -706,7 +706,7 @@ struct ath_hw_ops {
49490 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
49491 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
49492 #endif
49493-};
49494+} __no_const;
49495
49496 struct ath_nf_limits {
49497 s16 max;
49498diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
49499index 4b148bb..ac738fa 100644
49500--- a/drivers/net/wireless/ath/ath9k/main.c
49501+++ b/drivers/net/wireless/ath/ath9k/main.c
49502@@ -2592,16 +2592,18 @@ void ath9k_fill_chanctx_ops(void)
49503 if (!ath9k_use_chanctx)
49504 return;
49505
49506- ath9k_ops.hw_scan = ath9k_hw_scan;
49507- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49508- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49509- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49510- ath9k_ops.add_chanctx = ath9k_add_chanctx;
49511- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49512- ath9k_ops.change_chanctx = ath9k_change_chanctx;
49513- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49514- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49515- ath9k_ops.mgd_prepare_tx = ath9k_chanctx_force_active;
49516+ pax_open_kernel();
49517+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
49518+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49519+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49520+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49521+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
49522+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49523+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
49524+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49525+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49526+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_chanctx_force_active;
49527+ pax_close_kernel();
49528 }
49529
49530 struct ieee80211_ops ath9k_ops = {
49531diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
49532index 92190da..f3a4c4c 100644
49533--- a/drivers/net/wireless/b43/phy_lp.c
49534+++ b/drivers/net/wireless/b43/phy_lp.c
49535@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
49536 {
49537 struct ssb_bus *bus = dev->dev->sdev->bus;
49538
49539- static const struct b206x_channel *chandata = NULL;
49540+ const struct b206x_channel *chandata = NULL;
49541 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
49542 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
49543 u16 old_comm15, scale;
49544diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
49545index dc1d20c..f7a4f06 100644
49546--- a/drivers/net/wireless/iwlegacy/3945-mac.c
49547+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
49548@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49549 */
49550 if (il3945_mod_params.disable_hw_scan) {
49551 D_INFO("Disabling hw_scan\n");
49552- il3945_mac_ops.hw_scan = NULL;
49553+ pax_open_kernel();
49554+ *(void **)&il3945_mac_ops.hw_scan = NULL;
49555+ pax_close_kernel();
49556 }
49557
49558 D_INFO("*** LOAD DRIVER ***\n");
49559diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49560index 0ffb6ff..c0b7f0e 100644
49561--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49562+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49563@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
49564 {
49565 struct iwl_priv *priv = file->private_data;
49566 char buf[64];
49567- int buf_size;
49568+ size_t buf_size;
49569 u32 offset, len;
49570
49571 memset(buf, 0, sizeof(buf));
49572@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
49573 struct iwl_priv *priv = file->private_data;
49574
49575 char buf[8];
49576- int buf_size;
49577+ size_t buf_size;
49578 u32 reset_flag;
49579
49580 memset(buf, 0, sizeof(buf));
49581@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49582 {
49583 struct iwl_priv *priv = file->private_data;
49584 char buf[8];
49585- int buf_size;
49586+ size_t buf_size;
49587 int ht40;
49588
49589 memset(buf, 0, sizeof(buf));
49590@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49591 {
49592 struct iwl_priv *priv = file->private_data;
49593 char buf[8];
49594- int buf_size;
49595+ size_t buf_size;
49596 int value;
49597
49598 memset(buf, 0, sizeof(buf));
49599@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49600 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49601 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49602
49603-static const char *fmt_value = " %-30s %10u\n";
49604-static const char *fmt_hex = " %-30s 0x%02X\n";
49605-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49606-static const char *fmt_header =
49607+static const char fmt_value[] = " %-30s %10u\n";
49608+static const char fmt_hex[] = " %-30s 0x%02X\n";
49609+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49610+static const char fmt_header[] =
49611 "%-32s current cumulative delta max\n";
49612
49613 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49614@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49615 {
49616 struct iwl_priv *priv = file->private_data;
49617 char buf[8];
49618- int buf_size;
49619+ size_t buf_size;
49620 int clear;
49621
49622 memset(buf, 0, sizeof(buf));
49623@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49624 {
49625 struct iwl_priv *priv = file->private_data;
49626 char buf[8];
49627- int buf_size;
49628+ size_t buf_size;
49629 int trace;
49630
49631 memset(buf, 0, sizeof(buf));
49632@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49633 {
49634 struct iwl_priv *priv = file->private_data;
49635 char buf[8];
49636- int buf_size;
49637+ size_t buf_size;
49638 int missed;
49639
49640 memset(buf, 0, sizeof(buf));
49641@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49642
49643 struct iwl_priv *priv = file->private_data;
49644 char buf[8];
49645- int buf_size;
49646+ size_t buf_size;
49647 int plcp;
49648
49649 memset(buf, 0, sizeof(buf));
49650@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49651
49652 struct iwl_priv *priv = file->private_data;
49653 char buf[8];
49654- int buf_size;
49655+ size_t buf_size;
49656 int flush;
49657
49658 memset(buf, 0, sizeof(buf));
49659@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49660
49661 struct iwl_priv *priv = file->private_data;
49662 char buf[8];
49663- int buf_size;
49664+ size_t buf_size;
49665 int rts;
49666
49667 if (!priv->cfg->ht_params)
49668@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49669 {
49670 struct iwl_priv *priv = file->private_data;
49671 char buf[8];
49672- int buf_size;
49673+ size_t buf_size;
49674
49675 memset(buf, 0, sizeof(buf));
49676 buf_size = min(count, sizeof(buf) - 1);
49677@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49678 struct iwl_priv *priv = file->private_data;
49679 u32 event_log_flag;
49680 char buf[8];
49681- int buf_size;
49682+ size_t buf_size;
49683
49684 /* check that the interface is up */
49685 if (!iwl_is_ready(priv))
49686@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49687 struct iwl_priv *priv = file->private_data;
49688 char buf[8];
49689 u32 calib_disabled;
49690- int buf_size;
49691+ size_t buf_size;
49692
49693 memset(buf, 0, sizeof(buf));
49694 buf_size = min(count, sizeof(buf) - 1);
49695diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49696index bb36d67..a43451e 100644
49697--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49698+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49699@@ -1686,7 +1686,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49700 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49701
49702 char buf[8];
49703- int buf_size;
49704+ size_t buf_size;
49705 u32 reset_flag;
49706
49707 memset(buf, 0, sizeof(buf));
49708@@ -1707,7 +1707,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49709 {
49710 struct iwl_trans *trans = file->private_data;
49711 char buf[8];
49712- int buf_size;
49713+ size_t buf_size;
49714 int csr;
49715
49716 memset(buf, 0, sizeof(buf));
49717diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49718index 6b48c865..19646a7 100644
49719--- a/drivers/net/wireless/mac80211_hwsim.c
49720+++ b/drivers/net/wireless/mac80211_hwsim.c
49721@@ -2577,20 +2577,20 @@ static int __init init_mac80211_hwsim(void)
49722 if (channels < 1)
49723 return -EINVAL;
49724
49725- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49726- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49727- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49728- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49729- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49730- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49731- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49732- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49733- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49734- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49735- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49736- mac80211_hwsim_assign_vif_chanctx;
49737- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49738- mac80211_hwsim_unassign_vif_chanctx;
49739+ pax_open_kernel();
49740+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49741+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49742+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49743+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49744+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49745+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49746+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49747+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49748+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49749+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49750+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49751+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49752+ pax_close_kernel();
49753
49754 spin_lock_init(&hwsim_radio_lock);
49755 INIT_LIST_HEAD(&hwsim_radios);
49756diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49757index d2a9a08..0cb175d 100644
49758--- a/drivers/net/wireless/rndis_wlan.c
49759+++ b/drivers/net/wireless/rndis_wlan.c
49760@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49761
49762 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49763
49764- if (rts_threshold < 0 || rts_threshold > 2347)
49765+ if (rts_threshold > 2347)
49766 rts_threshold = 2347;
49767
49768 tmp = cpu_to_le32(rts_threshold);
49769diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49770index d13f25c..2573994 100644
49771--- a/drivers/net/wireless/rt2x00/rt2x00.h
49772+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49773@@ -375,7 +375,7 @@ struct rt2x00_intf {
49774 * for hardware which doesn't support hardware
49775 * sequence counting.
49776 */
49777- atomic_t seqno;
49778+ atomic_unchecked_t seqno;
49779 };
49780
49781 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49782diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49783index 66ff364..3ce34f7 100644
49784--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49785+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49786@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49787 * sequence counter given by mac80211.
49788 */
49789 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49790- seqno = atomic_add_return(0x10, &intf->seqno);
49791+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49792 else
49793- seqno = atomic_read(&intf->seqno);
49794+ seqno = atomic_read_unchecked(&intf->seqno);
49795
49796 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49797 hdr->seq_ctrl |= cpu_to_le16(seqno);
49798diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49799index b661f896..ddf7d2b 100644
49800--- a/drivers/net/wireless/ti/wl1251/sdio.c
49801+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49802@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49803
49804 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49805
49806- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49807- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49808+ pax_open_kernel();
49809+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49810+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49811+ pax_close_kernel();
49812
49813 wl1251_info("using dedicated interrupt line");
49814 } else {
49815- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49816- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49817+ pax_open_kernel();
49818+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49819+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49820+ pax_close_kernel();
49821
49822 wl1251_info("using SDIO interrupt");
49823 }
49824diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49825index 0bccf12..3d95068 100644
49826--- a/drivers/net/wireless/ti/wl12xx/main.c
49827+++ b/drivers/net/wireless/ti/wl12xx/main.c
49828@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49829 sizeof(wl->conf.mem));
49830
49831 /* read data preparation is only needed by wl127x */
49832- wl->ops->prepare_read = wl127x_prepare_read;
49833+ pax_open_kernel();
49834+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49835+ pax_close_kernel();
49836
49837 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49838 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49839@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49840 sizeof(wl->conf.mem));
49841
49842 /* read data preparation is only needed by wl127x */
49843- wl->ops->prepare_read = wl127x_prepare_read;
49844+ pax_open_kernel();
49845+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49846+ pax_close_kernel();
49847
49848 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49849 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49850diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49851index 7af1936..128bb35 100644
49852--- a/drivers/net/wireless/ti/wl18xx/main.c
49853+++ b/drivers/net/wireless/ti/wl18xx/main.c
49854@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49855 }
49856
49857 if (!checksum_param) {
49858- wl18xx_ops.set_rx_csum = NULL;
49859- wl18xx_ops.init_vif = NULL;
49860+ pax_open_kernel();
49861+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49862+ *(void **)&wl18xx_ops.init_vif = NULL;
49863+ pax_close_kernel();
49864 }
49865
49866 /* Enable 11a Band only if we have 5G antennas */
49867diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49868index a912dc0..a8225ba 100644
49869--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49870+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49871@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49872 {
49873 struct zd_usb *usb = urb->context;
49874 struct zd_usb_interrupt *intr = &usb->intr;
49875- int len;
49876+ unsigned int len;
49877 u16 int_num;
49878
49879 ZD_ASSERT(in_interrupt());
49880diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
49881index ca82f54..3767771 100644
49882--- a/drivers/net/xen-netfront.c
49883+++ b/drivers/net/xen-netfront.c
49884@@ -496,9 +496,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
49885 len = skb_frag_size(frag);
49886 offset = frag->page_offset;
49887
49888- /* Data must not cross a page boundary. */
49889- BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
49890-
49891 /* Skip unused frames from start of page */
49892 page += offset >> PAGE_SHIFT;
49893 offset &= ~PAGE_MASK;
49894@@ -506,8 +503,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
49895 while (len > 0) {
49896 unsigned long bytes;
49897
49898- BUG_ON(offset >= PAGE_SIZE);
49899-
49900 bytes = PAGE_SIZE - offset;
49901 if (bytes > len)
49902 bytes = len;
49903diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49904index 683671a..4519fc2 100644
49905--- a/drivers/nfc/nfcwilink.c
49906+++ b/drivers/nfc/nfcwilink.c
49907@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49908
49909 static int nfcwilink_probe(struct platform_device *pdev)
49910 {
49911- static struct nfcwilink *drv;
49912+ struct nfcwilink *drv;
49913 int rc;
49914 __u32 protocols;
49915
49916diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49917index d93b2b6..ae50401 100644
49918--- a/drivers/oprofile/buffer_sync.c
49919+++ b/drivers/oprofile/buffer_sync.c
49920@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49921 if (cookie == NO_COOKIE)
49922 offset = pc;
49923 if (cookie == INVALID_COOKIE) {
49924- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49925+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49926 offset = pc;
49927 }
49928 if (cookie != last_cookie) {
49929@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49930 /* add userspace sample */
49931
49932 if (!mm) {
49933- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49934+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49935 return 0;
49936 }
49937
49938 cookie = lookup_dcookie(mm, s->eip, &offset);
49939
49940 if (cookie == INVALID_COOKIE) {
49941- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49942+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49943 return 0;
49944 }
49945
49946@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49947 /* ignore backtraces if failed to add a sample */
49948 if (state == sb_bt_start) {
49949 state = sb_bt_ignore;
49950- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49951+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49952 }
49953 }
49954 release_mm(mm);
49955diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49956index c0cc4e7..44d4e54 100644
49957--- a/drivers/oprofile/event_buffer.c
49958+++ b/drivers/oprofile/event_buffer.c
49959@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49960 }
49961
49962 if (buffer_pos == buffer_size) {
49963- atomic_inc(&oprofile_stats.event_lost_overflow);
49964+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49965 return;
49966 }
49967
49968diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49969index ed2c3ec..deda85a 100644
49970--- a/drivers/oprofile/oprof.c
49971+++ b/drivers/oprofile/oprof.c
49972@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49973 if (oprofile_ops.switch_events())
49974 return;
49975
49976- atomic_inc(&oprofile_stats.multiplex_counter);
49977+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49978 start_switch_worker();
49979 }
49980
49981diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49982index ee2cfce..7f8f699 100644
49983--- a/drivers/oprofile/oprofile_files.c
49984+++ b/drivers/oprofile/oprofile_files.c
49985@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49986
49987 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49988
49989-static ssize_t timeout_read(struct file *file, char __user *buf,
49990+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49991 size_t count, loff_t *offset)
49992 {
49993 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
49994diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
49995index 59659ce..6c860a0 100644
49996--- a/drivers/oprofile/oprofile_stats.c
49997+++ b/drivers/oprofile/oprofile_stats.c
49998@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
49999 cpu_buf->sample_invalid_eip = 0;
50000 }
50001
50002- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
50003- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
50004- atomic_set(&oprofile_stats.event_lost_overflow, 0);
50005- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
50006- atomic_set(&oprofile_stats.multiplex_counter, 0);
50007+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
50008+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
50009+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
50010+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
50011+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
50012 }
50013
50014
50015diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
50016index 1fc622b..8c48fc3 100644
50017--- a/drivers/oprofile/oprofile_stats.h
50018+++ b/drivers/oprofile/oprofile_stats.h
50019@@ -13,11 +13,11 @@
50020 #include <linux/atomic.h>
50021
50022 struct oprofile_stat_struct {
50023- atomic_t sample_lost_no_mm;
50024- atomic_t sample_lost_no_mapping;
50025- atomic_t bt_lost_no_mapping;
50026- atomic_t event_lost_overflow;
50027- atomic_t multiplex_counter;
50028+ atomic_unchecked_t sample_lost_no_mm;
50029+ atomic_unchecked_t sample_lost_no_mapping;
50030+ atomic_unchecked_t bt_lost_no_mapping;
50031+ atomic_unchecked_t event_lost_overflow;
50032+ atomic_unchecked_t multiplex_counter;
50033 };
50034
50035 extern struct oprofile_stat_struct oprofile_stats;
50036diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
50037index 3f49345..c750d0b 100644
50038--- a/drivers/oprofile/oprofilefs.c
50039+++ b/drivers/oprofile/oprofilefs.c
50040@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
50041
50042 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
50043 {
50044- atomic_t *val = file->private_data;
50045- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
50046+ atomic_unchecked_t *val = file->private_data;
50047+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
50048 }
50049
50050
50051@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
50052
50053
50054 int oprofilefs_create_ro_atomic(struct dentry *root,
50055- char const *name, atomic_t *val)
50056+ char const *name, atomic_unchecked_t *val)
50057 {
50058 return __oprofilefs_create_file(root, name,
50059 &atomic_ro_fops, 0444, val);
50060diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
50061index 61be1d9..dec05d7 100644
50062--- a/drivers/oprofile/timer_int.c
50063+++ b/drivers/oprofile/timer_int.c
50064@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
50065 return NOTIFY_OK;
50066 }
50067
50068-static struct notifier_block __refdata oprofile_cpu_notifier = {
50069+static struct notifier_block oprofile_cpu_notifier = {
50070 .notifier_call = oprofile_cpu_notify,
50071 };
50072
50073diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
50074index 3b47080..6cd05dd 100644
50075--- a/drivers/parport/procfs.c
50076+++ b/drivers/parport/procfs.c
50077@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
50078
50079 *ppos += len;
50080
50081- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
50082+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
50083 }
50084
50085 #ifdef CONFIG_PARPORT_1284
50086@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
50087
50088 *ppos += len;
50089
50090- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
50091+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
50092 }
50093 #endif /* IEEE1284.3 support. */
50094
50095diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
50096index 8dcccff..35d701d 100644
50097--- a/drivers/pci/hotplug/acpiphp_ibm.c
50098+++ b/drivers/pci/hotplug/acpiphp_ibm.c
50099@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
50100 goto init_cleanup;
50101 }
50102
50103- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50104+ pax_open_kernel();
50105+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50106+ pax_close_kernel();
50107 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
50108
50109 return retval;
50110diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
50111index 04fcd78..39e83f1 100644
50112--- a/drivers/pci/hotplug/cpcihp_generic.c
50113+++ b/drivers/pci/hotplug/cpcihp_generic.c
50114@@ -73,7 +73,6 @@ static u16 port;
50115 static unsigned int enum_bit;
50116 static u8 enum_mask;
50117
50118-static struct cpci_hp_controller_ops generic_hpc_ops;
50119 static struct cpci_hp_controller generic_hpc;
50120
50121 static int __init validate_parameters(void)
50122@@ -139,6 +138,10 @@ static int query_enum(void)
50123 return ((value & enum_mask) == enum_mask);
50124 }
50125
50126+static struct cpci_hp_controller_ops generic_hpc_ops = {
50127+ .query_enum = query_enum,
50128+};
50129+
50130 static int __init cpcihp_generic_init(void)
50131 {
50132 int status;
50133@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
50134 pci_dev_put(dev);
50135
50136 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
50137- generic_hpc_ops.query_enum = query_enum;
50138 generic_hpc.ops = &generic_hpc_ops;
50139
50140 status = cpci_hp_register_controller(&generic_hpc);
50141diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
50142index 6757b3e..d3bad62 100644
50143--- a/drivers/pci/hotplug/cpcihp_zt5550.c
50144+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
50145@@ -59,7 +59,6 @@
50146 /* local variables */
50147 static bool debug;
50148 static bool poll;
50149-static struct cpci_hp_controller_ops zt5550_hpc_ops;
50150 static struct cpci_hp_controller zt5550_hpc;
50151
50152 /* Primary cPCI bus bridge device */
50153@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
50154 return 0;
50155 }
50156
50157+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
50158+ .query_enum = zt5550_hc_query_enum,
50159+};
50160+
50161 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
50162 {
50163 int status;
50164@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
50165 dbg("returned from zt5550_hc_config");
50166
50167 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
50168- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
50169 zt5550_hpc.ops = &zt5550_hpc_ops;
50170 if(!poll) {
50171 zt5550_hpc.irq = hc_dev->irq;
50172 zt5550_hpc.irq_flags = IRQF_SHARED;
50173 zt5550_hpc.dev_id = hc_dev;
50174
50175- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50176- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50177- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50178+ pax_open_kernel();
50179+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50180+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50181+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50182+ pax_close_kernel();
50183 } else {
50184 info("using ENUM# polling mode");
50185 }
50186diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
50187index 0968a9b..5a00edf 100644
50188--- a/drivers/pci/hotplug/cpqphp_nvram.c
50189+++ b/drivers/pci/hotplug/cpqphp_nvram.c
50190@@ -427,9 +427,13 @@ static u32 store_HRT (void __iomem *rom_start)
50191
50192 void compaq_nvram_init (void __iomem *rom_start)
50193 {
50194+
50195+#ifndef CONFIG_PAX_KERNEXEC
50196 if (rom_start) {
50197 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
50198 }
50199+#endif
50200+
50201 dbg("int15 entry = %p\n", compaq_int15_entry_point);
50202
50203 /* initialize our int15 lock */
50204diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
50205index 56d8486..f26113f 100644
50206--- a/drivers/pci/hotplug/pci_hotplug_core.c
50207+++ b/drivers/pci/hotplug/pci_hotplug_core.c
50208@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
50209 return -EINVAL;
50210 }
50211
50212- slot->ops->owner = owner;
50213- slot->ops->mod_name = mod_name;
50214+ pax_open_kernel();
50215+ *(struct module **)&slot->ops->owner = owner;
50216+ *(const char **)&slot->ops->mod_name = mod_name;
50217+ pax_close_kernel();
50218
50219 mutex_lock(&pci_hp_mutex);
50220 /*
50221diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
50222index 07aa722..84514b4 100644
50223--- a/drivers/pci/hotplug/pciehp_core.c
50224+++ b/drivers/pci/hotplug/pciehp_core.c
50225@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
50226 struct slot *slot = ctrl->slot;
50227 struct hotplug_slot *hotplug = NULL;
50228 struct hotplug_slot_info *info = NULL;
50229- struct hotplug_slot_ops *ops = NULL;
50230+ hotplug_slot_ops_no_const *ops = NULL;
50231 char name[SLOT_NAME_SIZE];
50232 int retval = -ENOMEM;
50233
50234diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
50235index 6807edd..086a7dc 100644
50236--- a/drivers/pci/msi.c
50237+++ b/drivers/pci/msi.c
50238@@ -507,8 +507,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
50239 {
50240 struct attribute **msi_attrs;
50241 struct attribute *msi_attr;
50242- struct device_attribute *msi_dev_attr;
50243- struct attribute_group *msi_irq_group;
50244+ device_attribute_no_const *msi_dev_attr;
50245+ attribute_group_no_const *msi_irq_group;
50246 const struct attribute_group **msi_irq_groups;
50247 struct msi_desc *entry;
50248 int ret = -ENOMEM;
50249@@ -568,7 +568,7 @@ error_attrs:
50250 count = 0;
50251 msi_attr = msi_attrs[count];
50252 while (msi_attr) {
50253- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
50254+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
50255 kfree(msi_attr->name);
50256 kfree(msi_dev_attr);
50257 ++count;
50258diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
50259index 6d04771..4126004 100644
50260--- a/drivers/pci/pci-sysfs.c
50261+++ b/drivers/pci/pci-sysfs.c
50262@@ -1134,7 +1134,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
50263 {
50264 /* allocate attribute structure, piggyback attribute name */
50265 int name_len = write_combine ? 13 : 10;
50266- struct bin_attribute *res_attr;
50267+ bin_attribute_no_const *res_attr;
50268 int retval;
50269
50270 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
50271@@ -1311,7 +1311,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
50272 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
50273 {
50274 int retval;
50275- struct bin_attribute *attr;
50276+ bin_attribute_no_const *attr;
50277
50278 /* If the device has VPD, try to expose it in sysfs. */
50279 if (dev->vpd) {
50280@@ -1358,7 +1358,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
50281 {
50282 int retval;
50283 int rom_size = 0;
50284- struct bin_attribute *attr;
50285+ bin_attribute_no_const *attr;
50286
50287 if (!sysfs_initialized)
50288 return -EACCES;
50289diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
50290index 0601890..dc15007 100644
50291--- a/drivers/pci/pci.h
50292+++ b/drivers/pci/pci.h
50293@@ -91,7 +91,7 @@ struct pci_vpd_ops {
50294 struct pci_vpd {
50295 unsigned int len;
50296 const struct pci_vpd_ops *ops;
50297- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
50298+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
50299 };
50300
50301 int pci_vpd_pci22_init(struct pci_dev *dev);
50302diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
50303index e1e7026..d28dd33 100644
50304--- a/drivers/pci/pcie/aspm.c
50305+++ b/drivers/pci/pcie/aspm.c
50306@@ -27,9 +27,9 @@
50307 #define MODULE_PARAM_PREFIX "pcie_aspm."
50308
50309 /* Note: those are not register definitions */
50310-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
50311-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
50312-#define ASPM_STATE_L1 (4) /* L1 state */
50313+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
50314+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
50315+#define ASPM_STATE_L1 (4U) /* L1 state */
50316 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
50317 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
50318
50319diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
50320index 9cce960..7c530f4 100644
50321--- a/drivers/pci/probe.c
50322+++ b/drivers/pci/probe.c
50323@@ -176,7 +176,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
50324 struct pci_bus_region region, inverted_region;
50325 bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
50326
50327- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
50328+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
50329
50330 /* No printks while decoding is disabled! */
50331 if (!dev->mmio_always_on) {
50332diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
50333index 3f155e7..0f4b1f0 100644
50334--- a/drivers/pci/proc.c
50335+++ b/drivers/pci/proc.c
50336@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
50337 static int __init pci_proc_init(void)
50338 {
50339 struct pci_dev *dev = NULL;
50340+
50341+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50342+#ifdef CONFIG_GRKERNSEC_PROC_USER
50343+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
50344+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50345+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
50346+#endif
50347+#else
50348 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
50349+#endif
50350 proc_create("devices", 0, proc_bus_pci_dir,
50351 &proc_bus_pci_dev_operations);
50352 proc_initialized = 1;
50353diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
50354index d866db8..c827d1f 100644
50355--- a/drivers/platform/chrome/chromeos_laptop.c
50356+++ b/drivers/platform/chrome/chromeos_laptop.c
50357@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
50358 .callback = chromeos_laptop_dmi_matched, \
50359 .driver_data = (void *)&board_
50360
50361-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
50362+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
50363 {
50364 .ident = "Samsung Series 5 550",
50365 .matches = {
50366diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
50367index c5af23b..3d62d5e 100644
50368--- a/drivers/platform/x86/alienware-wmi.c
50369+++ b/drivers/platform/x86/alienware-wmi.c
50370@@ -150,7 +150,7 @@ struct wmax_led_args {
50371 } __packed;
50372
50373 static struct platform_device *platform_device;
50374-static struct device_attribute *zone_dev_attrs;
50375+static device_attribute_no_const *zone_dev_attrs;
50376 static struct attribute **zone_attrs;
50377 static struct platform_zone *zone_data;
50378
50379@@ -161,7 +161,7 @@ static struct platform_driver platform_driver = {
50380 }
50381 };
50382
50383-static struct attribute_group zone_attribute_group = {
50384+static attribute_group_no_const zone_attribute_group = {
50385 .name = "rgb_zones",
50386 };
50387
50388diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
50389index 21fc932..ee9394a 100644
50390--- a/drivers/platform/x86/asus-wmi.c
50391+++ b/drivers/platform/x86/asus-wmi.c
50392@@ -1590,6 +1590,10 @@ static int show_dsts(struct seq_file *m, void *data)
50393 int err;
50394 u32 retval = -1;
50395
50396+#ifdef CONFIG_GRKERNSEC_KMEM
50397+ return -EPERM;
50398+#endif
50399+
50400 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
50401
50402 if (err < 0)
50403@@ -1606,6 +1610,10 @@ static int show_devs(struct seq_file *m, void *data)
50404 int err;
50405 u32 retval = -1;
50406
50407+#ifdef CONFIG_GRKERNSEC_KMEM
50408+ return -EPERM;
50409+#endif
50410+
50411 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
50412 &retval);
50413
50414@@ -1630,6 +1638,10 @@ static int show_call(struct seq_file *m, void *data)
50415 union acpi_object *obj;
50416 acpi_status status;
50417
50418+#ifdef CONFIG_GRKERNSEC_KMEM
50419+ return -EPERM;
50420+#endif
50421+
50422 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
50423 1, asus->debug.method_id,
50424 &input, &output);
50425diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
50426index 62f8030..c7f2a45 100644
50427--- a/drivers/platform/x86/msi-laptop.c
50428+++ b/drivers/platform/x86/msi-laptop.c
50429@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
50430
50431 if (!quirks->ec_read_only) {
50432 /* allow userland write sysfs file */
50433- dev_attr_bluetooth.store = store_bluetooth;
50434- dev_attr_wlan.store = store_wlan;
50435- dev_attr_threeg.store = store_threeg;
50436- dev_attr_bluetooth.attr.mode |= S_IWUSR;
50437- dev_attr_wlan.attr.mode |= S_IWUSR;
50438- dev_attr_threeg.attr.mode |= S_IWUSR;
50439+ pax_open_kernel();
50440+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
50441+ *(void **)&dev_attr_wlan.store = store_wlan;
50442+ *(void **)&dev_attr_threeg.store = store_threeg;
50443+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
50444+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
50445+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
50446+ pax_close_kernel();
50447 }
50448
50449 /* disable hardware control by fn key */
50450diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
50451index 70222f2..8c8ce66 100644
50452--- a/drivers/platform/x86/msi-wmi.c
50453+++ b/drivers/platform/x86/msi-wmi.c
50454@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
50455 static void msi_wmi_notify(u32 value, void *context)
50456 {
50457 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
50458- static struct key_entry *key;
50459+ struct key_entry *key;
50460 union acpi_object *obj;
50461 acpi_status status;
50462
50463diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
50464index 26ad9ff..7c52909 100644
50465--- a/drivers/platform/x86/sony-laptop.c
50466+++ b/drivers/platform/x86/sony-laptop.c
50467@@ -2527,7 +2527,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
50468 }
50469
50470 /* High speed charging function */
50471-static struct device_attribute *hsc_handle;
50472+static device_attribute_no_const *hsc_handle;
50473
50474 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
50475 struct device_attribute *attr,
50476@@ -2601,7 +2601,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
50477 }
50478
50479 /* low battery function */
50480-static struct device_attribute *lowbatt_handle;
50481+static device_attribute_no_const *lowbatt_handle;
50482
50483 static ssize_t sony_nc_lowbatt_store(struct device *dev,
50484 struct device_attribute *attr,
50485@@ -2667,7 +2667,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
50486 }
50487
50488 /* fan speed function */
50489-static struct device_attribute *fan_handle, *hsf_handle;
50490+static device_attribute_no_const *fan_handle, *hsf_handle;
50491
50492 static ssize_t sony_nc_hsfan_store(struct device *dev,
50493 struct device_attribute *attr,
50494@@ -2774,7 +2774,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
50495 }
50496
50497 /* USB charge function */
50498-static struct device_attribute *uc_handle;
50499+static device_attribute_no_const *uc_handle;
50500
50501 static ssize_t sony_nc_usb_charge_store(struct device *dev,
50502 struct device_attribute *attr,
50503@@ -2848,7 +2848,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
50504 }
50505
50506 /* Panel ID function */
50507-static struct device_attribute *panel_handle;
50508+static device_attribute_no_const *panel_handle;
50509
50510 static ssize_t sony_nc_panelid_show(struct device *dev,
50511 struct device_attribute *attr, char *buffer)
50512@@ -2895,7 +2895,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
50513 }
50514
50515 /* smart connect function */
50516-static struct device_attribute *sc_handle;
50517+static device_attribute_no_const *sc_handle;
50518
50519 static ssize_t sony_nc_smart_conn_store(struct device *dev,
50520 struct device_attribute *attr,
50521diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50522index 3bbc6eb..7760460 100644
50523--- a/drivers/platform/x86/thinkpad_acpi.c
50524+++ b/drivers/platform/x86/thinkpad_acpi.c
50525@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
50526 return 0;
50527 }
50528
50529-void static hotkey_mask_warn_incomplete_mask(void)
50530+static void hotkey_mask_warn_incomplete_mask(void)
50531 {
50532 /* log only what the user can fix... */
50533 const u32 wantedmask = hotkey_driver_mask &
50534@@ -2438,10 +2438,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
50535 && !tp_features.bright_unkfw)
50536 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
50537 }
50538+}
50539
50540 #undef TPACPI_COMPARE_KEY
50541 #undef TPACPI_MAY_SEND_KEY
50542-}
50543
50544 /*
50545 * Polling driver
50546diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
50547index 438d4c7..ca8a2fb 100644
50548--- a/drivers/pnp/pnpbios/bioscalls.c
50549+++ b/drivers/pnp/pnpbios/bioscalls.c
50550@@ -59,7 +59,7 @@ do { \
50551 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
50552 } while(0)
50553
50554-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
50555+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
50556 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
50557
50558 /*
50559@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50560
50561 cpu = get_cpu();
50562 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
50563+
50564+ pax_open_kernel();
50565 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
50566+ pax_close_kernel();
50567
50568 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
50569 spin_lock_irqsave(&pnp_bios_lock, flags);
50570@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50571 :"memory");
50572 spin_unlock_irqrestore(&pnp_bios_lock, flags);
50573
50574+ pax_open_kernel();
50575 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
50576+ pax_close_kernel();
50577+
50578 put_cpu();
50579
50580 /* If we get here and this is set then the PnP BIOS faulted on us. */
50581@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
50582 return status;
50583 }
50584
50585-void pnpbios_calls_init(union pnp_bios_install_struct *header)
50586+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
50587 {
50588 int i;
50589
50590@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50591 pnp_bios_callpoint.offset = header->fields.pm16offset;
50592 pnp_bios_callpoint.segment = PNP_CS16;
50593
50594+ pax_open_kernel();
50595+
50596 for_each_possible_cpu(i) {
50597 struct desc_struct *gdt = get_cpu_gdt_table(i);
50598 if (!gdt)
50599@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50600 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50601 (unsigned long)__va(header->fields.pm16dseg));
50602 }
50603+
50604+ pax_close_kernel();
50605 }
50606diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50607index 0c52e2a..3421ab7 100644
50608--- a/drivers/power/pda_power.c
50609+++ b/drivers/power/pda_power.c
50610@@ -37,7 +37,11 @@ static int polling;
50611
50612 #if IS_ENABLED(CONFIG_USB_PHY)
50613 static struct usb_phy *transceiver;
50614-static struct notifier_block otg_nb;
50615+static int otg_handle_notification(struct notifier_block *nb,
50616+ unsigned long event, void *unused);
50617+static struct notifier_block otg_nb = {
50618+ .notifier_call = otg_handle_notification
50619+};
50620 #endif
50621
50622 static struct regulator *ac_draw;
50623@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50624
50625 #if IS_ENABLED(CONFIG_USB_PHY)
50626 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50627- otg_nb.notifier_call = otg_handle_notification;
50628 ret = usb_register_notifier(transceiver, &otg_nb);
50629 if (ret) {
50630 dev_err(dev, "failure to register otg notifier\n");
50631diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50632index cc439fd..8fa30df 100644
50633--- a/drivers/power/power_supply.h
50634+++ b/drivers/power/power_supply.h
50635@@ -16,12 +16,12 @@ struct power_supply;
50636
50637 #ifdef CONFIG_SYSFS
50638
50639-extern void power_supply_init_attrs(struct device_type *dev_type);
50640+extern void power_supply_init_attrs(void);
50641 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50642
50643 #else
50644
50645-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50646+static inline void power_supply_init_attrs(void) {}
50647 #define power_supply_uevent NULL
50648
50649 #endif /* CONFIG_SYSFS */
50650diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50651index 078afd6..fbac9da 100644
50652--- a/drivers/power/power_supply_core.c
50653+++ b/drivers/power/power_supply_core.c
50654@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50655 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50656 EXPORT_SYMBOL_GPL(power_supply_notifier);
50657
50658-static struct device_type power_supply_dev_type;
50659+extern const struct attribute_group *power_supply_attr_groups[];
50660+static struct device_type power_supply_dev_type = {
50661+ .groups = power_supply_attr_groups,
50662+};
50663
50664 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50665 struct power_supply *supply)
50666@@ -640,7 +643,7 @@ static int __init power_supply_class_init(void)
50667 return PTR_ERR(power_supply_class);
50668
50669 power_supply_class->dev_uevent = power_supply_uevent;
50670- power_supply_init_attrs(&power_supply_dev_type);
50671+ power_supply_init_attrs();
50672
50673 return 0;
50674 }
50675diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50676index 750a202..99c8f4b 100644
50677--- a/drivers/power/power_supply_sysfs.c
50678+++ b/drivers/power/power_supply_sysfs.c
50679@@ -234,17 +234,15 @@ static struct attribute_group power_supply_attr_group = {
50680 .is_visible = power_supply_attr_is_visible,
50681 };
50682
50683-static const struct attribute_group *power_supply_attr_groups[] = {
50684+const struct attribute_group *power_supply_attr_groups[] = {
50685 &power_supply_attr_group,
50686 NULL,
50687 };
50688
50689-void power_supply_init_attrs(struct device_type *dev_type)
50690+void power_supply_init_attrs(void)
50691 {
50692 int i;
50693
50694- dev_type->groups = power_supply_attr_groups;
50695-
50696 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50697 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50698 }
50699diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50700index 84419af..268ede8 100644
50701--- a/drivers/powercap/powercap_sys.c
50702+++ b/drivers/powercap/powercap_sys.c
50703@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50704 struct device_attribute name_attr;
50705 };
50706
50707+static ssize_t show_constraint_name(struct device *dev,
50708+ struct device_attribute *dev_attr,
50709+ char *buf);
50710+
50711 static struct powercap_constraint_attr
50712- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50713+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50714+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50715+ .power_limit_attr = {
50716+ .attr = {
50717+ .name = NULL,
50718+ .mode = S_IWUSR | S_IRUGO
50719+ },
50720+ .show = show_constraint_power_limit_uw,
50721+ .store = store_constraint_power_limit_uw
50722+ },
50723+
50724+ .time_window_attr = {
50725+ .attr = {
50726+ .name = NULL,
50727+ .mode = S_IWUSR | S_IRUGO
50728+ },
50729+ .show = show_constraint_time_window_us,
50730+ .store = store_constraint_time_window_us
50731+ },
50732+
50733+ .max_power_attr = {
50734+ .attr = {
50735+ .name = NULL,
50736+ .mode = S_IRUGO
50737+ },
50738+ .show = show_constraint_max_power_uw,
50739+ .store = NULL
50740+ },
50741+
50742+ .min_power_attr = {
50743+ .attr = {
50744+ .name = NULL,
50745+ .mode = S_IRUGO
50746+ },
50747+ .show = show_constraint_min_power_uw,
50748+ .store = NULL
50749+ },
50750+
50751+ .max_time_window_attr = {
50752+ .attr = {
50753+ .name = NULL,
50754+ .mode = S_IRUGO
50755+ },
50756+ .show = show_constraint_max_time_window_us,
50757+ .store = NULL
50758+ },
50759+
50760+ .min_time_window_attr = {
50761+ .attr = {
50762+ .name = NULL,
50763+ .mode = S_IRUGO
50764+ },
50765+ .show = show_constraint_min_time_window_us,
50766+ .store = NULL
50767+ },
50768+
50769+ .name_attr = {
50770+ .attr = {
50771+ .name = NULL,
50772+ .mode = S_IRUGO
50773+ },
50774+ .show = show_constraint_name,
50775+ .store = NULL
50776+ }
50777+ }
50778+};
50779
50780 /* A list of powercap control_types */
50781 static LIST_HEAD(powercap_cntrl_list);
50782@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50783 }
50784
50785 static int create_constraint_attribute(int id, const char *name,
50786- int mode,
50787- struct device_attribute *dev_attr,
50788- ssize_t (*show)(struct device *,
50789- struct device_attribute *, char *),
50790- ssize_t (*store)(struct device *,
50791- struct device_attribute *,
50792- const char *, size_t)
50793- )
50794+ struct device_attribute *dev_attr)
50795 {
50796+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50797
50798- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50799- id, name);
50800- if (!dev_attr->attr.name)
50801+ if (!name)
50802 return -ENOMEM;
50803- dev_attr->attr.mode = mode;
50804- dev_attr->show = show;
50805- dev_attr->store = store;
50806+
50807+ pax_open_kernel();
50808+ *(const char **)&dev_attr->attr.name = name;
50809+ pax_close_kernel();
50810
50811 return 0;
50812 }
50813@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50814
50815 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50816 ret = create_constraint_attribute(i, "power_limit_uw",
50817- S_IWUSR | S_IRUGO,
50818- &constraint_attrs[i].power_limit_attr,
50819- show_constraint_power_limit_uw,
50820- store_constraint_power_limit_uw);
50821+ &constraint_attrs[i].power_limit_attr);
50822 if (ret)
50823 goto err_alloc;
50824 ret = create_constraint_attribute(i, "time_window_us",
50825- S_IWUSR | S_IRUGO,
50826- &constraint_attrs[i].time_window_attr,
50827- show_constraint_time_window_us,
50828- store_constraint_time_window_us);
50829+ &constraint_attrs[i].time_window_attr);
50830 if (ret)
50831 goto err_alloc;
50832- ret = create_constraint_attribute(i, "name", S_IRUGO,
50833- &constraint_attrs[i].name_attr,
50834- show_constraint_name,
50835- NULL);
50836+ ret = create_constraint_attribute(i, "name",
50837+ &constraint_attrs[i].name_attr);
50838 if (ret)
50839 goto err_alloc;
50840- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50841- &constraint_attrs[i].max_power_attr,
50842- show_constraint_max_power_uw,
50843- NULL);
50844+ ret = create_constraint_attribute(i, "max_power_uw",
50845+ &constraint_attrs[i].max_power_attr);
50846 if (ret)
50847 goto err_alloc;
50848- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50849- &constraint_attrs[i].min_power_attr,
50850- show_constraint_min_power_uw,
50851- NULL);
50852+ ret = create_constraint_attribute(i, "min_power_uw",
50853+ &constraint_attrs[i].min_power_attr);
50854 if (ret)
50855 goto err_alloc;
50856 ret = create_constraint_attribute(i, "max_time_window_us",
50857- S_IRUGO,
50858- &constraint_attrs[i].max_time_window_attr,
50859- show_constraint_max_time_window_us,
50860- NULL);
50861+ &constraint_attrs[i].max_time_window_attr);
50862 if (ret)
50863 goto err_alloc;
50864 ret = create_constraint_attribute(i, "min_time_window_us",
50865- S_IRUGO,
50866- &constraint_attrs[i].min_time_window_attr,
50867- show_constraint_min_time_window_us,
50868- NULL);
50869+ &constraint_attrs[i].min_time_window_attr);
50870 if (ret)
50871 goto err_alloc;
50872
50873@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50874 power_zone->zone_dev_attrs[count++] =
50875 &dev_attr_max_energy_range_uj.attr;
50876 if (power_zone->ops->get_energy_uj) {
50877+ pax_open_kernel();
50878 if (power_zone->ops->reset_energy_uj)
50879- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50880+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50881 else
50882- dev_attr_energy_uj.attr.mode = S_IRUGO;
50883+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50884+ pax_close_kernel();
50885 power_zone->zone_dev_attrs[count++] =
50886 &dev_attr_energy_uj.attr;
50887 }
50888diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50889index 9c5d414..c7900ce 100644
50890--- a/drivers/ptp/ptp_private.h
50891+++ b/drivers/ptp/ptp_private.h
50892@@ -51,7 +51,7 @@ struct ptp_clock {
50893 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50894 wait_queue_head_t tsev_wq;
50895 int defunct; /* tells readers to go away when clock is being removed */
50896- struct device_attribute *pin_dev_attr;
50897+ device_attribute_no_const *pin_dev_attr;
50898 struct attribute **pin_attr;
50899 struct attribute_group pin_attr_group;
50900 };
50901diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50902index 302e626..12579af 100644
50903--- a/drivers/ptp/ptp_sysfs.c
50904+++ b/drivers/ptp/ptp_sysfs.c
50905@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50906 goto no_pin_attr;
50907
50908 for (i = 0; i < n_pins; i++) {
50909- struct device_attribute *da = &ptp->pin_dev_attr[i];
50910+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50911 sysfs_attr_init(&da->attr);
50912 da->attr.name = info->pin_config[i].name;
50913 da->attr.mode = 0644;
50914diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50915index a3c3785..c901e3a 100644
50916--- a/drivers/regulator/core.c
50917+++ b/drivers/regulator/core.c
50918@@ -3481,7 +3481,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50919 {
50920 const struct regulation_constraints *constraints = NULL;
50921 const struct regulator_init_data *init_data;
50922- static atomic_t regulator_no = ATOMIC_INIT(0);
50923+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
50924 struct regulator_dev *rdev;
50925 struct device *dev;
50926 int ret, i;
50927@@ -3551,7 +3551,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50928 rdev->dev.of_node = of_node_get(config->of_node);
50929 rdev->dev.parent = dev;
50930 dev_set_name(&rdev->dev, "regulator.%d",
50931- atomic_inc_return(&regulator_no) - 1);
50932+ atomic_inc_return_unchecked(&regulator_no) - 1);
50933 ret = device_register(&rdev->dev);
50934 if (ret != 0) {
50935 put_device(&rdev->dev);
50936diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50937index 2fc4111..6aa88ca 100644
50938--- a/drivers/regulator/max8660.c
50939+++ b/drivers/regulator/max8660.c
50940@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50941 max8660->shadow_regs[MAX8660_OVER1] = 5;
50942 } else {
50943 /* Otherwise devices can be toggled via software */
50944- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50945- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50946+ pax_open_kernel();
50947+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50948+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50949+ pax_close_kernel();
50950 }
50951
50952 /*
50953diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50954index dbedf17..18ff6b7 100644
50955--- a/drivers/regulator/max8973-regulator.c
50956+++ b/drivers/regulator/max8973-regulator.c
50957@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50958 if (!pdata || !pdata->enable_ext_control) {
50959 max->desc.enable_reg = MAX8973_VOUT;
50960 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50961- max->ops.enable = regulator_enable_regmap;
50962- max->ops.disable = regulator_disable_regmap;
50963- max->ops.is_enabled = regulator_is_enabled_regmap;
50964+ pax_open_kernel();
50965+ *(void **)&max->ops.enable = regulator_enable_regmap;
50966+ *(void **)&max->ops.disable = regulator_disable_regmap;
50967+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50968+ pax_close_kernel();
50969 }
50970
50971 if (pdata) {
50972diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50973index f374fa5..26f0683 100644
50974--- a/drivers/regulator/mc13892-regulator.c
50975+++ b/drivers/regulator/mc13892-regulator.c
50976@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50977 }
50978 mc13xxx_unlock(mc13892);
50979
50980- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
50981+ pax_open_kernel();
50982+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
50983 = mc13892_vcam_set_mode;
50984- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
50985+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
50986 = mc13892_vcam_get_mode;
50987+ pax_close_kernel();
50988
50989 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50990 ARRAY_SIZE(mc13892_regulators));
50991diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
50992index 5b2e761..c8c8a4a 100644
50993--- a/drivers/rtc/rtc-cmos.c
50994+++ b/drivers/rtc/rtc-cmos.c
50995@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
50996 hpet_rtc_timer_init();
50997
50998 /* export at least the first block of NVRAM */
50999- nvram.size = address_space - NVRAM_OFFSET;
51000+ pax_open_kernel();
51001+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
51002+ pax_close_kernel();
51003 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
51004 if (retval < 0) {
51005 dev_dbg(dev, "can't create nvram file? %d\n", retval);
51006diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
51007index d049393..bb20be0 100644
51008--- a/drivers/rtc/rtc-dev.c
51009+++ b/drivers/rtc/rtc-dev.c
51010@@ -16,6 +16,7 @@
51011 #include <linux/module.h>
51012 #include <linux/rtc.h>
51013 #include <linux/sched.h>
51014+#include <linux/grsecurity.h>
51015 #include "rtc-core.h"
51016
51017 static dev_t rtc_devt;
51018@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
51019 if (copy_from_user(&tm, uarg, sizeof(tm)))
51020 return -EFAULT;
51021
51022+ gr_log_timechange();
51023+
51024 return rtc_set_time(rtc, &tm);
51025
51026 case RTC_PIE_ON:
51027diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
51028index f03d5ba..8325bf6 100644
51029--- a/drivers/rtc/rtc-ds1307.c
51030+++ b/drivers/rtc/rtc-ds1307.c
51031@@ -107,7 +107,7 @@ struct ds1307 {
51032 u8 offset; /* register's offset */
51033 u8 regs[11];
51034 u16 nvram_offset;
51035- struct bin_attribute *nvram;
51036+ bin_attribute_no_const *nvram;
51037 enum ds_type type;
51038 unsigned long flags;
51039 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
51040diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
51041index 11880c1..b823aa4 100644
51042--- a/drivers/rtc/rtc-m48t59.c
51043+++ b/drivers/rtc/rtc-m48t59.c
51044@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
51045 if (IS_ERR(m48t59->rtc))
51046 return PTR_ERR(m48t59->rtc);
51047
51048- m48t59_nvram_attr.size = pdata->offset;
51049+ pax_open_kernel();
51050+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
51051+ pax_close_kernel();
51052
51053 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
51054 if (ret)
51055diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
51056index e693af6..2e525b6 100644
51057--- a/drivers/scsi/bfa/bfa_fcpim.h
51058+++ b/drivers/scsi/bfa/bfa_fcpim.h
51059@@ -36,7 +36,7 @@ struct bfa_iotag_s {
51060
51061 struct bfa_itn_s {
51062 bfa_isr_func_t isr;
51063-};
51064+} __no_const;
51065
51066 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
51067 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
51068diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
51069index 0f19455..ef7adb5 100644
51070--- a/drivers/scsi/bfa/bfa_fcs.c
51071+++ b/drivers/scsi/bfa/bfa_fcs.c
51072@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
51073 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
51074
51075 static struct bfa_fcs_mod_s fcs_modules[] = {
51076- { bfa_fcs_port_attach, NULL, NULL },
51077- { bfa_fcs_uf_attach, NULL, NULL },
51078- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
51079- bfa_fcs_fabric_modexit },
51080+ {
51081+ .attach = bfa_fcs_port_attach,
51082+ .modinit = NULL,
51083+ .modexit = NULL
51084+ },
51085+ {
51086+ .attach = bfa_fcs_uf_attach,
51087+ .modinit = NULL,
51088+ .modexit = NULL
51089+ },
51090+ {
51091+ .attach = bfa_fcs_fabric_attach,
51092+ .modinit = bfa_fcs_fabric_modinit,
51093+ .modexit = bfa_fcs_fabric_modexit
51094+ },
51095 };
51096
51097 /*
51098diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
51099index ff75ef8..2dfe00a 100644
51100--- a/drivers/scsi/bfa/bfa_fcs_lport.c
51101+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
51102@@ -89,15 +89,26 @@ static struct {
51103 void (*offline) (struct bfa_fcs_lport_s *port);
51104 } __port_action[] = {
51105 {
51106- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
51107- bfa_fcs_lport_unknown_offline}, {
51108- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
51109- bfa_fcs_lport_fab_offline}, {
51110- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
51111- bfa_fcs_lport_n2n_offline}, {
51112- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
51113- bfa_fcs_lport_loop_offline},
51114- };
51115+ .init = bfa_fcs_lport_unknown_init,
51116+ .online = bfa_fcs_lport_unknown_online,
51117+ .offline = bfa_fcs_lport_unknown_offline
51118+ },
51119+ {
51120+ .init = bfa_fcs_lport_fab_init,
51121+ .online = bfa_fcs_lport_fab_online,
51122+ .offline = bfa_fcs_lport_fab_offline
51123+ },
51124+ {
51125+ .init = bfa_fcs_lport_n2n_init,
51126+ .online = bfa_fcs_lport_n2n_online,
51127+ .offline = bfa_fcs_lport_n2n_offline
51128+ },
51129+ {
51130+ .init = bfa_fcs_lport_loop_init,
51131+ .online = bfa_fcs_lport_loop_online,
51132+ .offline = bfa_fcs_lport_loop_offline
51133+ },
51134+};
51135
51136 /*
51137 * fcs_port_sm FCS logical port state machine
51138diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
51139index a38aafa0..fe8f03b 100644
51140--- a/drivers/scsi/bfa/bfa_ioc.h
51141+++ b/drivers/scsi/bfa/bfa_ioc.h
51142@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
51143 bfa_ioc_disable_cbfn_t disable_cbfn;
51144 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
51145 bfa_ioc_reset_cbfn_t reset_cbfn;
51146-};
51147+} __no_const;
51148
51149 /*
51150 * IOC event notification mechanism.
51151@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
51152 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
51153 enum bfi_ioc_state fwstate);
51154 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
51155-};
51156+} __no_const;
51157
51158 /*
51159 * Queue element to wait for room in request queue. FIFO order is
51160diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
51161index a14c784..6de6790 100644
51162--- a/drivers/scsi/bfa/bfa_modules.h
51163+++ b/drivers/scsi/bfa/bfa_modules.h
51164@@ -78,12 +78,12 @@ enum {
51165 \
51166 extern struct bfa_module_s hal_mod_ ## __mod; \
51167 struct bfa_module_s hal_mod_ ## __mod = { \
51168- bfa_ ## __mod ## _meminfo, \
51169- bfa_ ## __mod ## _attach, \
51170- bfa_ ## __mod ## _detach, \
51171- bfa_ ## __mod ## _start, \
51172- bfa_ ## __mod ## _stop, \
51173- bfa_ ## __mod ## _iocdisable, \
51174+ .meminfo = bfa_ ## __mod ## _meminfo, \
51175+ .attach = bfa_ ## __mod ## _attach, \
51176+ .detach = bfa_ ## __mod ## _detach, \
51177+ .start = bfa_ ## __mod ## _start, \
51178+ .stop = bfa_ ## __mod ## _stop, \
51179+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
51180 }
51181
51182 #define BFA_CACHELINE_SZ (256)
51183diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
51184index 045c4e1..13de803 100644
51185--- a/drivers/scsi/fcoe/fcoe_sysfs.c
51186+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
51187@@ -33,8 +33,8 @@
51188 */
51189 #include "libfcoe.h"
51190
51191-static atomic_t ctlr_num;
51192-static atomic_t fcf_num;
51193+static atomic_unchecked_t ctlr_num;
51194+static atomic_unchecked_t fcf_num;
51195
51196 /*
51197 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
51198@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
51199 if (!ctlr)
51200 goto out;
51201
51202- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
51203+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
51204 ctlr->f = f;
51205 ctlr->mode = FIP_CONN_TYPE_FABRIC;
51206 INIT_LIST_HEAD(&ctlr->fcfs);
51207@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
51208 fcf->dev.parent = &ctlr->dev;
51209 fcf->dev.bus = &fcoe_bus_type;
51210 fcf->dev.type = &fcoe_fcf_device_type;
51211- fcf->id = atomic_inc_return(&fcf_num) - 1;
51212+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
51213 fcf->state = FCOE_FCF_STATE_UNKNOWN;
51214
51215 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
51216@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
51217 {
51218 int error;
51219
51220- atomic_set(&ctlr_num, 0);
51221- atomic_set(&fcf_num, 0);
51222+ atomic_set_unchecked(&ctlr_num, 0);
51223+ atomic_set_unchecked(&fcf_num, 0);
51224
51225 error = bus_register(&fcoe_bus_type);
51226 if (error)
51227diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
51228index 6de80e3..a11e0ac 100644
51229--- a/drivers/scsi/hosts.c
51230+++ b/drivers/scsi/hosts.c
51231@@ -42,7 +42,7 @@
51232 #include "scsi_logging.h"
51233
51234
51235-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51236+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51237
51238
51239 static void scsi_host_cls_release(struct device *dev)
51240@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
51241 * subtract one because we increment first then return, but we need to
51242 * know what the next host number was before increment
51243 */
51244- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
51245+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
51246 shost->dma_channel = 0xff;
51247
51248 /* These three are default values which can be overridden */
51249diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
51250index 6b35d0d..2880305 100644
51251--- a/drivers/scsi/hpsa.c
51252+++ b/drivers/scsi/hpsa.c
51253@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
51254 unsigned long flags;
51255
51256 if (h->transMethod & CFGTBL_Trans_io_accel1)
51257- return h->access.command_completed(h, q);
51258+ return h->access->command_completed(h, q);
51259
51260 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
51261- return h->access.command_completed(h, q);
51262+ return h->access->command_completed(h, q);
51263
51264 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
51265 a = rq->head[rq->current_entry];
51266@@ -5454,7 +5454,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
51267 while (!list_empty(&h->reqQ)) {
51268 c = list_entry(h->reqQ.next, struct CommandList, list);
51269 /* can't do anything if fifo is full */
51270- if ((h->access.fifo_full(h))) {
51271+ if ((h->access->fifo_full(h))) {
51272 h->fifo_recently_full = 1;
51273 dev_warn(&h->pdev->dev, "fifo full\n");
51274 break;
51275@@ -5476,7 +5476,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
51276
51277 /* Tell the controller execute command */
51278 spin_unlock_irqrestore(&h->lock, *flags);
51279- h->access.submit_command(h, c);
51280+ h->access->submit_command(h, c);
51281 spin_lock_irqsave(&h->lock, *flags);
51282 }
51283 }
51284@@ -5492,17 +5492,17 @@ static void lock_and_start_io(struct ctlr_info *h)
51285
51286 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
51287 {
51288- return h->access.command_completed(h, q);
51289+ return h->access->command_completed(h, q);
51290 }
51291
51292 static inline bool interrupt_pending(struct ctlr_info *h)
51293 {
51294- return h->access.intr_pending(h);
51295+ return h->access->intr_pending(h);
51296 }
51297
51298 static inline long interrupt_not_for_us(struct ctlr_info *h)
51299 {
51300- return (h->access.intr_pending(h) == 0) ||
51301+ return (h->access->intr_pending(h) == 0) ||
51302 (h->interrupts_enabled == 0);
51303 }
51304
51305@@ -6458,7 +6458,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
51306 if (prod_index < 0)
51307 return -ENODEV;
51308 h->product_name = products[prod_index].product_name;
51309- h->access = *(products[prod_index].access);
51310+ h->access = products[prod_index].access;
51311
51312 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
51313 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
51314@@ -6780,7 +6780,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
51315 unsigned long flags;
51316 u32 lockup_detected;
51317
51318- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51319+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51320 spin_lock_irqsave(&h->lock, flags);
51321 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
51322 if (!lockup_detected) {
51323@@ -7027,7 +7027,7 @@ reinit_after_soft_reset:
51324 }
51325
51326 /* make sure the board interrupts are off */
51327- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51328+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51329
51330 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
51331 goto clean2;
51332@@ -7062,7 +7062,7 @@ reinit_after_soft_reset:
51333 * fake ones to scoop up any residual completions.
51334 */
51335 spin_lock_irqsave(&h->lock, flags);
51336- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51337+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51338 spin_unlock_irqrestore(&h->lock, flags);
51339 free_irqs(h);
51340 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
51341@@ -7081,9 +7081,9 @@ reinit_after_soft_reset:
51342 dev_info(&h->pdev->dev, "Board READY.\n");
51343 dev_info(&h->pdev->dev,
51344 "Waiting for stale completions to drain.\n");
51345- h->access.set_intr_mask(h, HPSA_INTR_ON);
51346+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51347 msleep(10000);
51348- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51349+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51350
51351 rc = controller_reset_failed(h->cfgtable);
51352 if (rc)
51353@@ -7109,7 +7109,7 @@ reinit_after_soft_reset:
51354 h->drv_req_rescan = 0;
51355
51356 /* Turn the interrupts on so we can service requests */
51357- h->access.set_intr_mask(h, HPSA_INTR_ON);
51358+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51359
51360 hpsa_hba_inquiry(h);
51361 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
51362@@ -7174,7 +7174,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
51363 * To write all data in the battery backed cache to disks
51364 */
51365 hpsa_flush_cache(h);
51366- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51367+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51368 hpsa_free_irqs_and_disable_msix(h);
51369 }
51370
51371@@ -7292,7 +7292,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51372 CFGTBL_Trans_enable_directed_msix |
51373 (trans_support & (CFGTBL_Trans_io_accel1 |
51374 CFGTBL_Trans_io_accel2));
51375- struct access_method access = SA5_performant_access;
51376+ struct access_method *access = &SA5_performant_access;
51377
51378 /* This is a bit complicated. There are 8 registers on
51379 * the controller which we write to to tell it 8 different
51380@@ -7334,7 +7334,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51381 * perform the superfluous readl() after each command submission.
51382 */
51383 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
51384- access = SA5_performant_access_no_read;
51385+ access = &SA5_performant_access_no_read;
51386
51387 /* Controller spec: zero out this buffer. */
51388 for (i = 0; i < h->nreply_queues; i++)
51389@@ -7364,12 +7364,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51390 * enable outbound interrupt coalescing in accelerator mode;
51391 */
51392 if (trans_support & CFGTBL_Trans_io_accel1) {
51393- access = SA5_ioaccel_mode1_access;
51394+ access = &SA5_ioaccel_mode1_access;
51395 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51396 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51397 } else {
51398 if (trans_support & CFGTBL_Trans_io_accel2) {
51399- access = SA5_ioaccel_mode2_access;
51400+ access = &SA5_ioaccel_mode2_access;
51401 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51402 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51403 }
51404diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
51405index 24472ce..8782caf 100644
51406--- a/drivers/scsi/hpsa.h
51407+++ b/drivers/scsi/hpsa.h
51408@@ -127,7 +127,7 @@ struct ctlr_info {
51409 unsigned int msix_vector;
51410 unsigned int msi_vector;
51411 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
51412- struct access_method access;
51413+ struct access_method *access;
51414 char hba_mode_enabled;
51415
51416 /* queue and queue Info */
51417@@ -536,43 +536,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
51418 }
51419
51420 static struct access_method SA5_access = {
51421- SA5_submit_command,
51422- SA5_intr_mask,
51423- SA5_fifo_full,
51424- SA5_intr_pending,
51425- SA5_completed,
51426+ .submit_command = SA5_submit_command,
51427+ .set_intr_mask = SA5_intr_mask,
51428+ .fifo_full = SA5_fifo_full,
51429+ .intr_pending = SA5_intr_pending,
51430+ .command_completed = SA5_completed,
51431 };
51432
51433 static struct access_method SA5_ioaccel_mode1_access = {
51434- SA5_submit_command,
51435- SA5_performant_intr_mask,
51436- SA5_fifo_full,
51437- SA5_ioaccel_mode1_intr_pending,
51438- SA5_ioaccel_mode1_completed,
51439+ .submit_command = SA5_submit_command,
51440+ .set_intr_mask = SA5_performant_intr_mask,
51441+ .fifo_full = SA5_fifo_full,
51442+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
51443+ .command_completed = SA5_ioaccel_mode1_completed,
51444 };
51445
51446 static struct access_method SA5_ioaccel_mode2_access = {
51447- SA5_submit_command_ioaccel2,
51448- SA5_performant_intr_mask,
51449- SA5_fifo_full,
51450- SA5_performant_intr_pending,
51451- SA5_performant_completed,
51452+ .submit_command = SA5_submit_command_ioaccel2,
51453+ .set_intr_mask = SA5_performant_intr_mask,
51454+ .fifo_full = SA5_fifo_full,
51455+ .intr_pending = SA5_performant_intr_pending,
51456+ .command_completed = SA5_performant_completed,
51457 };
51458
51459 static struct access_method SA5_performant_access = {
51460- SA5_submit_command,
51461- SA5_performant_intr_mask,
51462- SA5_fifo_full,
51463- SA5_performant_intr_pending,
51464- SA5_performant_completed,
51465+ .submit_command = SA5_submit_command,
51466+ .set_intr_mask = SA5_performant_intr_mask,
51467+ .fifo_full = SA5_fifo_full,
51468+ .intr_pending = SA5_performant_intr_pending,
51469+ .command_completed = SA5_performant_completed,
51470 };
51471
51472 static struct access_method SA5_performant_access_no_read = {
51473- SA5_submit_command_no_read,
51474- SA5_performant_intr_mask,
51475- SA5_fifo_full,
51476- SA5_performant_intr_pending,
51477- SA5_performant_completed,
51478+ .submit_command = SA5_submit_command_no_read,
51479+ .set_intr_mask = SA5_performant_intr_mask,
51480+ .fifo_full = SA5_fifo_full,
51481+ .intr_pending = SA5_performant_intr_pending,
51482+ .command_completed = SA5_performant_completed,
51483 };
51484
51485 struct board_type {
51486diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
51487index 1b3a094..068e683 100644
51488--- a/drivers/scsi/libfc/fc_exch.c
51489+++ b/drivers/scsi/libfc/fc_exch.c
51490@@ -101,12 +101,12 @@ struct fc_exch_mgr {
51491 u16 pool_max_index;
51492
51493 struct {
51494- atomic_t no_free_exch;
51495- atomic_t no_free_exch_xid;
51496- atomic_t xid_not_found;
51497- atomic_t xid_busy;
51498- atomic_t seq_not_found;
51499- atomic_t non_bls_resp;
51500+ atomic_unchecked_t no_free_exch;
51501+ atomic_unchecked_t no_free_exch_xid;
51502+ atomic_unchecked_t xid_not_found;
51503+ atomic_unchecked_t xid_busy;
51504+ atomic_unchecked_t seq_not_found;
51505+ atomic_unchecked_t non_bls_resp;
51506 } stats;
51507 };
51508
51509@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
51510 /* allocate memory for exchange */
51511 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
51512 if (!ep) {
51513- atomic_inc(&mp->stats.no_free_exch);
51514+ atomic_inc_unchecked(&mp->stats.no_free_exch);
51515 goto out;
51516 }
51517 memset(ep, 0, sizeof(*ep));
51518@@ -874,7 +874,7 @@ out:
51519 return ep;
51520 err:
51521 spin_unlock_bh(&pool->lock);
51522- atomic_inc(&mp->stats.no_free_exch_xid);
51523+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
51524 mempool_free(ep, mp->ep_pool);
51525 return NULL;
51526 }
51527@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51528 xid = ntohs(fh->fh_ox_id); /* we originated exch */
51529 ep = fc_exch_find(mp, xid);
51530 if (!ep) {
51531- atomic_inc(&mp->stats.xid_not_found);
51532+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51533 reject = FC_RJT_OX_ID;
51534 goto out;
51535 }
51536@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51537 ep = fc_exch_find(mp, xid);
51538 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
51539 if (ep) {
51540- atomic_inc(&mp->stats.xid_busy);
51541+ atomic_inc_unchecked(&mp->stats.xid_busy);
51542 reject = FC_RJT_RX_ID;
51543 goto rel;
51544 }
51545@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51546 }
51547 xid = ep->xid; /* get our XID */
51548 } else if (!ep) {
51549- atomic_inc(&mp->stats.xid_not_found);
51550+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51551 reject = FC_RJT_RX_ID; /* XID not found */
51552 goto out;
51553 }
51554@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51555 } else {
51556 sp = &ep->seq;
51557 if (sp->id != fh->fh_seq_id) {
51558- atomic_inc(&mp->stats.seq_not_found);
51559+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51560 if (f_ctl & FC_FC_END_SEQ) {
51561 /*
51562 * Update sequence_id based on incoming last
51563@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51564
51565 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
51566 if (!ep) {
51567- atomic_inc(&mp->stats.xid_not_found);
51568+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51569 goto out;
51570 }
51571 if (ep->esb_stat & ESB_ST_COMPLETE) {
51572- atomic_inc(&mp->stats.xid_not_found);
51573+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51574 goto rel;
51575 }
51576 if (ep->rxid == FC_XID_UNKNOWN)
51577 ep->rxid = ntohs(fh->fh_rx_id);
51578 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
51579- atomic_inc(&mp->stats.xid_not_found);
51580+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51581 goto rel;
51582 }
51583 if (ep->did != ntoh24(fh->fh_s_id) &&
51584 ep->did != FC_FID_FLOGI) {
51585- atomic_inc(&mp->stats.xid_not_found);
51586+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51587 goto rel;
51588 }
51589 sof = fr_sof(fp);
51590@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51591 sp->ssb_stat |= SSB_ST_RESP;
51592 sp->id = fh->fh_seq_id;
51593 } else if (sp->id != fh->fh_seq_id) {
51594- atomic_inc(&mp->stats.seq_not_found);
51595+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51596 goto rel;
51597 }
51598
51599@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51600 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51601
51602 if (!sp)
51603- atomic_inc(&mp->stats.xid_not_found);
51604+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51605 else
51606- atomic_inc(&mp->stats.non_bls_resp);
51607+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51608
51609 fc_frame_free(fp);
51610 }
51611@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51612
51613 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51614 mp = ema->mp;
51615- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51616+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51617 st->fc_no_free_exch_xid +=
51618- atomic_read(&mp->stats.no_free_exch_xid);
51619- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51620- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51621- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51622- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51623+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51624+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51625+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51626+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51627+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51628 }
51629 }
51630 EXPORT_SYMBOL(fc_exch_update_stats);
51631diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51632index 766098a..1c6c971 100644
51633--- a/drivers/scsi/libsas/sas_ata.c
51634+++ b/drivers/scsi/libsas/sas_ata.c
51635@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
51636 .postreset = ata_std_postreset,
51637 .error_handler = ata_std_error_handler,
51638 .post_internal_cmd = sas_ata_post_internal,
51639- .qc_defer = ata_std_qc_defer,
51640+ .qc_defer = ata_std_qc_defer,
51641 .qc_prep = ata_noop_qc_prep,
51642 .qc_issue = sas_ata_qc_issue,
51643 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51644diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51645index 434e903..5a4a79b 100644
51646--- a/drivers/scsi/lpfc/lpfc.h
51647+++ b/drivers/scsi/lpfc/lpfc.h
51648@@ -430,7 +430,7 @@ struct lpfc_vport {
51649 struct dentry *debug_nodelist;
51650 struct dentry *vport_debugfs_root;
51651 struct lpfc_debugfs_trc *disc_trc;
51652- atomic_t disc_trc_cnt;
51653+ atomic_unchecked_t disc_trc_cnt;
51654 #endif
51655 uint8_t stat_data_enabled;
51656 uint8_t stat_data_blocked;
51657@@ -880,8 +880,8 @@ struct lpfc_hba {
51658 struct timer_list fabric_block_timer;
51659 unsigned long bit_flags;
51660 #define FABRIC_COMANDS_BLOCKED 0
51661- atomic_t num_rsrc_err;
51662- atomic_t num_cmd_success;
51663+ atomic_unchecked_t num_rsrc_err;
51664+ atomic_unchecked_t num_cmd_success;
51665 unsigned long last_rsrc_error_time;
51666 unsigned long last_ramp_down_time;
51667 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51668@@ -916,7 +916,7 @@ struct lpfc_hba {
51669
51670 struct dentry *debug_slow_ring_trc;
51671 struct lpfc_debugfs_trc *slow_ring_trc;
51672- atomic_t slow_ring_trc_cnt;
51673+ atomic_unchecked_t slow_ring_trc_cnt;
51674 /* iDiag debugfs sub-directory */
51675 struct dentry *idiag_root;
51676 struct dentry *idiag_pci_cfg;
51677diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51678index b0aedce..89c6ca6 100644
51679--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51680+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51681@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51682
51683 #include <linux/debugfs.h>
51684
51685-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51686+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51687 static unsigned long lpfc_debugfs_start_time = 0L;
51688
51689 /* iDiag */
51690@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51691 lpfc_debugfs_enable = 0;
51692
51693 len = 0;
51694- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51695+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51696 (lpfc_debugfs_max_disc_trc - 1);
51697 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51698 dtp = vport->disc_trc + i;
51699@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51700 lpfc_debugfs_enable = 0;
51701
51702 len = 0;
51703- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51704+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51705 (lpfc_debugfs_max_slow_ring_trc - 1);
51706 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51707 dtp = phba->slow_ring_trc + i;
51708@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51709 !vport || !vport->disc_trc)
51710 return;
51711
51712- index = atomic_inc_return(&vport->disc_trc_cnt) &
51713+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51714 (lpfc_debugfs_max_disc_trc - 1);
51715 dtp = vport->disc_trc + index;
51716 dtp->fmt = fmt;
51717 dtp->data1 = data1;
51718 dtp->data2 = data2;
51719 dtp->data3 = data3;
51720- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51721+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51722 dtp->jif = jiffies;
51723 #endif
51724 return;
51725@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51726 !phba || !phba->slow_ring_trc)
51727 return;
51728
51729- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51730+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51731 (lpfc_debugfs_max_slow_ring_trc - 1);
51732 dtp = phba->slow_ring_trc + index;
51733 dtp->fmt = fmt;
51734 dtp->data1 = data1;
51735 dtp->data2 = data2;
51736 dtp->data3 = data3;
51737- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51738+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51739 dtp->jif = jiffies;
51740 #endif
51741 return;
51742@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51743 "slow_ring buffer\n");
51744 goto debug_failed;
51745 }
51746- atomic_set(&phba->slow_ring_trc_cnt, 0);
51747+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51748 memset(phba->slow_ring_trc, 0,
51749 (sizeof(struct lpfc_debugfs_trc) *
51750 lpfc_debugfs_max_slow_ring_trc));
51751@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51752 "buffer\n");
51753 goto debug_failed;
51754 }
51755- atomic_set(&vport->disc_trc_cnt, 0);
51756+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51757
51758 snprintf(name, sizeof(name), "discovery_trace");
51759 vport->debug_disc_trc =
51760diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51761index a5769a9..718ecc7 100644
51762--- a/drivers/scsi/lpfc/lpfc_init.c
51763+++ b/drivers/scsi/lpfc/lpfc_init.c
51764@@ -11299,8 +11299,10 @@ lpfc_init(void)
51765 "misc_register returned with status %d", error);
51766
51767 if (lpfc_enable_npiv) {
51768- lpfc_transport_functions.vport_create = lpfc_vport_create;
51769- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51770+ pax_open_kernel();
51771+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51772+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51773+ pax_close_kernel();
51774 }
51775 lpfc_transport_template =
51776 fc_attach_transport(&lpfc_transport_functions);
51777diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51778index 7862c55..5aa65df 100644
51779--- a/drivers/scsi/lpfc/lpfc_scsi.c
51780+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51781@@ -382,7 +382,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51782 uint32_t evt_posted;
51783
51784 spin_lock_irqsave(&phba->hbalock, flags);
51785- atomic_inc(&phba->num_rsrc_err);
51786+ atomic_inc_unchecked(&phba->num_rsrc_err);
51787 phba->last_rsrc_error_time = jiffies;
51788
51789 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
51790@@ -423,8 +423,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51791 unsigned long num_rsrc_err, num_cmd_success;
51792 int i;
51793
51794- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51795- num_cmd_success = atomic_read(&phba->num_cmd_success);
51796+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51797+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51798
51799 /*
51800 * The error and success command counters are global per
51801@@ -452,8 +452,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51802 }
51803 }
51804 lpfc_destroy_vport_work_array(phba, vports);
51805- atomic_set(&phba->num_rsrc_err, 0);
51806- atomic_set(&phba->num_cmd_success, 0);
51807+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51808+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51809 }
51810
51811 /**
51812diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51813index dd46101..ca80eb9 100644
51814--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51815+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51816@@ -1559,7 +1559,7 @@ _scsih_get_resync(struct device *dev)
51817 {
51818 struct scsi_device *sdev = to_scsi_device(dev);
51819 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51820- static struct _raid_device *raid_device;
51821+ struct _raid_device *raid_device;
51822 unsigned long flags;
51823 Mpi2RaidVolPage0_t vol_pg0;
51824 Mpi2ConfigReply_t mpi_reply;
51825@@ -1611,7 +1611,7 @@ _scsih_get_state(struct device *dev)
51826 {
51827 struct scsi_device *sdev = to_scsi_device(dev);
51828 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51829- static struct _raid_device *raid_device;
51830+ struct _raid_device *raid_device;
51831 unsigned long flags;
51832 Mpi2RaidVolPage0_t vol_pg0;
51833 Mpi2ConfigReply_t mpi_reply;
51834@@ -6648,7 +6648,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51835 Mpi2EventDataIrOperationStatus_t *event_data =
51836 (Mpi2EventDataIrOperationStatus_t *)
51837 fw_event->event_data;
51838- static struct _raid_device *raid_device;
51839+ struct _raid_device *raid_device;
51840 unsigned long flags;
51841 u16 handle;
51842
51843@@ -7119,7 +7119,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51844 u64 sas_address;
51845 struct _sas_device *sas_device;
51846 struct _sas_node *expander_device;
51847- static struct _raid_device *raid_device;
51848+ struct _raid_device *raid_device;
51849 u8 retry_count;
51850 unsigned long flags;
51851
51852diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51853index 6f3275d..fa5e6b6 100644
51854--- a/drivers/scsi/pmcraid.c
51855+++ b/drivers/scsi/pmcraid.c
51856@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51857 res->scsi_dev = scsi_dev;
51858 scsi_dev->hostdata = res;
51859 res->change_detected = 0;
51860- atomic_set(&res->read_failures, 0);
51861- atomic_set(&res->write_failures, 0);
51862+ atomic_set_unchecked(&res->read_failures, 0);
51863+ atomic_set_unchecked(&res->write_failures, 0);
51864 rc = 0;
51865 }
51866 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51867@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51868
51869 /* If this was a SCSI read/write command keep count of errors */
51870 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51871- atomic_inc(&res->read_failures);
51872+ atomic_inc_unchecked(&res->read_failures);
51873 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51874- atomic_inc(&res->write_failures);
51875+ atomic_inc_unchecked(&res->write_failures);
51876
51877 if (!RES_IS_GSCSI(res->cfg_entry) &&
51878 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51879@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
51880 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51881 * hrrq_id assigned here in queuecommand
51882 */
51883- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51884+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51885 pinstance->num_hrrq;
51886 cmd->cmd_done = pmcraid_io_done;
51887
51888@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
51889 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51890 * hrrq_id assigned here in queuecommand
51891 */
51892- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51893+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51894 pinstance->num_hrrq;
51895
51896 if (request_size) {
51897@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51898
51899 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51900 /* add resources only after host is added into system */
51901- if (!atomic_read(&pinstance->expose_resources))
51902+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51903 return;
51904
51905 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51906@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51907 init_waitqueue_head(&pinstance->reset_wait_q);
51908
51909 atomic_set(&pinstance->outstanding_cmds, 0);
51910- atomic_set(&pinstance->last_message_id, 0);
51911- atomic_set(&pinstance->expose_resources, 0);
51912+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51913+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51914
51915 INIT_LIST_HEAD(&pinstance->free_res_q);
51916 INIT_LIST_HEAD(&pinstance->used_res_q);
51917@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51918 /* Schedule worker thread to handle CCN and take care of adding and
51919 * removing devices to OS
51920 */
51921- atomic_set(&pinstance->expose_resources, 1);
51922+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51923 schedule_work(&pinstance->worker_q);
51924 return rc;
51925
51926diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51927index e1d150f..6c6df44 100644
51928--- a/drivers/scsi/pmcraid.h
51929+++ b/drivers/scsi/pmcraid.h
51930@@ -748,7 +748,7 @@ struct pmcraid_instance {
51931 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51932
51933 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51934- atomic_t last_message_id;
51935+ atomic_unchecked_t last_message_id;
51936
51937 /* configuration table */
51938 struct pmcraid_config_table *cfg_table;
51939@@ -777,7 +777,7 @@ struct pmcraid_instance {
51940 atomic_t outstanding_cmds;
51941
51942 /* should add/delete resources to mid-layer now ?*/
51943- atomic_t expose_resources;
51944+ atomic_unchecked_t expose_resources;
51945
51946
51947
51948@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51949 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51950 };
51951 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51952- atomic_t read_failures; /* count of failed READ commands */
51953- atomic_t write_failures; /* count of failed WRITE commands */
51954+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51955+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51956
51957 /* To indicate add/delete/modify during CCN */
51958 u8 change_detected;
51959diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51960index 16fe519..3b1ec82 100644
51961--- a/drivers/scsi/qla2xxx/qla_attr.c
51962+++ b/drivers/scsi/qla2xxx/qla_attr.c
51963@@ -2188,7 +2188,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51964 return 0;
51965 }
51966
51967-struct fc_function_template qla2xxx_transport_functions = {
51968+fc_function_template_no_const qla2xxx_transport_functions = {
51969
51970 .show_host_node_name = 1,
51971 .show_host_port_name = 1,
51972@@ -2236,7 +2236,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51973 .bsg_timeout = qla24xx_bsg_timeout,
51974 };
51975
51976-struct fc_function_template qla2xxx_transport_vport_functions = {
51977+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51978
51979 .show_host_node_name = 1,
51980 .show_host_port_name = 1,
51981diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51982index d646540..5b13554 100644
51983--- a/drivers/scsi/qla2xxx/qla_gbl.h
51984+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51985@@ -569,8 +569,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
51986 struct device_attribute;
51987 extern struct device_attribute *qla2x00_host_attrs[];
51988 struct fc_function_template;
51989-extern struct fc_function_template qla2xxx_transport_functions;
51990-extern struct fc_function_template qla2xxx_transport_vport_functions;
51991+extern fc_function_template_no_const qla2xxx_transport_functions;
51992+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
51993 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
51994 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
51995 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
51996diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
51997index 8252c0e..613adad 100644
51998--- a/drivers/scsi/qla2xxx/qla_os.c
51999+++ b/drivers/scsi/qla2xxx/qla_os.c
52000@@ -1493,8 +1493,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
52001 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
52002 /* Ok, a 64bit DMA mask is applicable. */
52003 ha->flags.enable_64bit_addressing = 1;
52004- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52005- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52006+ pax_open_kernel();
52007+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52008+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52009+ pax_close_kernel();
52010 return;
52011 }
52012 }
52013diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
52014index 8f6d0fb..1b21097 100644
52015--- a/drivers/scsi/qla4xxx/ql4_def.h
52016+++ b/drivers/scsi/qla4xxx/ql4_def.h
52017@@ -305,7 +305,7 @@ struct ddb_entry {
52018 * (4000 only) */
52019 atomic_t relogin_timer; /* Max Time to wait for
52020 * relogin to complete */
52021- atomic_t relogin_retry_count; /* Num of times relogin has been
52022+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
52023 * retried */
52024 uint32_t default_time2wait; /* Default Min time between
52025 * relogins (+aens) */
52026diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
52027index 199fcf7..3c3a918 100644
52028--- a/drivers/scsi/qla4xxx/ql4_os.c
52029+++ b/drivers/scsi/qla4xxx/ql4_os.c
52030@@ -4496,12 +4496,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
52031 */
52032 if (!iscsi_is_session_online(cls_sess)) {
52033 /* Reset retry relogin timer */
52034- atomic_inc(&ddb_entry->relogin_retry_count);
52035+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
52036 DEBUG2(ql4_printk(KERN_INFO, ha,
52037 "%s: index[%d] relogin timed out-retrying"
52038 " relogin (%d), retry (%d)\n", __func__,
52039 ddb_entry->fw_ddb_index,
52040- atomic_read(&ddb_entry->relogin_retry_count),
52041+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
52042 ddb_entry->default_time2wait + 4));
52043 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
52044 atomic_set(&ddb_entry->retry_relogin_timer,
52045@@ -6609,7 +6609,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
52046
52047 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
52048 atomic_set(&ddb_entry->relogin_timer, 0);
52049- atomic_set(&ddb_entry->relogin_retry_count, 0);
52050+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
52051 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
52052 ddb_entry->default_relogin_timeout =
52053 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
52054diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
52055index d81f3cc..0093e5b 100644
52056--- a/drivers/scsi/scsi.c
52057+++ b/drivers/scsi/scsi.c
52058@@ -645,7 +645,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
52059 struct Scsi_Host *host = cmd->device->host;
52060 int rtn = 0;
52061
52062- atomic_inc(&cmd->device->iorequest_cnt);
52063+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52064
52065 /* check if the device is still usable */
52066 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
52067diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
52068index 7cb8c73..14561b5 100644
52069--- a/drivers/scsi/scsi_lib.c
52070+++ b/drivers/scsi/scsi_lib.c
52071@@ -1581,7 +1581,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
52072 shost = sdev->host;
52073 scsi_init_cmd_errh(cmd);
52074 cmd->result = DID_NO_CONNECT << 16;
52075- atomic_inc(&cmd->device->iorequest_cnt);
52076+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52077
52078 /*
52079 * SCSI request completion path will do scsi_device_unbusy(),
52080@@ -1604,9 +1604,9 @@ static void scsi_softirq_done(struct request *rq)
52081
52082 INIT_LIST_HEAD(&cmd->eh_entry);
52083
52084- atomic_inc(&cmd->device->iodone_cnt);
52085+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
52086 if (cmd->result)
52087- atomic_inc(&cmd->device->ioerr_cnt);
52088+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
52089
52090 disposition = scsi_decide_disposition(cmd);
52091 if (disposition != SUCCESS &&
52092diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
52093index 8b4105a..1f58363 100644
52094--- a/drivers/scsi/scsi_sysfs.c
52095+++ b/drivers/scsi/scsi_sysfs.c
52096@@ -805,7 +805,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
52097 char *buf) \
52098 { \
52099 struct scsi_device *sdev = to_scsi_device(dev); \
52100- unsigned long long count = atomic_read(&sdev->field); \
52101+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
52102 return snprintf(buf, 20, "0x%llx\n", count); \
52103 } \
52104 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
52105diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
52106index 5d6f348..18778a6b 100644
52107--- a/drivers/scsi/scsi_transport_fc.c
52108+++ b/drivers/scsi/scsi_transport_fc.c
52109@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
52110 * Netlink Infrastructure
52111 */
52112
52113-static atomic_t fc_event_seq;
52114+static atomic_unchecked_t fc_event_seq;
52115
52116 /**
52117 * fc_get_event_number - Obtain the next sequential FC event number
52118@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
52119 u32
52120 fc_get_event_number(void)
52121 {
52122- return atomic_add_return(1, &fc_event_seq);
52123+ return atomic_add_return_unchecked(1, &fc_event_seq);
52124 }
52125 EXPORT_SYMBOL(fc_get_event_number);
52126
52127@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
52128 {
52129 int error;
52130
52131- atomic_set(&fc_event_seq, 0);
52132+ atomic_set_unchecked(&fc_event_seq, 0);
52133
52134 error = transport_class_register(&fc_host_class);
52135 if (error)
52136@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
52137 char *cp;
52138
52139 *val = simple_strtoul(buf, &cp, 0);
52140- if ((*cp && (*cp != '\n')) || (*val < 0))
52141+ if (*cp && (*cp != '\n'))
52142 return -EINVAL;
52143 /*
52144 * Check for overflow; dev_loss_tmo is u32
52145diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
52146index 67d43e3..8cee73c 100644
52147--- a/drivers/scsi/scsi_transport_iscsi.c
52148+++ b/drivers/scsi/scsi_transport_iscsi.c
52149@@ -79,7 +79,7 @@ struct iscsi_internal {
52150 struct transport_container session_cont;
52151 };
52152
52153-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
52154+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
52155 static struct workqueue_struct *iscsi_eh_timer_workq;
52156
52157 static DEFINE_IDA(iscsi_sess_ida);
52158@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
52159 int err;
52160
52161 ihost = shost->shost_data;
52162- session->sid = atomic_add_return(1, &iscsi_session_nr);
52163+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
52164
52165 if (target_id == ISCSI_MAX_TARGET) {
52166 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
52167@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
52168 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
52169 ISCSI_TRANSPORT_VERSION);
52170
52171- atomic_set(&iscsi_session_nr, 0);
52172+ atomic_set_unchecked(&iscsi_session_nr, 0);
52173
52174 err = class_register(&iscsi_transport_class);
52175 if (err)
52176diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
52177index ae45bd9..c32a586 100644
52178--- a/drivers/scsi/scsi_transport_srp.c
52179+++ b/drivers/scsi/scsi_transport_srp.c
52180@@ -35,7 +35,7 @@
52181 #include "scsi_priv.h"
52182
52183 struct srp_host_attrs {
52184- atomic_t next_port_id;
52185+ atomic_unchecked_t next_port_id;
52186 };
52187 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
52188
52189@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
52190 struct Scsi_Host *shost = dev_to_shost(dev);
52191 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
52192
52193- atomic_set(&srp_host->next_port_id, 0);
52194+ atomic_set_unchecked(&srp_host->next_port_id, 0);
52195 return 0;
52196 }
52197
52198@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
52199 rport_fast_io_fail_timedout);
52200 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
52201
52202- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
52203+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
52204 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
52205
52206 transport_setup_device(&rport->dev);
52207diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
52208index 2c2041c..9d94085 100644
52209--- a/drivers/scsi/sd.c
52210+++ b/drivers/scsi/sd.c
52211@@ -3002,7 +3002,7 @@ static int sd_probe(struct device *dev)
52212 sdkp->disk = gd;
52213 sdkp->index = index;
52214 atomic_set(&sdkp->openers, 0);
52215- atomic_set(&sdkp->device->ioerr_cnt, 0);
52216+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
52217
52218 if (!sdp->request_queue->rq_timeout) {
52219 if (sdp->type != TYPE_MOD)
52220diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
52221index 01cf888..59e0475 100644
52222--- a/drivers/scsi/sg.c
52223+++ b/drivers/scsi/sg.c
52224@@ -1138,7 +1138,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
52225 sdp->disk->disk_name,
52226 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
52227 NULL,
52228- (char *)arg);
52229+ (char __user *)arg);
52230 case BLKTRACESTART:
52231 return blk_trace_startstop(sdp->device->request_queue, 1);
52232 case BLKTRACESTOP:
52233diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
52234index 11a5043..e36f04c 100644
52235--- a/drivers/soc/tegra/fuse/fuse-tegra.c
52236+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
52237@@ -70,7 +70,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
52238 return i;
52239 }
52240
52241-static struct bin_attribute fuse_bin_attr = {
52242+static bin_attribute_no_const fuse_bin_attr = {
52243 .attr = { .name = "fuse", .mode = S_IRUGO, },
52244 .read = fuse_read,
52245 };
52246diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
52247index 2bf2dfa..b4d9008 100644
52248--- a/drivers/spi/spi.c
52249+++ b/drivers/spi/spi.c
52250@@ -2210,7 +2210,7 @@ int spi_bus_unlock(struct spi_master *master)
52251 EXPORT_SYMBOL_GPL(spi_bus_unlock);
52252
52253 /* portable code must never pass more than 32 bytes */
52254-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
52255+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
52256
52257 static u8 *buf;
52258
52259diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
52260index b41429f..2de5373 100644
52261--- a/drivers/staging/android/timed_output.c
52262+++ b/drivers/staging/android/timed_output.c
52263@@ -25,7 +25,7 @@
52264 #include "timed_output.h"
52265
52266 static struct class *timed_output_class;
52267-static atomic_t device_count;
52268+static atomic_unchecked_t device_count;
52269
52270 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
52271 char *buf)
52272@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
52273 timed_output_class = class_create(THIS_MODULE, "timed_output");
52274 if (IS_ERR(timed_output_class))
52275 return PTR_ERR(timed_output_class);
52276- atomic_set(&device_count, 0);
52277+ atomic_set_unchecked(&device_count, 0);
52278 timed_output_class->dev_groups = timed_output_groups;
52279 }
52280
52281@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
52282 if (ret < 0)
52283 return ret;
52284
52285- tdev->index = atomic_inc_return(&device_count);
52286+ tdev->index = atomic_inc_return_unchecked(&device_count);
52287 tdev->dev = device_create(timed_output_class, NULL,
52288 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
52289 if (IS_ERR(tdev->dev))
52290diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
52291index 001348c..cfaac8a 100644
52292--- a/drivers/staging/gdm724x/gdm_tty.c
52293+++ b/drivers/staging/gdm724x/gdm_tty.c
52294@@ -44,7 +44,7 @@
52295 #define gdm_tty_send_control(n, r, v, d, l) (\
52296 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
52297
52298-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
52299+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
52300
52301 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
52302 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
52303diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
52304index 6b22106..6c6e641 100644
52305--- a/drivers/staging/imx-drm/imx-drm-core.c
52306+++ b/drivers/staging/imx-drm/imx-drm-core.c
52307@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
52308 if (imxdrm->pipes >= MAX_CRTC)
52309 return -EINVAL;
52310
52311- if (imxdrm->drm->open_count)
52312+ if (local_read(&imxdrm->drm->open_count))
52313 return -EBUSY;
52314
52315 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
52316diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
52317index 503b2d7..c918745 100644
52318--- a/drivers/staging/line6/driver.c
52319+++ b/drivers/staging/line6/driver.c
52320@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
52321 {
52322 struct usb_device *usbdev = line6->usbdev;
52323 int ret;
52324- unsigned char len;
52325+ unsigned char *plen;
52326
52327 /* query the serial number: */
52328 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
52329@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
52330 return ret;
52331 }
52332
52333+ plen = kmalloc(1, GFP_KERNEL);
52334+ if (plen == NULL)
52335+ return -ENOMEM;
52336+
52337 /* Wait for data length. We'll get 0xff until length arrives. */
52338 do {
52339 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
52340 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
52341 USB_DIR_IN,
52342- 0x0012, 0x0000, &len, 1,
52343+ 0x0012, 0x0000, plen, 1,
52344 LINE6_TIMEOUT * HZ);
52345 if (ret < 0) {
52346 dev_err(line6->ifcdev,
52347 "receive length failed (error %d)\n", ret);
52348+ kfree(plen);
52349 return ret;
52350 }
52351- } while (len == 0xff);
52352+ } while (*plen == 0xff);
52353
52354- if (len != datalen) {
52355+ if (*plen != datalen) {
52356 /* should be equal or something went wrong */
52357 dev_err(line6->ifcdev,
52358 "length mismatch (expected %d, got %d)\n",
52359- (int)datalen, (int)len);
52360+ (int)datalen, (int)*plen);
52361+ kfree(plen);
52362 return -EINVAL;
52363 }
52364+ kfree(plen);
52365
52366 /* receive the result: */
52367 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
52368diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
52369index bcce919..f30fcf9 100644
52370--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
52371+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
52372@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
52373 return 0;
52374 }
52375
52376-sfw_test_client_ops_t brw_test_client;
52377-void brw_init_test_client(void)
52378-{
52379- brw_test_client.tso_init = brw_client_init;
52380- brw_test_client.tso_fini = brw_client_fini;
52381- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
52382- brw_test_client.tso_done_rpc = brw_client_done_rpc;
52383+sfw_test_client_ops_t brw_test_client = {
52384+ .tso_init = brw_client_init,
52385+ .tso_fini = brw_client_fini,
52386+ .tso_prep_rpc = brw_client_prep_rpc,
52387+ .tso_done_rpc = brw_client_done_rpc,
52388 };
52389
52390 srpc_service_t brw_test_service;
52391diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
52392index 7e83dff..1f9a545 100644
52393--- a/drivers/staging/lustre/lnet/selftest/framework.c
52394+++ b/drivers/staging/lustre/lnet/selftest/framework.c
52395@@ -1633,12 +1633,10 @@ static srpc_service_t sfw_services[] =
52396
52397 extern sfw_test_client_ops_t ping_test_client;
52398 extern srpc_service_t ping_test_service;
52399-extern void ping_init_test_client(void);
52400 extern void ping_init_test_service(void);
52401
52402 extern sfw_test_client_ops_t brw_test_client;
52403 extern srpc_service_t brw_test_service;
52404-extern void brw_init_test_client(void);
52405 extern void brw_init_test_service(void);
52406
52407
52408@@ -1682,12 +1680,10 @@ sfw_startup (void)
52409 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
52410 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
52411
52412- brw_init_test_client();
52413 brw_init_test_service();
52414 rc = sfw_register_test(&brw_test_service, &brw_test_client);
52415 LASSERT (rc == 0);
52416
52417- ping_init_test_client();
52418 ping_init_test_service();
52419 rc = sfw_register_test(&ping_test_service, &ping_test_client);
52420 LASSERT (rc == 0);
52421diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
52422index 750cac4..e4d751f 100644
52423--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
52424+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
52425@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
52426 return 0;
52427 }
52428
52429-sfw_test_client_ops_t ping_test_client;
52430-void ping_init_test_client(void)
52431-{
52432- ping_test_client.tso_init = ping_client_init;
52433- ping_test_client.tso_fini = ping_client_fini;
52434- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
52435- ping_test_client.tso_done_rpc = ping_client_done_rpc;
52436-}
52437+sfw_test_client_ops_t ping_test_client = {
52438+ .tso_init = ping_client_init,
52439+ .tso_fini = ping_client_fini,
52440+ .tso_prep_rpc = ping_client_prep_rpc,
52441+ .tso_done_rpc = ping_client_done_rpc,
52442+};
52443
52444 srpc_service_t ping_test_service;
52445 void ping_init_test_service(void)
52446diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52447index 30b1812f..9e5bd0b 100644
52448--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
52449+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52450@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
52451 ldlm_completion_callback lcs_completion;
52452 ldlm_blocking_callback lcs_blocking;
52453 ldlm_glimpse_callback lcs_glimpse;
52454-};
52455+} __no_const;
52456
52457 /* ldlm_lockd.c */
52458 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
52459diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
52460index 489bdd3..65058081 100644
52461--- a/drivers/staging/lustre/lustre/include/obd.h
52462+++ b/drivers/staging/lustre/lustre/include/obd.h
52463@@ -1438,7 +1438,7 @@ struct md_ops {
52464 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
52465 * wrapper function in include/linux/obd_class.h.
52466 */
52467-};
52468+} __no_const;
52469
52470 struct lsm_operations {
52471 void (*lsm_free)(struct lov_stripe_md *);
52472diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52473index b798daa..b28ca8f 100644
52474--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52475+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52476@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
52477 int added = (mode == LCK_NL);
52478 int overlaps = 0;
52479 int splitted = 0;
52480- const struct ldlm_callback_suite null_cbs = { NULL };
52481+ const struct ldlm_callback_suite null_cbs = { };
52482
52483 CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
52484 *flags, new->l_policy_data.l_flock.owner,
52485diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52486index 13a9266..3439390 100644
52487--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52488+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52489@@ -235,7 +235,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
52490 void __user *buffer, size_t *lenp, loff_t *ppos)
52491 {
52492 int rc, max_delay_cs;
52493- struct ctl_table dummy = *table;
52494+ ctl_table_no_const dummy = *table;
52495 long d;
52496
52497 dummy.data = &max_delay_cs;
52498@@ -267,7 +267,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
52499 void __user *buffer, size_t *lenp, loff_t *ppos)
52500 {
52501 int rc, min_delay_cs;
52502- struct ctl_table dummy = *table;
52503+ ctl_table_no_const dummy = *table;
52504 long d;
52505
52506 dummy.data = &min_delay_cs;
52507@@ -299,7 +299,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
52508 void __user *buffer, size_t *lenp, loff_t *ppos)
52509 {
52510 int rc, backoff;
52511- struct ctl_table dummy = *table;
52512+ ctl_table_no_const dummy = *table;
52513
52514 dummy.data = &backoff;
52515 dummy.proc_handler = &proc_dointvec;
52516diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52517index 3396858..c0bd996 100644
52518--- a/drivers/staging/lustre/lustre/libcfs/module.c
52519+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52520@@ -314,11 +314,11 @@ out:
52521
52522
52523 struct cfs_psdev_ops libcfs_psdev_ops = {
52524- libcfs_psdev_open,
52525- libcfs_psdev_release,
52526- NULL,
52527- NULL,
52528- libcfs_ioctl
52529+ .p_open = libcfs_psdev_open,
52530+ .p_close = libcfs_psdev_release,
52531+ .p_read = NULL,
52532+ .p_write = NULL,
52533+ .p_ioctl = libcfs_ioctl
52534 };
52535
52536 extern int insert_proc(void);
52537diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
52538index efa2faf..03a9836 100644
52539--- a/drivers/staging/lustre/lustre/llite/dir.c
52540+++ b/drivers/staging/lustre/lustre/llite/dir.c
52541@@ -659,7 +659,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
52542 int mode;
52543 int err;
52544
52545- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
52546+ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
52547 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
52548 strlen(filename), mode, LUSTRE_OPC_MKDIR,
52549 lump);
52550diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52551index a0f4868..139f1fb 100644
52552--- a/drivers/staging/octeon/ethernet-rx.c
52553+++ b/drivers/staging/octeon/ethernet-rx.c
52554@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52555 /* Increment RX stats for virtual ports */
52556 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52557 #ifdef CONFIG_64BIT
52558- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
52559- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
52560+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
52561+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
52562 #else
52563- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
52564- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
52565+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
52566+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
52567 #endif
52568 }
52569 netif_receive_skb(skb);
52570@@ -432,9 +432,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52571 dev->name);
52572 */
52573 #ifdef CONFIG_64BIT
52574- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
52575+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52576 #else
52577- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
52578+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
52579 #endif
52580 dev_kfree_skb_irq(skb);
52581 }
52582diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52583index 2aa7235..ba3c205 100644
52584--- a/drivers/staging/octeon/ethernet.c
52585+++ b/drivers/staging/octeon/ethernet.c
52586@@ -247,11 +247,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52587 * since the RX tasklet also increments it.
52588 */
52589 #ifdef CONFIG_64BIT
52590- atomic64_add(rx_status.dropped_packets,
52591- (atomic64_t *)&priv->stats.rx_dropped);
52592+ atomic64_add_unchecked(rx_status.dropped_packets,
52593+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52594 #else
52595- atomic_add(rx_status.dropped_packets,
52596- (atomic_t *)&priv->stats.rx_dropped);
52597+ atomic_add_unchecked(rx_status.dropped_packets,
52598+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52599 #endif
52600 }
52601
52602diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52603index 56d5c50..a14f4db 100644
52604--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52605+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52606@@ -234,7 +234,7 @@ struct hal_ops {
52607
52608 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52609 void (*hal_reset_security_engine)(struct adapter *adapter);
52610-};
52611+} __no_const;
52612
52613 enum rt_eeprom_type {
52614 EEPROM_93C46,
52615diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52616index dc23395..cf7e9b1 100644
52617--- a/drivers/staging/rtl8712/rtl871x_io.h
52618+++ b/drivers/staging/rtl8712/rtl871x_io.h
52619@@ -108,7 +108,7 @@ struct _io_ops {
52620 u8 *pmem);
52621 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52622 u8 *pmem);
52623-};
52624+} __no_const;
52625
52626 struct io_req {
52627 struct list_head list;
52628diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52629index 2bf2e2f..84421c9 100644
52630--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52631+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52632@@ -228,7 +228,7 @@ typedef struct {
52633 void (*device_resume)(ulong busNo, ulong devNo);
52634 int (*get_channel_info)(uuid_le typeGuid, ulong *minSize,
52635 ulong *maxSize);
52636-} VISORCHIPSET_BUSDEV_NOTIFIERS;
52637+} __no_const VISORCHIPSET_BUSDEV_NOTIFIERS;
52638
52639 /* These functions live inside visorchipset, and will be called to indicate
52640 * responses to specific events (by code outside of visorchipset).
52641@@ -243,7 +243,7 @@ typedef struct {
52642 void (*device_destroy)(ulong busNo, ulong devNo, int response);
52643 void (*device_pause)(ulong busNo, ulong devNo, int response);
52644 void (*device_resume)(ulong busNo, ulong devNo, int response);
52645-} VISORCHIPSET_BUSDEV_RESPONDERS;
52646+} __no_const VISORCHIPSET_BUSDEV_RESPONDERS;
52647
52648 /** Register functions (in the bus driver) to get called by visorchipset
52649 * whenever a bus or device appears for which this service partition is
52650diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
52651index 164136b..7244df5 100644
52652--- a/drivers/staging/vt6655/hostap.c
52653+++ b/drivers/staging/vt6655/hostap.c
52654@@ -68,14 +68,13 @@ static int msglevel = MSG_LEVEL_INFO;
52655 *
52656 */
52657
52658+static net_device_ops_no_const apdev_netdev_ops;
52659+
52660 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
52661 {
52662 PSDevice apdev_priv;
52663 struct net_device *dev = pDevice->dev;
52664 int ret;
52665- const struct net_device_ops apdev_netdev_ops = {
52666- .ndo_start_xmit = pDevice->tx_80211,
52667- };
52668
52669 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
52670
52671@@ -87,6 +86,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
52672 *apdev_priv = *pDevice;
52673 eth_hw_addr_inherit(pDevice->apdev, dev);
52674
52675+ /* only half broken now */
52676+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
52677 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
52678
52679 pDevice->apdev->type = ARPHRD_IEEE80211;
52680diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52681index e7e9372..161f530 100644
52682--- a/drivers/target/sbp/sbp_target.c
52683+++ b/drivers/target/sbp/sbp_target.c
52684@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52685
52686 #define SESSION_MAINTENANCE_INTERVAL HZ
52687
52688-static atomic_t login_id = ATOMIC_INIT(0);
52689+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52690
52691 static void session_maintenance_work(struct work_struct *);
52692 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52693@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52694 login->lun = se_lun;
52695 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52696 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52697- login->login_id = atomic_inc_return(&login_id);
52698+ login->login_id = atomic_inc_return_unchecked(&login_id);
52699
52700 login->tgt_agt = sbp_target_agent_register(login);
52701 if (IS_ERR(login->tgt_agt)) {
52702diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52703index 15a1c13..6c9b96b 100644
52704--- a/drivers/target/target_core_device.c
52705+++ b/drivers/target/target_core_device.c
52706@@ -1526,7 +1526,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52707 spin_lock_init(&dev->se_tmr_lock);
52708 spin_lock_init(&dev->qf_cmd_lock);
52709 sema_init(&dev->caw_sem, 1);
52710- atomic_set(&dev->dev_ordered_id, 0);
52711+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52712 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52713 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52714 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52715diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52716index a9c77b5..024a07d 100644
52717--- a/drivers/target/target_core_transport.c
52718+++ b/drivers/target/target_core_transport.c
52719@@ -1165,7 +1165,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52720 * Used to determine when ORDERED commands should go from
52721 * Dormant to Active status.
52722 */
52723- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52724+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52725 smp_mb__after_atomic();
52726 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52727 cmd->se_ordered_id, cmd->sam_task_attr,
52728diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52729index 4b2b999..cad9fa5 100644
52730--- a/drivers/thermal/of-thermal.c
52731+++ b/drivers/thermal/of-thermal.c
52732@@ -30,6 +30,7 @@
52733 #include <linux/err.h>
52734 #include <linux/export.h>
52735 #include <linux/string.h>
52736+#include <linux/mm.h>
52737
52738 #include "thermal_core.h"
52739
52740@@ -341,8 +342,10 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52741 tz->get_trend = get_trend;
52742 tz->sensor_data = data;
52743
52744- tzd->ops->get_temp = of_thermal_get_temp;
52745- tzd->ops->get_trend = of_thermal_get_trend;
52746+ pax_open_kernel();
52747+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52748+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52749+ pax_close_kernel();
52750 mutex_unlock(&tzd->lock);
52751
52752 return tzd;
52753@@ -461,8 +464,10 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52754 return;
52755
52756 mutex_lock(&tzd->lock);
52757- tzd->ops->get_temp = NULL;
52758- tzd->ops->get_trend = NULL;
52759+ pax_open_kernel();
52760+ *(void **)&tzd->ops->get_temp = NULL;
52761+ *(void **)&tzd->ops->get_trend = NULL;
52762+ pax_close_kernel();
52763
52764 tz->get_temp = NULL;
52765 tz->get_trend = NULL;
52766diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52767index fd66f57..48e6376 100644
52768--- a/drivers/tty/cyclades.c
52769+++ b/drivers/tty/cyclades.c
52770@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52771 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52772 info->port.count);
52773 #endif
52774- info->port.count++;
52775+ atomic_inc(&info->port.count);
52776 #ifdef CY_DEBUG_COUNT
52777 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52778- current->pid, info->port.count);
52779+ current->pid, atomic_read(&info->port.count));
52780 #endif
52781
52782 /*
52783@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52784 for (j = 0; j < cy_card[i].nports; j++) {
52785 info = &cy_card[i].ports[j];
52786
52787- if (info->port.count) {
52788+ if (atomic_read(&info->port.count)) {
52789 /* XXX is the ldisc num worth this? */
52790 struct tty_struct *tty;
52791 struct tty_ldisc *ld;
52792diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52793index 4fcec1d..5a036f7 100644
52794--- a/drivers/tty/hvc/hvc_console.c
52795+++ b/drivers/tty/hvc/hvc_console.c
52796@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52797
52798 spin_lock_irqsave(&hp->port.lock, flags);
52799 /* Check and then increment for fast path open. */
52800- if (hp->port.count++ > 0) {
52801+ if (atomic_inc_return(&hp->port.count) > 1) {
52802 spin_unlock_irqrestore(&hp->port.lock, flags);
52803 hvc_kick();
52804 return 0;
52805@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52806
52807 spin_lock_irqsave(&hp->port.lock, flags);
52808
52809- if (--hp->port.count == 0) {
52810+ if (atomic_dec_return(&hp->port.count) == 0) {
52811 spin_unlock_irqrestore(&hp->port.lock, flags);
52812 /* We are done with the tty pointer now. */
52813 tty_port_tty_set(&hp->port, NULL);
52814@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52815 */
52816 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52817 } else {
52818- if (hp->port.count < 0)
52819+ if (atomic_read(&hp->port.count) < 0)
52820 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52821- hp->vtermno, hp->port.count);
52822+ hp->vtermno, atomic_read(&hp->port.count));
52823 spin_unlock_irqrestore(&hp->port.lock, flags);
52824 }
52825 }
52826@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52827 * open->hangup case this can be called after the final close so prevent
52828 * that from happening for now.
52829 */
52830- if (hp->port.count <= 0) {
52831+ if (atomic_read(&hp->port.count) <= 0) {
52832 spin_unlock_irqrestore(&hp->port.lock, flags);
52833 return;
52834 }
52835
52836- hp->port.count = 0;
52837+ atomic_set(&hp->port.count, 0);
52838 spin_unlock_irqrestore(&hp->port.lock, flags);
52839 tty_port_tty_set(&hp->port, NULL);
52840
52841@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52842 return -EPIPE;
52843
52844 /* FIXME what's this (unprotected) check for? */
52845- if (hp->port.count <= 0)
52846+ if (atomic_read(&hp->port.count) <= 0)
52847 return -EIO;
52848
52849 spin_lock_irqsave(&hp->lock, flags);
52850diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52851index 81e939e..95ead10 100644
52852--- a/drivers/tty/hvc/hvcs.c
52853+++ b/drivers/tty/hvc/hvcs.c
52854@@ -83,6 +83,7 @@
52855 #include <asm/hvcserver.h>
52856 #include <asm/uaccess.h>
52857 #include <asm/vio.h>
52858+#include <asm/local.h>
52859
52860 /*
52861 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52862@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52863
52864 spin_lock_irqsave(&hvcsd->lock, flags);
52865
52866- if (hvcsd->port.count > 0) {
52867+ if (atomic_read(&hvcsd->port.count) > 0) {
52868 spin_unlock_irqrestore(&hvcsd->lock, flags);
52869 printk(KERN_INFO "HVCS: vterm state unchanged. "
52870 "The hvcs device node is still in use.\n");
52871@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52872 }
52873 }
52874
52875- hvcsd->port.count = 0;
52876+ atomic_set(&hvcsd->port.count, 0);
52877 hvcsd->port.tty = tty;
52878 tty->driver_data = hvcsd;
52879
52880@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52881 unsigned long flags;
52882
52883 spin_lock_irqsave(&hvcsd->lock, flags);
52884- hvcsd->port.count++;
52885+ atomic_inc(&hvcsd->port.count);
52886 hvcsd->todo_mask |= HVCS_SCHED_READ;
52887 spin_unlock_irqrestore(&hvcsd->lock, flags);
52888
52889@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52890 hvcsd = tty->driver_data;
52891
52892 spin_lock_irqsave(&hvcsd->lock, flags);
52893- if (--hvcsd->port.count == 0) {
52894+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52895
52896 vio_disable_interrupts(hvcsd->vdev);
52897
52898@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52899
52900 free_irq(irq, hvcsd);
52901 return;
52902- } else if (hvcsd->port.count < 0) {
52903+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52904 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52905 " is missmanaged.\n",
52906- hvcsd->vdev->unit_address, hvcsd->port.count);
52907+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52908 }
52909
52910 spin_unlock_irqrestore(&hvcsd->lock, flags);
52911@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52912
52913 spin_lock_irqsave(&hvcsd->lock, flags);
52914 /* Preserve this so that we know how many kref refs to put */
52915- temp_open_count = hvcsd->port.count;
52916+ temp_open_count = atomic_read(&hvcsd->port.count);
52917
52918 /*
52919 * Don't kref put inside the spinlock because the destruction
52920@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52921 tty->driver_data = NULL;
52922 hvcsd->port.tty = NULL;
52923
52924- hvcsd->port.count = 0;
52925+ atomic_set(&hvcsd->port.count, 0);
52926
52927 /* This will drop any buffered data on the floor which is OK in a hangup
52928 * scenario. */
52929@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52930 * the middle of a write operation? This is a crummy place to do this
52931 * but we want to keep it all in the spinlock.
52932 */
52933- if (hvcsd->port.count <= 0) {
52934+ if (atomic_read(&hvcsd->port.count) <= 0) {
52935 spin_unlock_irqrestore(&hvcsd->lock, flags);
52936 return -ENODEV;
52937 }
52938@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52939 {
52940 struct hvcs_struct *hvcsd = tty->driver_data;
52941
52942- if (!hvcsd || hvcsd->port.count <= 0)
52943+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52944 return 0;
52945
52946 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
52947diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52948index 4190199..06d5bfa 100644
52949--- a/drivers/tty/hvc/hvsi.c
52950+++ b/drivers/tty/hvc/hvsi.c
52951@@ -85,7 +85,7 @@ struct hvsi_struct {
52952 int n_outbuf;
52953 uint32_t vtermno;
52954 uint32_t virq;
52955- atomic_t seqno; /* HVSI packet sequence number */
52956+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52957 uint16_t mctrl;
52958 uint8_t state; /* HVSI protocol state */
52959 uint8_t flags;
52960@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52961
52962 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52963 packet.hdr.len = sizeof(struct hvsi_query_response);
52964- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52965+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52966 packet.verb = VSV_SEND_VERSION_NUMBER;
52967 packet.u.version = HVSI_VERSION;
52968 packet.query_seqno = query_seqno+1;
52969@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52970
52971 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52972 packet.hdr.len = sizeof(struct hvsi_query);
52973- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52974+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52975 packet.verb = verb;
52976
52977 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52978@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52979 int wrote;
52980
52981 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52982- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52983+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52984 packet.hdr.len = sizeof(struct hvsi_control);
52985 packet.verb = VSV_SET_MODEM_CTL;
52986 packet.mask = HVSI_TSDTR;
52987@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52988 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52989
52990 packet.hdr.type = VS_DATA_PACKET_HEADER;
52991- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52992+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52993 packet.hdr.len = count + sizeof(struct hvsi_header);
52994 memcpy(&packet.data, buf, count);
52995
52996@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
52997 struct hvsi_control packet __ALIGNED__;
52998
52999 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
53000- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53001+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53002 packet.hdr.len = 6;
53003 packet.verb = VSV_CLOSE_PROTOCOL;
53004
53005@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
53006
53007 tty_port_tty_set(&hp->port, tty);
53008 spin_lock_irqsave(&hp->lock, flags);
53009- hp->port.count++;
53010+ atomic_inc(&hp->port.count);
53011 atomic_set(&hp->seqno, 0);
53012 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
53013 spin_unlock_irqrestore(&hp->lock, flags);
53014@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53015
53016 spin_lock_irqsave(&hp->lock, flags);
53017
53018- if (--hp->port.count == 0) {
53019+ if (atomic_dec_return(&hp->port.count) == 0) {
53020 tty_port_tty_set(&hp->port, NULL);
53021 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
53022
53023@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53024
53025 spin_lock_irqsave(&hp->lock, flags);
53026 }
53027- } else if (hp->port.count < 0)
53028+ } else if (atomic_read(&hp->port.count) < 0)
53029 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
53030- hp - hvsi_ports, hp->port.count);
53031+ hp - hvsi_ports, atomic_read(&hp->port.count));
53032
53033 spin_unlock_irqrestore(&hp->lock, flags);
53034 }
53035@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
53036 tty_port_tty_set(&hp->port, NULL);
53037
53038 spin_lock_irqsave(&hp->lock, flags);
53039- hp->port.count = 0;
53040+ atomic_set(&hp->port.count, 0);
53041 hp->n_outbuf = 0;
53042 spin_unlock_irqrestore(&hp->lock, flags);
53043 }
53044diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
53045index 7ae6c29..05c6dba 100644
53046--- a/drivers/tty/hvc/hvsi_lib.c
53047+++ b/drivers/tty/hvc/hvsi_lib.c
53048@@ -8,7 +8,7 @@
53049
53050 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
53051 {
53052- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
53053+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
53054
53055 /* Assumes that always succeeds, works in practice */
53056 return pv->put_chars(pv->termno, (char *)packet, packet->len);
53057@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
53058
53059 /* Reset state */
53060 pv->established = 0;
53061- atomic_set(&pv->seqno, 0);
53062+ atomic_set_unchecked(&pv->seqno, 0);
53063
53064 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
53065
53066diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
53067index 345cebb..d5a1e9e 100644
53068--- a/drivers/tty/ipwireless/tty.c
53069+++ b/drivers/tty/ipwireless/tty.c
53070@@ -28,6 +28,7 @@
53071 #include <linux/tty_driver.h>
53072 #include <linux/tty_flip.h>
53073 #include <linux/uaccess.h>
53074+#include <asm/local.h>
53075
53076 #include "tty.h"
53077 #include "network.h"
53078@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53079 return -ENODEV;
53080
53081 mutex_lock(&tty->ipw_tty_mutex);
53082- if (tty->port.count == 0)
53083+ if (atomic_read(&tty->port.count) == 0)
53084 tty->tx_bytes_queued = 0;
53085
53086- tty->port.count++;
53087+ atomic_inc(&tty->port.count);
53088
53089 tty->port.tty = linux_tty;
53090 linux_tty->driver_data = tty;
53091@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53092
53093 static void do_ipw_close(struct ipw_tty *tty)
53094 {
53095- tty->port.count--;
53096-
53097- if (tty->port.count == 0) {
53098+ if (atomic_dec_return(&tty->port.count) == 0) {
53099 struct tty_struct *linux_tty = tty->port.tty;
53100
53101 if (linux_tty != NULL) {
53102@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
53103 return;
53104
53105 mutex_lock(&tty->ipw_tty_mutex);
53106- if (tty->port.count == 0) {
53107+ if (atomic_read(&tty->port.count) == 0) {
53108 mutex_unlock(&tty->ipw_tty_mutex);
53109 return;
53110 }
53111@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
53112
53113 mutex_lock(&tty->ipw_tty_mutex);
53114
53115- if (!tty->port.count) {
53116+ if (!atomic_read(&tty->port.count)) {
53117 mutex_unlock(&tty->ipw_tty_mutex);
53118 return;
53119 }
53120@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
53121 return -ENODEV;
53122
53123 mutex_lock(&tty->ipw_tty_mutex);
53124- if (!tty->port.count) {
53125+ if (!atomic_read(&tty->port.count)) {
53126 mutex_unlock(&tty->ipw_tty_mutex);
53127 return -EINVAL;
53128 }
53129@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
53130 if (!tty)
53131 return -ENODEV;
53132
53133- if (!tty->port.count)
53134+ if (!atomic_read(&tty->port.count))
53135 return -EINVAL;
53136
53137 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
53138@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
53139 if (!tty)
53140 return 0;
53141
53142- if (!tty->port.count)
53143+ if (!atomic_read(&tty->port.count))
53144 return 0;
53145
53146 return tty->tx_bytes_queued;
53147@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
53148 if (!tty)
53149 return -ENODEV;
53150
53151- if (!tty->port.count)
53152+ if (!atomic_read(&tty->port.count))
53153 return -EINVAL;
53154
53155 return get_control_lines(tty);
53156@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
53157 if (!tty)
53158 return -ENODEV;
53159
53160- if (!tty->port.count)
53161+ if (!atomic_read(&tty->port.count))
53162 return -EINVAL;
53163
53164 return set_control_lines(tty, set, clear);
53165@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
53166 if (!tty)
53167 return -ENODEV;
53168
53169- if (!tty->port.count)
53170+ if (!atomic_read(&tty->port.count))
53171 return -EINVAL;
53172
53173 /* FIXME: Exactly how is the tty object locked here .. */
53174@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
53175 * are gone */
53176 mutex_lock(&ttyj->ipw_tty_mutex);
53177 }
53178- while (ttyj->port.count)
53179+ while (atomic_read(&ttyj->port.count))
53180 do_ipw_close(ttyj);
53181 ipwireless_disassociate_network_ttys(network,
53182 ttyj->channel_idx);
53183diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
53184index 1deaca4..c8582d4 100644
53185--- a/drivers/tty/moxa.c
53186+++ b/drivers/tty/moxa.c
53187@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
53188 }
53189
53190 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
53191- ch->port.count++;
53192+ atomic_inc(&ch->port.count);
53193 tty->driver_data = ch;
53194 tty_port_tty_set(&ch->port, tty);
53195 mutex_lock(&ch->port.mutex);
53196diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
53197index c434376..114ce13 100644
53198--- a/drivers/tty/n_gsm.c
53199+++ b/drivers/tty/n_gsm.c
53200@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
53201 spin_lock_init(&dlci->lock);
53202 mutex_init(&dlci->mutex);
53203 dlci->fifo = &dlci->_fifo;
53204- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
53205+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
53206 kfree(dlci);
53207 return NULL;
53208 }
53209@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
53210 struct gsm_dlci *dlci = tty->driver_data;
53211 struct tty_port *port = &dlci->port;
53212
53213- port->count++;
53214+ atomic_inc(&port->count);
53215 tty_port_tty_set(port, tty);
53216
53217 dlci->modem_rx = 0;
53218diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
53219index f44f1ba..a8d5915 100644
53220--- a/drivers/tty/n_tty.c
53221+++ b/drivers/tty/n_tty.c
53222@@ -115,7 +115,7 @@ struct n_tty_data {
53223 int minimum_to_wake;
53224
53225 /* consumer-published */
53226- size_t read_tail;
53227+ size_t read_tail __intentional_overflow(-1);
53228 size_t line_start;
53229
53230 /* protected by output lock */
53231@@ -2517,6 +2517,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
53232 {
53233 *ops = tty_ldisc_N_TTY;
53234 ops->owner = NULL;
53235- ops->refcount = ops->flags = 0;
53236+ atomic_set(&ops->refcount, 0);
53237+ ops->flags = 0;
53238 }
53239 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
53240diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
53241index 9bbdb1d..dc514ee 100644
53242--- a/drivers/tty/pty.c
53243+++ b/drivers/tty/pty.c
53244@@ -789,8 +789,10 @@ static void __init unix98_pty_init(void)
53245 panic("Couldn't register Unix98 pts driver");
53246
53247 /* Now create the /dev/ptmx special device */
53248+ pax_open_kernel();
53249 tty_default_fops(&ptmx_fops);
53250- ptmx_fops.open = ptmx_open;
53251+ *(void **)&ptmx_fops.open = ptmx_open;
53252+ pax_close_kernel();
53253
53254 cdev_init(&ptmx_cdev, &ptmx_fops);
53255 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
53256diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
53257index 383c4c7..d408e21 100644
53258--- a/drivers/tty/rocket.c
53259+++ b/drivers/tty/rocket.c
53260@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53261 tty->driver_data = info;
53262 tty_port_tty_set(port, tty);
53263
53264- if (port->count++ == 0) {
53265+ if (atomic_inc_return(&port->count) == 1) {
53266 atomic_inc(&rp_num_ports_open);
53267
53268 #ifdef ROCKET_DEBUG_OPEN
53269@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53270 #endif
53271 }
53272 #ifdef ROCKET_DEBUG_OPEN
53273- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
53274+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
53275 #endif
53276
53277 /*
53278@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
53279 spin_unlock_irqrestore(&info->port.lock, flags);
53280 return;
53281 }
53282- if (info->port.count)
53283+ if (atomic_read(&info->port.count))
53284 atomic_dec(&rp_num_ports_open);
53285 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
53286 spin_unlock_irqrestore(&info->port.lock, flags);
53287diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
53288index aa28209..e08fb85 100644
53289--- a/drivers/tty/serial/ioc4_serial.c
53290+++ b/drivers/tty/serial/ioc4_serial.c
53291@@ -437,7 +437,7 @@ struct ioc4_soft {
53292 } is_intr_info[MAX_IOC4_INTR_ENTS];
53293
53294 /* Number of entries active in the above array */
53295- atomic_t is_num_intrs;
53296+ atomic_unchecked_t is_num_intrs;
53297 } is_intr_type[IOC4_NUM_INTR_TYPES];
53298
53299 /* is_ir_lock must be held while
53300@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
53301 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
53302 || (type == IOC4_OTHER_INTR_TYPE)));
53303
53304- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
53305+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
53306 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
53307
53308 /* Save off the lower level interrupt handler */
53309@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
53310
53311 soft = arg;
53312 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
53313- num_intrs = (int)atomic_read(
53314+ num_intrs = (int)atomic_read_unchecked(
53315 &soft->is_intr_type[intr_type].is_num_intrs);
53316
53317 this_mir = this_ir = pending_intrs(soft, intr_type);
53318diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
53319index 6ec7501..265bcbf 100644
53320--- a/drivers/tty/serial/kgdb_nmi.c
53321+++ b/drivers/tty/serial/kgdb_nmi.c
53322@@ -51,7 +51,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
53323 * I/O utilities that messages sent to the console will automatically
53324 * be displayed on the dbg_io.
53325 */
53326- dbg_io_ops->is_console = true;
53327+ pax_open_kernel();
53328+ *(int *)&dbg_io_ops->is_console = true;
53329+ pax_close_kernel();
53330
53331 return 0;
53332 }
53333diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
53334index a260cde..6b2b5ce 100644
53335--- a/drivers/tty/serial/kgdboc.c
53336+++ b/drivers/tty/serial/kgdboc.c
53337@@ -24,8 +24,9 @@
53338 #define MAX_CONFIG_LEN 40
53339
53340 static struct kgdb_io kgdboc_io_ops;
53341+static struct kgdb_io kgdboc_io_ops_console;
53342
53343-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
53344+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
53345 static int configured = -1;
53346
53347 static char config[MAX_CONFIG_LEN];
53348@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
53349 kgdboc_unregister_kbd();
53350 if (configured == 1)
53351 kgdb_unregister_io_module(&kgdboc_io_ops);
53352+ else if (configured == 2)
53353+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
53354 }
53355
53356 static int configure_kgdboc(void)
53357@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
53358 int err;
53359 char *cptr = config;
53360 struct console *cons;
53361+ int is_console = 0;
53362
53363 err = kgdboc_option_setup(config);
53364 if (err || !strlen(config) || isspace(config[0]))
53365 goto noconfig;
53366
53367 err = -ENODEV;
53368- kgdboc_io_ops.is_console = 0;
53369 kgdb_tty_driver = NULL;
53370
53371 kgdboc_use_kms = 0;
53372@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
53373 int idx;
53374 if (cons->device && cons->device(cons, &idx) == p &&
53375 idx == tty_line) {
53376- kgdboc_io_ops.is_console = 1;
53377+ is_console = 1;
53378 break;
53379 }
53380 cons = cons->next;
53381@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
53382 kgdb_tty_line = tty_line;
53383
53384 do_register:
53385- err = kgdb_register_io_module(&kgdboc_io_ops);
53386+ if (is_console) {
53387+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
53388+ configured = 2;
53389+ } else {
53390+ err = kgdb_register_io_module(&kgdboc_io_ops);
53391+ configured = 1;
53392+ }
53393 if (err)
53394 goto noconfig;
53395
53396@@ -205,8 +214,6 @@ do_register:
53397 if (err)
53398 goto nmi_con_failed;
53399
53400- configured = 1;
53401-
53402 return 0;
53403
53404 nmi_con_failed:
53405@@ -223,7 +230,7 @@ noconfig:
53406 static int __init init_kgdboc(void)
53407 {
53408 /* Already configured? */
53409- if (configured == 1)
53410+ if (configured >= 1)
53411 return 0;
53412
53413 return configure_kgdboc();
53414@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
53415 if (config[len - 1] == '\n')
53416 config[len - 1] = '\0';
53417
53418- if (configured == 1)
53419+ if (configured >= 1)
53420 cleanup_kgdboc();
53421
53422 /* Go and configure with the new params. */
53423@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
53424 .post_exception = kgdboc_post_exp_handler,
53425 };
53426
53427+static struct kgdb_io kgdboc_io_ops_console = {
53428+ .name = "kgdboc",
53429+ .read_char = kgdboc_get_char,
53430+ .write_char = kgdboc_put_char,
53431+ .pre_exception = kgdboc_pre_exp_handler,
53432+ .post_exception = kgdboc_post_exp_handler,
53433+ .is_console = 1
53434+};
53435+
53436 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
53437 /* This is only available if kgdboc is a built in for early debugging */
53438 static int __init kgdboc_early_init(char *opt)
53439diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
53440index 077570a..12550a9 100644
53441--- a/drivers/tty/serial/msm_serial.c
53442+++ b/drivers/tty/serial/msm_serial.c
53443@@ -981,7 +981,7 @@ static struct uart_driver msm_uart_driver = {
53444 .cons = MSM_CONSOLE,
53445 };
53446
53447-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
53448+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
53449
53450 static const struct of_device_id msm_uartdm_table[] = {
53451 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
53452@@ -1000,7 +1000,7 @@ static int msm_serial_probe(struct platform_device *pdev)
53453 int irq;
53454
53455 if (pdev->id == -1)
53456- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
53457+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
53458
53459 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
53460 return -ENXIO;
53461diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
53462index c78f43a..22b1dab 100644
53463--- a/drivers/tty/serial/samsung.c
53464+++ b/drivers/tty/serial/samsung.c
53465@@ -478,11 +478,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
53466 }
53467 }
53468
53469+static int s3c64xx_serial_startup(struct uart_port *port);
53470 static int s3c24xx_serial_startup(struct uart_port *port)
53471 {
53472 struct s3c24xx_uart_port *ourport = to_ourport(port);
53473 int ret;
53474
53475+ /* Startup sequence is different for s3c64xx and higher SoC's */
53476+ if (s3c24xx_serial_has_interrupt_mask(port))
53477+ return s3c64xx_serial_startup(port);
53478+
53479 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
53480 port, (unsigned long long)port->mapbase, port->membase);
53481
53482@@ -1155,10 +1160,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
53483 /* setup info for port */
53484 port->dev = &platdev->dev;
53485
53486- /* Startup sequence is different for s3c64xx and higher SoC's */
53487- if (s3c24xx_serial_has_interrupt_mask(port))
53488- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
53489-
53490 port->uartclk = 1;
53491
53492 if (cfg->uart_flags & UPF_CONS_FLOW) {
53493diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53494index 0f03988..8a8038d 100644
53495--- a/drivers/tty/serial/serial_core.c
53496+++ b/drivers/tty/serial/serial_core.c
53497@@ -1343,7 +1343,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53498
53499 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
53500
53501- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53502+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53503 return;
53504
53505 /*
53506@@ -1470,7 +1470,7 @@ static void uart_hangup(struct tty_struct *tty)
53507 uart_flush_buffer(tty);
53508 uart_shutdown(tty, state);
53509 spin_lock_irqsave(&port->lock, flags);
53510- port->count = 0;
53511+ atomic_set(&port->count, 0);
53512 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53513 spin_unlock_irqrestore(&port->lock, flags);
53514 tty_port_tty_set(port, NULL);
53515@@ -1568,7 +1568,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53516 goto end;
53517 }
53518
53519- port->count++;
53520+ atomic_inc(&port->count);
53521 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
53522 retval = -ENXIO;
53523 goto err_dec_count;
53524@@ -1600,7 +1600,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53525 end:
53526 return retval;
53527 err_dec_count:
53528- port->count--;
53529+ atomic_inc(&port->count);
53530 mutex_unlock(&port->mutex);
53531 goto end;
53532 }
53533diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53534index b799170..87dafd5 100644
53535--- a/drivers/tty/synclink.c
53536+++ b/drivers/tty/synclink.c
53537@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53538
53539 if (debug_level >= DEBUG_LEVEL_INFO)
53540 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53541- __FILE__,__LINE__, info->device_name, info->port.count);
53542+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53543
53544 if (tty_port_close_start(&info->port, tty, filp) == 0)
53545 goto cleanup;
53546@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53547 cleanup:
53548 if (debug_level >= DEBUG_LEVEL_INFO)
53549 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53550- tty->driver->name, info->port.count);
53551+ tty->driver->name, atomic_read(&info->port.count));
53552
53553 } /* end of mgsl_close() */
53554
53555@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53556
53557 mgsl_flush_buffer(tty);
53558 shutdown(info);
53559-
53560- info->port.count = 0;
53561+
53562+ atomic_set(&info->port.count, 0);
53563 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53564 info->port.tty = NULL;
53565
53566@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53567
53568 if (debug_level >= DEBUG_LEVEL_INFO)
53569 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53570- __FILE__,__LINE__, tty->driver->name, port->count );
53571+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53572
53573 spin_lock_irqsave(&info->irq_spinlock, flags);
53574- port->count--;
53575+ atomic_dec(&port->count);
53576 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53577 port->blocked_open++;
53578
53579@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53580
53581 if (debug_level >= DEBUG_LEVEL_INFO)
53582 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53583- __FILE__,__LINE__, tty->driver->name, port->count );
53584+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53585
53586 tty_unlock(tty);
53587 schedule();
53588@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53589
53590 /* FIXME: Racy on hangup during close wait */
53591 if (!tty_hung_up_p(filp))
53592- port->count++;
53593+ atomic_inc(&port->count);
53594 port->blocked_open--;
53595
53596 if (debug_level >= DEBUG_LEVEL_INFO)
53597 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53598- __FILE__,__LINE__, tty->driver->name, port->count );
53599+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53600
53601 if (!retval)
53602 port->flags |= ASYNC_NORMAL_ACTIVE;
53603@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53604
53605 if (debug_level >= DEBUG_LEVEL_INFO)
53606 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53607- __FILE__,__LINE__,tty->driver->name, info->port.count);
53608+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53609
53610 /* If port is closing, signal caller to try again */
53611 if (info->port.flags & ASYNC_CLOSING){
53612@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53613 spin_unlock_irqrestore(&info->netlock, flags);
53614 goto cleanup;
53615 }
53616- info->port.count++;
53617+ atomic_inc(&info->port.count);
53618 spin_unlock_irqrestore(&info->netlock, flags);
53619
53620- if (info->port.count == 1) {
53621+ if (atomic_read(&info->port.count) == 1) {
53622 /* 1st open on this device, init hardware */
53623 retval = startup(info);
53624 if (retval < 0)
53625@@ -3442,8 +3442,8 @@ cleanup:
53626 if (retval) {
53627 if (tty->count == 1)
53628 info->port.tty = NULL; /* tty layer will release tty struct */
53629- if(info->port.count)
53630- info->port.count--;
53631+ if (atomic_read(&info->port.count))
53632+ atomic_dec(&info->port.count);
53633 }
53634
53635 return retval;
53636@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53637 unsigned short new_crctype;
53638
53639 /* return error if TTY interface open */
53640- if (info->port.count)
53641+ if (atomic_read(&info->port.count))
53642 return -EBUSY;
53643
53644 switch (encoding)
53645@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53646
53647 /* arbitrate between network and tty opens */
53648 spin_lock_irqsave(&info->netlock, flags);
53649- if (info->port.count != 0 || info->netcount != 0) {
53650+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53651 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53652 spin_unlock_irqrestore(&info->netlock, flags);
53653 return -EBUSY;
53654@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53655 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53656
53657 /* return error if TTY interface open */
53658- if (info->port.count)
53659+ if (atomic_read(&info->port.count))
53660 return -EBUSY;
53661
53662 if (cmd != SIOCWANDEV)
53663diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53664index 0e8c39b..e0cb171 100644
53665--- a/drivers/tty/synclink_gt.c
53666+++ b/drivers/tty/synclink_gt.c
53667@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53668 tty->driver_data = info;
53669 info->port.tty = tty;
53670
53671- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53672+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53673
53674 /* If port is closing, signal caller to try again */
53675 if (info->port.flags & ASYNC_CLOSING){
53676@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53677 mutex_unlock(&info->port.mutex);
53678 goto cleanup;
53679 }
53680- info->port.count++;
53681+ atomic_inc(&info->port.count);
53682 spin_unlock_irqrestore(&info->netlock, flags);
53683
53684- if (info->port.count == 1) {
53685+ if (atomic_read(&info->port.count) == 1) {
53686 /* 1st open on this device, init hardware */
53687 retval = startup(info);
53688 if (retval < 0) {
53689@@ -715,8 +715,8 @@ cleanup:
53690 if (retval) {
53691 if (tty->count == 1)
53692 info->port.tty = NULL; /* tty layer will release tty struct */
53693- if(info->port.count)
53694- info->port.count--;
53695+ if(atomic_read(&info->port.count))
53696+ atomic_dec(&info->port.count);
53697 }
53698
53699 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53700@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53701
53702 if (sanity_check(info, tty->name, "close"))
53703 return;
53704- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53705+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53706
53707 if (tty_port_close_start(&info->port, tty, filp) == 0)
53708 goto cleanup;
53709@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53710 tty_port_close_end(&info->port, tty);
53711 info->port.tty = NULL;
53712 cleanup:
53713- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53714+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53715 }
53716
53717 static void hangup(struct tty_struct *tty)
53718@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53719 shutdown(info);
53720
53721 spin_lock_irqsave(&info->port.lock, flags);
53722- info->port.count = 0;
53723+ atomic_set(&info->port.count, 0);
53724 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53725 info->port.tty = NULL;
53726 spin_unlock_irqrestore(&info->port.lock, flags);
53727@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53728 unsigned short new_crctype;
53729
53730 /* return error if TTY interface open */
53731- if (info->port.count)
53732+ if (atomic_read(&info->port.count))
53733 return -EBUSY;
53734
53735 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53736@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53737
53738 /* arbitrate between network and tty opens */
53739 spin_lock_irqsave(&info->netlock, flags);
53740- if (info->port.count != 0 || info->netcount != 0) {
53741+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53742 DBGINFO(("%s hdlc_open busy\n", dev->name));
53743 spin_unlock_irqrestore(&info->netlock, flags);
53744 return -EBUSY;
53745@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53746 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53747
53748 /* return error if TTY interface open */
53749- if (info->port.count)
53750+ if (atomic_read(&info->port.count))
53751 return -EBUSY;
53752
53753 if (cmd != SIOCWANDEV)
53754@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53755 if (port == NULL)
53756 continue;
53757 spin_lock(&port->lock);
53758- if ((port->port.count || port->netcount) &&
53759+ if ((atomic_read(&port->port.count) || port->netcount) &&
53760 port->pending_bh && !port->bh_running &&
53761 !port->bh_requested) {
53762 DBGISR(("%s bh queued\n", port->device_name));
53763@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53764 add_wait_queue(&port->open_wait, &wait);
53765
53766 spin_lock_irqsave(&info->lock, flags);
53767- port->count--;
53768+ atomic_dec(&port->count);
53769 spin_unlock_irqrestore(&info->lock, flags);
53770 port->blocked_open++;
53771
53772@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53773 remove_wait_queue(&port->open_wait, &wait);
53774
53775 if (!tty_hung_up_p(filp))
53776- port->count++;
53777+ atomic_inc(&port->count);
53778 port->blocked_open--;
53779
53780 if (!retval)
53781diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53782index c3f9091..abe4601 100644
53783--- a/drivers/tty/synclinkmp.c
53784+++ b/drivers/tty/synclinkmp.c
53785@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53786
53787 if (debug_level >= DEBUG_LEVEL_INFO)
53788 printk("%s(%d):%s open(), old ref count = %d\n",
53789- __FILE__,__LINE__,tty->driver->name, info->port.count);
53790+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53791
53792 /* If port is closing, signal caller to try again */
53793 if (info->port.flags & ASYNC_CLOSING){
53794@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53795 spin_unlock_irqrestore(&info->netlock, flags);
53796 goto cleanup;
53797 }
53798- info->port.count++;
53799+ atomic_inc(&info->port.count);
53800 spin_unlock_irqrestore(&info->netlock, flags);
53801
53802- if (info->port.count == 1) {
53803+ if (atomic_read(&info->port.count) == 1) {
53804 /* 1st open on this device, init hardware */
53805 retval = startup(info);
53806 if (retval < 0)
53807@@ -796,8 +796,8 @@ cleanup:
53808 if (retval) {
53809 if (tty->count == 1)
53810 info->port.tty = NULL; /* tty layer will release tty struct */
53811- if(info->port.count)
53812- info->port.count--;
53813+ if(atomic_read(&info->port.count))
53814+ atomic_dec(&info->port.count);
53815 }
53816
53817 return retval;
53818@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53819
53820 if (debug_level >= DEBUG_LEVEL_INFO)
53821 printk("%s(%d):%s close() entry, count=%d\n",
53822- __FILE__,__LINE__, info->device_name, info->port.count);
53823+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53824
53825 if (tty_port_close_start(&info->port, tty, filp) == 0)
53826 goto cleanup;
53827@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53828 cleanup:
53829 if (debug_level >= DEBUG_LEVEL_INFO)
53830 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53831- tty->driver->name, info->port.count);
53832+ tty->driver->name, atomic_read(&info->port.count));
53833 }
53834
53835 /* Called by tty_hangup() when a hangup is signaled.
53836@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53837 shutdown(info);
53838
53839 spin_lock_irqsave(&info->port.lock, flags);
53840- info->port.count = 0;
53841+ atomic_set(&info->port.count, 0);
53842 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53843 info->port.tty = NULL;
53844 spin_unlock_irqrestore(&info->port.lock, flags);
53845@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53846 unsigned short new_crctype;
53847
53848 /* return error if TTY interface open */
53849- if (info->port.count)
53850+ if (atomic_read(&info->port.count))
53851 return -EBUSY;
53852
53853 switch (encoding)
53854@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53855
53856 /* arbitrate between network and tty opens */
53857 spin_lock_irqsave(&info->netlock, flags);
53858- if (info->port.count != 0 || info->netcount != 0) {
53859+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53860 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53861 spin_unlock_irqrestore(&info->netlock, flags);
53862 return -EBUSY;
53863@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53864 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53865
53866 /* return error if TTY interface open */
53867- if (info->port.count)
53868+ if (atomic_read(&info->port.count))
53869 return -EBUSY;
53870
53871 if (cmd != SIOCWANDEV)
53872@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53873 * do not request bottom half processing if the
53874 * device is not open in a normal mode.
53875 */
53876- if ( port && (port->port.count || port->netcount) &&
53877+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53878 port->pending_bh && !port->bh_running &&
53879 !port->bh_requested ) {
53880 if ( debug_level >= DEBUG_LEVEL_ISR )
53881@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53882
53883 if (debug_level >= DEBUG_LEVEL_INFO)
53884 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53885- __FILE__,__LINE__, tty->driver->name, port->count );
53886+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53887
53888 spin_lock_irqsave(&info->lock, flags);
53889- port->count--;
53890+ atomic_dec(&port->count);
53891 spin_unlock_irqrestore(&info->lock, flags);
53892 port->blocked_open++;
53893
53894@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53895
53896 if (debug_level >= DEBUG_LEVEL_INFO)
53897 printk("%s(%d):%s block_til_ready() count=%d\n",
53898- __FILE__,__LINE__, tty->driver->name, port->count );
53899+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53900
53901 tty_unlock(tty);
53902 schedule();
53903@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53904 set_current_state(TASK_RUNNING);
53905 remove_wait_queue(&port->open_wait, &wait);
53906 if (!tty_hung_up_p(filp))
53907- port->count++;
53908+ atomic_inc(&port->count);
53909 port->blocked_open--;
53910
53911 if (debug_level >= DEBUG_LEVEL_INFO)
53912 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53913- __FILE__,__LINE__, tty->driver->name, port->count );
53914+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53915
53916 if (!retval)
53917 port->flags |= ASYNC_NORMAL_ACTIVE;
53918diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53919index 42bad18..447d7a2 100644
53920--- a/drivers/tty/sysrq.c
53921+++ b/drivers/tty/sysrq.c
53922@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53923 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53924 size_t count, loff_t *ppos)
53925 {
53926- if (count) {
53927+ if (count && capable(CAP_SYS_ADMIN)) {
53928 char c;
53929
53930 if (get_user(c, buf))
53931diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53932index 848c17a..e930437 100644
53933--- a/drivers/tty/tty_io.c
53934+++ b/drivers/tty/tty_io.c
53935@@ -3469,7 +3469,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
53936
53937 void tty_default_fops(struct file_operations *fops)
53938 {
53939- *fops = tty_fops;
53940+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53941 }
53942
53943 /*
53944diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53945index 2d822aa..a566234 100644
53946--- a/drivers/tty/tty_ldisc.c
53947+++ b/drivers/tty/tty_ldisc.c
53948@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53949 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53950 tty_ldiscs[disc] = new_ldisc;
53951 new_ldisc->num = disc;
53952- new_ldisc->refcount = 0;
53953+ atomic_set(&new_ldisc->refcount, 0);
53954 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53955
53956 return ret;
53957@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53958 return -EINVAL;
53959
53960 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53961- if (tty_ldiscs[disc]->refcount)
53962+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53963 ret = -EBUSY;
53964 else
53965 tty_ldiscs[disc] = NULL;
53966@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53967 if (ldops) {
53968 ret = ERR_PTR(-EAGAIN);
53969 if (try_module_get(ldops->owner)) {
53970- ldops->refcount++;
53971+ atomic_inc(&ldops->refcount);
53972 ret = ldops;
53973 }
53974 }
53975@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53976 unsigned long flags;
53977
53978 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53979- ldops->refcount--;
53980+ atomic_dec(&ldops->refcount);
53981 module_put(ldops->owner);
53982 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53983 }
53984diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53985index 1b93357..ea9f82c 100644
53986--- a/drivers/tty/tty_port.c
53987+++ b/drivers/tty/tty_port.c
53988@@ -237,7 +237,7 @@ void tty_port_hangup(struct tty_port *port)
53989 unsigned long flags;
53990
53991 spin_lock_irqsave(&port->lock, flags);
53992- port->count = 0;
53993+ atomic_set(&port->count, 0);
53994 port->flags &= ~ASYNC_NORMAL_ACTIVE;
53995 tty = port->tty;
53996 if (tty)
53997@@ -399,7 +399,7 @@ int tty_port_block_til_ready(struct tty_port *port,
53998
53999 /* The port lock protects the port counts */
54000 spin_lock_irqsave(&port->lock, flags);
54001- port->count--;
54002+ atomic_dec(&port->count);
54003 port->blocked_open++;
54004 spin_unlock_irqrestore(&port->lock, flags);
54005
54006@@ -441,7 +441,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54007 we must not mess that up further */
54008 spin_lock_irqsave(&port->lock, flags);
54009 if (!tty_hung_up_p(filp))
54010- port->count++;
54011+ atomic_inc(&port->count);
54012 port->blocked_open--;
54013 if (retval == 0)
54014 port->flags |= ASYNC_NORMAL_ACTIVE;
54015@@ -479,19 +479,19 @@ int tty_port_close_start(struct tty_port *port,
54016 return 0;
54017 }
54018
54019- if (tty->count == 1 && port->count != 1) {
54020+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
54021 printk(KERN_WARNING
54022 "tty_port_close_start: tty->count = 1 port count = %d.\n",
54023- port->count);
54024- port->count = 1;
54025+ atomic_read(&port->count));
54026+ atomic_set(&port->count, 1);
54027 }
54028- if (--port->count < 0) {
54029+ if (atomic_dec_return(&port->count) < 0) {
54030 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
54031- port->count);
54032- port->count = 0;
54033+ atomic_read(&port->count));
54034+ atomic_set(&port->count, 0);
54035 }
54036
54037- if (port->count) {
54038+ if (atomic_read(&port->count)) {
54039 spin_unlock_irqrestore(&port->lock, flags);
54040 return 0;
54041 }
54042@@ -592,7 +592,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
54043 struct file *filp)
54044 {
54045 spin_lock_irq(&port->lock);
54046- ++port->count;
54047+ atomic_inc(&port->count);
54048 spin_unlock_irq(&port->lock);
54049 tty_port_tty_set(port, tty);
54050
54051diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
54052index d0e3a44..5f8b754 100644
54053--- a/drivers/tty/vt/keyboard.c
54054+++ b/drivers/tty/vt/keyboard.c
54055@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
54056 kbd->kbdmode == VC_OFF) &&
54057 value != KVAL(K_SAK))
54058 return; /* SAK is allowed even in raw mode */
54059+
54060+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54061+ {
54062+ void *func = fn_handler[value];
54063+ if (func == fn_show_state || func == fn_show_ptregs ||
54064+ func == fn_show_mem)
54065+ return;
54066+ }
54067+#endif
54068+
54069 fn_handler[value](vc);
54070 }
54071
54072@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54073 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
54074 return -EFAULT;
54075
54076- if (!capable(CAP_SYS_TTY_CONFIG))
54077- perm = 0;
54078-
54079 switch (cmd) {
54080 case KDGKBENT:
54081 /* Ensure another thread doesn't free it under us */
54082@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54083 spin_unlock_irqrestore(&kbd_event_lock, flags);
54084 return put_user(val, &user_kbe->kb_value);
54085 case KDSKBENT:
54086+ if (!capable(CAP_SYS_TTY_CONFIG))
54087+ perm = 0;
54088+
54089 if (!perm)
54090 return -EPERM;
54091 if (!i && v == K_NOSUCHMAP) {
54092@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54093 int i, j, k;
54094 int ret;
54095
54096- if (!capable(CAP_SYS_TTY_CONFIG))
54097- perm = 0;
54098-
54099 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
54100 if (!kbs) {
54101 ret = -ENOMEM;
54102@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54103 kfree(kbs);
54104 return ((p && *p) ? -EOVERFLOW : 0);
54105 case KDSKBSENT:
54106+ if (!capable(CAP_SYS_TTY_CONFIG))
54107+ perm = 0;
54108+
54109 if (!perm) {
54110 ret = -EPERM;
54111 goto reterr;
54112diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
54113index a673e5b..36e5d32 100644
54114--- a/drivers/uio/uio.c
54115+++ b/drivers/uio/uio.c
54116@@ -25,6 +25,7 @@
54117 #include <linux/kobject.h>
54118 #include <linux/cdev.h>
54119 #include <linux/uio_driver.h>
54120+#include <asm/local.h>
54121
54122 #define UIO_MAX_DEVICES (1U << MINORBITS)
54123
54124@@ -32,7 +33,7 @@ struct uio_device {
54125 struct module *owner;
54126 struct device *dev;
54127 int minor;
54128- atomic_t event;
54129+ atomic_unchecked_t event;
54130 struct fasync_struct *async_queue;
54131 wait_queue_head_t wait;
54132 struct uio_info *info;
54133@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
54134 struct device_attribute *attr, char *buf)
54135 {
54136 struct uio_device *idev = dev_get_drvdata(dev);
54137- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
54138+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
54139 }
54140 static DEVICE_ATTR_RO(event);
54141
54142@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
54143 {
54144 struct uio_device *idev = info->uio_dev;
54145
54146- atomic_inc(&idev->event);
54147+ atomic_inc_unchecked(&idev->event);
54148 wake_up_interruptible(&idev->wait);
54149 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
54150 }
54151@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
54152 }
54153
54154 listener->dev = idev;
54155- listener->event_count = atomic_read(&idev->event);
54156+ listener->event_count = atomic_read_unchecked(&idev->event);
54157 filep->private_data = listener;
54158
54159 if (idev->info->open) {
54160@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
54161 return -EIO;
54162
54163 poll_wait(filep, &idev->wait, wait);
54164- if (listener->event_count != atomic_read(&idev->event))
54165+ if (listener->event_count != atomic_read_unchecked(&idev->event))
54166 return POLLIN | POLLRDNORM;
54167 return 0;
54168 }
54169@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
54170 do {
54171 set_current_state(TASK_INTERRUPTIBLE);
54172
54173- event_count = atomic_read(&idev->event);
54174+ event_count = atomic_read_unchecked(&idev->event);
54175 if (event_count != listener->event_count) {
54176 if (copy_to_user(buf, &event_count, count))
54177 retval = -EFAULT;
54178@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
54179 static int uio_find_mem_index(struct vm_area_struct *vma)
54180 {
54181 struct uio_device *idev = vma->vm_private_data;
54182+ unsigned long size;
54183
54184 if (vma->vm_pgoff < MAX_UIO_MAPS) {
54185- if (idev->info->mem[vma->vm_pgoff].size == 0)
54186+ size = idev->info->mem[vma->vm_pgoff].size;
54187+ if (size == 0)
54188+ return -1;
54189+ if (vma->vm_end - vma->vm_start > size)
54190 return -1;
54191 return (int)vma->vm_pgoff;
54192 }
54193@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
54194 idev->owner = owner;
54195 idev->info = info;
54196 init_waitqueue_head(&idev->wait);
54197- atomic_set(&idev->event, 0);
54198+ atomic_set_unchecked(&idev->event, 0);
54199
54200 ret = uio_get_minor(idev);
54201 if (ret)
54202diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
54203index 813d4d3..a71934f 100644
54204--- a/drivers/usb/atm/cxacru.c
54205+++ b/drivers/usb/atm/cxacru.c
54206@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
54207 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
54208 if (ret < 2)
54209 return -EINVAL;
54210- if (index < 0 || index > 0x7f)
54211+ if (index > 0x7f)
54212 return -EINVAL;
54213 pos += tmp;
54214
54215diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
54216index dada014..1d0d517 100644
54217--- a/drivers/usb/atm/usbatm.c
54218+++ b/drivers/usb/atm/usbatm.c
54219@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54220 if (printk_ratelimit())
54221 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
54222 __func__, vpi, vci);
54223- atomic_inc(&vcc->stats->rx_err);
54224+ atomic_inc_unchecked(&vcc->stats->rx_err);
54225 return;
54226 }
54227
54228@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54229 if (length > ATM_MAX_AAL5_PDU) {
54230 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
54231 __func__, length, vcc);
54232- atomic_inc(&vcc->stats->rx_err);
54233+ atomic_inc_unchecked(&vcc->stats->rx_err);
54234 goto out;
54235 }
54236
54237@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54238 if (sarb->len < pdu_length) {
54239 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
54240 __func__, pdu_length, sarb->len, vcc);
54241- atomic_inc(&vcc->stats->rx_err);
54242+ atomic_inc_unchecked(&vcc->stats->rx_err);
54243 goto out;
54244 }
54245
54246 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
54247 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
54248 __func__, vcc);
54249- atomic_inc(&vcc->stats->rx_err);
54250+ atomic_inc_unchecked(&vcc->stats->rx_err);
54251 goto out;
54252 }
54253
54254@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54255 if (printk_ratelimit())
54256 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
54257 __func__, length);
54258- atomic_inc(&vcc->stats->rx_drop);
54259+ atomic_inc_unchecked(&vcc->stats->rx_drop);
54260 goto out;
54261 }
54262
54263@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54264
54265 vcc->push(vcc, skb);
54266
54267- atomic_inc(&vcc->stats->rx);
54268+ atomic_inc_unchecked(&vcc->stats->rx);
54269 out:
54270 skb_trim(sarb, 0);
54271 }
54272@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
54273 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
54274
54275 usbatm_pop(vcc, skb);
54276- atomic_inc(&vcc->stats->tx);
54277+ atomic_inc_unchecked(&vcc->stats->tx);
54278
54279 skb = skb_dequeue(&instance->sndqueue);
54280 }
54281@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
54282 if (!left--)
54283 return sprintf(page,
54284 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
54285- atomic_read(&atm_dev->stats.aal5.tx),
54286- atomic_read(&atm_dev->stats.aal5.tx_err),
54287- atomic_read(&atm_dev->stats.aal5.rx),
54288- atomic_read(&atm_dev->stats.aal5.rx_err),
54289- atomic_read(&atm_dev->stats.aal5.rx_drop));
54290+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
54291+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
54292+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
54293+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
54294+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
54295
54296 if (!left--) {
54297 if (instance->disconnected)
54298diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
54299index 2a3bbdf..91d72cf 100644
54300--- a/drivers/usb/core/devices.c
54301+++ b/drivers/usb/core/devices.c
54302@@ -126,7 +126,7 @@ static const char format_endpt[] =
54303 * time it gets called.
54304 */
54305 static struct device_connect_event {
54306- atomic_t count;
54307+ atomic_unchecked_t count;
54308 wait_queue_head_t wait;
54309 } device_event = {
54310 .count = ATOMIC_INIT(1),
54311@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
54312
54313 void usbfs_conn_disc_event(void)
54314 {
54315- atomic_add(2, &device_event.count);
54316+ atomic_add_unchecked(2, &device_event.count);
54317 wake_up(&device_event.wait);
54318 }
54319
54320@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
54321
54322 poll_wait(file, &device_event.wait, wait);
54323
54324- event_count = atomic_read(&device_event.count);
54325+ event_count = atomic_read_unchecked(&device_event.count);
54326 if (file->f_version != event_count) {
54327 file->f_version = event_count;
54328 return POLLIN | POLLRDNORM;
54329diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
54330index 0b59731..46ee7d1 100644
54331--- a/drivers/usb/core/devio.c
54332+++ b/drivers/usb/core/devio.c
54333@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54334 struct usb_dev_state *ps = file->private_data;
54335 struct usb_device *dev = ps->dev;
54336 ssize_t ret = 0;
54337- unsigned len;
54338+ size_t len;
54339 loff_t pos;
54340 int i;
54341
54342@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54343 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
54344 struct usb_config_descriptor *config =
54345 (struct usb_config_descriptor *)dev->rawdescriptors[i];
54346- unsigned int length = le16_to_cpu(config->wTotalLength);
54347+ size_t length = le16_to_cpu(config->wTotalLength);
54348
54349 if (*ppos < pos + length) {
54350
54351 /* The descriptor may claim to be longer than it
54352 * really is. Here is the actual allocated length. */
54353- unsigned alloclen =
54354+ size_t alloclen =
54355 le16_to_cpu(dev->config[i].desc.wTotalLength);
54356
54357- len = length - (*ppos - pos);
54358+ len = length + pos - *ppos;
54359 if (len > nbytes)
54360 len = nbytes;
54361
54362 /* Simply don't write (skip over) unallocated parts */
54363 if (alloclen > (*ppos - pos)) {
54364- alloclen -= (*ppos - pos);
54365+ alloclen = alloclen + pos - *ppos;
54366 if (copy_to_user(buf,
54367 dev->rawdescriptors[i] + (*ppos - pos),
54368 min(len, alloclen))) {
54369diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
54370index 258e6fe..9ea48d7 100644
54371--- a/drivers/usb/core/hcd.c
54372+++ b/drivers/usb/core/hcd.c
54373@@ -1550,7 +1550,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54374 */
54375 usb_get_urb(urb);
54376 atomic_inc(&urb->use_count);
54377- atomic_inc(&urb->dev->urbnum);
54378+ atomic_inc_unchecked(&urb->dev->urbnum);
54379 usbmon_urb_submit(&hcd->self, urb);
54380
54381 /* NOTE requirements on root-hub callers (usbfs and the hub
54382@@ -1577,7 +1577,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54383 urb->hcpriv = NULL;
54384 INIT_LIST_HEAD(&urb->urb_list);
54385 atomic_dec(&urb->use_count);
54386- atomic_dec(&urb->dev->urbnum);
54387+ atomic_dec_unchecked(&urb->dev->urbnum);
54388 if (atomic_read(&urb->reject))
54389 wake_up(&usb_kill_urb_queue);
54390 usb_put_urb(urb);
54391diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
54392index 674c262..71fdd90 100644
54393--- a/drivers/usb/core/hub.c
54394+++ b/drivers/usb/core/hub.c
54395@@ -27,6 +27,7 @@
54396 #include <linux/freezer.h>
54397 #include <linux/random.h>
54398 #include <linux/pm_qos.h>
54399+#include <linux/grsecurity.h>
54400
54401 #include <asm/uaccess.h>
54402 #include <asm/byteorder.h>
54403@@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
54404 goto done;
54405 return;
54406 }
54407+
54408+ if (gr_handle_new_usb())
54409+ goto done;
54410+
54411 if (hub_is_superspeed(hub->hdev))
54412 unit_load = 150;
54413 else
54414diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
54415index 0c8a7fc..c45b40a 100644
54416--- a/drivers/usb/core/message.c
54417+++ b/drivers/usb/core/message.c
54418@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
54419 * Return: If successful, the number of bytes transferred. Otherwise, a negative
54420 * error number.
54421 */
54422-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54423+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54424 __u8 requesttype, __u16 value, __u16 index, void *data,
54425 __u16 size, int timeout)
54426 {
54427@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
54428 * If successful, 0. Otherwise a negative error number. The number of actual
54429 * bytes transferred will be stored in the @actual_length parameter.
54430 */
54431-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54432+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54433 void *data, int len, int *actual_length, int timeout)
54434 {
54435 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
54436@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
54437 * bytes transferred will be stored in the @actual_length parameter.
54438 *
54439 */
54440-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54441+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54442 void *data, int len, int *actual_length, int timeout)
54443 {
54444 struct urb *urb;
54445diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
54446index 1236c60..d47a51c 100644
54447--- a/drivers/usb/core/sysfs.c
54448+++ b/drivers/usb/core/sysfs.c
54449@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
54450 struct usb_device *udev;
54451
54452 udev = to_usb_device(dev);
54453- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
54454+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
54455 }
54456 static DEVICE_ATTR_RO(urbnum);
54457
54458diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
54459index 2dd2362..1135437 100644
54460--- a/drivers/usb/core/usb.c
54461+++ b/drivers/usb/core/usb.c
54462@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
54463 set_dev_node(&dev->dev, dev_to_node(bus->controller));
54464 dev->state = USB_STATE_ATTACHED;
54465 dev->lpm_disable_count = 1;
54466- atomic_set(&dev->urbnum, 0);
54467+ atomic_set_unchecked(&dev->urbnum, 0);
54468
54469 INIT_LIST_HEAD(&dev->ep0.urb_list);
54470 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
54471diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
54472index 8cfc319..4868255 100644
54473--- a/drivers/usb/early/ehci-dbgp.c
54474+++ b/drivers/usb/early/ehci-dbgp.c
54475@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
54476
54477 #ifdef CONFIG_KGDB
54478 static struct kgdb_io kgdbdbgp_io_ops;
54479-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
54480+static struct kgdb_io kgdbdbgp_io_ops_console;
54481+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
54482 #else
54483 #define dbgp_kgdb_mode (0)
54484 #endif
54485@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54486 .write_char = kgdbdbgp_write_char,
54487 };
54488
54489+static struct kgdb_io kgdbdbgp_io_ops_console = {
54490+ .name = "kgdbdbgp",
54491+ .read_char = kgdbdbgp_read_char,
54492+ .write_char = kgdbdbgp_write_char,
54493+ .is_console = 1
54494+};
54495+
54496 static int kgdbdbgp_wait_time;
54497
54498 static int __init kgdbdbgp_parse_config(char *str)
54499@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54500 ptr++;
54501 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54502 }
54503- kgdb_register_io_module(&kgdbdbgp_io_ops);
54504- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54505+ if (early_dbgp_console.index != -1)
54506+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54507+ else
54508+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54509
54510 return 0;
54511 }
54512diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54513index 2b4c82d..06a8ee6 100644
54514--- a/drivers/usb/gadget/function/f_uac1.c
54515+++ b/drivers/usb/gadget/function/f_uac1.c
54516@@ -13,6 +13,7 @@
54517 #include <linux/kernel.h>
54518 #include <linux/device.h>
54519 #include <linux/atomic.h>
54520+#include <linux/module.h>
54521
54522 #include "u_uac1.h"
54523
54524diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54525index ad0aca8..8ff84865 100644
54526--- a/drivers/usb/gadget/function/u_serial.c
54527+++ b/drivers/usb/gadget/function/u_serial.c
54528@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54529 spin_lock_irq(&port->port_lock);
54530
54531 /* already open? Great. */
54532- if (port->port.count) {
54533+ if (atomic_read(&port->port.count)) {
54534 status = 0;
54535- port->port.count++;
54536+ atomic_inc(&port->port.count);
54537
54538 /* currently opening/closing? wait ... */
54539 } else if (port->openclose) {
54540@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54541 tty->driver_data = port;
54542 port->port.tty = tty;
54543
54544- port->port.count = 1;
54545+ atomic_set(&port->port.count, 1);
54546 port->openclose = false;
54547
54548 /* if connected, start the I/O stream */
54549@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54550
54551 spin_lock_irq(&port->port_lock);
54552
54553- if (port->port.count != 1) {
54554- if (port->port.count == 0)
54555+ if (atomic_read(&port->port.count) != 1) {
54556+ if (atomic_read(&port->port.count) == 0)
54557 WARN_ON(1);
54558 else
54559- --port->port.count;
54560+ atomic_dec(&port->port.count);
54561 goto exit;
54562 }
54563
54564@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54565 * and sleep if necessary
54566 */
54567 port->openclose = true;
54568- port->port.count = 0;
54569+ atomic_set(&port->port.count, 0);
54570
54571 gser = port->port_usb;
54572 if (gser && gser->disconnect)
54573@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
54574 int cond;
54575
54576 spin_lock_irq(&port->port_lock);
54577- cond = (port->port.count == 0) && !port->openclose;
54578+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54579 spin_unlock_irq(&port->port_lock);
54580 return cond;
54581 }
54582@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54583 /* if it's already open, start I/O ... and notify the serial
54584 * protocol about open/close status (connect/disconnect).
54585 */
54586- if (port->port.count) {
54587+ if (atomic_read(&port->port.count)) {
54588 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54589 gs_start_io(port);
54590 if (gser->connect)
54591@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
54592
54593 port->port_usb = NULL;
54594 gser->ioport = NULL;
54595- if (port->port.count > 0 || port->openclose) {
54596+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54597 wake_up_interruptible(&port->drain_wait);
54598 if (port->port.tty)
54599 tty_hangup(port->port.tty);
54600@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
54601
54602 /* finally, free any unused/unusable I/O buffers */
54603 spin_lock_irqsave(&port->port_lock, flags);
54604- if (port->port.count == 0 && !port->openclose)
54605+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54606 gs_buf_free(&port->port_write_buf);
54607 gs_free_requests(gser->out, &port->read_pool, NULL);
54608 gs_free_requests(gser->out, &port->read_queue, NULL);
54609diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54610index 7a55fea..cc0ed4f 100644
54611--- a/drivers/usb/gadget/function/u_uac1.c
54612+++ b/drivers/usb/gadget/function/u_uac1.c
54613@@ -16,6 +16,7 @@
54614 #include <linux/ctype.h>
54615 #include <linux/random.h>
54616 #include <linux/syscalls.h>
54617+#include <linux/module.h>
54618
54619 #include "u_uac1.h"
54620
54621diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54622index 6130b75..3b60008 100644
54623--- a/drivers/usb/host/ehci-hub.c
54624+++ b/drivers/usb/host/ehci-hub.c
54625@@ -771,7 +771,7 @@ static struct urb *request_single_step_set_feature_urb(
54626 urb->transfer_flags = URB_DIR_IN;
54627 usb_get_urb(urb);
54628 atomic_inc(&urb->use_count);
54629- atomic_inc(&urb->dev->urbnum);
54630+ atomic_inc_unchecked(&urb->dev->urbnum);
54631 urb->setup_dma = dma_map_single(
54632 hcd->self.controller,
54633 urb->setup_packet,
54634@@ -838,7 +838,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54635 urb->status = -EINPROGRESS;
54636 usb_get_urb(urb);
54637 atomic_inc(&urb->use_count);
54638- atomic_inc(&urb->dev->urbnum);
54639+ atomic_inc_unchecked(&urb->dev->urbnum);
54640 retval = submit_single_step_set_feature(hcd, urb, 0);
54641 if (!retval && !wait_for_completion_timeout(&done,
54642 msecs_to_jiffies(2000))) {
54643diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54644index d0d8fad..668ef7b 100644
54645--- a/drivers/usb/host/hwa-hc.c
54646+++ b/drivers/usb/host/hwa-hc.c
54647@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54648 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54649 struct wahc *wa = &hwahc->wa;
54650 struct device *dev = &wa->usb_iface->dev;
54651- u8 mas_le[UWB_NUM_MAS/8];
54652+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54653+
54654+ if (mas_le == NULL)
54655+ return -ENOMEM;
54656
54657 /* Set the stream index */
54658 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54659@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54660 WUSB_REQ_SET_WUSB_MAS,
54661 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54662 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54663- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54664+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54665 if (result < 0)
54666 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54667 out:
54668+ kfree(mas_le);
54669+
54670 return result;
54671 }
54672
54673diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54674index b3d245e..99549ed 100644
54675--- a/drivers/usb/misc/appledisplay.c
54676+++ b/drivers/usb/misc/appledisplay.c
54677@@ -84,7 +84,7 @@ struct appledisplay {
54678 struct mutex sysfslock; /* concurrent read and write */
54679 };
54680
54681-static atomic_t count_displays = ATOMIC_INIT(0);
54682+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54683 static struct workqueue_struct *wq;
54684
54685 static void appledisplay_complete(struct urb *urb)
54686@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54687
54688 /* Register backlight device */
54689 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54690- atomic_inc_return(&count_displays) - 1);
54691+ atomic_inc_return_unchecked(&count_displays) - 1);
54692 memset(&props, 0, sizeof(struct backlight_properties));
54693 props.type = BACKLIGHT_RAW;
54694 props.max_brightness = 0xff;
54695diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54696index 8d7fc48..01c4986 100644
54697--- a/drivers/usb/serial/console.c
54698+++ b/drivers/usb/serial/console.c
54699@@ -123,7 +123,7 @@ static int usb_console_setup(struct console *co, char *options)
54700
54701 info->port = port;
54702
54703- ++port->port.count;
54704+ atomic_inc(&port->port.count);
54705 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54706 if (serial->type->set_termios) {
54707 /*
54708@@ -167,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
54709 }
54710 /* Now that any required fake tty operations are completed restore
54711 * the tty port count */
54712- --port->port.count;
54713+ atomic_dec(&port->port.count);
54714 /* The console is special in terms of closing the device so
54715 * indicate this port is now acting as a system console. */
54716 port->port.console = 1;
54717@@ -180,7 +180,7 @@ static int usb_console_setup(struct console *co, char *options)
54718 free_tty:
54719 kfree(tty);
54720 reset_open_count:
54721- port->port.count = 0;
54722+ atomic_set(&port->port.count, 0);
54723 usb_autopm_put_interface(serial->interface);
54724 error_get_interface:
54725 usb_serial_put(serial);
54726@@ -191,7 +191,7 @@ static int usb_console_setup(struct console *co, char *options)
54727 static void usb_console_write(struct console *co,
54728 const char *buf, unsigned count)
54729 {
54730- static struct usbcons_info *info = &usbcons_info;
54731+ struct usbcons_info *info = &usbcons_info;
54732 struct usb_serial_port *port = info->port;
54733 struct usb_serial *serial;
54734 int retval = -ENODEV;
54735diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54736index 307e339..6aa97cb 100644
54737--- a/drivers/usb/storage/usb.h
54738+++ b/drivers/usb/storage/usb.h
54739@@ -63,7 +63,7 @@ struct us_unusual_dev {
54740 __u8 useProtocol;
54741 __u8 useTransport;
54742 int (*initFunction)(struct us_data *);
54743-};
54744+} __do_const;
54745
54746
54747 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
54748diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54749index a863a98..d272795 100644
54750--- a/drivers/usb/usbip/vhci.h
54751+++ b/drivers/usb/usbip/vhci.h
54752@@ -83,7 +83,7 @@ struct vhci_hcd {
54753 unsigned resuming:1;
54754 unsigned long re_timeout;
54755
54756- atomic_t seqnum;
54757+ atomic_unchecked_t seqnum;
54758
54759 /*
54760 * NOTE:
54761diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54762index c02374b..32d47a9 100644
54763--- a/drivers/usb/usbip/vhci_hcd.c
54764+++ b/drivers/usb/usbip/vhci_hcd.c
54765@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
54766
54767 spin_lock(&vdev->priv_lock);
54768
54769- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54770+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54771 if (priv->seqnum == 0xffff)
54772 dev_info(&urb->dev->dev, "seqnum max\n");
54773
54774@@ -686,7 +686,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54775 return -ENOMEM;
54776 }
54777
54778- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54779+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54780 if (unlink->seqnum == 0xffff)
54781 pr_info("seqnum max\n");
54782
54783@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
54784 vdev->rhport = rhport;
54785 }
54786
54787- atomic_set(&vhci->seqnum, 0);
54788+ atomic_set_unchecked(&vhci->seqnum, 0);
54789 spin_lock_init(&vhci->lock);
54790
54791 hcd->power_budget = 0; /* no limit */
54792diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54793index 00e4a54..d676f85 100644
54794--- a/drivers/usb/usbip/vhci_rx.c
54795+++ b/drivers/usb/usbip/vhci_rx.c
54796@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54797 if (!urb) {
54798 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54799 pr_info("max seqnum %d\n",
54800- atomic_read(&the_controller->seqnum));
54801+ atomic_read_unchecked(&the_controller->seqnum));
54802 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54803 return;
54804 }
54805diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54806index f2a8d29..7bc3fe7 100644
54807--- a/drivers/usb/wusbcore/wa-hc.h
54808+++ b/drivers/usb/wusbcore/wa-hc.h
54809@@ -240,7 +240,7 @@ struct wahc {
54810 spinlock_t xfer_list_lock;
54811 struct work_struct xfer_enqueue_work;
54812 struct work_struct xfer_error_work;
54813- atomic_t xfer_id_count;
54814+ atomic_unchecked_t xfer_id_count;
54815
54816 kernel_ulong_t quirks;
54817 };
54818@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54819 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54820 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54821 wa->dto_in_use = 0;
54822- atomic_set(&wa->xfer_id_count, 1);
54823+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54824 /* init the buf in URBs */
54825 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54826 usb_init_urb(&(wa->buf_in_urbs[index]));
54827diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54828index e279015..c2d0dae 100644
54829--- a/drivers/usb/wusbcore/wa-xfer.c
54830+++ b/drivers/usb/wusbcore/wa-xfer.c
54831@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54832 */
54833 static void wa_xfer_id_init(struct wa_xfer *xfer)
54834 {
54835- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54836+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54837 }
54838
54839 /* Return the xfer's ID. */
54840diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54841index f018d8d..ccab63f 100644
54842--- a/drivers/vfio/vfio.c
54843+++ b/drivers/vfio/vfio.c
54844@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54845 return 0;
54846
54847 /* TODO Prevent device auto probing */
54848- WARN("Device %s added to live group %d!\n", dev_name(dev),
54849+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54850 iommu_group_id(group->iommu_group));
54851
54852 return 0;
54853diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54854index 5174eba..451e6bc 100644
54855--- a/drivers/vhost/vringh.c
54856+++ b/drivers/vhost/vringh.c
54857@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54858 /* Userspace access helpers: in this case, addresses are really userspace. */
54859 static inline int getu16_user(u16 *val, const u16 *p)
54860 {
54861- return get_user(*val, (__force u16 __user *)p);
54862+ return get_user(*val, (u16 __force_user *)p);
54863 }
54864
54865 static inline int putu16_user(u16 *p, u16 val)
54866 {
54867- return put_user(val, (__force u16 __user *)p);
54868+ return put_user(val, (u16 __force_user *)p);
54869 }
54870
54871 static inline int copydesc_user(void *dst, const void *src, size_t len)
54872 {
54873- return copy_from_user(dst, (__force void __user *)src, len) ?
54874+ return copy_from_user(dst, (void __force_user *)src, len) ?
54875 -EFAULT : 0;
54876 }
54877
54878@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54879 const struct vring_used_elem *src,
54880 unsigned int num)
54881 {
54882- return copy_to_user((__force void __user *)dst, src,
54883+ return copy_to_user((void __force_user *)dst, src,
54884 sizeof(*dst) * num) ? -EFAULT : 0;
54885 }
54886
54887 static inline int xfer_from_user(void *src, void *dst, size_t len)
54888 {
54889- return copy_from_user(dst, (__force void __user *)src, len) ?
54890+ return copy_from_user(dst, (void __force_user *)src, len) ?
54891 -EFAULT : 0;
54892 }
54893
54894 static inline int xfer_to_user(void *dst, void *src, size_t len)
54895 {
54896- return copy_to_user((__force void __user *)dst, src, len) ?
54897+ return copy_to_user((void __force_user *)dst, src, len) ?
54898 -EFAULT : 0;
54899 }
54900
54901@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
54902 vrh->last_used_idx = 0;
54903 vrh->vring.num = num;
54904 /* vring expects kernel addresses, but only used via accessors. */
54905- vrh->vring.desc = (__force struct vring_desc *)desc;
54906- vrh->vring.avail = (__force struct vring_avail *)avail;
54907- vrh->vring.used = (__force struct vring_used *)used;
54908+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54909+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54910+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54911 return 0;
54912 }
54913 EXPORT_SYMBOL(vringh_init_user);
54914@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
54915
54916 static inline int putu16_kern(u16 *p, u16 val)
54917 {
54918- ACCESS_ONCE(*p) = val;
54919+ ACCESS_ONCE_RW(*p) = val;
54920 return 0;
54921 }
54922
54923diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54924index 84a110a..96312c3 100644
54925--- a/drivers/video/backlight/kb3886_bl.c
54926+++ b/drivers/video/backlight/kb3886_bl.c
54927@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54928 static unsigned long kb3886bl_flags;
54929 #define KB3886BL_SUSPENDED 0x01
54930
54931-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54932+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54933 {
54934 .ident = "Sahara Touch-iT",
54935 .matches = {
54936diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54937index 1b0b233..6f34c2c 100644
54938--- a/drivers/video/fbdev/arcfb.c
54939+++ b/drivers/video/fbdev/arcfb.c
54940@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54941 return -ENOSPC;
54942
54943 err = 0;
54944- if ((count + p) > fbmemlength) {
54945+ if (count > (fbmemlength - p)) {
54946 count = fbmemlength - p;
54947 err = -ENOSPC;
54948 }
54949diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54950index ff60701..814b973 100644
54951--- a/drivers/video/fbdev/aty/aty128fb.c
54952+++ b/drivers/video/fbdev/aty/aty128fb.c
54953@@ -149,7 +149,7 @@ enum {
54954 };
54955
54956 /* Must match above enum */
54957-static char * const r128_family[] = {
54958+static const char * const r128_family[] = {
54959 "AGP",
54960 "PCI",
54961 "PRO AGP",
54962diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54963index 37ec09b..98f8862 100644
54964--- a/drivers/video/fbdev/aty/atyfb_base.c
54965+++ b/drivers/video/fbdev/aty/atyfb_base.c
54966@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54967 par->accel_flags = var->accel_flags; /* hack */
54968
54969 if (var->accel_flags) {
54970- info->fbops->fb_sync = atyfb_sync;
54971+ pax_open_kernel();
54972+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54973+ pax_close_kernel();
54974 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54975 } else {
54976- info->fbops->fb_sync = NULL;
54977+ pax_open_kernel();
54978+ *(void **)&info->fbops->fb_sync = NULL;
54979+ pax_close_kernel();
54980 info->flags |= FBINFO_HWACCEL_DISABLED;
54981 }
54982
54983diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54984index 2fa0317..4983f2a 100644
54985--- a/drivers/video/fbdev/aty/mach64_cursor.c
54986+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54987@@ -8,6 +8,7 @@
54988 #include "../core/fb_draw.h"
54989
54990 #include <asm/io.h>
54991+#include <asm/pgtable.h>
54992
54993 #ifdef __sparc__
54994 #include <asm/fbio.h>
54995@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
54996 info->sprite.buf_align = 16; /* and 64 lines tall. */
54997 info->sprite.flags = FB_PIXMAP_IO;
54998
54999- info->fbops->fb_cursor = atyfb_cursor;
55000+ pax_open_kernel();
55001+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
55002+ pax_close_kernel();
55003
55004 return 0;
55005 }
55006diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
55007index 900aa4e..6d49418 100644
55008--- a/drivers/video/fbdev/core/fb_defio.c
55009+++ b/drivers/video/fbdev/core/fb_defio.c
55010@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
55011
55012 BUG_ON(!fbdefio);
55013 mutex_init(&fbdefio->lock);
55014- info->fbops->fb_mmap = fb_deferred_io_mmap;
55015+ pax_open_kernel();
55016+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
55017+ pax_close_kernel();
55018 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
55019 INIT_LIST_HEAD(&fbdefio->pagelist);
55020 if (fbdefio->delay == 0) /* set a default of 1 s */
55021@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
55022 page->mapping = NULL;
55023 }
55024
55025- info->fbops->fb_mmap = NULL;
55026+ *(void **)&info->fbops->fb_mmap = NULL;
55027 mutex_destroy(&fbdefio->lock);
55028 }
55029 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
55030diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
55031index b5e85f6..290f8c7 100644
55032--- a/drivers/video/fbdev/core/fbmem.c
55033+++ b/drivers/video/fbdev/core/fbmem.c
55034@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
55035 __u32 data;
55036 int err;
55037
55038- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
55039+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
55040
55041 data = (__u32) (unsigned long) fix->smem_start;
55042 err |= put_user(data, &fix32->smem_start);
55043diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
55044index 4254336..282567e 100644
55045--- a/drivers/video/fbdev/hyperv_fb.c
55046+++ b/drivers/video/fbdev/hyperv_fb.c
55047@@ -240,7 +240,7 @@ static uint screen_fb_size;
55048 static inline int synthvid_send(struct hv_device *hdev,
55049 struct synthvid_msg *msg)
55050 {
55051- static atomic64_t request_id = ATOMIC64_INIT(0);
55052+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
55053 int ret;
55054
55055 msg->pipe_hdr.type = PIPE_MSG_DATA;
55056@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
55057
55058 ret = vmbus_sendpacket(hdev->channel, msg,
55059 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
55060- atomic64_inc_return(&request_id),
55061+ atomic64_inc_return_unchecked(&request_id),
55062 VM_PKT_DATA_INBAND, 0);
55063
55064 if (ret)
55065diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
55066index 7672d2e..b56437f 100644
55067--- a/drivers/video/fbdev/i810/i810_accel.c
55068+++ b/drivers/video/fbdev/i810/i810_accel.c
55069@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
55070 }
55071 }
55072 printk("ringbuffer lockup!!!\n");
55073+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
55074 i810_report_error(mmio);
55075 par->dev_flags |= LOCKUP;
55076 info->pixmap.scan_align = 1;
55077diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55078index a01147f..5d896f8 100644
55079--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55080+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55081@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55082
55083 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55084 struct matrox_switch matrox_mystique = {
55085- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55086+ .preinit = MGA1064_preinit,
55087+ .reset = MGA1064_reset,
55088+ .init = MGA1064_init,
55089+ .restore = MGA1064_restore,
55090 };
55091 EXPORT_SYMBOL(matrox_mystique);
55092 #endif
55093
55094 #ifdef CONFIG_FB_MATROX_G
55095 struct matrox_switch matrox_G100 = {
55096- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55097+ .preinit = MGAG100_preinit,
55098+ .reset = MGAG100_reset,
55099+ .init = MGAG100_init,
55100+ .restore = MGAG100_restore,
55101 };
55102 EXPORT_SYMBOL(matrox_G100);
55103 #endif
55104diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55105index 195ad7c..09743fc 100644
55106--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55107+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55108@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55109 }
55110
55111 struct matrox_switch matrox_millennium = {
55112- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55113+ .preinit = Ti3026_preinit,
55114+ .reset = Ti3026_reset,
55115+ .init = Ti3026_init,
55116+ .restore = Ti3026_restore
55117 };
55118 EXPORT_SYMBOL(matrox_millennium);
55119 #endif
55120diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55121index fe92eed..106e085 100644
55122--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55123+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55124@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55125 struct mb862xxfb_par *par = info->par;
55126
55127 if (info->var.bits_per_pixel == 32) {
55128- info->fbops->fb_fillrect = cfb_fillrect;
55129- info->fbops->fb_copyarea = cfb_copyarea;
55130- info->fbops->fb_imageblit = cfb_imageblit;
55131+ pax_open_kernel();
55132+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55133+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55134+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55135+ pax_close_kernel();
55136 } else {
55137 outreg(disp, GC_L0EM, 3);
55138- info->fbops->fb_fillrect = mb86290fb_fillrect;
55139- info->fbops->fb_copyarea = mb86290fb_copyarea;
55140- info->fbops->fb_imageblit = mb86290fb_imageblit;
55141+ pax_open_kernel();
55142+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
55143+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
55144+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
55145+ pax_close_kernel();
55146 }
55147 outreg(draw, GDC_REG_DRAW_BASE, 0);
55148 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
55149diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
55150index def0412..fed6529 100644
55151--- a/drivers/video/fbdev/nvidia/nvidia.c
55152+++ b/drivers/video/fbdev/nvidia/nvidia.c
55153@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
55154 info->fix.line_length = (info->var.xres_virtual *
55155 info->var.bits_per_pixel) >> 3;
55156 if (info->var.accel_flags) {
55157- info->fbops->fb_imageblit = nvidiafb_imageblit;
55158- info->fbops->fb_fillrect = nvidiafb_fillrect;
55159- info->fbops->fb_copyarea = nvidiafb_copyarea;
55160- info->fbops->fb_sync = nvidiafb_sync;
55161+ pax_open_kernel();
55162+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
55163+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
55164+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
55165+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
55166+ pax_close_kernel();
55167 info->pixmap.scan_align = 4;
55168 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55169 info->flags |= FBINFO_READS_FAST;
55170 NVResetGraphics(info);
55171 } else {
55172- info->fbops->fb_imageblit = cfb_imageblit;
55173- info->fbops->fb_fillrect = cfb_fillrect;
55174- info->fbops->fb_copyarea = cfb_copyarea;
55175- info->fbops->fb_sync = NULL;
55176+ pax_open_kernel();
55177+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55178+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55179+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55180+ *(void **)&info->fbops->fb_sync = NULL;
55181+ pax_close_kernel();
55182 info->pixmap.scan_align = 1;
55183 info->flags |= FBINFO_HWACCEL_DISABLED;
55184 info->flags &= ~FBINFO_READS_FAST;
55185@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
55186 info->pixmap.size = 8 * 1024;
55187 info->pixmap.flags = FB_PIXMAP_SYSTEM;
55188
55189- if (!hwcur)
55190- info->fbops->fb_cursor = NULL;
55191+ if (!hwcur) {
55192+ pax_open_kernel();
55193+ *(void **)&info->fbops->fb_cursor = NULL;
55194+ pax_close_kernel();
55195+ }
55196
55197 info->var.accel_flags = (!noaccel);
55198
55199diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
55200index 2412a0d..294215b 100644
55201--- a/drivers/video/fbdev/omap2/dss/display.c
55202+++ b/drivers/video/fbdev/omap2/dss/display.c
55203@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
55204 if (dssdev->name == NULL)
55205 dssdev->name = dssdev->alias;
55206
55207+ pax_open_kernel();
55208 if (drv && drv->get_resolution == NULL)
55209- drv->get_resolution = omapdss_default_get_resolution;
55210+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
55211 if (drv && drv->get_recommended_bpp == NULL)
55212- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55213+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55214 if (drv && drv->get_timings == NULL)
55215- drv->get_timings = omapdss_default_get_timings;
55216+ *(void **)&drv->get_timings = omapdss_default_get_timings;
55217+ pax_close_kernel();
55218
55219 mutex_lock(&panel_list_mutex);
55220 list_add_tail(&dssdev->panel_list, &panel_list);
55221diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
55222index 83433cb..71e9b98 100644
55223--- a/drivers/video/fbdev/s1d13xxxfb.c
55224+++ b/drivers/video/fbdev/s1d13xxxfb.c
55225@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
55226
55227 switch(prod_id) {
55228 case S1D13506_PROD_ID: /* activate acceleration */
55229- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55230- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55231+ pax_open_kernel();
55232+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55233+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55234+ pax_close_kernel();
55235 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
55236 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
55237 break;
55238diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55239index 2bcc84a..29dd1ea 100644
55240--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
55241+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55242@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
55243 }
55244
55245 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
55246- lcdc_sys_write_index,
55247- lcdc_sys_write_data,
55248- lcdc_sys_read_data,
55249+ .write_index = lcdc_sys_write_index,
55250+ .write_data = lcdc_sys_write_data,
55251+ .read_data = lcdc_sys_read_data,
55252 };
55253
55254 static int sh_mobile_lcdc_sginit(struct fb_info *info,
55255diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
55256index d513ed6..90b0de9 100644
55257--- a/drivers/video/fbdev/smscufx.c
55258+++ b/drivers/video/fbdev/smscufx.c
55259@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
55260 fb_deferred_io_cleanup(info);
55261 kfree(info->fbdefio);
55262 info->fbdefio = NULL;
55263- info->fbops->fb_mmap = ufx_ops_mmap;
55264+ pax_open_kernel();
55265+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
55266+ pax_close_kernel();
55267 }
55268
55269 pr_debug("released /dev/fb%d user=%d count=%d",
55270diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
55271index 77b890e..458e666 100644
55272--- a/drivers/video/fbdev/udlfb.c
55273+++ b/drivers/video/fbdev/udlfb.c
55274@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
55275 dlfb_urb_completion(urb);
55276
55277 error:
55278- atomic_add(bytes_sent, &dev->bytes_sent);
55279- atomic_add(bytes_identical, &dev->bytes_identical);
55280- atomic_add(width*height*2, &dev->bytes_rendered);
55281+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55282+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55283+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
55284 end_cycles = get_cycles();
55285- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55286+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55287 >> 10)), /* Kcycles */
55288 &dev->cpu_kcycles_used);
55289
55290@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
55291 dlfb_urb_completion(urb);
55292
55293 error:
55294- atomic_add(bytes_sent, &dev->bytes_sent);
55295- atomic_add(bytes_identical, &dev->bytes_identical);
55296- atomic_add(bytes_rendered, &dev->bytes_rendered);
55297+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55298+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55299+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
55300 end_cycles = get_cycles();
55301- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55302+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55303 >> 10)), /* Kcycles */
55304 &dev->cpu_kcycles_used);
55305 }
55306@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
55307 fb_deferred_io_cleanup(info);
55308 kfree(info->fbdefio);
55309 info->fbdefio = NULL;
55310- info->fbops->fb_mmap = dlfb_ops_mmap;
55311+ pax_open_kernel();
55312+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
55313+ pax_close_kernel();
55314 }
55315
55316 pr_warn("released /dev/fb%d user=%d count=%d\n",
55317@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
55318 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55319 struct dlfb_data *dev = fb_info->par;
55320 return snprintf(buf, PAGE_SIZE, "%u\n",
55321- atomic_read(&dev->bytes_rendered));
55322+ atomic_read_unchecked(&dev->bytes_rendered));
55323 }
55324
55325 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55326@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55327 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55328 struct dlfb_data *dev = fb_info->par;
55329 return snprintf(buf, PAGE_SIZE, "%u\n",
55330- atomic_read(&dev->bytes_identical));
55331+ atomic_read_unchecked(&dev->bytes_identical));
55332 }
55333
55334 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55335@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55336 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55337 struct dlfb_data *dev = fb_info->par;
55338 return snprintf(buf, PAGE_SIZE, "%u\n",
55339- atomic_read(&dev->bytes_sent));
55340+ atomic_read_unchecked(&dev->bytes_sent));
55341 }
55342
55343 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55344@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55345 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55346 struct dlfb_data *dev = fb_info->par;
55347 return snprintf(buf, PAGE_SIZE, "%u\n",
55348- atomic_read(&dev->cpu_kcycles_used));
55349+ atomic_read_unchecked(&dev->cpu_kcycles_used));
55350 }
55351
55352 static ssize_t edid_show(
55353@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
55354 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55355 struct dlfb_data *dev = fb_info->par;
55356
55357- atomic_set(&dev->bytes_rendered, 0);
55358- atomic_set(&dev->bytes_identical, 0);
55359- atomic_set(&dev->bytes_sent, 0);
55360- atomic_set(&dev->cpu_kcycles_used, 0);
55361+ atomic_set_unchecked(&dev->bytes_rendered, 0);
55362+ atomic_set_unchecked(&dev->bytes_identical, 0);
55363+ atomic_set_unchecked(&dev->bytes_sent, 0);
55364+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
55365
55366 return count;
55367 }
55368diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
55369index 509d452..7c9d2de 100644
55370--- a/drivers/video/fbdev/uvesafb.c
55371+++ b/drivers/video/fbdev/uvesafb.c
55372@@ -19,6 +19,7 @@
55373 #include <linux/io.h>
55374 #include <linux/mutex.h>
55375 #include <linux/slab.h>
55376+#include <linux/moduleloader.h>
55377 #include <video/edid.h>
55378 #include <video/uvesafb.h>
55379 #ifdef CONFIG_X86
55380@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
55381 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
55382 par->pmi_setpal = par->ypan = 0;
55383 } else {
55384+
55385+#ifdef CONFIG_PAX_KERNEXEC
55386+#ifdef CONFIG_MODULES
55387+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
55388+#endif
55389+ if (!par->pmi_code) {
55390+ par->pmi_setpal = par->ypan = 0;
55391+ return 0;
55392+ }
55393+#endif
55394+
55395 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
55396 + task->t.regs.edi);
55397+
55398+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55399+ pax_open_kernel();
55400+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55401+ pax_close_kernel();
55402+
55403+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55404+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55405+#else
55406 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55407 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55408+#endif
55409+
55410 printk(KERN_INFO "uvesafb: protected mode interface info at "
55411 "%04x:%04x\n",
55412 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55413@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55414 par->ypan = ypan;
55415
55416 if (par->pmi_setpal || par->ypan) {
55417+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55418 if (__supported_pte_mask & _PAGE_NX) {
55419 par->pmi_setpal = par->ypan = 0;
55420 printk(KERN_WARNING "uvesafb: NX protection is active, "
55421 "better not use the PMI.\n");
55422- } else {
55423+ } else
55424+#endif
55425 uvesafb_vbe_getpmi(task, par);
55426- }
55427 }
55428 #else
55429 /* The protected mode interface is not available on non-x86. */
55430@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55431 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55432
55433 /* Disable blanking if the user requested so. */
55434- if (!blank)
55435- info->fbops->fb_blank = NULL;
55436+ if (!blank) {
55437+ pax_open_kernel();
55438+ *(void **)&info->fbops->fb_blank = NULL;
55439+ pax_close_kernel();
55440+ }
55441
55442 /*
55443 * Find out how much IO memory is required for the mode with
55444@@ -1525,8 +1552,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55445 info->flags = FBINFO_FLAG_DEFAULT |
55446 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55447
55448- if (!par->ypan)
55449- info->fbops->fb_pan_display = NULL;
55450+ if (!par->ypan) {
55451+ pax_open_kernel();
55452+ *(void **)&info->fbops->fb_pan_display = NULL;
55453+ pax_close_kernel();
55454+ }
55455 }
55456
55457 static void uvesafb_init_mtrr(struct fb_info *info)
55458@@ -1787,6 +1817,11 @@ out_mode:
55459 out:
55460 kfree(par->vbe_modes);
55461
55462+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55463+ if (par->pmi_code)
55464+ module_free_exec(NULL, par->pmi_code);
55465+#endif
55466+
55467 framebuffer_release(info);
55468 return err;
55469 }
55470@@ -1811,6 +1846,11 @@ static int uvesafb_remove(struct platform_device *dev)
55471 kfree(par->vbe_state_orig);
55472 kfree(par->vbe_state_saved);
55473
55474+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55475+ if (par->pmi_code)
55476+ module_free_exec(NULL, par->pmi_code);
55477+#endif
55478+
55479 framebuffer_release(info);
55480 }
55481 return 0;
55482diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55483index 6170e7f..dd63031 100644
55484--- a/drivers/video/fbdev/vesafb.c
55485+++ b/drivers/video/fbdev/vesafb.c
55486@@ -9,6 +9,7 @@
55487 */
55488
55489 #include <linux/module.h>
55490+#include <linux/moduleloader.h>
55491 #include <linux/kernel.h>
55492 #include <linux/errno.h>
55493 #include <linux/string.h>
55494@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55495 static int vram_total; /* Set total amount of memory */
55496 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55497 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55498-static void (*pmi_start)(void) __read_mostly;
55499-static void (*pmi_pal) (void) __read_mostly;
55500+static void (*pmi_start)(void) __read_only;
55501+static void (*pmi_pal) (void) __read_only;
55502 static int depth __read_mostly;
55503 static int vga_compat __read_mostly;
55504 /* --------------------------------------------------------------------- */
55505@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55506 unsigned int size_remap;
55507 unsigned int size_total;
55508 char *option = NULL;
55509+ void *pmi_code = NULL;
55510
55511 /* ignore error return of fb_get_options */
55512 fb_get_options("vesafb", &option);
55513@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55514 size_remap = size_total;
55515 vesafb_fix.smem_len = size_remap;
55516
55517-#ifndef __i386__
55518- screen_info.vesapm_seg = 0;
55519-#endif
55520-
55521 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55522 printk(KERN_WARNING
55523 "vesafb: cannot reserve video memory at 0x%lx\n",
55524@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55525 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55526 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55527
55528+#ifdef __i386__
55529+
55530+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55531+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55532+ if (!pmi_code)
55533+#elif !defined(CONFIG_PAX_KERNEXEC)
55534+ if (0)
55535+#endif
55536+
55537+#endif
55538+ screen_info.vesapm_seg = 0;
55539+
55540 if (screen_info.vesapm_seg) {
55541- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55542- screen_info.vesapm_seg,screen_info.vesapm_off);
55543+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55544+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55545 }
55546
55547 if (screen_info.vesapm_seg < 0xc000)
55548@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55549
55550 if (ypan || pmi_setpal) {
55551 unsigned short *pmi_base;
55552+
55553 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55554- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55555- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55556+
55557+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55558+ pax_open_kernel();
55559+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55560+#else
55561+ pmi_code = pmi_base;
55562+#endif
55563+
55564+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55565+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55566+
55567+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55568+ pmi_start = ktva_ktla(pmi_start);
55569+ pmi_pal = ktva_ktla(pmi_pal);
55570+ pax_close_kernel();
55571+#endif
55572+
55573 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55574 if (pmi_base[3]) {
55575 printk(KERN_INFO "vesafb: pmi: ports = ");
55576@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55577 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55578 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55579
55580- if (!ypan)
55581- info->fbops->fb_pan_display = NULL;
55582+ if (!ypan) {
55583+ pax_open_kernel();
55584+ *(void **)&info->fbops->fb_pan_display = NULL;
55585+ pax_close_kernel();
55586+ }
55587
55588 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55589 err = -ENOMEM;
55590@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55591 fb_info(info, "%s frame buffer device\n", info->fix.id);
55592 return 0;
55593 err:
55594+
55595+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55596+ module_free_exec(NULL, pmi_code);
55597+#endif
55598+
55599 if (info->screen_base)
55600 iounmap(info->screen_base);
55601 framebuffer_release(info);
55602diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55603index 88714ae..16c2e11 100644
55604--- a/drivers/video/fbdev/via/via_clock.h
55605+++ b/drivers/video/fbdev/via/via_clock.h
55606@@ -56,7 +56,7 @@ struct via_clock {
55607
55608 void (*set_engine_pll_state)(u8 state);
55609 void (*set_engine_pll)(struct via_pll_config config);
55610-};
55611+} __no_const;
55612
55613
55614 static inline u32 get_pll_internal_frequency(u32 ref_freq,
55615diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55616index 3c14e43..2630570 100644
55617--- a/drivers/video/logo/logo_linux_clut224.ppm
55618+++ b/drivers/video/logo/logo_linux_clut224.ppm
55619@@ -2,1603 +2,1123 @@ P3
55620 # Standard 224-color Linux logo
55621 80 80
55622 255
55623- 0 0 0 0 0 0 0 0 0 0 0 0
55624- 0 0 0 0 0 0 0 0 0 0 0 0
55625- 0 0 0 0 0 0 0 0 0 0 0 0
55626- 0 0 0 0 0 0 0 0 0 0 0 0
55627- 0 0 0 0 0 0 0 0 0 0 0 0
55628- 0 0 0 0 0 0 0 0 0 0 0 0
55629- 0 0 0 0 0 0 0 0 0 0 0 0
55630- 0 0 0 0 0 0 0 0 0 0 0 0
55631- 0 0 0 0 0 0 0 0 0 0 0 0
55632- 6 6 6 6 6 6 10 10 10 10 10 10
55633- 10 10 10 6 6 6 6 6 6 6 6 6
55634- 0 0 0 0 0 0 0 0 0 0 0 0
55635- 0 0 0 0 0 0 0 0 0 0 0 0
55636- 0 0 0 0 0 0 0 0 0 0 0 0
55637- 0 0 0 0 0 0 0 0 0 0 0 0
55638- 0 0 0 0 0 0 0 0 0 0 0 0
55639- 0 0 0 0 0 0 0 0 0 0 0 0
55640- 0 0 0 0 0 0 0 0 0 0 0 0
55641- 0 0 0 0 0 0 0 0 0 0 0 0
55642- 0 0 0 0 0 0 0 0 0 0 0 0
55643- 0 0 0 0 0 0 0 0 0 0 0 0
55644- 0 0 0 0 0 0 0 0 0 0 0 0
55645- 0 0 0 0 0 0 0 0 0 0 0 0
55646- 0 0 0 0 0 0 0 0 0 0 0 0
55647- 0 0 0 0 0 0 0 0 0 0 0 0
55648- 0 0 0 0 0 0 0 0 0 0 0 0
55649- 0 0 0 0 0 0 0 0 0 0 0 0
55650- 0 0 0 0 0 0 0 0 0 0 0 0
55651- 0 0 0 6 6 6 10 10 10 14 14 14
55652- 22 22 22 26 26 26 30 30 30 34 34 34
55653- 30 30 30 30 30 30 26 26 26 18 18 18
55654- 14 14 14 10 10 10 6 6 6 0 0 0
55655- 0 0 0 0 0 0 0 0 0 0 0 0
55656- 0 0 0 0 0 0 0 0 0 0 0 0
55657- 0 0 0 0 0 0 0 0 0 0 0 0
55658- 0 0 0 0 0 0 0 0 0 0 0 0
55659- 0 0 0 0 0 0 0 0 0 0 0 0
55660- 0 0 0 0 0 0 0 0 0 0 0 0
55661- 0 0 0 0 0 0 0 0 0 0 0 0
55662- 0 0 0 0 0 0 0 0 0 0 0 0
55663- 0 0 0 0 0 0 0 0 0 0 0 0
55664- 0 0 0 0 0 1 0 0 1 0 0 0
55665- 0 0 0 0 0 0 0 0 0 0 0 0
55666- 0 0 0 0 0 0 0 0 0 0 0 0
55667- 0 0 0 0 0 0 0 0 0 0 0 0
55668- 0 0 0 0 0 0 0 0 0 0 0 0
55669- 0 0 0 0 0 0 0 0 0 0 0 0
55670- 0 0 0 0 0 0 0 0 0 0 0 0
55671- 6 6 6 14 14 14 26 26 26 42 42 42
55672- 54 54 54 66 66 66 78 78 78 78 78 78
55673- 78 78 78 74 74 74 66 66 66 54 54 54
55674- 42 42 42 26 26 26 18 18 18 10 10 10
55675- 6 6 6 0 0 0 0 0 0 0 0 0
55676- 0 0 0 0 0 0 0 0 0 0 0 0
55677- 0 0 0 0 0 0 0 0 0 0 0 0
55678- 0 0 0 0 0 0 0 0 0 0 0 0
55679- 0 0 0 0 0 0 0 0 0 0 0 0
55680- 0 0 0 0 0 0 0 0 0 0 0 0
55681- 0 0 0 0 0 0 0 0 0 0 0 0
55682- 0 0 0 0 0 0 0 0 0 0 0 0
55683- 0 0 0 0 0 0 0 0 0 0 0 0
55684- 0 0 1 0 0 0 0 0 0 0 0 0
55685- 0 0 0 0 0 0 0 0 0 0 0 0
55686- 0 0 0 0 0 0 0 0 0 0 0 0
55687- 0 0 0 0 0 0 0 0 0 0 0 0
55688- 0 0 0 0 0 0 0 0 0 0 0 0
55689- 0 0 0 0 0 0 0 0 0 0 0 0
55690- 0 0 0 0 0 0 0 0 0 10 10 10
55691- 22 22 22 42 42 42 66 66 66 86 86 86
55692- 66 66 66 38 38 38 38 38 38 22 22 22
55693- 26 26 26 34 34 34 54 54 54 66 66 66
55694- 86 86 86 70 70 70 46 46 46 26 26 26
55695- 14 14 14 6 6 6 0 0 0 0 0 0
55696- 0 0 0 0 0 0 0 0 0 0 0 0
55697- 0 0 0 0 0 0 0 0 0 0 0 0
55698- 0 0 0 0 0 0 0 0 0 0 0 0
55699- 0 0 0 0 0 0 0 0 0 0 0 0
55700- 0 0 0 0 0 0 0 0 0 0 0 0
55701- 0 0 0 0 0 0 0 0 0 0 0 0
55702- 0 0 0 0 0 0 0 0 0 0 0 0
55703- 0 0 0 0 0 0 0 0 0 0 0 0
55704- 0 0 1 0 0 1 0 0 1 0 0 0
55705- 0 0 0 0 0 0 0 0 0 0 0 0
55706- 0 0 0 0 0 0 0 0 0 0 0 0
55707- 0 0 0 0 0 0 0 0 0 0 0 0
55708- 0 0 0 0 0 0 0 0 0 0 0 0
55709- 0 0 0 0 0 0 0 0 0 0 0 0
55710- 0 0 0 0 0 0 10 10 10 26 26 26
55711- 50 50 50 82 82 82 58 58 58 6 6 6
55712- 2 2 6 2 2 6 2 2 6 2 2 6
55713- 2 2 6 2 2 6 2 2 6 2 2 6
55714- 6 6 6 54 54 54 86 86 86 66 66 66
55715- 38 38 38 18 18 18 6 6 6 0 0 0
55716- 0 0 0 0 0 0 0 0 0 0 0 0
55717- 0 0 0 0 0 0 0 0 0 0 0 0
55718- 0 0 0 0 0 0 0 0 0 0 0 0
55719- 0 0 0 0 0 0 0 0 0 0 0 0
55720- 0 0 0 0 0 0 0 0 0 0 0 0
55721- 0 0 0 0 0 0 0 0 0 0 0 0
55722- 0 0 0 0 0 0 0 0 0 0 0 0
55723- 0 0 0 0 0 0 0 0 0 0 0 0
55724- 0 0 0 0 0 0 0 0 0 0 0 0
55725- 0 0 0 0 0 0 0 0 0 0 0 0
55726- 0 0 0 0 0 0 0 0 0 0 0 0
55727- 0 0 0 0 0 0 0 0 0 0 0 0
55728- 0 0 0 0 0 0 0 0 0 0 0 0
55729- 0 0 0 0 0 0 0 0 0 0 0 0
55730- 0 0 0 6 6 6 22 22 22 50 50 50
55731- 78 78 78 34 34 34 2 2 6 2 2 6
55732- 2 2 6 2 2 6 2 2 6 2 2 6
55733- 2 2 6 2 2 6 2 2 6 2 2 6
55734- 2 2 6 2 2 6 6 6 6 70 70 70
55735- 78 78 78 46 46 46 22 22 22 6 6 6
55736- 0 0 0 0 0 0 0 0 0 0 0 0
55737- 0 0 0 0 0 0 0 0 0 0 0 0
55738- 0 0 0 0 0 0 0 0 0 0 0 0
55739- 0 0 0 0 0 0 0 0 0 0 0 0
55740- 0 0 0 0 0 0 0 0 0 0 0 0
55741- 0 0 0 0 0 0 0 0 0 0 0 0
55742- 0 0 0 0 0 0 0 0 0 0 0 0
55743- 0 0 0 0 0 0 0 0 0 0 0 0
55744- 0 0 1 0 0 1 0 0 1 0 0 0
55745- 0 0 0 0 0 0 0 0 0 0 0 0
55746- 0 0 0 0 0 0 0 0 0 0 0 0
55747- 0 0 0 0 0 0 0 0 0 0 0 0
55748- 0 0 0 0 0 0 0 0 0 0 0 0
55749- 0 0 0 0 0 0 0 0 0 0 0 0
55750- 6 6 6 18 18 18 42 42 42 82 82 82
55751- 26 26 26 2 2 6 2 2 6 2 2 6
55752- 2 2 6 2 2 6 2 2 6 2 2 6
55753- 2 2 6 2 2 6 2 2 6 14 14 14
55754- 46 46 46 34 34 34 6 6 6 2 2 6
55755- 42 42 42 78 78 78 42 42 42 18 18 18
55756- 6 6 6 0 0 0 0 0 0 0 0 0
55757- 0 0 0 0 0 0 0 0 0 0 0 0
55758- 0 0 0 0 0 0 0 0 0 0 0 0
55759- 0 0 0 0 0 0 0 0 0 0 0 0
55760- 0 0 0 0 0 0 0 0 0 0 0 0
55761- 0 0 0 0 0 0 0 0 0 0 0 0
55762- 0 0 0 0 0 0 0 0 0 0 0 0
55763- 0 0 0 0 0 0 0 0 0 0 0 0
55764- 0 0 1 0 0 0 0 0 1 0 0 0
55765- 0 0 0 0 0 0 0 0 0 0 0 0
55766- 0 0 0 0 0 0 0 0 0 0 0 0
55767- 0 0 0 0 0 0 0 0 0 0 0 0
55768- 0 0 0 0 0 0 0 0 0 0 0 0
55769- 0 0 0 0 0 0 0 0 0 0 0 0
55770- 10 10 10 30 30 30 66 66 66 58 58 58
55771- 2 2 6 2 2 6 2 2 6 2 2 6
55772- 2 2 6 2 2 6 2 2 6 2 2 6
55773- 2 2 6 2 2 6 2 2 6 26 26 26
55774- 86 86 86 101 101 101 46 46 46 10 10 10
55775- 2 2 6 58 58 58 70 70 70 34 34 34
55776- 10 10 10 0 0 0 0 0 0 0 0 0
55777- 0 0 0 0 0 0 0 0 0 0 0 0
55778- 0 0 0 0 0 0 0 0 0 0 0 0
55779- 0 0 0 0 0 0 0 0 0 0 0 0
55780- 0 0 0 0 0 0 0 0 0 0 0 0
55781- 0 0 0 0 0 0 0 0 0 0 0 0
55782- 0 0 0 0 0 0 0 0 0 0 0 0
55783- 0 0 0 0 0 0 0 0 0 0 0 0
55784- 0 0 1 0 0 1 0 0 1 0 0 0
55785- 0 0 0 0 0 0 0 0 0 0 0 0
55786- 0 0 0 0 0 0 0 0 0 0 0 0
55787- 0 0 0 0 0 0 0 0 0 0 0 0
55788- 0 0 0 0 0 0 0 0 0 0 0 0
55789- 0 0 0 0 0 0 0 0 0 0 0 0
55790- 14 14 14 42 42 42 86 86 86 10 10 10
55791- 2 2 6 2 2 6 2 2 6 2 2 6
55792- 2 2 6 2 2 6 2 2 6 2 2 6
55793- 2 2 6 2 2 6 2 2 6 30 30 30
55794- 94 94 94 94 94 94 58 58 58 26 26 26
55795- 2 2 6 6 6 6 78 78 78 54 54 54
55796- 22 22 22 6 6 6 0 0 0 0 0 0
55797- 0 0 0 0 0 0 0 0 0 0 0 0
55798- 0 0 0 0 0 0 0 0 0 0 0 0
55799- 0 0 0 0 0 0 0 0 0 0 0 0
55800- 0 0 0 0 0 0 0 0 0 0 0 0
55801- 0 0 0 0 0 0 0 0 0 0 0 0
55802- 0 0 0 0 0 0 0 0 0 0 0 0
55803- 0 0 0 0 0 0 0 0 0 0 0 0
55804- 0 0 0 0 0 0 0 0 0 0 0 0
55805- 0 0 0 0 0 0 0 0 0 0 0 0
55806- 0 0 0 0 0 0 0 0 0 0 0 0
55807- 0 0 0 0 0 0 0 0 0 0 0 0
55808- 0 0 0 0 0 0 0 0 0 0 0 0
55809- 0 0 0 0 0 0 0 0 0 6 6 6
55810- 22 22 22 62 62 62 62 62 62 2 2 6
55811- 2 2 6 2 2 6 2 2 6 2 2 6
55812- 2 2 6 2 2 6 2 2 6 2 2 6
55813- 2 2 6 2 2 6 2 2 6 26 26 26
55814- 54 54 54 38 38 38 18 18 18 10 10 10
55815- 2 2 6 2 2 6 34 34 34 82 82 82
55816- 38 38 38 14 14 14 0 0 0 0 0 0
55817- 0 0 0 0 0 0 0 0 0 0 0 0
55818- 0 0 0 0 0 0 0 0 0 0 0 0
55819- 0 0 0 0 0 0 0 0 0 0 0 0
55820- 0 0 0 0 0 0 0 0 0 0 0 0
55821- 0 0 0 0 0 0 0 0 0 0 0 0
55822- 0 0 0 0 0 0 0 0 0 0 0 0
55823- 0 0 0 0 0 0 0 0 0 0 0 0
55824- 0 0 0 0 0 1 0 0 1 0 0 0
55825- 0 0 0 0 0 0 0 0 0 0 0 0
55826- 0 0 0 0 0 0 0 0 0 0 0 0
55827- 0 0 0 0 0 0 0 0 0 0 0 0
55828- 0 0 0 0 0 0 0 0 0 0 0 0
55829- 0 0 0 0 0 0 0 0 0 6 6 6
55830- 30 30 30 78 78 78 30 30 30 2 2 6
55831- 2 2 6 2 2 6 2 2 6 2 2 6
55832- 2 2 6 2 2 6 2 2 6 2 2 6
55833- 2 2 6 2 2 6 2 2 6 10 10 10
55834- 10 10 10 2 2 6 2 2 6 2 2 6
55835- 2 2 6 2 2 6 2 2 6 78 78 78
55836- 50 50 50 18 18 18 6 6 6 0 0 0
55837- 0 0 0 0 0 0 0 0 0 0 0 0
55838- 0 0 0 0 0 0 0 0 0 0 0 0
55839- 0 0 0 0 0 0 0 0 0 0 0 0
55840- 0 0 0 0 0 0 0 0 0 0 0 0
55841- 0 0 0 0 0 0 0 0 0 0 0 0
55842- 0 0 0 0 0 0 0 0 0 0 0 0
55843- 0 0 0 0 0 0 0 0 0 0 0 0
55844- 0 0 1 0 0 0 0 0 0 0 0 0
55845- 0 0 0 0 0 0 0 0 0 0 0 0
55846- 0 0 0 0 0 0 0 0 0 0 0 0
55847- 0 0 0 0 0 0 0 0 0 0 0 0
55848- 0 0 0 0 0 0 0 0 0 0 0 0
55849- 0 0 0 0 0 0 0 0 0 10 10 10
55850- 38 38 38 86 86 86 14 14 14 2 2 6
55851- 2 2 6 2 2 6 2 2 6 2 2 6
55852- 2 2 6 2 2 6 2 2 6 2 2 6
55853- 2 2 6 2 2 6 2 2 6 2 2 6
55854- 2 2 6 2 2 6 2 2 6 2 2 6
55855- 2 2 6 2 2 6 2 2 6 54 54 54
55856- 66 66 66 26 26 26 6 6 6 0 0 0
55857- 0 0 0 0 0 0 0 0 0 0 0 0
55858- 0 0 0 0 0 0 0 0 0 0 0 0
55859- 0 0 0 0 0 0 0 0 0 0 0 0
55860- 0 0 0 0 0 0 0 0 0 0 0 0
55861- 0 0 0 0 0 0 0 0 0 0 0 0
55862- 0 0 0 0 0 0 0 0 0 0 0 0
55863- 0 0 0 0 0 0 0 0 0 0 0 0
55864- 0 0 0 0 0 1 0 0 1 0 0 0
55865- 0 0 0 0 0 0 0 0 0 0 0 0
55866- 0 0 0 0 0 0 0 0 0 0 0 0
55867- 0 0 0 0 0 0 0 0 0 0 0 0
55868- 0 0 0 0 0 0 0 0 0 0 0 0
55869- 0 0 0 0 0 0 0 0 0 14 14 14
55870- 42 42 42 82 82 82 2 2 6 2 2 6
55871- 2 2 6 6 6 6 10 10 10 2 2 6
55872- 2 2 6 2 2 6 2 2 6 2 2 6
55873- 2 2 6 2 2 6 2 2 6 6 6 6
55874- 14 14 14 10 10 10 2 2 6 2 2 6
55875- 2 2 6 2 2 6 2 2 6 18 18 18
55876- 82 82 82 34 34 34 10 10 10 0 0 0
55877- 0 0 0 0 0 0 0 0 0 0 0 0
55878- 0 0 0 0 0 0 0 0 0 0 0 0
55879- 0 0 0 0 0 0 0 0 0 0 0 0
55880- 0 0 0 0 0 0 0 0 0 0 0 0
55881- 0 0 0 0 0 0 0 0 0 0 0 0
55882- 0 0 0 0 0 0 0 0 0 0 0 0
55883- 0 0 0 0 0 0 0 0 0 0 0 0
55884- 0 0 1 0 0 0 0 0 0 0 0 0
55885- 0 0 0 0 0 0 0 0 0 0 0 0
55886- 0 0 0 0 0 0 0 0 0 0 0 0
55887- 0 0 0 0 0 0 0 0 0 0 0 0
55888- 0 0 0 0 0 0 0 0 0 0 0 0
55889- 0 0 0 0 0 0 0 0 0 14 14 14
55890- 46 46 46 86 86 86 2 2 6 2 2 6
55891- 6 6 6 6 6 6 22 22 22 34 34 34
55892- 6 6 6 2 2 6 2 2 6 2 2 6
55893- 2 2 6 2 2 6 18 18 18 34 34 34
55894- 10 10 10 50 50 50 22 22 22 2 2 6
55895- 2 2 6 2 2 6 2 2 6 10 10 10
55896- 86 86 86 42 42 42 14 14 14 0 0 0
55897- 0 0 0 0 0 0 0 0 0 0 0 0
55898- 0 0 0 0 0 0 0 0 0 0 0 0
55899- 0 0 0 0 0 0 0 0 0 0 0 0
55900- 0 0 0 0 0 0 0 0 0 0 0 0
55901- 0 0 0 0 0 0 0 0 0 0 0 0
55902- 0 0 0 0 0 0 0 0 0 0 0 0
55903- 0 0 0 0 0 0 0 0 0 0 0 0
55904- 0 0 1 0 0 1 0 0 1 0 0 0
55905- 0 0 0 0 0 0 0 0 0 0 0 0
55906- 0 0 0 0 0 0 0 0 0 0 0 0
55907- 0 0 0 0 0 0 0 0 0 0 0 0
55908- 0 0 0 0 0 0 0 0 0 0 0 0
55909- 0 0 0 0 0 0 0 0 0 14 14 14
55910- 46 46 46 86 86 86 2 2 6 2 2 6
55911- 38 38 38 116 116 116 94 94 94 22 22 22
55912- 22 22 22 2 2 6 2 2 6 2 2 6
55913- 14 14 14 86 86 86 138 138 138 162 162 162
55914-154 154 154 38 38 38 26 26 26 6 6 6
55915- 2 2 6 2 2 6 2 2 6 2 2 6
55916- 86 86 86 46 46 46 14 14 14 0 0 0
55917- 0 0 0 0 0 0 0 0 0 0 0 0
55918- 0 0 0 0 0 0 0 0 0 0 0 0
55919- 0 0 0 0 0 0 0 0 0 0 0 0
55920- 0 0 0 0 0 0 0 0 0 0 0 0
55921- 0 0 0 0 0 0 0 0 0 0 0 0
55922- 0 0 0 0 0 0 0 0 0 0 0 0
55923- 0 0 0 0 0 0 0 0 0 0 0 0
55924- 0 0 0 0 0 0 0 0 0 0 0 0
55925- 0 0 0 0 0 0 0 0 0 0 0 0
55926- 0 0 0 0 0 0 0 0 0 0 0 0
55927- 0 0 0 0 0 0 0 0 0 0 0 0
55928- 0 0 0 0 0 0 0 0 0 0 0 0
55929- 0 0 0 0 0 0 0 0 0 14 14 14
55930- 46 46 46 86 86 86 2 2 6 14 14 14
55931-134 134 134 198 198 198 195 195 195 116 116 116
55932- 10 10 10 2 2 6 2 2 6 6 6 6
55933-101 98 89 187 187 187 210 210 210 218 218 218
55934-214 214 214 134 134 134 14 14 14 6 6 6
55935- 2 2 6 2 2 6 2 2 6 2 2 6
55936- 86 86 86 50 50 50 18 18 18 6 6 6
55937- 0 0 0 0 0 0 0 0 0 0 0 0
55938- 0 0 0 0 0 0 0 0 0 0 0 0
55939- 0 0 0 0 0 0 0 0 0 0 0 0
55940- 0 0 0 0 0 0 0 0 0 0 0 0
55941- 0 0 0 0 0 0 0 0 0 0 0 0
55942- 0 0 0 0 0 0 0 0 0 0 0 0
55943- 0 0 0 0 0 0 0 0 1 0 0 0
55944- 0 0 1 0 0 1 0 0 1 0 0 0
55945- 0 0 0 0 0 0 0 0 0 0 0 0
55946- 0 0 0 0 0 0 0 0 0 0 0 0
55947- 0 0 0 0 0 0 0 0 0 0 0 0
55948- 0 0 0 0 0 0 0 0 0 0 0 0
55949- 0 0 0 0 0 0 0 0 0 14 14 14
55950- 46 46 46 86 86 86 2 2 6 54 54 54
55951-218 218 218 195 195 195 226 226 226 246 246 246
55952- 58 58 58 2 2 6 2 2 6 30 30 30
55953-210 210 210 253 253 253 174 174 174 123 123 123
55954-221 221 221 234 234 234 74 74 74 2 2 6
55955- 2 2 6 2 2 6 2 2 6 2 2 6
55956- 70 70 70 58 58 58 22 22 22 6 6 6
55957- 0 0 0 0 0 0 0 0 0 0 0 0
55958- 0 0 0 0 0 0 0 0 0 0 0 0
55959- 0 0 0 0 0 0 0 0 0 0 0 0
55960- 0 0 0 0 0 0 0 0 0 0 0 0
55961- 0 0 0 0 0 0 0 0 0 0 0 0
55962- 0 0 0 0 0 0 0 0 0 0 0 0
55963- 0 0 0 0 0 0 0 0 0 0 0 0
55964- 0 0 0 0 0 0 0 0 0 0 0 0
55965- 0 0 0 0 0 0 0 0 0 0 0 0
55966- 0 0 0 0 0 0 0 0 0 0 0 0
55967- 0 0 0 0 0 0 0 0 0 0 0 0
55968- 0 0 0 0 0 0 0 0 0 0 0 0
55969- 0 0 0 0 0 0 0 0 0 14 14 14
55970- 46 46 46 82 82 82 2 2 6 106 106 106
55971-170 170 170 26 26 26 86 86 86 226 226 226
55972-123 123 123 10 10 10 14 14 14 46 46 46
55973-231 231 231 190 190 190 6 6 6 70 70 70
55974- 90 90 90 238 238 238 158 158 158 2 2 6
55975- 2 2 6 2 2 6 2 2 6 2 2 6
55976- 70 70 70 58 58 58 22 22 22 6 6 6
55977- 0 0 0 0 0 0 0 0 0 0 0 0
55978- 0 0 0 0 0 0 0 0 0 0 0 0
55979- 0 0 0 0 0 0 0 0 0 0 0 0
55980- 0 0 0 0 0 0 0 0 0 0 0 0
55981- 0 0 0 0 0 0 0 0 0 0 0 0
55982- 0 0 0 0 0 0 0 0 0 0 0 0
55983- 0 0 0 0 0 0 0 0 1 0 0 0
55984- 0 0 1 0 0 1 0 0 1 0 0 0
55985- 0 0 0 0 0 0 0 0 0 0 0 0
55986- 0 0 0 0 0 0 0 0 0 0 0 0
55987- 0 0 0 0 0 0 0 0 0 0 0 0
55988- 0 0 0 0 0 0 0 0 0 0 0 0
55989- 0 0 0 0 0 0 0 0 0 14 14 14
55990- 42 42 42 86 86 86 6 6 6 116 116 116
55991-106 106 106 6 6 6 70 70 70 149 149 149
55992-128 128 128 18 18 18 38 38 38 54 54 54
55993-221 221 221 106 106 106 2 2 6 14 14 14
55994- 46 46 46 190 190 190 198 198 198 2 2 6
55995- 2 2 6 2 2 6 2 2 6 2 2 6
55996- 74 74 74 62 62 62 22 22 22 6 6 6
55997- 0 0 0 0 0 0 0 0 0 0 0 0
55998- 0 0 0 0 0 0 0 0 0 0 0 0
55999- 0 0 0 0 0 0 0 0 0 0 0 0
56000- 0 0 0 0 0 0 0 0 0 0 0 0
56001- 0 0 0 0 0 0 0 0 0 0 0 0
56002- 0 0 0 0 0 0 0 0 0 0 0 0
56003- 0 0 0 0 0 0 0 0 1 0 0 0
56004- 0 0 1 0 0 0 0 0 1 0 0 0
56005- 0 0 0 0 0 0 0 0 0 0 0 0
56006- 0 0 0 0 0 0 0 0 0 0 0 0
56007- 0 0 0 0 0 0 0 0 0 0 0 0
56008- 0 0 0 0 0 0 0 0 0 0 0 0
56009- 0 0 0 0 0 0 0 0 0 14 14 14
56010- 42 42 42 94 94 94 14 14 14 101 101 101
56011-128 128 128 2 2 6 18 18 18 116 116 116
56012-118 98 46 121 92 8 121 92 8 98 78 10
56013-162 162 162 106 106 106 2 2 6 2 2 6
56014- 2 2 6 195 195 195 195 195 195 6 6 6
56015- 2 2 6 2 2 6 2 2 6 2 2 6
56016- 74 74 74 62 62 62 22 22 22 6 6 6
56017- 0 0 0 0 0 0 0 0 0 0 0 0
56018- 0 0 0 0 0 0 0 0 0 0 0 0
56019- 0 0 0 0 0 0 0 0 0 0 0 0
56020- 0 0 0 0 0 0 0 0 0 0 0 0
56021- 0 0 0 0 0 0 0 0 0 0 0 0
56022- 0 0 0 0 0 0 0 0 0 0 0 0
56023- 0 0 0 0 0 0 0 0 1 0 0 1
56024- 0 0 1 0 0 0 0 0 1 0 0 0
56025- 0 0 0 0 0 0 0 0 0 0 0 0
56026- 0 0 0 0 0 0 0 0 0 0 0 0
56027- 0 0 0 0 0 0 0 0 0 0 0 0
56028- 0 0 0 0 0 0 0 0 0 0 0 0
56029- 0 0 0 0 0 0 0 0 0 10 10 10
56030- 38 38 38 90 90 90 14 14 14 58 58 58
56031-210 210 210 26 26 26 54 38 6 154 114 10
56032-226 170 11 236 186 11 225 175 15 184 144 12
56033-215 174 15 175 146 61 37 26 9 2 2 6
56034- 70 70 70 246 246 246 138 138 138 2 2 6
56035- 2 2 6 2 2 6 2 2 6 2 2 6
56036- 70 70 70 66 66 66 26 26 26 6 6 6
56037- 0 0 0 0 0 0 0 0 0 0 0 0
56038- 0 0 0 0 0 0 0 0 0 0 0 0
56039- 0 0 0 0 0 0 0 0 0 0 0 0
56040- 0 0 0 0 0 0 0 0 0 0 0 0
56041- 0 0 0 0 0 0 0 0 0 0 0 0
56042- 0 0 0 0 0 0 0 0 0 0 0 0
56043- 0 0 0 0 0 0 0 0 0 0 0 0
56044- 0 0 0 0 0 0 0 0 0 0 0 0
56045- 0 0 0 0 0 0 0 0 0 0 0 0
56046- 0 0 0 0 0 0 0 0 0 0 0 0
56047- 0 0 0 0 0 0 0 0 0 0 0 0
56048- 0 0 0 0 0 0 0 0 0 0 0 0
56049- 0 0 0 0 0 0 0 0 0 10 10 10
56050- 38 38 38 86 86 86 14 14 14 10 10 10
56051-195 195 195 188 164 115 192 133 9 225 175 15
56052-239 182 13 234 190 10 232 195 16 232 200 30
56053-245 207 45 241 208 19 232 195 16 184 144 12
56054-218 194 134 211 206 186 42 42 42 2 2 6
56055- 2 2 6 2 2 6 2 2 6 2 2 6
56056- 50 50 50 74 74 74 30 30 30 6 6 6
56057- 0 0 0 0 0 0 0 0 0 0 0 0
56058- 0 0 0 0 0 0 0 0 0 0 0 0
56059- 0 0 0 0 0 0 0 0 0 0 0 0
56060- 0 0 0 0 0 0 0 0 0 0 0 0
56061- 0 0 0 0 0 0 0 0 0 0 0 0
56062- 0 0 0 0 0 0 0 0 0 0 0 0
56063- 0 0 0 0 0 0 0 0 0 0 0 0
56064- 0 0 0 0 0 0 0 0 0 0 0 0
56065- 0 0 0 0 0 0 0 0 0 0 0 0
56066- 0 0 0 0 0 0 0 0 0 0 0 0
56067- 0 0 0 0 0 0 0 0 0 0 0 0
56068- 0 0 0 0 0 0 0 0 0 0 0 0
56069- 0 0 0 0 0 0 0 0 0 10 10 10
56070- 34 34 34 86 86 86 14 14 14 2 2 6
56071-121 87 25 192 133 9 219 162 10 239 182 13
56072-236 186 11 232 195 16 241 208 19 244 214 54
56073-246 218 60 246 218 38 246 215 20 241 208 19
56074-241 208 19 226 184 13 121 87 25 2 2 6
56075- 2 2 6 2 2 6 2 2 6 2 2 6
56076- 50 50 50 82 82 82 34 34 34 10 10 10
56077- 0 0 0 0 0 0 0 0 0 0 0 0
56078- 0 0 0 0 0 0 0 0 0 0 0 0
56079- 0 0 0 0 0 0 0 0 0 0 0 0
56080- 0 0 0 0 0 0 0 0 0 0 0 0
56081- 0 0 0 0 0 0 0 0 0 0 0 0
56082- 0 0 0 0 0 0 0 0 0 0 0 0
56083- 0 0 0 0 0 0 0 0 0 0 0 0
56084- 0 0 0 0 0 0 0 0 0 0 0 0
56085- 0 0 0 0 0 0 0 0 0 0 0 0
56086- 0 0 0 0 0 0 0 0 0 0 0 0
56087- 0 0 0 0 0 0 0 0 0 0 0 0
56088- 0 0 0 0 0 0 0 0 0 0 0 0
56089- 0 0 0 0 0 0 0 0 0 10 10 10
56090- 34 34 34 82 82 82 30 30 30 61 42 6
56091-180 123 7 206 145 10 230 174 11 239 182 13
56092-234 190 10 238 202 15 241 208 19 246 218 74
56093-246 218 38 246 215 20 246 215 20 246 215 20
56094-226 184 13 215 174 15 184 144 12 6 6 6
56095- 2 2 6 2 2 6 2 2 6 2 2 6
56096- 26 26 26 94 94 94 42 42 42 14 14 14
56097- 0 0 0 0 0 0 0 0 0 0 0 0
56098- 0 0 0 0 0 0 0 0 0 0 0 0
56099- 0 0 0 0 0 0 0 0 0 0 0 0
56100- 0 0 0 0 0 0 0 0 0 0 0 0
56101- 0 0 0 0 0 0 0 0 0 0 0 0
56102- 0 0 0 0 0 0 0 0 0 0 0 0
56103- 0 0 0 0 0 0 0 0 0 0 0 0
56104- 0 0 0 0 0 0 0 0 0 0 0 0
56105- 0 0 0 0 0 0 0 0 0 0 0 0
56106- 0 0 0 0 0 0 0 0 0 0 0 0
56107- 0 0 0 0 0 0 0 0 0 0 0 0
56108- 0 0 0 0 0 0 0 0 0 0 0 0
56109- 0 0 0 0 0 0 0 0 0 10 10 10
56110- 30 30 30 78 78 78 50 50 50 104 69 6
56111-192 133 9 216 158 10 236 178 12 236 186 11
56112-232 195 16 241 208 19 244 214 54 245 215 43
56113-246 215 20 246 215 20 241 208 19 198 155 10
56114-200 144 11 216 158 10 156 118 10 2 2 6
56115- 2 2 6 2 2 6 2 2 6 2 2 6
56116- 6 6 6 90 90 90 54 54 54 18 18 18
56117- 6 6 6 0 0 0 0 0 0 0 0 0
56118- 0 0 0 0 0 0 0 0 0 0 0 0
56119- 0 0 0 0 0 0 0 0 0 0 0 0
56120- 0 0 0 0 0 0 0 0 0 0 0 0
56121- 0 0 0 0 0 0 0 0 0 0 0 0
56122- 0 0 0 0 0 0 0 0 0 0 0 0
56123- 0 0 0 0 0 0 0 0 0 0 0 0
56124- 0 0 0 0 0 0 0 0 0 0 0 0
56125- 0 0 0 0 0 0 0 0 0 0 0 0
56126- 0 0 0 0 0 0 0 0 0 0 0 0
56127- 0 0 0 0 0 0 0 0 0 0 0 0
56128- 0 0 0 0 0 0 0 0 0 0 0 0
56129- 0 0 0 0 0 0 0 0 0 10 10 10
56130- 30 30 30 78 78 78 46 46 46 22 22 22
56131-137 92 6 210 162 10 239 182 13 238 190 10
56132-238 202 15 241 208 19 246 215 20 246 215 20
56133-241 208 19 203 166 17 185 133 11 210 150 10
56134-216 158 10 210 150 10 102 78 10 2 2 6
56135- 6 6 6 54 54 54 14 14 14 2 2 6
56136- 2 2 6 62 62 62 74 74 74 30 30 30
56137- 10 10 10 0 0 0 0 0 0 0 0 0
56138- 0 0 0 0 0 0 0 0 0 0 0 0
56139- 0 0 0 0 0 0 0 0 0 0 0 0
56140- 0 0 0 0 0 0 0 0 0 0 0 0
56141- 0 0 0 0 0 0 0 0 0 0 0 0
56142- 0 0 0 0 0 0 0 0 0 0 0 0
56143- 0 0 0 0 0 0 0 0 0 0 0 0
56144- 0 0 0 0 0 0 0 0 0 0 0 0
56145- 0 0 0 0 0 0 0 0 0 0 0 0
56146- 0 0 0 0 0 0 0 0 0 0 0 0
56147- 0 0 0 0 0 0 0 0 0 0 0 0
56148- 0 0 0 0 0 0 0 0 0 0 0 0
56149- 0 0 0 0 0 0 0 0 0 10 10 10
56150- 34 34 34 78 78 78 50 50 50 6 6 6
56151- 94 70 30 139 102 15 190 146 13 226 184 13
56152-232 200 30 232 195 16 215 174 15 190 146 13
56153-168 122 10 192 133 9 210 150 10 213 154 11
56154-202 150 34 182 157 106 101 98 89 2 2 6
56155- 2 2 6 78 78 78 116 116 116 58 58 58
56156- 2 2 6 22 22 22 90 90 90 46 46 46
56157- 18 18 18 6 6 6 0 0 0 0 0 0
56158- 0 0 0 0 0 0 0 0 0 0 0 0
56159- 0 0 0 0 0 0 0 0 0 0 0 0
56160- 0 0 0 0 0 0 0 0 0 0 0 0
56161- 0 0 0 0 0 0 0 0 0 0 0 0
56162- 0 0 0 0 0 0 0 0 0 0 0 0
56163- 0 0 0 0 0 0 0 0 0 0 0 0
56164- 0 0 0 0 0 0 0 0 0 0 0 0
56165- 0 0 0 0 0 0 0 0 0 0 0 0
56166- 0 0 0 0 0 0 0 0 0 0 0 0
56167- 0 0 0 0 0 0 0 0 0 0 0 0
56168- 0 0 0 0 0 0 0 0 0 0 0 0
56169- 0 0 0 0 0 0 0 0 0 10 10 10
56170- 38 38 38 86 86 86 50 50 50 6 6 6
56171-128 128 128 174 154 114 156 107 11 168 122 10
56172-198 155 10 184 144 12 197 138 11 200 144 11
56173-206 145 10 206 145 10 197 138 11 188 164 115
56174-195 195 195 198 198 198 174 174 174 14 14 14
56175- 2 2 6 22 22 22 116 116 116 116 116 116
56176- 22 22 22 2 2 6 74 74 74 70 70 70
56177- 30 30 30 10 10 10 0 0 0 0 0 0
56178- 0 0 0 0 0 0 0 0 0 0 0 0
56179- 0 0 0 0 0 0 0 0 0 0 0 0
56180- 0 0 0 0 0 0 0 0 0 0 0 0
56181- 0 0 0 0 0 0 0 0 0 0 0 0
56182- 0 0 0 0 0 0 0 0 0 0 0 0
56183- 0 0 0 0 0 0 0 0 0 0 0 0
56184- 0 0 0 0 0 0 0 0 0 0 0 0
56185- 0 0 0 0 0 0 0 0 0 0 0 0
56186- 0 0 0 0 0 0 0 0 0 0 0 0
56187- 0 0 0 0 0 0 0 0 0 0 0 0
56188- 0 0 0 0 0 0 0 0 0 0 0 0
56189- 0 0 0 0 0 0 6 6 6 18 18 18
56190- 50 50 50 101 101 101 26 26 26 10 10 10
56191-138 138 138 190 190 190 174 154 114 156 107 11
56192-197 138 11 200 144 11 197 138 11 192 133 9
56193-180 123 7 190 142 34 190 178 144 187 187 187
56194-202 202 202 221 221 221 214 214 214 66 66 66
56195- 2 2 6 2 2 6 50 50 50 62 62 62
56196- 6 6 6 2 2 6 10 10 10 90 90 90
56197- 50 50 50 18 18 18 6 6 6 0 0 0
56198- 0 0 0 0 0 0 0 0 0 0 0 0
56199- 0 0 0 0 0 0 0 0 0 0 0 0
56200- 0 0 0 0 0 0 0 0 0 0 0 0
56201- 0 0 0 0 0 0 0 0 0 0 0 0
56202- 0 0 0 0 0 0 0 0 0 0 0 0
56203- 0 0 0 0 0 0 0 0 0 0 0 0
56204- 0 0 0 0 0 0 0 0 0 0 0 0
56205- 0 0 0 0 0 0 0 0 0 0 0 0
56206- 0 0 0 0 0 0 0 0 0 0 0 0
56207- 0 0 0 0 0 0 0 0 0 0 0 0
56208- 0 0 0 0 0 0 0 0 0 0 0 0
56209- 0 0 0 0 0 0 10 10 10 34 34 34
56210- 74 74 74 74 74 74 2 2 6 6 6 6
56211-144 144 144 198 198 198 190 190 190 178 166 146
56212-154 121 60 156 107 11 156 107 11 168 124 44
56213-174 154 114 187 187 187 190 190 190 210 210 210
56214-246 246 246 253 253 253 253 253 253 182 182 182
56215- 6 6 6 2 2 6 2 2 6 2 2 6
56216- 2 2 6 2 2 6 2 2 6 62 62 62
56217- 74 74 74 34 34 34 14 14 14 0 0 0
56218- 0 0 0 0 0 0 0 0 0 0 0 0
56219- 0 0 0 0 0 0 0 0 0 0 0 0
56220- 0 0 0 0 0 0 0 0 0 0 0 0
56221- 0 0 0 0 0 0 0 0 0 0 0 0
56222- 0 0 0 0 0 0 0 0 0 0 0 0
56223- 0 0 0 0 0 0 0 0 0 0 0 0
56224- 0 0 0 0 0 0 0 0 0 0 0 0
56225- 0 0 0 0 0 0 0 0 0 0 0 0
56226- 0 0 0 0 0 0 0 0 0 0 0 0
56227- 0 0 0 0 0 0 0 0 0 0 0 0
56228- 0 0 0 0 0 0 0 0 0 0 0 0
56229- 0 0 0 10 10 10 22 22 22 54 54 54
56230- 94 94 94 18 18 18 2 2 6 46 46 46
56231-234 234 234 221 221 221 190 190 190 190 190 190
56232-190 190 190 187 187 187 187 187 187 190 190 190
56233-190 190 190 195 195 195 214 214 214 242 242 242
56234-253 253 253 253 253 253 253 253 253 253 253 253
56235- 82 82 82 2 2 6 2 2 6 2 2 6
56236- 2 2 6 2 2 6 2 2 6 14 14 14
56237- 86 86 86 54 54 54 22 22 22 6 6 6
56238- 0 0 0 0 0 0 0 0 0 0 0 0
56239- 0 0 0 0 0 0 0 0 0 0 0 0
56240- 0 0 0 0 0 0 0 0 0 0 0 0
56241- 0 0 0 0 0 0 0 0 0 0 0 0
56242- 0 0 0 0 0 0 0 0 0 0 0 0
56243- 0 0 0 0 0 0 0 0 0 0 0 0
56244- 0 0 0 0 0 0 0 0 0 0 0 0
56245- 0 0 0 0 0 0 0 0 0 0 0 0
56246- 0 0 0 0 0 0 0 0 0 0 0 0
56247- 0 0 0 0 0 0 0 0 0 0 0 0
56248- 0 0 0 0 0 0 0 0 0 0 0 0
56249- 6 6 6 18 18 18 46 46 46 90 90 90
56250- 46 46 46 18 18 18 6 6 6 182 182 182
56251-253 253 253 246 246 246 206 206 206 190 190 190
56252-190 190 190 190 190 190 190 190 190 190 190 190
56253-206 206 206 231 231 231 250 250 250 253 253 253
56254-253 253 253 253 253 253 253 253 253 253 253 253
56255-202 202 202 14 14 14 2 2 6 2 2 6
56256- 2 2 6 2 2 6 2 2 6 2 2 6
56257- 42 42 42 86 86 86 42 42 42 18 18 18
56258- 6 6 6 0 0 0 0 0 0 0 0 0
56259- 0 0 0 0 0 0 0 0 0 0 0 0
56260- 0 0 0 0 0 0 0 0 0 0 0 0
56261- 0 0 0 0 0 0 0 0 0 0 0 0
56262- 0 0 0 0 0 0 0 0 0 0 0 0
56263- 0 0 0 0 0 0 0 0 0 0 0 0
56264- 0 0 0 0 0 0 0 0 0 0 0 0
56265- 0 0 0 0 0 0 0 0 0 0 0 0
56266- 0 0 0 0 0 0 0 0 0 0 0 0
56267- 0 0 0 0 0 0 0 0 0 0 0 0
56268- 0 0 0 0 0 0 0 0 0 6 6 6
56269- 14 14 14 38 38 38 74 74 74 66 66 66
56270- 2 2 6 6 6 6 90 90 90 250 250 250
56271-253 253 253 253 253 253 238 238 238 198 198 198
56272-190 190 190 190 190 190 195 195 195 221 221 221
56273-246 246 246 253 253 253 253 253 253 253 253 253
56274-253 253 253 253 253 253 253 253 253 253 253 253
56275-253 253 253 82 82 82 2 2 6 2 2 6
56276- 2 2 6 2 2 6 2 2 6 2 2 6
56277- 2 2 6 78 78 78 70 70 70 34 34 34
56278- 14 14 14 6 6 6 0 0 0 0 0 0
56279- 0 0 0 0 0 0 0 0 0 0 0 0
56280- 0 0 0 0 0 0 0 0 0 0 0 0
56281- 0 0 0 0 0 0 0 0 0 0 0 0
56282- 0 0 0 0 0 0 0 0 0 0 0 0
56283- 0 0 0 0 0 0 0 0 0 0 0 0
56284- 0 0 0 0 0 0 0 0 0 0 0 0
56285- 0 0 0 0 0 0 0 0 0 0 0 0
56286- 0 0 0 0 0 0 0 0 0 0 0 0
56287- 0 0 0 0 0 0 0 0 0 0 0 0
56288- 0 0 0 0 0 0 0 0 0 14 14 14
56289- 34 34 34 66 66 66 78 78 78 6 6 6
56290- 2 2 6 18 18 18 218 218 218 253 253 253
56291-253 253 253 253 253 253 253 253 253 246 246 246
56292-226 226 226 231 231 231 246 246 246 253 253 253
56293-253 253 253 253 253 253 253 253 253 253 253 253
56294-253 253 253 253 253 253 253 253 253 253 253 253
56295-253 253 253 178 178 178 2 2 6 2 2 6
56296- 2 2 6 2 2 6 2 2 6 2 2 6
56297- 2 2 6 18 18 18 90 90 90 62 62 62
56298- 30 30 30 10 10 10 0 0 0 0 0 0
56299- 0 0 0 0 0 0 0 0 0 0 0 0
56300- 0 0 0 0 0 0 0 0 0 0 0 0
56301- 0 0 0 0 0 0 0 0 0 0 0 0
56302- 0 0 0 0 0 0 0 0 0 0 0 0
56303- 0 0 0 0 0 0 0 0 0 0 0 0
56304- 0 0 0 0 0 0 0 0 0 0 0 0
56305- 0 0 0 0 0 0 0 0 0 0 0 0
56306- 0 0 0 0 0 0 0 0 0 0 0 0
56307- 0 0 0 0 0 0 0 0 0 0 0 0
56308- 0 0 0 0 0 0 10 10 10 26 26 26
56309- 58 58 58 90 90 90 18 18 18 2 2 6
56310- 2 2 6 110 110 110 253 253 253 253 253 253
56311-253 253 253 253 253 253 253 253 253 253 253 253
56312-250 250 250 253 253 253 253 253 253 253 253 253
56313-253 253 253 253 253 253 253 253 253 253 253 253
56314-253 253 253 253 253 253 253 253 253 253 253 253
56315-253 253 253 231 231 231 18 18 18 2 2 6
56316- 2 2 6 2 2 6 2 2 6 2 2 6
56317- 2 2 6 2 2 6 18 18 18 94 94 94
56318- 54 54 54 26 26 26 10 10 10 0 0 0
56319- 0 0 0 0 0 0 0 0 0 0 0 0
56320- 0 0 0 0 0 0 0 0 0 0 0 0
56321- 0 0 0 0 0 0 0 0 0 0 0 0
56322- 0 0 0 0 0 0 0 0 0 0 0 0
56323- 0 0 0 0 0 0 0 0 0 0 0 0
56324- 0 0 0 0 0 0 0 0 0 0 0 0
56325- 0 0 0 0 0 0 0 0 0 0 0 0
56326- 0 0 0 0 0 0 0 0 0 0 0 0
56327- 0 0 0 0 0 0 0 0 0 0 0 0
56328- 0 0 0 6 6 6 22 22 22 50 50 50
56329- 90 90 90 26 26 26 2 2 6 2 2 6
56330- 14 14 14 195 195 195 250 250 250 253 253 253
56331-253 253 253 253 253 253 253 253 253 253 253 253
56332-253 253 253 253 253 253 253 253 253 253 253 253
56333-253 253 253 253 253 253 253 253 253 253 253 253
56334-253 253 253 253 253 253 253 253 253 253 253 253
56335-250 250 250 242 242 242 54 54 54 2 2 6
56336- 2 2 6 2 2 6 2 2 6 2 2 6
56337- 2 2 6 2 2 6 2 2 6 38 38 38
56338- 86 86 86 50 50 50 22 22 22 6 6 6
56339- 0 0 0 0 0 0 0 0 0 0 0 0
56340- 0 0 0 0 0 0 0 0 0 0 0 0
56341- 0 0 0 0 0 0 0 0 0 0 0 0
56342- 0 0 0 0 0 0 0 0 0 0 0 0
56343- 0 0 0 0 0 0 0 0 0 0 0 0
56344- 0 0 0 0 0 0 0 0 0 0 0 0
56345- 0 0 0 0 0 0 0 0 0 0 0 0
56346- 0 0 0 0 0 0 0 0 0 0 0 0
56347- 0 0 0 0 0 0 0 0 0 0 0 0
56348- 6 6 6 14 14 14 38 38 38 82 82 82
56349- 34 34 34 2 2 6 2 2 6 2 2 6
56350- 42 42 42 195 195 195 246 246 246 253 253 253
56351-253 253 253 253 253 253 253 253 253 250 250 250
56352-242 242 242 242 242 242 250 250 250 253 253 253
56353-253 253 253 253 253 253 253 253 253 253 253 253
56354-253 253 253 250 250 250 246 246 246 238 238 238
56355-226 226 226 231 231 231 101 101 101 6 6 6
56356- 2 2 6 2 2 6 2 2 6 2 2 6
56357- 2 2 6 2 2 6 2 2 6 2 2 6
56358- 38 38 38 82 82 82 42 42 42 14 14 14
56359- 6 6 6 0 0 0 0 0 0 0 0 0
56360- 0 0 0 0 0 0 0 0 0 0 0 0
56361- 0 0 0 0 0 0 0 0 0 0 0 0
56362- 0 0 0 0 0 0 0 0 0 0 0 0
56363- 0 0 0 0 0 0 0 0 0 0 0 0
56364- 0 0 0 0 0 0 0 0 0 0 0 0
56365- 0 0 0 0 0 0 0 0 0 0 0 0
56366- 0 0 0 0 0 0 0 0 0 0 0 0
56367- 0 0 0 0 0 0 0 0 0 0 0 0
56368- 10 10 10 26 26 26 62 62 62 66 66 66
56369- 2 2 6 2 2 6 2 2 6 6 6 6
56370- 70 70 70 170 170 170 206 206 206 234 234 234
56371-246 246 246 250 250 250 250 250 250 238 238 238
56372-226 226 226 231 231 231 238 238 238 250 250 250
56373-250 250 250 250 250 250 246 246 246 231 231 231
56374-214 214 214 206 206 206 202 202 202 202 202 202
56375-198 198 198 202 202 202 182 182 182 18 18 18
56376- 2 2 6 2 2 6 2 2 6 2 2 6
56377- 2 2 6 2 2 6 2 2 6 2 2 6
56378- 2 2 6 62 62 62 66 66 66 30 30 30
56379- 10 10 10 0 0 0 0 0 0 0 0 0
56380- 0 0 0 0 0 0 0 0 0 0 0 0
56381- 0 0 0 0 0 0 0 0 0 0 0 0
56382- 0 0 0 0 0 0 0 0 0 0 0 0
56383- 0 0 0 0 0 0 0 0 0 0 0 0
56384- 0 0 0 0 0 0 0 0 0 0 0 0
56385- 0 0 0 0 0 0 0 0 0 0 0 0
56386- 0 0 0 0 0 0 0 0 0 0 0 0
56387- 0 0 0 0 0 0 0 0 0 0 0 0
56388- 14 14 14 42 42 42 82 82 82 18 18 18
56389- 2 2 6 2 2 6 2 2 6 10 10 10
56390- 94 94 94 182 182 182 218 218 218 242 242 242
56391-250 250 250 253 253 253 253 253 253 250 250 250
56392-234 234 234 253 253 253 253 253 253 253 253 253
56393-253 253 253 253 253 253 253 253 253 246 246 246
56394-238 238 238 226 226 226 210 210 210 202 202 202
56395-195 195 195 195 195 195 210 210 210 158 158 158
56396- 6 6 6 14 14 14 50 50 50 14 14 14
56397- 2 2 6 2 2 6 2 2 6 2 2 6
56398- 2 2 6 6 6 6 86 86 86 46 46 46
56399- 18 18 18 6 6 6 0 0 0 0 0 0
56400- 0 0 0 0 0 0 0 0 0 0 0 0
56401- 0 0 0 0 0 0 0 0 0 0 0 0
56402- 0 0 0 0 0 0 0 0 0 0 0 0
56403- 0 0 0 0 0 0 0 0 0 0 0 0
56404- 0 0 0 0 0 0 0 0 0 0 0 0
56405- 0 0 0 0 0 0 0 0 0 0 0 0
56406- 0 0 0 0 0 0 0 0 0 0 0 0
56407- 0 0 0 0 0 0 0 0 0 6 6 6
56408- 22 22 22 54 54 54 70 70 70 2 2 6
56409- 2 2 6 10 10 10 2 2 6 22 22 22
56410-166 166 166 231 231 231 250 250 250 253 253 253
56411-253 253 253 253 253 253 253 253 253 250 250 250
56412-242 242 242 253 253 253 253 253 253 253 253 253
56413-253 253 253 253 253 253 253 253 253 253 253 253
56414-253 253 253 253 253 253 253 253 253 246 246 246
56415-231 231 231 206 206 206 198 198 198 226 226 226
56416- 94 94 94 2 2 6 6 6 6 38 38 38
56417- 30 30 30 2 2 6 2 2 6 2 2 6
56418- 2 2 6 2 2 6 62 62 62 66 66 66
56419- 26 26 26 10 10 10 0 0 0 0 0 0
56420- 0 0 0 0 0 0 0 0 0 0 0 0
56421- 0 0 0 0 0 0 0 0 0 0 0 0
56422- 0 0 0 0 0 0 0 0 0 0 0 0
56423- 0 0 0 0 0 0 0 0 0 0 0 0
56424- 0 0 0 0 0 0 0 0 0 0 0 0
56425- 0 0 0 0 0 0 0 0 0 0 0 0
56426- 0 0 0 0 0 0 0 0 0 0 0 0
56427- 0 0 0 0 0 0 0 0 0 10 10 10
56428- 30 30 30 74 74 74 50 50 50 2 2 6
56429- 26 26 26 26 26 26 2 2 6 106 106 106
56430-238 238 238 253 253 253 253 253 253 253 253 253
56431-253 253 253 253 253 253 253 253 253 253 253 253
56432-253 253 253 253 253 253 253 253 253 253 253 253
56433-253 253 253 253 253 253 253 253 253 253 253 253
56434-253 253 253 253 253 253 253 253 253 253 253 253
56435-253 253 253 246 246 246 218 218 218 202 202 202
56436-210 210 210 14 14 14 2 2 6 2 2 6
56437- 30 30 30 22 22 22 2 2 6 2 2 6
56438- 2 2 6 2 2 6 18 18 18 86 86 86
56439- 42 42 42 14 14 14 0 0 0 0 0 0
56440- 0 0 0 0 0 0 0 0 0 0 0 0
56441- 0 0 0 0 0 0 0 0 0 0 0 0
56442- 0 0 0 0 0 0 0 0 0 0 0 0
56443- 0 0 0 0 0 0 0 0 0 0 0 0
56444- 0 0 0 0 0 0 0 0 0 0 0 0
56445- 0 0 0 0 0 0 0 0 0 0 0 0
56446- 0 0 0 0 0 0 0 0 0 0 0 0
56447- 0 0 0 0 0 0 0 0 0 14 14 14
56448- 42 42 42 90 90 90 22 22 22 2 2 6
56449- 42 42 42 2 2 6 18 18 18 218 218 218
56450-253 253 253 253 253 253 253 253 253 253 253 253
56451-253 253 253 253 253 253 253 253 253 253 253 253
56452-253 253 253 253 253 253 253 253 253 253 253 253
56453-253 253 253 253 253 253 253 253 253 253 253 253
56454-253 253 253 253 253 253 253 253 253 253 253 253
56455-253 253 253 253 253 253 250 250 250 221 221 221
56456-218 218 218 101 101 101 2 2 6 14 14 14
56457- 18 18 18 38 38 38 10 10 10 2 2 6
56458- 2 2 6 2 2 6 2 2 6 78 78 78
56459- 58 58 58 22 22 22 6 6 6 0 0 0
56460- 0 0 0 0 0 0 0 0 0 0 0 0
56461- 0 0 0 0 0 0 0 0 0 0 0 0
56462- 0 0 0 0 0 0 0 0 0 0 0 0
56463- 0 0 0 0 0 0 0 0 0 0 0 0
56464- 0 0 0 0 0 0 0 0 0 0 0 0
56465- 0 0 0 0 0 0 0 0 0 0 0 0
56466- 0 0 0 0 0 0 0 0 0 0 0 0
56467- 0 0 0 0 0 0 6 6 6 18 18 18
56468- 54 54 54 82 82 82 2 2 6 26 26 26
56469- 22 22 22 2 2 6 123 123 123 253 253 253
56470-253 253 253 253 253 253 253 253 253 253 253 253
56471-253 253 253 253 253 253 253 253 253 253 253 253
56472-253 253 253 253 253 253 253 253 253 253 253 253
56473-253 253 253 253 253 253 253 253 253 253 253 253
56474-253 253 253 253 253 253 253 253 253 253 253 253
56475-253 253 253 253 253 253 253 253 253 250 250 250
56476-238 238 238 198 198 198 6 6 6 38 38 38
56477- 58 58 58 26 26 26 38 38 38 2 2 6
56478- 2 2 6 2 2 6 2 2 6 46 46 46
56479- 78 78 78 30 30 30 10 10 10 0 0 0
56480- 0 0 0 0 0 0 0 0 0 0 0 0
56481- 0 0 0 0 0 0 0 0 0 0 0 0
56482- 0 0 0 0 0 0 0 0 0 0 0 0
56483- 0 0 0 0 0 0 0 0 0 0 0 0
56484- 0 0 0 0 0 0 0 0 0 0 0 0
56485- 0 0 0 0 0 0 0 0 0 0 0 0
56486- 0 0 0 0 0 0 0 0 0 0 0 0
56487- 0 0 0 0 0 0 10 10 10 30 30 30
56488- 74 74 74 58 58 58 2 2 6 42 42 42
56489- 2 2 6 22 22 22 231 231 231 253 253 253
56490-253 253 253 253 253 253 253 253 253 253 253 253
56491-253 253 253 253 253 253 253 253 253 250 250 250
56492-253 253 253 253 253 253 253 253 253 253 253 253
56493-253 253 253 253 253 253 253 253 253 253 253 253
56494-253 253 253 253 253 253 253 253 253 253 253 253
56495-253 253 253 253 253 253 253 253 253 253 253 253
56496-253 253 253 246 246 246 46 46 46 38 38 38
56497- 42 42 42 14 14 14 38 38 38 14 14 14
56498- 2 2 6 2 2 6 2 2 6 6 6 6
56499- 86 86 86 46 46 46 14 14 14 0 0 0
56500- 0 0 0 0 0 0 0 0 0 0 0 0
56501- 0 0 0 0 0 0 0 0 0 0 0 0
56502- 0 0 0 0 0 0 0 0 0 0 0 0
56503- 0 0 0 0 0 0 0 0 0 0 0 0
56504- 0 0 0 0 0 0 0 0 0 0 0 0
56505- 0 0 0 0 0 0 0 0 0 0 0 0
56506- 0 0 0 0 0 0 0 0 0 0 0 0
56507- 0 0 0 6 6 6 14 14 14 42 42 42
56508- 90 90 90 18 18 18 18 18 18 26 26 26
56509- 2 2 6 116 116 116 253 253 253 253 253 253
56510-253 253 253 253 253 253 253 253 253 253 253 253
56511-253 253 253 253 253 253 250 250 250 238 238 238
56512-253 253 253 253 253 253 253 253 253 253 253 253
56513-253 253 253 253 253 253 253 253 253 253 253 253
56514-253 253 253 253 253 253 253 253 253 253 253 253
56515-253 253 253 253 253 253 253 253 253 253 253 253
56516-253 253 253 253 253 253 94 94 94 6 6 6
56517- 2 2 6 2 2 6 10 10 10 34 34 34
56518- 2 2 6 2 2 6 2 2 6 2 2 6
56519- 74 74 74 58 58 58 22 22 22 6 6 6
56520- 0 0 0 0 0 0 0 0 0 0 0 0
56521- 0 0 0 0 0 0 0 0 0 0 0 0
56522- 0 0 0 0 0 0 0 0 0 0 0 0
56523- 0 0 0 0 0 0 0 0 0 0 0 0
56524- 0 0 0 0 0 0 0 0 0 0 0 0
56525- 0 0 0 0 0 0 0 0 0 0 0 0
56526- 0 0 0 0 0 0 0 0 0 0 0 0
56527- 0 0 0 10 10 10 26 26 26 66 66 66
56528- 82 82 82 2 2 6 38 38 38 6 6 6
56529- 14 14 14 210 210 210 253 253 253 253 253 253
56530-253 253 253 253 253 253 253 253 253 253 253 253
56531-253 253 253 253 253 253 246 246 246 242 242 242
56532-253 253 253 253 253 253 253 253 253 253 253 253
56533-253 253 253 253 253 253 253 253 253 253 253 253
56534-253 253 253 253 253 253 253 253 253 253 253 253
56535-253 253 253 253 253 253 253 253 253 253 253 253
56536-253 253 253 253 253 253 144 144 144 2 2 6
56537- 2 2 6 2 2 6 2 2 6 46 46 46
56538- 2 2 6 2 2 6 2 2 6 2 2 6
56539- 42 42 42 74 74 74 30 30 30 10 10 10
56540- 0 0 0 0 0 0 0 0 0 0 0 0
56541- 0 0 0 0 0 0 0 0 0 0 0 0
56542- 0 0 0 0 0 0 0 0 0 0 0 0
56543- 0 0 0 0 0 0 0 0 0 0 0 0
56544- 0 0 0 0 0 0 0 0 0 0 0 0
56545- 0 0 0 0 0 0 0 0 0 0 0 0
56546- 0 0 0 0 0 0 0 0 0 0 0 0
56547- 6 6 6 14 14 14 42 42 42 90 90 90
56548- 26 26 26 6 6 6 42 42 42 2 2 6
56549- 74 74 74 250 250 250 253 253 253 253 253 253
56550-253 253 253 253 253 253 253 253 253 253 253 253
56551-253 253 253 253 253 253 242 242 242 242 242 242
56552-253 253 253 253 253 253 253 253 253 253 253 253
56553-253 253 253 253 253 253 253 253 253 253 253 253
56554-253 253 253 253 253 253 253 253 253 253 253 253
56555-253 253 253 253 253 253 253 253 253 253 253 253
56556-253 253 253 253 253 253 182 182 182 2 2 6
56557- 2 2 6 2 2 6 2 2 6 46 46 46
56558- 2 2 6 2 2 6 2 2 6 2 2 6
56559- 10 10 10 86 86 86 38 38 38 10 10 10
56560- 0 0 0 0 0 0 0 0 0 0 0 0
56561- 0 0 0 0 0 0 0 0 0 0 0 0
56562- 0 0 0 0 0 0 0 0 0 0 0 0
56563- 0 0 0 0 0 0 0 0 0 0 0 0
56564- 0 0 0 0 0 0 0 0 0 0 0 0
56565- 0 0 0 0 0 0 0 0 0 0 0 0
56566- 0 0 0 0 0 0 0 0 0 0 0 0
56567- 10 10 10 26 26 26 66 66 66 82 82 82
56568- 2 2 6 22 22 22 18 18 18 2 2 6
56569-149 149 149 253 253 253 253 253 253 253 253 253
56570-253 253 253 253 253 253 253 253 253 253 253 253
56571-253 253 253 253 253 253 234 234 234 242 242 242
56572-253 253 253 253 253 253 253 253 253 253 253 253
56573-253 253 253 253 253 253 253 253 253 253 253 253
56574-253 253 253 253 253 253 253 253 253 253 253 253
56575-253 253 253 253 253 253 253 253 253 253 253 253
56576-253 253 253 253 253 253 206 206 206 2 2 6
56577- 2 2 6 2 2 6 2 2 6 38 38 38
56578- 2 2 6 2 2 6 2 2 6 2 2 6
56579- 6 6 6 86 86 86 46 46 46 14 14 14
56580- 0 0 0 0 0 0 0 0 0 0 0 0
56581- 0 0 0 0 0 0 0 0 0 0 0 0
56582- 0 0 0 0 0 0 0 0 0 0 0 0
56583- 0 0 0 0 0 0 0 0 0 0 0 0
56584- 0 0 0 0 0 0 0 0 0 0 0 0
56585- 0 0 0 0 0 0 0 0 0 0 0 0
56586- 0 0 0 0 0 0 0 0 0 6 6 6
56587- 18 18 18 46 46 46 86 86 86 18 18 18
56588- 2 2 6 34 34 34 10 10 10 6 6 6
56589-210 210 210 253 253 253 253 253 253 253 253 253
56590-253 253 253 253 253 253 253 253 253 253 253 253
56591-253 253 253 253 253 253 234 234 234 242 242 242
56592-253 253 253 253 253 253 253 253 253 253 253 253
56593-253 253 253 253 253 253 253 253 253 253 253 253
56594-253 253 253 253 253 253 253 253 253 253 253 253
56595-253 253 253 253 253 253 253 253 253 253 253 253
56596-253 253 253 253 253 253 221 221 221 6 6 6
56597- 2 2 6 2 2 6 6 6 6 30 30 30
56598- 2 2 6 2 2 6 2 2 6 2 2 6
56599- 2 2 6 82 82 82 54 54 54 18 18 18
56600- 6 6 6 0 0 0 0 0 0 0 0 0
56601- 0 0 0 0 0 0 0 0 0 0 0 0
56602- 0 0 0 0 0 0 0 0 0 0 0 0
56603- 0 0 0 0 0 0 0 0 0 0 0 0
56604- 0 0 0 0 0 0 0 0 0 0 0 0
56605- 0 0 0 0 0 0 0 0 0 0 0 0
56606- 0 0 0 0 0 0 0 0 0 10 10 10
56607- 26 26 26 66 66 66 62 62 62 2 2 6
56608- 2 2 6 38 38 38 10 10 10 26 26 26
56609-238 238 238 253 253 253 253 253 253 253 253 253
56610-253 253 253 253 253 253 253 253 253 253 253 253
56611-253 253 253 253 253 253 231 231 231 238 238 238
56612-253 253 253 253 253 253 253 253 253 253 253 253
56613-253 253 253 253 253 253 253 253 253 253 253 253
56614-253 253 253 253 253 253 253 253 253 253 253 253
56615-253 253 253 253 253 253 253 253 253 253 253 253
56616-253 253 253 253 253 253 231 231 231 6 6 6
56617- 2 2 6 2 2 6 10 10 10 30 30 30
56618- 2 2 6 2 2 6 2 2 6 2 2 6
56619- 2 2 6 66 66 66 58 58 58 22 22 22
56620- 6 6 6 0 0 0 0 0 0 0 0 0
56621- 0 0 0 0 0 0 0 0 0 0 0 0
56622- 0 0 0 0 0 0 0 0 0 0 0 0
56623- 0 0 0 0 0 0 0 0 0 0 0 0
56624- 0 0 0 0 0 0 0 0 0 0 0 0
56625- 0 0 0 0 0 0 0 0 0 0 0 0
56626- 0 0 0 0 0 0 0 0 0 10 10 10
56627- 38 38 38 78 78 78 6 6 6 2 2 6
56628- 2 2 6 46 46 46 14 14 14 42 42 42
56629-246 246 246 253 253 253 253 253 253 253 253 253
56630-253 253 253 253 253 253 253 253 253 253 253 253
56631-253 253 253 253 253 253 231 231 231 242 242 242
56632-253 253 253 253 253 253 253 253 253 253 253 253
56633-253 253 253 253 253 253 253 253 253 253 253 253
56634-253 253 253 253 253 253 253 253 253 253 253 253
56635-253 253 253 253 253 253 253 253 253 253 253 253
56636-253 253 253 253 253 253 234 234 234 10 10 10
56637- 2 2 6 2 2 6 22 22 22 14 14 14
56638- 2 2 6 2 2 6 2 2 6 2 2 6
56639- 2 2 6 66 66 66 62 62 62 22 22 22
56640- 6 6 6 0 0 0 0 0 0 0 0 0
56641- 0 0 0 0 0 0 0 0 0 0 0 0
56642- 0 0 0 0 0 0 0 0 0 0 0 0
56643- 0 0 0 0 0 0 0 0 0 0 0 0
56644- 0 0 0 0 0 0 0 0 0 0 0 0
56645- 0 0 0 0 0 0 0 0 0 0 0 0
56646- 0 0 0 0 0 0 6 6 6 18 18 18
56647- 50 50 50 74 74 74 2 2 6 2 2 6
56648- 14 14 14 70 70 70 34 34 34 62 62 62
56649-250 250 250 253 253 253 253 253 253 253 253 253
56650-253 253 253 253 253 253 253 253 253 253 253 253
56651-253 253 253 253 253 253 231 231 231 246 246 246
56652-253 253 253 253 253 253 253 253 253 253 253 253
56653-253 253 253 253 253 253 253 253 253 253 253 253
56654-253 253 253 253 253 253 253 253 253 253 253 253
56655-253 253 253 253 253 253 253 253 253 253 253 253
56656-253 253 253 253 253 253 234 234 234 14 14 14
56657- 2 2 6 2 2 6 30 30 30 2 2 6
56658- 2 2 6 2 2 6 2 2 6 2 2 6
56659- 2 2 6 66 66 66 62 62 62 22 22 22
56660- 6 6 6 0 0 0 0 0 0 0 0 0
56661- 0 0 0 0 0 0 0 0 0 0 0 0
56662- 0 0 0 0 0 0 0 0 0 0 0 0
56663- 0 0 0 0 0 0 0 0 0 0 0 0
56664- 0 0 0 0 0 0 0 0 0 0 0 0
56665- 0 0 0 0 0 0 0 0 0 0 0 0
56666- 0 0 0 0 0 0 6 6 6 18 18 18
56667- 54 54 54 62 62 62 2 2 6 2 2 6
56668- 2 2 6 30 30 30 46 46 46 70 70 70
56669-250 250 250 253 253 253 253 253 253 253 253 253
56670-253 253 253 253 253 253 253 253 253 253 253 253
56671-253 253 253 253 253 253 231 231 231 246 246 246
56672-253 253 253 253 253 253 253 253 253 253 253 253
56673-253 253 253 253 253 253 253 253 253 253 253 253
56674-253 253 253 253 253 253 253 253 253 253 253 253
56675-253 253 253 253 253 253 253 253 253 253 253 253
56676-253 253 253 253 253 253 226 226 226 10 10 10
56677- 2 2 6 6 6 6 30 30 30 2 2 6
56678- 2 2 6 2 2 6 2 2 6 2 2 6
56679- 2 2 6 66 66 66 58 58 58 22 22 22
56680- 6 6 6 0 0 0 0 0 0 0 0 0
56681- 0 0 0 0 0 0 0 0 0 0 0 0
56682- 0 0 0 0 0 0 0 0 0 0 0 0
56683- 0 0 0 0 0 0 0 0 0 0 0 0
56684- 0 0 0 0 0 0 0 0 0 0 0 0
56685- 0 0 0 0 0 0 0 0 0 0 0 0
56686- 0 0 0 0 0 0 6 6 6 22 22 22
56687- 58 58 58 62 62 62 2 2 6 2 2 6
56688- 2 2 6 2 2 6 30 30 30 78 78 78
56689-250 250 250 253 253 253 253 253 253 253 253 253
56690-253 253 253 253 253 253 253 253 253 253 253 253
56691-253 253 253 253 253 253 231 231 231 246 246 246
56692-253 253 253 253 253 253 253 253 253 253 253 253
56693-253 253 253 253 253 253 253 253 253 253 253 253
56694-253 253 253 253 253 253 253 253 253 253 253 253
56695-253 253 253 253 253 253 253 253 253 253 253 253
56696-253 253 253 253 253 253 206 206 206 2 2 6
56697- 22 22 22 34 34 34 18 14 6 22 22 22
56698- 26 26 26 18 18 18 6 6 6 2 2 6
56699- 2 2 6 82 82 82 54 54 54 18 18 18
56700- 6 6 6 0 0 0 0 0 0 0 0 0
56701- 0 0 0 0 0 0 0 0 0 0 0 0
56702- 0 0 0 0 0 0 0 0 0 0 0 0
56703- 0 0 0 0 0 0 0 0 0 0 0 0
56704- 0 0 0 0 0 0 0 0 0 0 0 0
56705- 0 0 0 0 0 0 0 0 0 0 0 0
56706- 0 0 0 0 0 0 6 6 6 26 26 26
56707- 62 62 62 106 106 106 74 54 14 185 133 11
56708-210 162 10 121 92 8 6 6 6 62 62 62
56709-238 238 238 253 253 253 253 253 253 253 253 253
56710-253 253 253 253 253 253 253 253 253 253 253 253
56711-253 253 253 253 253 253 231 231 231 246 246 246
56712-253 253 253 253 253 253 253 253 253 253 253 253
56713-253 253 253 253 253 253 253 253 253 253 253 253
56714-253 253 253 253 253 253 253 253 253 253 253 253
56715-253 253 253 253 253 253 253 253 253 253 253 253
56716-253 253 253 253 253 253 158 158 158 18 18 18
56717- 14 14 14 2 2 6 2 2 6 2 2 6
56718- 6 6 6 18 18 18 66 66 66 38 38 38
56719- 6 6 6 94 94 94 50 50 50 18 18 18
56720- 6 6 6 0 0 0 0 0 0 0 0 0
56721- 0 0 0 0 0 0 0 0 0 0 0 0
56722- 0 0 0 0 0 0 0 0 0 0 0 0
56723- 0 0 0 0 0 0 0 0 0 0 0 0
56724- 0 0 0 0 0 0 0 0 0 0 0 0
56725- 0 0 0 0 0 0 0 0 0 6 6 6
56726- 10 10 10 10 10 10 18 18 18 38 38 38
56727- 78 78 78 142 134 106 216 158 10 242 186 14
56728-246 190 14 246 190 14 156 118 10 10 10 10
56729- 90 90 90 238 238 238 253 253 253 253 253 253
56730-253 253 253 253 253 253 253 253 253 253 253 253
56731-253 253 253 253 253 253 231 231 231 250 250 250
56732-253 253 253 253 253 253 253 253 253 253 253 253
56733-253 253 253 253 253 253 253 253 253 253 253 253
56734-253 253 253 253 253 253 253 253 253 253 253 253
56735-253 253 253 253 253 253 253 253 253 246 230 190
56736-238 204 91 238 204 91 181 142 44 37 26 9
56737- 2 2 6 2 2 6 2 2 6 2 2 6
56738- 2 2 6 2 2 6 38 38 38 46 46 46
56739- 26 26 26 106 106 106 54 54 54 18 18 18
56740- 6 6 6 0 0 0 0 0 0 0 0 0
56741- 0 0 0 0 0 0 0 0 0 0 0 0
56742- 0 0 0 0 0 0 0 0 0 0 0 0
56743- 0 0 0 0 0 0 0 0 0 0 0 0
56744- 0 0 0 0 0 0 0 0 0 0 0 0
56745- 0 0 0 6 6 6 14 14 14 22 22 22
56746- 30 30 30 38 38 38 50 50 50 70 70 70
56747-106 106 106 190 142 34 226 170 11 242 186 14
56748-246 190 14 246 190 14 246 190 14 154 114 10
56749- 6 6 6 74 74 74 226 226 226 253 253 253
56750-253 253 253 253 253 253 253 253 253 253 253 253
56751-253 253 253 253 253 253 231 231 231 250 250 250
56752-253 253 253 253 253 253 253 253 253 253 253 253
56753-253 253 253 253 253 253 253 253 253 253 253 253
56754-253 253 253 253 253 253 253 253 253 253 253 253
56755-253 253 253 253 253 253 253 253 253 228 184 62
56756-241 196 14 241 208 19 232 195 16 38 30 10
56757- 2 2 6 2 2 6 2 2 6 2 2 6
56758- 2 2 6 6 6 6 30 30 30 26 26 26
56759-203 166 17 154 142 90 66 66 66 26 26 26
56760- 6 6 6 0 0 0 0 0 0 0 0 0
56761- 0 0 0 0 0 0 0 0 0 0 0 0
56762- 0 0 0 0 0 0 0 0 0 0 0 0
56763- 0 0 0 0 0 0 0 0 0 0 0 0
56764- 0 0 0 0 0 0 0 0 0 0 0 0
56765- 6 6 6 18 18 18 38 38 38 58 58 58
56766- 78 78 78 86 86 86 101 101 101 123 123 123
56767-175 146 61 210 150 10 234 174 13 246 186 14
56768-246 190 14 246 190 14 246 190 14 238 190 10
56769-102 78 10 2 2 6 46 46 46 198 198 198
56770-253 253 253 253 253 253 253 253 253 253 253 253
56771-253 253 253 253 253 253 234 234 234 242 242 242
56772-253 253 253 253 253 253 253 253 253 253 253 253
56773-253 253 253 253 253 253 253 253 253 253 253 253
56774-253 253 253 253 253 253 253 253 253 253 253 253
56775-253 253 253 253 253 253 253 253 253 224 178 62
56776-242 186 14 241 196 14 210 166 10 22 18 6
56777- 2 2 6 2 2 6 2 2 6 2 2 6
56778- 2 2 6 2 2 6 6 6 6 121 92 8
56779-238 202 15 232 195 16 82 82 82 34 34 34
56780- 10 10 10 0 0 0 0 0 0 0 0 0
56781- 0 0 0 0 0 0 0 0 0 0 0 0
56782- 0 0 0 0 0 0 0 0 0 0 0 0
56783- 0 0 0 0 0 0 0 0 0 0 0 0
56784- 0 0 0 0 0 0 0 0 0 0 0 0
56785- 14 14 14 38 38 38 70 70 70 154 122 46
56786-190 142 34 200 144 11 197 138 11 197 138 11
56787-213 154 11 226 170 11 242 186 14 246 190 14
56788-246 190 14 246 190 14 246 190 14 246 190 14
56789-225 175 15 46 32 6 2 2 6 22 22 22
56790-158 158 158 250 250 250 253 253 253 253 253 253
56791-253 253 253 253 253 253 253 253 253 253 253 253
56792-253 253 253 253 253 253 253 253 253 253 253 253
56793-253 253 253 253 253 253 253 253 253 253 253 253
56794-253 253 253 253 253 253 253 253 253 253 253 253
56795-253 253 253 250 250 250 242 242 242 224 178 62
56796-239 182 13 236 186 11 213 154 11 46 32 6
56797- 2 2 6 2 2 6 2 2 6 2 2 6
56798- 2 2 6 2 2 6 61 42 6 225 175 15
56799-238 190 10 236 186 11 112 100 78 42 42 42
56800- 14 14 14 0 0 0 0 0 0 0 0 0
56801- 0 0 0 0 0 0 0 0 0 0 0 0
56802- 0 0 0 0 0 0 0 0 0 0 0 0
56803- 0 0 0 0 0 0 0 0 0 0 0 0
56804- 0 0 0 0 0 0 0 0 0 6 6 6
56805- 22 22 22 54 54 54 154 122 46 213 154 11
56806-226 170 11 230 174 11 226 170 11 226 170 11
56807-236 178 12 242 186 14 246 190 14 246 190 14
56808-246 190 14 246 190 14 246 190 14 246 190 14
56809-241 196 14 184 144 12 10 10 10 2 2 6
56810- 6 6 6 116 116 116 242 242 242 253 253 253
56811-253 253 253 253 253 253 253 253 253 253 253 253
56812-253 253 253 253 253 253 253 253 253 253 253 253
56813-253 253 253 253 253 253 253 253 253 253 253 253
56814-253 253 253 253 253 253 253 253 253 253 253 253
56815-253 253 253 231 231 231 198 198 198 214 170 54
56816-236 178 12 236 178 12 210 150 10 137 92 6
56817- 18 14 6 2 2 6 2 2 6 2 2 6
56818- 6 6 6 70 47 6 200 144 11 236 178 12
56819-239 182 13 239 182 13 124 112 88 58 58 58
56820- 22 22 22 6 6 6 0 0 0 0 0 0
56821- 0 0 0 0 0 0 0 0 0 0 0 0
56822- 0 0 0 0 0 0 0 0 0 0 0 0
56823- 0 0 0 0 0 0 0 0 0 0 0 0
56824- 0 0 0 0 0 0 0 0 0 10 10 10
56825- 30 30 30 70 70 70 180 133 36 226 170 11
56826-239 182 13 242 186 14 242 186 14 246 186 14
56827-246 190 14 246 190 14 246 190 14 246 190 14
56828-246 190 14 246 190 14 246 190 14 246 190 14
56829-246 190 14 232 195 16 98 70 6 2 2 6
56830- 2 2 6 2 2 6 66 66 66 221 221 221
56831-253 253 253 253 253 253 253 253 253 253 253 253
56832-253 253 253 253 253 253 253 253 253 253 253 253
56833-253 253 253 253 253 253 253 253 253 253 253 253
56834-253 253 253 253 253 253 253 253 253 253 253 253
56835-253 253 253 206 206 206 198 198 198 214 166 58
56836-230 174 11 230 174 11 216 158 10 192 133 9
56837-163 110 8 116 81 8 102 78 10 116 81 8
56838-167 114 7 197 138 11 226 170 11 239 182 13
56839-242 186 14 242 186 14 162 146 94 78 78 78
56840- 34 34 34 14 14 14 6 6 6 0 0 0
56841- 0 0 0 0 0 0 0 0 0 0 0 0
56842- 0 0 0 0 0 0 0 0 0 0 0 0
56843- 0 0 0 0 0 0 0 0 0 0 0 0
56844- 0 0 0 0 0 0 0 0 0 6 6 6
56845- 30 30 30 78 78 78 190 142 34 226 170 11
56846-239 182 13 246 190 14 246 190 14 246 190 14
56847-246 190 14 246 190 14 246 190 14 246 190 14
56848-246 190 14 246 190 14 246 190 14 246 190 14
56849-246 190 14 241 196 14 203 166 17 22 18 6
56850- 2 2 6 2 2 6 2 2 6 38 38 38
56851-218 218 218 253 253 253 253 253 253 253 253 253
56852-253 253 253 253 253 253 253 253 253 253 253 253
56853-253 253 253 253 253 253 253 253 253 253 253 253
56854-253 253 253 253 253 253 253 253 253 253 253 253
56855-250 250 250 206 206 206 198 198 198 202 162 69
56856-226 170 11 236 178 12 224 166 10 210 150 10
56857-200 144 11 197 138 11 192 133 9 197 138 11
56858-210 150 10 226 170 11 242 186 14 246 190 14
56859-246 190 14 246 186 14 225 175 15 124 112 88
56860- 62 62 62 30 30 30 14 14 14 6 6 6
56861- 0 0 0 0 0 0 0 0 0 0 0 0
56862- 0 0 0 0 0 0 0 0 0 0 0 0
56863- 0 0 0 0 0 0 0 0 0 0 0 0
56864- 0 0 0 0 0 0 0 0 0 10 10 10
56865- 30 30 30 78 78 78 174 135 50 224 166 10
56866-239 182 13 246 190 14 246 190 14 246 190 14
56867-246 190 14 246 190 14 246 190 14 246 190 14
56868-246 190 14 246 190 14 246 190 14 246 190 14
56869-246 190 14 246 190 14 241 196 14 139 102 15
56870- 2 2 6 2 2 6 2 2 6 2 2 6
56871- 78 78 78 250 250 250 253 253 253 253 253 253
56872-253 253 253 253 253 253 253 253 253 253 253 253
56873-253 253 253 253 253 253 253 253 253 253 253 253
56874-253 253 253 253 253 253 253 253 253 253 253 253
56875-250 250 250 214 214 214 198 198 198 190 150 46
56876-219 162 10 236 178 12 234 174 13 224 166 10
56877-216 158 10 213 154 11 213 154 11 216 158 10
56878-226 170 11 239 182 13 246 190 14 246 190 14
56879-246 190 14 246 190 14 242 186 14 206 162 42
56880-101 101 101 58 58 58 30 30 30 14 14 14
56881- 6 6 6 0 0 0 0 0 0 0 0 0
56882- 0 0 0 0 0 0 0 0 0 0 0 0
56883- 0 0 0 0 0 0 0 0 0 0 0 0
56884- 0 0 0 0 0 0 0 0 0 10 10 10
56885- 30 30 30 74 74 74 174 135 50 216 158 10
56886-236 178 12 246 190 14 246 190 14 246 190 14
56887-246 190 14 246 190 14 246 190 14 246 190 14
56888-246 190 14 246 190 14 246 190 14 246 190 14
56889-246 190 14 246 190 14 241 196 14 226 184 13
56890- 61 42 6 2 2 6 2 2 6 2 2 6
56891- 22 22 22 238 238 238 253 253 253 253 253 253
56892-253 253 253 253 253 253 253 253 253 253 253 253
56893-253 253 253 253 253 253 253 253 253 253 253 253
56894-253 253 253 253 253 253 253 253 253 253 253 253
56895-253 253 253 226 226 226 187 187 187 180 133 36
56896-216 158 10 236 178 12 239 182 13 236 178 12
56897-230 174 11 226 170 11 226 170 11 230 174 11
56898-236 178 12 242 186 14 246 190 14 246 190 14
56899-246 190 14 246 190 14 246 186 14 239 182 13
56900-206 162 42 106 106 106 66 66 66 34 34 34
56901- 14 14 14 6 6 6 0 0 0 0 0 0
56902- 0 0 0 0 0 0 0 0 0 0 0 0
56903- 0 0 0 0 0 0 0 0 0 0 0 0
56904- 0 0 0 0 0 0 0 0 0 6 6 6
56905- 26 26 26 70 70 70 163 133 67 213 154 11
56906-236 178 12 246 190 14 246 190 14 246 190 14
56907-246 190 14 246 190 14 246 190 14 246 190 14
56908-246 190 14 246 190 14 246 190 14 246 190 14
56909-246 190 14 246 190 14 246 190 14 241 196 14
56910-190 146 13 18 14 6 2 2 6 2 2 6
56911- 46 46 46 246 246 246 253 253 253 253 253 253
56912-253 253 253 253 253 253 253 253 253 253 253 253
56913-253 253 253 253 253 253 253 253 253 253 253 253
56914-253 253 253 253 253 253 253 253 253 253 253 253
56915-253 253 253 221 221 221 86 86 86 156 107 11
56916-216 158 10 236 178 12 242 186 14 246 186 14
56917-242 186 14 239 182 13 239 182 13 242 186 14
56918-242 186 14 246 186 14 246 190 14 246 190 14
56919-246 190 14 246 190 14 246 190 14 246 190 14
56920-242 186 14 225 175 15 142 122 72 66 66 66
56921- 30 30 30 10 10 10 0 0 0 0 0 0
56922- 0 0 0 0 0 0 0 0 0 0 0 0
56923- 0 0 0 0 0 0 0 0 0 0 0 0
56924- 0 0 0 0 0 0 0 0 0 6 6 6
56925- 26 26 26 70 70 70 163 133 67 210 150 10
56926-236 178 12 246 190 14 246 190 14 246 190 14
56927-246 190 14 246 190 14 246 190 14 246 190 14
56928-246 190 14 246 190 14 246 190 14 246 190 14
56929-246 190 14 246 190 14 246 190 14 246 190 14
56930-232 195 16 121 92 8 34 34 34 106 106 106
56931-221 221 221 253 253 253 253 253 253 253 253 253
56932-253 253 253 253 253 253 253 253 253 253 253 253
56933-253 253 253 253 253 253 253 253 253 253 253 253
56934-253 253 253 253 253 253 253 253 253 253 253 253
56935-242 242 242 82 82 82 18 14 6 163 110 8
56936-216 158 10 236 178 12 242 186 14 246 190 14
56937-246 190 14 246 190 14 246 190 14 246 190 14
56938-246 190 14 246 190 14 246 190 14 246 190 14
56939-246 190 14 246 190 14 246 190 14 246 190 14
56940-246 190 14 246 190 14 242 186 14 163 133 67
56941- 46 46 46 18 18 18 6 6 6 0 0 0
56942- 0 0 0 0 0 0 0 0 0 0 0 0
56943- 0 0 0 0 0 0 0 0 0 0 0 0
56944- 0 0 0 0 0 0 0 0 0 10 10 10
56945- 30 30 30 78 78 78 163 133 67 210 150 10
56946-236 178 12 246 186 14 246 190 14 246 190 14
56947-246 190 14 246 190 14 246 190 14 246 190 14
56948-246 190 14 246 190 14 246 190 14 246 190 14
56949-246 190 14 246 190 14 246 190 14 246 190 14
56950-241 196 14 215 174 15 190 178 144 253 253 253
56951-253 253 253 253 253 253 253 253 253 253 253 253
56952-253 253 253 253 253 253 253 253 253 253 253 253
56953-253 253 253 253 253 253 253 253 253 253 253 253
56954-253 253 253 253 253 253 253 253 253 218 218 218
56955- 58 58 58 2 2 6 22 18 6 167 114 7
56956-216 158 10 236 178 12 246 186 14 246 190 14
56957-246 190 14 246 190 14 246 190 14 246 190 14
56958-246 190 14 246 190 14 246 190 14 246 190 14
56959-246 190 14 246 190 14 246 190 14 246 190 14
56960-246 190 14 246 186 14 242 186 14 190 150 46
56961- 54 54 54 22 22 22 6 6 6 0 0 0
56962- 0 0 0 0 0 0 0 0 0 0 0 0
56963- 0 0 0 0 0 0 0 0 0 0 0 0
56964- 0 0 0 0 0 0 0 0 0 14 14 14
56965- 38 38 38 86 86 86 180 133 36 213 154 11
56966-236 178 12 246 186 14 246 190 14 246 190 14
56967-246 190 14 246 190 14 246 190 14 246 190 14
56968-246 190 14 246 190 14 246 190 14 246 190 14
56969-246 190 14 246 190 14 246 190 14 246 190 14
56970-246 190 14 232 195 16 190 146 13 214 214 214
56971-253 253 253 253 253 253 253 253 253 253 253 253
56972-253 253 253 253 253 253 253 253 253 253 253 253
56973-253 253 253 253 253 253 253 253 253 253 253 253
56974-253 253 253 250 250 250 170 170 170 26 26 26
56975- 2 2 6 2 2 6 37 26 9 163 110 8
56976-219 162 10 239 182 13 246 186 14 246 190 14
56977-246 190 14 246 190 14 246 190 14 246 190 14
56978-246 190 14 246 190 14 246 190 14 246 190 14
56979-246 190 14 246 190 14 246 190 14 246 190 14
56980-246 186 14 236 178 12 224 166 10 142 122 72
56981- 46 46 46 18 18 18 6 6 6 0 0 0
56982- 0 0 0 0 0 0 0 0 0 0 0 0
56983- 0 0 0 0 0 0 0 0 0 0 0 0
56984- 0 0 0 0 0 0 6 6 6 18 18 18
56985- 50 50 50 109 106 95 192 133 9 224 166 10
56986-242 186 14 246 190 14 246 190 14 246 190 14
56987-246 190 14 246 190 14 246 190 14 246 190 14
56988-246 190 14 246 190 14 246 190 14 246 190 14
56989-246 190 14 246 190 14 246 190 14 246 190 14
56990-242 186 14 226 184 13 210 162 10 142 110 46
56991-226 226 226 253 253 253 253 253 253 253 253 253
56992-253 253 253 253 253 253 253 253 253 253 253 253
56993-253 253 253 253 253 253 253 253 253 253 253 253
56994-198 198 198 66 66 66 2 2 6 2 2 6
56995- 2 2 6 2 2 6 50 34 6 156 107 11
56996-219 162 10 239 182 13 246 186 14 246 190 14
56997-246 190 14 246 190 14 246 190 14 246 190 14
56998-246 190 14 246 190 14 246 190 14 246 190 14
56999-246 190 14 246 190 14 246 190 14 242 186 14
57000-234 174 13 213 154 11 154 122 46 66 66 66
57001- 30 30 30 10 10 10 0 0 0 0 0 0
57002- 0 0 0 0 0 0 0 0 0 0 0 0
57003- 0 0 0 0 0 0 0 0 0 0 0 0
57004- 0 0 0 0 0 0 6 6 6 22 22 22
57005- 58 58 58 154 121 60 206 145 10 234 174 13
57006-242 186 14 246 186 14 246 190 14 246 190 14
57007-246 190 14 246 190 14 246 190 14 246 190 14
57008-246 190 14 246 190 14 246 190 14 246 190 14
57009-246 190 14 246 190 14 246 190 14 246 190 14
57010-246 186 14 236 178 12 210 162 10 163 110 8
57011- 61 42 6 138 138 138 218 218 218 250 250 250
57012-253 253 253 253 253 253 253 253 253 250 250 250
57013-242 242 242 210 210 210 144 144 144 66 66 66
57014- 6 6 6 2 2 6 2 2 6 2 2 6
57015- 2 2 6 2 2 6 61 42 6 163 110 8
57016-216 158 10 236 178 12 246 190 14 246 190 14
57017-246 190 14 246 190 14 246 190 14 246 190 14
57018-246 190 14 246 190 14 246 190 14 246 190 14
57019-246 190 14 239 182 13 230 174 11 216 158 10
57020-190 142 34 124 112 88 70 70 70 38 38 38
57021- 18 18 18 6 6 6 0 0 0 0 0 0
57022- 0 0 0 0 0 0 0 0 0 0 0 0
57023- 0 0 0 0 0 0 0 0 0 0 0 0
57024- 0 0 0 0 0 0 6 6 6 22 22 22
57025- 62 62 62 168 124 44 206 145 10 224 166 10
57026-236 178 12 239 182 13 242 186 14 242 186 14
57027-246 186 14 246 190 14 246 190 14 246 190 14
57028-246 190 14 246 190 14 246 190 14 246 190 14
57029-246 190 14 246 190 14 246 190 14 246 190 14
57030-246 190 14 236 178 12 216 158 10 175 118 6
57031- 80 54 7 2 2 6 6 6 6 30 30 30
57032- 54 54 54 62 62 62 50 50 50 38 38 38
57033- 14 14 14 2 2 6 2 2 6 2 2 6
57034- 2 2 6 2 2 6 2 2 6 2 2 6
57035- 2 2 6 6 6 6 80 54 7 167 114 7
57036-213 154 11 236 178 12 246 190 14 246 190 14
57037-246 190 14 246 190 14 246 190 14 246 190 14
57038-246 190 14 242 186 14 239 182 13 239 182 13
57039-230 174 11 210 150 10 174 135 50 124 112 88
57040- 82 82 82 54 54 54 34 34 34 18 18 18
57041- 6 6 6 0 0 0 0 0 0 0 0 0
57042- 0 0 0 0 0 0 0 0 0 0 0 0
57043- 0 0 0 0 0 0 0 0 0 0 0 0
57044- 0 0 0 0 0 0 6 6 6 18 18 18
57045- 50 50 50 158 118 36 192 133 9 200 144 11
57046-216 158 10 219 162 10 224 166 10 226 170 11
57047-230 174 11 236 178 12 239 182 13 239 182 13
57048-242 186 14 246 186 14 246 190 14 246 190 14
57049-246 190 14 246 190 14 246 190 14 246 190 14
57050-246 186 14 230 174 11 210 150 10 163 110 8
57051-104 69 6 10 10 10 2 2 6 2 2 6
57052- 2 2 6 2 2 6 2 2 6 2 2 6
57053- 2 2 6 2 2 6 2 2 6 2 2 6
57054- 2 2 6 2 2 6 2 2 6 2 2 6
57055- 2 2 6 6 6 6 91 60 6 167 114 7
57056-206 145 10 230 174 11 242 186 14 246 190 14
57057-246 190 14 246 190 14 246 186 14 242 186 14
57058-239 182 13 230 174 11 224 166 10 213 154 11
57059-180 133 36 124 112 88 86 86 86 58 58 58
57060- 38 38 38 22 22 22 10 10 10 6 6 6
57061- 0 0 0 0 0 0 0 0 0 0 0 0
57062- 0 0 0 0 0 0 0 0 0 0 0 0
57063- 0 0 0 0 0 0 0 0 0 0 0 0
57064- 0 0 0 0 0 0 0 0 0 14 14 14
57065- 34 34 34 70 70 70 138 110 50 158 118 36
57066-167 114 7 180 123 7 192 133 9 197 138 11
57067-200 144 11 206 145 10 213 154 11 219 162 10
57068-224 166 10 230 174 11 239 182 13 242 186 14
57069-246 186 14 246 186 14 246 186 14 246 186 14
57070-239 182 13 216 158 10 185 133 11 152 99 6
57071-104 69 6 18 14 6 2 2 6 2 2 6
57072- 2 2 6 2 2 6 2 2 6 2 2 6
57073- 2 2 6 2 2 6 2 2 6 2 2 6
57074- 2 2 6 2 2 6 2 2 6 2 2 6
57075- 2 2 6 6 6 6 80 54 7 152 99 6
57076-192 133 9 219 162 10 236 178 12 239 182 13
57077-246 186 14 242 186 14 239 182 13 236 178 12
57078-224 166 10 206 145 10 192 133 9 154 121 60
57079- 94 94 94 62 62 62 42 42 42 22 22 22
57080- 14 14 14 6 6 6 0 0 0 0 0 0
57081- 0 0 0 0 0 0 0 0 0 0 0 0
57082- 0 0 0 0 0 0 0 0 0 0 0 0
57083- 0 0 0 0 0 0 0 0 0 0 0 0
57084- 0 0 0 0 0 0 0 0 0 6 6 6
57085- 18 18 18 34 34 34 58 58 58 78 78 78
57086-101 98 89 124 112 88 142 110 46 156 107 11
57087-163 110 8 167 114 7 175 118 6 180 123 7
57088-185 133 11 197 138 11 210 150 10 219 162 10
57089-226 170 11 236 178 12 236 178 12 234 174 13
57090-219 162 10 197 138 11 163 110 8 130 83 6
57091- 91 60 6 10 10 10 2 2 6 2 2 6
57092- 18 18 18 38 38 38 38 38 38 38 38 38
57093- 38 38 38 38 38 38 38 38 38 38 38 38
57094- 38 38 38 38 38 38 26 26 26 2 2 6
57095- 2 2 6 6 6 6 70 47 6 137 92 6
57096-175 118 6 200 144 11 219 162 10 230 174 11
57097-234 174 13 230 174 11 219 162 10 210 150 10
57098-192 133 9 163 110 8 124 112 88 82 82 82
57099- 50 50 50 30 30 30 14 14 14 6 6 6
57100- 0 0 0 0 0 0 0 0 0 0 0 0
57101- 0 0 0 0 0 0 0 0 0 0 0 0
57102- 0 0 0 0 0 0 0 0 0 0 0 0
57103- 0 0 0 0 0 0 0 0 0 0 0 0
57104- 0 0 0 0 0 0 0 0 0 0 0 0
57105- 6 6 6 14 14 14 22 22 22 34 34 34
57106- 42 42 42 58 58 58 74 74 74 86 86 86
57107-101 98 89 122 102 70 130 98 46 121 87 25
57108-137 92 6 152 99 6 163 110 8 180 123 7
57109-185 133 11 197 138 11 206 145 10 200 144 11
57110-180 123 7 156 107 11 130 83 6 104 69 6
57111- 50 34 6 54 54 54 110 110 110 101 98 89
57112- 86 86 86 82 82 82 78 78 78 78 78 78
57113- 78 78 78 78 78 78 78 78 78 78 78 78
57114- 78 78 78 82 82 82 86 86 86 94 94 94
57115-106 106 106 101 101 101 86 66 34 124 80 6
57116-156 107 11 180 123 7 192 133 9 200 144 11
57117-206 145 10 200 144 11 192 133 9 175 118 6
57118-139 102 15 109 106 95 70 70 70 42 42 42
57119- 22 22 22 10 10 10 0 0 0 0 0 0
57120- 0 0 0 0 0 0 0 0 0 0 0 0
57121- 0 0 0 0 0 0 0 0 0 0 0 0
57122- 0 0 0 0 0 0 0 0 0 0 0 0
57123- 0 0 0 0 0 0 0 0 0 0 0 0
57124- 0 0 0 0 0 0 0 0 0 0 0 0
57125- 0 0 0 0 0 0 6 6 6 10 10 10
57126- 14 14 14 22 22 22 30 30 30 38 38 38
57127- 50 50 50 62 62 62 74 74 74 90 90 90
57128-101 98 89 112 100 78 121 87 25 124 80 6
57129-137 92 6 152 99 6 152 99 6 152 99 6
57130-138 86 6 124 80 6 98 70 6 86 66 30
57131-101 98 89 82 82 82 58 58 58 46 46 46
57132- 38 38 38 34 34 34 34 34 34 34 34 34
57133- 34 34 34 34 34 34 34 34 34 34 34 34
57134- 34 34 34 34 34 34 38 38 38 42 42 42
57135- 54 54 54 82 82 82 94 86 76 91 60 6
57136-134 86 6 156 107 11 167 114 7 175 118 6
57137-175 118 6 167 114 7 152 99 6 121 87 25
57138-101 98 89 62 62 62 34 34 34 18 18 18
57139- 6 6 6 0 0 0 0 0 0 0 0 0
57140- 0 0 0 0 0 0 0 0 0 0 0 0
57141- 0 0 0 0 0 0 0 0 0 0 0 0
57142- 0 0 0 0 0 0 0 0 0 0 0 0
57143- 0 0 0 0 0 0 0 0 0 0 0 0
57144- 0 0 0 0 0 0 0 0 0 0 0 0
57145- 0 0 0 0 0 0 0 0 0 0 0 0
57146- 0 0 0 6 6 6 6 6 6 10 10 10
57147- 18 18 18 22 22 22 30 30 30 42 42 42
57148- 50 50 50 66 66 66 86 86 86 101 98 89
57149-106 86 58 98 70 6 104 69 6 104 69 6
57150-104 69 6 91 60 6 82 62 34 90 90 90
57151- 62 62 62 38 38 38 22 22 22 14 14 14
57152- 10 10 10 10 10 10 10 10 10 10 10 10
57153- 10 10 10 10 10 10 6 6 6 10 10 10
57154- 10 10 10 10 10 10 10 10 10 14 14 14
57155- 22 22 22 42 42 42 70 70 70 89 81 66
57156- 80 54 7 104 69 6 124 80 6 137 92 6
57157-134 86 6 116 81 8 100 82 52 86 86 86
57158- 58 58 58 30 30 30 14 14 14 6 6 6
57159- 0 0 0 0 0 0 0 0 0 0 0 0
57160- 0 0 0 0 0 0 0 0 0 0 0 0
57161- 0 0 0 0 0 0 0 0 0 0 0 0
57162- 0 0 0 0 0 0 0 0 0 0 0 0
57163- 0 0 0 0 0 0 0 0 0 0 0 0
57164- 0 0 0 0 0 0 0 0 0 0 0 0
57165- 0 0 0 0 0 0 0 0 0 0 0 0
57166- 0 0 0 0 0 0 0 0 0 0 0 0
57167- 0 0 0 6 6 6 10 10 10 14 14 14
57168- 18 18 18 26 26 26 38 38 38 54 54 54
57169- 70 70 70 86 86 86 94 86 76 89 81 66
57170- 89 81 66 86 86 86 74 74 74 50 50 50
57171- 30 30 30 14 14 14 6 6 6 0 0 0
57172- 0 0 0 0 0 0 0 0 0 0 0 0
57173- 0 0 0 0 0 0 0 0 0 0 0 0
57174- 0 0 0 0 0 0 0 0 0 0 0 0
57175- 6 6 6 18 18 18 34 34 34 58 58 58
57176- 82 82 82 89 81 66 89 81 66 89 81 66
57177- 94 86 66 94 86 76 74 74 74 50 50 50
57178- 26 26 26 14 14 14 6 6 6 0 0 0
57179- 0 0 0 0 0 0 0 0 0 0 0 0
57180- 0 0 0 0 0 0 0 0 0 0 0 0
57181- 0 0 0 0 0 0 0 0 0 0 0 0
57182- 0 0 0 0 0 0 0 0 0 0 0 0
57183- 0 0 0 0 0 0 0 0 0 0 0 0
57184- 0 0 0 0 0 0 0 0 0 0 0 0
57185- 0 0 0 0 0 0 0 0 0 0 0 0
57186- 0 0 0 0 0 0 0 0 0 0 0 0
57187- 0 0 0 0 0 0 0 0 0 0 0 0
57188- 6 6 6 6 6 6 14 14 14 18 18 18
57189- 30 30 30 38 38 38 46 46 46 54 54 54
57190- 50 50 50 42 42 42 30 30 30 18 18 18
57191- 10 10 10 0 0 0 0 0 0 0 0 0
57192- 0 0 0 0 0 0 0 0 0 0 0 0
57193- 0 0 0 0 0 0 0 0 0 0 0 0
57194- 0 0 0 0 0 0 0 0 0 0 0 0
57195- 0 0 0 6 6 6 14 14 14 26 26 26
57196- 38 38 38 50 50 50 58 58 58 58 58 58
57197- 54 54 54 42 42 42 30 30 30 18 18 18
57198- 10 10 10 0 0 0 0 0 0 0 0 0
57199- 0 0 0 0 0 0 0 0 0 0 0 0
57200- 0 0 0 0 0 0 0 0 0 0 0 0
57201- 0 0 0 0 0 0 0 0 0 0 0 0
57202- 0 0 0 0 0 0 0 0 0 0 0 0
57203- 0 0 0 0 0 0 0 0 0 0 0 0
57204- 0 0 0 0 0 0 0 0 0 0 0 0
57205- 0 0 0 0 0 0 0 0 0 0 0 0
57206- 0 0 0 0 0 0 0 0 0 0 0 0
57207- 0 0 0 0 0 0 0 0 0 0 0 0
57208- 0 0 0 0 0 0 0 0 0 6 6 6
57209- 6 6 6 10 10 10 14 14 14 18 18 18
57210- 18 18 18 14 14 14 10 10 10 6 6 6
57211- 0 0 0 0 0 0 0 0 0 0 0 0
57212- 0 0 0 0 0 0 0 0 0 0 0 0
57213- 0 0 0 0 0 0 0 0 0 0 0 0
57214- 0 0 0 0 0 0 0 0 0 0 0 0
57215- 0 0 0 0 0 0 0 0 0 6 6 6
57216- 14 14 14 18 18 18 22 22 22 22 22 22
57217- 18 18 18 14 14 14 10 10 10 6 6 6
57218- 0 0 0 0 0 0 0 0 0 0 0 0
57219- 0 0 0 0 0 0 0 0 0 0 0 0
57220- 0 0 0 0 0 0 0 0 0 0 0 0
57221- 0 0 0 0 0 0 0 0 0 0 0 0
57222- 0 0 0 0 0 0 0 0 0 0 0 0
57223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57230+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57236+4 4 4 4 4 4
57237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57244+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57250+4 4 4 4 4 4
57251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57258+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57264+4 4 4 4 4 4
57265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57278+4 4 4 4 4 4
57279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57292+4 4 4 4 4 4
57293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57306+4 4 4 4 4 4
57307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57311+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
57312+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
57313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57316+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
57317+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57318+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
57319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57320+4 4 4 4 4 4
57321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57325+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
57326+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
57327+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57330+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
57331+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
57332+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
57333+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57334+4 4 4 4 4 4
57335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57339+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
57340+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
57341+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57344+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
57345+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
57346+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
57347+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
57348+4 4 4 4 4 4
57349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57352+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
57353+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
57354+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
57355+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
57356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57357+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
57358+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
57359+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
57360+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
57361+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
57362+4 4 4 4 4 4
57363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57366+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
57367+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
57368+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
57369+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
57370+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57371+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
57372+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
57373+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
57374+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
57375+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
57376+4 4 4 4 4 4
57377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57380+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
57381+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
57382+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
57383+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
57384+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
57385+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
57386+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
57387+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
57388+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
57389+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
57390+4 4 4 4 4 4
57391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57393+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
57394+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
57395+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
57396+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
57397+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
57398+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
57399+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
57400+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
57401+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
57402+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
57403+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
57404+4 4 4 4 4 4
57405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57407+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
57408+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
57409+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
57410+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
57411+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
57412+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
57413+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
57414+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
57415+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
57416+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
57417+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
57418+4 4 4 4 4 4
57419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57421+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
57422+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
57423+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
57424+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
57425+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
57426+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
57427+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
57428+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
57429+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
57430+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
57431+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57432+4 4 4 4 4 4
57433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57435+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
57436+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
57437+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
57438+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
57439+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
57440+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
57441+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
57442+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
57443+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
57444+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
57445+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
57446+4 4 4 4 4 4
57447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57448+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
57449+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
57450+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
57451+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
57452+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
57453+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
57454+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
57455+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
57456+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
57457+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
57458+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
57459+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
57460+4 4 4 4 4 4
57461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57462+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
57463+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
57464+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
57465+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
57466+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
57467+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
57468+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
57469+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
57470+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
57471+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
57472+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
57473+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
57474+0 0 0 4 4 4
57475+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
57476+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
57477+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
57478+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
57479+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
57480+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
57481+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
57482+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
57483+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
57484+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
57485+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
57486+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
57487+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
57488+2 0 0 0 0 0
57489+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
57490+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
57491+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
57492+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
57493+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
57494+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
57495+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
57496+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
57497+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
57498+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
57499+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
57500+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
57501+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
57502+37 38 37 0 0 0
57503+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57504+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
57505+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
57506+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
57507+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
57508+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
57509+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
57510+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
57511+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
57512+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
57513+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
57514+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
57515+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
57516+85 115 134 4 0 0
57517+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
57518+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
57519+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
57520+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
57521+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
57522+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
57523+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
57524+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
57525+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
57526+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
57527+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
57528+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
57529+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
57530+60 73 81 4 0 0
57531+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
57532+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
57533+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
57534+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
57535+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
57536+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
57537+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
57538+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
57539+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
57540+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
57541+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
57542+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
57543+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
57544+16 19 21 4 0 0
57545+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
57546+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
57547+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
57548+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
57549+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
57550+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
57551+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
57552+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
57553+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
57554+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
57555+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
57556+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
57557+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
57558+4 0 0 4 3 3
57559+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
57560+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
57561+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
57562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
57563+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
57564+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
57565+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
57566+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
57567+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
57568+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
57569+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
57570+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
57571+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
57572+3 2 2 4 4 4
57573+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
57574+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
57575+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
57576+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57577+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
57578+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
57579+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
57580+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
57581+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
57582+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
57583+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
57584+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
57585+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
57586+4 4 4 4 4 4
57587+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
57588+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
57589+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
57590+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
57591+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
57592+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
57593+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
57594+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
57595+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
57596+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
57597+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
57598+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
57599+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
57600+4 4 4 4 4 4
57601+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
57602+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
57603+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
57604+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
57605+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
57606+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57607+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
57608+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
57609+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
57610+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
57611+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
57612+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
57613+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
57614+5 5 5 5 5 5
57615+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
57616+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
57617+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
57618+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
57619+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
57620+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57621+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
57622+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
57623+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
57624+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
57625+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
57626+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
57627+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
57628+5 5 5 4 4 4
57629+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
57630+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
57631+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
57632+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
57633+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57634+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
57635+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
57636+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
57637+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
57638+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
57639+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
57640+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
57641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57642+4 4 4 4 4 4
57643+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
57644+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
57645+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
57646+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
57647+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
57648+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57649+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57650+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
57651+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
57652+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
57653+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
57654+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
57655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57656+4 4 4 4 4 4
57657+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
57658+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
57659+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
57660+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
57661+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57662+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
57663+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
57664+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
57665+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
57666+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
57667+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
57668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57670+4 4 4 4 4 4
57671+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
57672+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
57673+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
57674+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
57675+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57676+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57677+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57678+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
57679+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
57680+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
57681+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
57682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57684+4 4 4 4 4 4
57685+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
57686+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
57687+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
57688+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
57689+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57690+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
57691+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57692+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
57693+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
57694+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
57695+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57698+4 4 4 4 4 4
57699+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
57700+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
57701+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
57702+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
57703+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57704+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
57705+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
57706+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
57707+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
57708+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
57709+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
57710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57712+4 4 4 4 4 4
57713+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
57714+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
57715+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
57716+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
57717+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57718+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
57719+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
57720+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
57721+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
57722+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
57723+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
57724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57726+4 4 4 4 4 4
57727+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
57728+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
57729+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
57730+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57731+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
57732+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
57733+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
57734+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
57735+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
57736+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
57737+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57740+4 4 4 4 4 4
57741+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
57742+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
57743+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
57744+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57745+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57746+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
57747+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
57748+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
57749+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
57750+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
57751+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57754+4 4 4 4 4 4
57755+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
57756+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
57757+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57758+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57759+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57760+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
57761+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
57762+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
57763+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
57764+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
57765+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57768+4 4 4 4 4 4
57769+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
57770+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
57771+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57772+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57773+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57774+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
57775+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
57776+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
57777+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57778+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57779+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57782+4 4 4 4 4 4
57783+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57784+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
57785+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57786+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
57787+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
57788+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
57789+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
57790+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
57791+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57792+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57793+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57796+4 4 4 4 4 4
57797+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57798+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
57799+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57800+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
57801+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57802+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
57803+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
57804+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
57805+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57806+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57807+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57810+4 4 4 4 4 4
57811+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
57812+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
57813+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57814+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
57815+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
57816+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
57817+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
57818+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
57819+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57820+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57821+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57824+4 4 4 4 4 4
57825+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
57826+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
57827+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57828+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
57829+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
57830+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
57831+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
57832+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
57833+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57834+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57835+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57838+4 4 4 4 4 4
57839+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57840+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
57841+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57842+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
57843+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
57844+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
57845+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
57846+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
57847+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57848+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57849+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57852+4 4 4 4 4 4
57853+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
57854+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
57855+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57856+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
57857+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
57858+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
57859+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
57860+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
57861+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
57862+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57863+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57866+4 4 4 4 4 4
57867+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57868+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
57869+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
57870+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
57871+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
57872+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
57873+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
57874+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
57875+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57876+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57877+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57880+4 4 4 4 4 4
57881+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57882+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
57883+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57884+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
57885+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
57886+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
57887+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
57888+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
57889+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57890+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57891+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57894+4 4 4 4 4 4
57895+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57896+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
57897+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
57898+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
57899+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
57900+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
57901+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57902+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
57903+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57904+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57905+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57908+4 4 4 4 4 4
57909+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57910+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
57911+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
57912+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57913+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
57914+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
57915+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57916+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
57917+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57918+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57919+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57922+4 4 4 4 4 4
57923+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57924+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
57925+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
57926+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
57927+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
57928+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
57929+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
57930+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
57931+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
57932+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57933+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57936+4 4 4 4 4 4
57937+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57938+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
57939+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
57940+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
57941+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
57942+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
57943+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
57944+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
57945+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
57946+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57947+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57950+4 4 4 4 4 4
57951+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
57952+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
57953+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
57954+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
57955+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57956+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
57957+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
57958+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
57959+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
57960+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57961+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57964+4 4 4 4 4 4
57965+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57966+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
57967+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
57968+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
57969+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
57970+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
57971+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
57972+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
57973+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
57974+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57975+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57978+4 4 4 4 4 4
57979+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
57980+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
57981+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
57982+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
57983+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
57984+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
57985+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
57986+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
57987+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
57988+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
57989+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57990+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57992+4 4 4 4 4 4
57993+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
57994+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
57995+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
57996+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
57997+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
57998+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
57999+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
58000+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
58001+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
58002+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
58003+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58004+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58006+4 4 4 4 4 4
58007+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
58008+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58009+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
58010+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
58011+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
58012+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
58013+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58014+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
58015+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
58016+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
58017+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58018+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58020+4 4 4 4 4 4
58021+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
58022+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
58023+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
58024+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
58025+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
58026+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
58027+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
58028+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
58029+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
58030+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
58031+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58032+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58034+4 4 4 4 4 4
58035+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
58036+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
58037+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58038+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
58039+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
58040+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
58041+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
58042+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
58043+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
58044+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
58045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58046+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58048+4 4 4 4 4 4
58049+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58050+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
58051+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
58052+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
58053+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
58054+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
58055+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
58056+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
58057+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
58058+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58060+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58062+4 4 4 4 4 4
58063+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
58064+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
58065+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
58066+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
58067+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
58068+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
58069+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
58070+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
58071+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
58072+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58074+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58076+4 4 4 4 4 4
58077+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
58078+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
58079+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
58080+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
58081+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
58082+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
58083+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
58084+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
58085+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58086+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58088+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58090+4 4 4 4 4 4
58091+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
58092+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58093+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
58094+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58095+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
58096+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
58097+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
58098+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
58099+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
58100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58102+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58104+4 4 4 4 4 4
58105+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
58106+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
58107+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
58108+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
58109+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
58110+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
58111+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
58112+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
58113+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
58114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58115+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58116+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58118+4 4 4 4 4 4
58119+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58120+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
58121+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
58122+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
58123+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
58124+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
58125+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
58126+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
58127+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58128+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58129+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58130+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58132+4 4 4 4 4 4
58133+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
58134+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
58135+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
58136+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
58137+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
58138+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
58139+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
58140+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
58141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58142+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58143+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58144+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58146+4 4 4 4 4 4
58147+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
58148+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
58149+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
58150+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
58151+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
58152+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
58153+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
58154+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58157+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58158+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58160+4 4 4 4 4 4
58161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58162+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
58163+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58164+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
58165+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
58166+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
58167+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
58168+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
58169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58171+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58172+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58174+4 4 4 4 4 4
58175+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58176+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
58177+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
58178+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
58179+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
58180+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
58181+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
58182+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
58183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58185+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58186+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58188+4 4 4 4 4 4
58189+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58190+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58191+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
58192+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
58193+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
58194+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
58195+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
58196+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58199+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58200+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58202+4 4 4 4 4 4
58203+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58205+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
58206+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
58207+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
58208+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
58209+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
58210+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58213+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58214+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58216+4 4 4 4 4 4
58217+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58220+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58221+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
58222+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
58223+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
58224+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58228+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58230+4 4 4 4 4 4
58231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58234+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
58235+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
58236+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
58237+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
58238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58244+4 4 4 4 4 4
58245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58248+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58249+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58250+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
58251+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
58252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58258+4 4 4 4 4 4
58259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58262+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
58263+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
58264+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
58265+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
58266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58272+4 4 4 4 4 4
58273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58277+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
58278+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58279+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58286+4 4 4 4 4 4
58287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58291+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
58292+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
58293+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58300+4 4 4 4 4 4
58301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58305+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
58306+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
58307+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58314+4 4 4 4 4 4
58315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58319+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
58320+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
58321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58328+4 4 4 4 4 4
58329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58333+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
58334+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
58335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58342+4 4 4 4 4 4
58343diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
58344index fef20db..d28b1ab 100644
58345--- a/drivers/xen/xenfs/xenstored.c
58346+++ b/drivers/xen/xenfs/xenstored.c
58347@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
58348 static int xsd_kva_open(struct inode *inode, struct file *file)
58349 {
58350 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
58351+#ifdef CONFIG_GRKERNSEC_HIDESYM
58352+ NULL);
58353+#else
58354 xen_store_interface);
58355+#endif
58356+
58357 if (!file->private_data)
58358 return -ENOMEM;
58359 return 0;
58360diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
58361index cc1cfae..41158ad 100644
58362--- a/fs/9p/vfs_addr.c
58363+++ b/fs/9p/vfs_addr.c
58364@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
58365
58366 retval = v9fs_file_write_internal(inode,
58367 v9inode->writeback_fid,
58368- (__force const char __user *)buffer,
58369+ (const char __force_user *)buffer,
58370 len, &offset, 0);
58371 if (retval > 0)
58372 retval = 0;
58373diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
58374index 7fa4f7a..a7ebf8c 100644
58375--- a/fs/9p/vfs_inode.c
58376+++ b/fs/9p/vfs_inode.c
58377@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58378 void
58379 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
58380 {
58381- char *s = nd_get_link(nd);
58382+ const char *s = nd_get_link(nd);
58383
58384 p9_debug(P9_DEBUG_VFS, " %s %s\n",
58385 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
58386diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
58387index 370b24c..ff0be7b 100644
58388--- a/fs/Kconfig.binfmt
58389+++ b/fs/Kconfig.binfmt
58390@@ -103,7 +103,7 @@ config HAVE_AOUT
58391
58392 config BINFMT_AOUT
58393 tristate "Kernel support for a.out and ECOFF binaries"
58394- depends on HAVE_AOUT
58395+ depends on HAVE_AOUT && BROKEN
58396 ---help---
58397 A.out (Assembler.OUTput) is a set of formats for libraries and
58398 executables used in the earliest versions of UNIX. Linux used
58399diff --git a/fs/afs/inode.c b/fs/afs/inode.c
58400index 2946712..f737435 100644
58401--- a/fs/afs/inode.c
58402+++ b/fs/afs/inode.c
58403@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58404 struct afs_vnode *vnode;
58405 struct super_block *sb;
58406 struct inode *inode;
58407- static atomic_t afs_autocell_ino;
58408+ static atomic_unchecked_t afs_autocell_ino;
58409
58410 _enter("{%x:%u},%*.*s,",
58411 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
58412@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58413 data.fid.unique = 0;
58414 data.fid.vnode = 0;
58415
58416- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
58417+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
58418 afs_iget5_autocell_test, afs_iget5_set,
58419 &data);
58420 if (!inode) {
58421diff --git a/fs/aio.c b/fs/aio.c
58422index 0ff7c46..7f5d132 100644
58423--- a/fs/aio.c
58424+++ b/fs/aio.c
58425@@ -388,7 +388,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58426 size += sizeof(struct io_event) * nr_events;
58427
58428 nr_pages = PFN_UP(size);
58429- if (nr_pages < 0)
58430+ if (nr_pages <= 0)
58431 return -EINVAL;
58432
58433 file = aio_private_file(ctx, nr_pages);
58434diff --git a/fs/attr.c b/fs/attr.c
58435index 6530ced..4a827e2 100644
58436--- a/fs/attr.c
58437+++ b/fs/attr.c
58438@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
58439 unsigned long limit;
58440
58441 limit = rlimit(RLIMIT_FSIZE);
58442+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
58443 if (limit != RLIM_INFINITY && offset > limit)
58444 goto out_sig;
58445 if (offset > inode->i_sb->s_maxbytes)
58446diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
58447index 116fd38..c04182da 100644
58448--- a/fs/autofs4/waitq.c
58449+++ b/fs/autofs4/waitq.c
58450@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
58451 {
58452 unsigned long sigpipe, flags;
58453 mm_segment_t fs;
58454- const char *data = (const char *)addr;
58455+ const char __user *data = (const char __force_user *)addr;
58456 ssize_t wr = 0;
58457
58458 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
58459@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
58460 return 1;
58461 }
58462
58463+#ifdef CONFIG_GRKERNSEC_HIDESYM
58464+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
58465+#endif
58466+
58467 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58468 enum autofs_notify notify)
58469 {
58470@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58471
58472 /* If this is a direct mount request create a dummy name */
58473 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
58474+#ifdef CONFIG_GRKERNSEC_HIDESYM
58475+ /* this name does get written to userland via autofs4_write() */
58476+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
58477+#else
58478 qstr.len = sprintf(name, "%p", dentry);
58479+#endif
58480 else {
58481 qstr.len = autofs4_getpath(sbi, dentry, &name);
58482 if (!qstr.len) {
58483diff --git a/fs/befs/endian.h b/fs/befs/endian.h
58484index 2722387..56059b5 100644
58485--- a/fs/befs/endian.h
58486+++ b/fs/befs/endian.h
58487@@ -11,7 +11,7 @@
58488
58489 #include <asm/byteorder.h>
58490
58491-static inline u64
58492+static inline u64 __intentional_overflow(-1)
58493 fs64_to_cpu(const struct super_block *sb, fs64 n)
58494 {
58495 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58496@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
58497 return (__force fs64)cpu_to_be64(n);
58498 }
58499
58500-static inline u32
58501+static inline u32 __intentional_overflow(-1)
58502 fs32_to_cpu(const struct super_block *sb, fs32 n)
58503 {
58504 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58505@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
58506 return (__force fs32)cpu_to_be32(n);
58507 }
58508
58509-static inline u16
58510+static inline u16 __intentional_overflow(-1)
58511 fs16_to_cpu(const struct super_block *sb, fs16 n)
58512 {
58513 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58514diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
58515index ca0ba15..0fa3257 100644
58516--- a/fs/binfmt_aout.c
58517+++ b/fs/binfmt_aout.c
58518@@ -16,6 +16,7 @@
58519 #include <linux/string.h>
58520 #include <linux/fs.h>
58521 #include <linux/file.h>
58522+#include <linux/security.h>
58523 #include <linux/stat.h>
58524 #include <linux/fcntl.h>
58525 #include <linux/ptrace.h>
58526@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
58527 #endif
58528 # define START_STACK(u) ((void __user *)u.start_stack)
58529
58530+ memset(&dump, 0, sizeof(dump));
58531+
58532 fs = get_fs();
58533 set_fs(KERNEL_DS);
58534 has_dumped = 1;
58535@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58536
58537 /* If the size of the dump file exceeds the rlimit, then see what would happen
58538 if we wrote the stack, but not the data area. */
58539+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
58540 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58541 dump.u_dsize = 0;
58542
58543 /* Make sure we have enough room to write the stack and data areas. */
58544+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
58545 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58546 dump.u_ssize = 0;
58547
58548@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
58549 rlim = rlimit(RLIMIT_DATA);
58550 if (rlim >= RLIM_INFINITY)
58551 rlim = ~0;
58552+
58553+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
58554 if (ex.a_data + ex.a_bss > rlim)
58555 return -ENOMEM;
58556
58557@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
58558
58559 install_exec_creds(bprm);
58560
58561+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58562+ current->mm->pax_flags = 0UL;
58563+#endif
58564+
58565+#ifdef CONFIG_PAX_PAGEEXEC
58566+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
58567+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
58568+
58569+#ifdef CONFIG_PAX_EMUTRAMP
58570+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
58571+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
58572+#endif
58573+
58574+#ifdef CONFIG_PAX_MPROTECT
58575+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
58576+ current->mm->pax_flags |= MF_PAX_MPROTECT;
58577+#endif
58578+
58579+ }
58580+#endif
58581+
58582 if (N_MAGIC(ex) == OMAGIC) {
58583 unsigned long text_addr, map_size;
58584 loff_t pos;
58585@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
58586 }
58587
58588 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
58589- PROT_READ | PROT_WRITE | PROT_EXEC,
58590+ PROT_READ | PROT_WRITE,
58591 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
58592 fd_offset + ex.a_text);
58593 if (error != N_DATADDR(ex)) {
58594diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
58595index 3892c1a..4e27c04 100644
58596--- a/fs/binfmt_elf.c
58597+++ b/fs/binfmt_elf.c
58598@@ -34,6 +34,7 @@
58599 #include <linux/utsname.h>
58600 #include <linux/coredump.h>
58601 #include <linux/sched.h>
58602+#include <linux/xattr.h>
58603 #include <asm/uaccess.h>
58604 #include <asm/param.h>
58605 #include <asm/page.h>
58606@@ -47,7 +48,7 @@
58607
58608 static int load_elf_binary(struct linux_binprm *bprm);
58609 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
58610- int, int, unsigned long);
58611+ int, int, unsigned long) __intentional_overflow(-1);
58612
58613 #ifdef CONFIG_USELIB
58614 static int load_elf_library(struct file *);
58615@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
58616 #define elf_core_dump NULL
58617 #endif
58618
58619+#ifdef CONFIG_PAX_MPROTECT
58620+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
58621+#endif
58622+
58623+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58624+static void elf_handle_mmap(struct file *file);
58625+#endif
58626+
58627 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
58628 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
58629 #else
58630@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
58631 .load_binary = load_elf_binary,
58632 .load_shlib = load_elf_library,
58633 .core_dump = elf_core_dump,
58634+
58635+#ifdef CONFIG_PAX_MPROTECT
58636+ .handle_mprotect= elf_handle_mprotect,
58637+#endif
58638+
58639+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58640+ .handle_mmap = elf_handle_mmap,
58641+#endif
58642+
58643 .min_coredump = ELF_EXEC_PAGESIZE,
58644 };
58645
58646@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
58647
58648 static int set_brk(unsigned long start, unsigned long end)
58649 {
58650+ unsigned long e = end;
58651+
58652 start = ELF_PAGEALIGN(start);
58653 end = ELF_PAGEALIGN(end);
58654 if (end > start) {
58655@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
58656 if (BAD_ADDR(addr))
58657 return addr;
58658 }
58659- current->mm->start_brk = current->mm->brk = end;
58660+ current->mm->start_brk = current->mm->brk = e;
58661 return 0;
58662 }
58663
58664@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58665 elf_addr_t __user *u_rand_bytes;
58666 const char *k_platform = ELF_PLATFORM;
58667 const char *k_base_platform = ELF_BASE_PLATFORM;
58668- unsigned char k_rand_bytes[16];
58669+ u32 k_rand_bytes[4];
58670 int items;
58671 elf_addr_t *elf_info;
58672 int ei_index = 0;
58673 const struct cred *cred = current_cred();
58674 struct vm_area_struct *vma;
58675+ unsigned long saved_auxv[AT_VECTOR_SIZE];
58676
58677 /*
58678 * In some cases (e.g. Hyper-Threading), we want to avoid L1
58679@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58680 * Generate 16 random bytes for userspace PRNG seeding.
58681 */
58682 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
58683- u_rand_bytes = (elf_addr_t __user *)
58684- STACK_ALLOC(p, sizeof(k_rand_bytes));
58685+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
58686+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
58687+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
58688+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
58689+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
58690+ u_rand_bytes = (elf_addr_t __user *) p;
58691 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
58692 return -EFAULT;
58693
58694@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58695 return -EFAULT;
58696 current->mm->env_end = p;
58697
58698+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58699+
58700 /* Put the elf_info on the stack in the right place. */
58701 sp = (elf_addr_t __user *)envp + 1;
58702- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58703+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58704 return -EFAULT;
58705 return 0;
58706 }
58707@@ -393,15 +420,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
58708 an ELF header */
58709
58710 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58711- struct file *interpreter, unsigned long *interp_map_addr,
58712- unsigned long no_base)
58713+ struct file *interpreter, unsigned long no_base)
58714 {
58715 struct elf_phdr *elf_phdata;
58716 struct elf_phdr *eppnt;
58717- unsigned long load_addr = 0;
58718+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58719 int load_addr_set = 0;
58720 unsigned long last_bss = 0, elf_bss = 0;
58721- unsigned long error = ~0UL;
58722+ unsigned long error = -EINVAL;
58723 unsigned long total_size;
58724 int retval, i, size;
58725
58726@@ -447,6 +473,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58727 goto out_close;
58728 }
58729
58730+#ifdef CONFIG_PAX_SEGMEXEC
58731+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58732+ pax_task_size = SEGMEXEC_TASK_SIZE;
58733+#endif
58734+
58735 eppnt = elf_phdata;
58736 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58737 if (eppnt->p_type == PT_LOAD) {
58738@@ -470,8 +501,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58739 map_addr = elf_map(interpreter, load_addr + vaddr,
58740 eppnt, elf_prot, elf_type, total_size);
58741 total_size = 0;
58742- if (!*interp_map_addr)
58743- *interp_map_addr = map_addr;
58744 error = map_addr;
58745 if (BAD_ADDR(map_addr))
58746 goto out_close;
58747@@ -490,8 +519,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58748 k = load_addr + eppnt->p_vaddr;
58749 if (BAD_ADDR(k) ||
58750 eppnt->p_filesz > eppnt->p_memsz ||
58751- eppnt->p_memsz > TASK_SIZE ||
58752- TASK_SIZE - eppnt->p_memsz < k) {
58753+ eppnt->p_memsz > pax_task_size ||
58754+ pax_task_size - eppnt->p_memsz < k) {
58755 error = -ENOMEM;
58756 goto out_close;
58757 }
58758@@ -530,9 +559,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58759 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58760
58761 /* Map the last of the bss segment */
58762- error = vm_brk(elf_bss, last_bss - elf_bss);
58763- if (BAD_ADDR(error))
58764- goto out_close;
58765+ if (last_bss > elf_bss) {
58766+ error = vm_brk(elf_bss, last_bss - elf_bss);
58767+ if (BAD_ADDR(error))
58768+ goto out_close;
58769+ }
58770 }
58771
58772 error = load_addr;
58773@@ -543,6 +574,336 @@ out:
58774 return error;
58775 }
58776
58777+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58778+#ifdef CONFIG_PAX_SOFTMODE
58779+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58780+{
58781+ unsigned long pax_flags = 0UL;
58782+
58783+#ifdef CONFIG_PAX_PAGEEXEC
58784+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58785+ pax_flags |= MF_PAX_PAGEEXEC;
58786+#endif
58787+
58788+#ifdef CONFIG_PAX_SEGMEXEC
58789+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58790+ pax_flags |= MF_PAX_SEGMEXEC;
58791+#endif
58792+
58793+#ifdef CONFIG_PAX_EMUTRAMP
58794+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58795+ pax_flags |= MF_PAX_EMUTRAMP;
58796+#endif
58797+
58798+#ifdef CONFIG_PAX_MPROTECT
58799+ if (elf_phdata->p_flags & PF_MPROTECT)
58800+ pax_flags |= MF_PAX_MPROTECT;
58801+#endif
58802+
58803+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58804+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58805+ pax_flags |= MF_PAX_RANDMMAP;
58806+#endif
58807+
58808+ return pax_flags;
58809+}
58810+#endif
58811+
58812+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58813+{
58814+ unsigned long pax_flags = 0UL;
58815+
58816+#ifdef CONFIG_PAX_PAGEEXEC
58817+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58818+ pax_flags |= MF_PAX_PAGEEXEC;
58819+#endif
58820+
58821+#ifdef CONFIG_PAX_SEGMEXEC
58822+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58823+ pax_flags |= MF_PAX_SEGMEXEC;
58824+#endif
58825+
58826+#ifdef CONFIG_PAX_EMUTRAMP
58827+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58828+ pax_flags |= MF_PAX_EMUTRAMP;
58829+#endif
58830+
58831+#ifdef CONFIG_PAX_MPROTECT
58832+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58833+ pax_flags |= MF_PAX_MPROTECT;
58834+#endif
58835+
58836+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58837+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58838+ pax_flags |= MF_PAX_RANDMMAP;
58839+#endif
58840+
58841+ return pax_flags;
58842+}
58843+#endif
58844+
58845+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58846+#ifdef CONFIG_PAX_SOFTMODE
58847+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58848+{
58849+ unsigned long pax_flags = 0UL;
58850+
58851+#ifdef CONFIG_PAX_PAGEEXEC
58852+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58853+ pax_flags |= MF_PAX_PAGEEXEC;
58854+#endif
58855+
58856+#ifdef CONFIG_PAX_SEGMEXEC
58857+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58858+ pax_flags |= MF_PAX_SEGMEXEC;
58859+#endif
58860+
58861+#ifdef CONFIG_PAX_EMUTRAMP
58862+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58863+ pax_flags |= MF_PAX_EMUTRAMP;
58864+#endif
58865+
58866+#ifdef CONFIG_PAX_MPROTECT
58867+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58868+ pax_flags |= MF_PAX_MPROTECT;
58869+#endif
58870+
58871+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58872+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58873+ pax_flags |= MF_PAX_RANDMMAP;
58874+#endif
58875+
58876+ return pax_flags;
58877+}
58878+#endif
58879+
58880+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58881+{
58882+ unsigned long pax_flags = 0UL;
58883+
58884+#ifdef CONFIG_PAX_PAGEEXEC
58885+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58886+ pax_flags |= MF_PAX_PAGEEXEC;
58887+#endif
58888+
58889+#ifdef CONFIG_PAX_SEGMEXEC
58890+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58891+ pax_flags |= MF_PAX_SEGMEXEC;
58892+#endif
58893+
58894+#ifdef CONFIG_PAX_EMUTRAMP
58895+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58896+ pax_flags |= MF_PAX_EMUTRAMP;
58897+#endif
58898+
58899+#ifdef CONFIG_PAX_MPROTECT
58900+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58901+ pax_flags |= MF_PAX_MPROTECT;
58902+#endif
58903+
58904+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58905+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58906+ pax_flags |= MF_PAX_RANDMMAP;
58907+#endif
58908+
58909+ return pax_flags;
58910+}
58911+#endif
58912+
58913+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58914+static unsigned long pax_parse_defaults(void)
58915+{
58916+ unsigned long pax_flags = 0UL;
58917+
58918+#ifdef CONFIG_PAX_SOFTMODE
58919+ if (pax_softmode)
58920+ return pax_flags;
58921+#endif
58922+
58923+#ifdef CONFIG_PAX_PAGEEXEC
58924+ pax_flags |= MF_PAX_PAGEEXEC;
58925+#endif
58926+
58927+#ifdef CONFIG_PAX_SEGMEXEC
58928+ pax_flags |= MF_PAX_SEGMEXEC;
58929+#endif
58930+
58931+#ifdef CONFIG_PAX_MPROTECT
58932+ pax_flags |= MF_PAX_MPROTECT;
58933+#endif
58934+
58935+#ifdef CONFIG_PAX_RANDMMAP
58936+ if (randomize_va_space)
58937+ pax_flags |= MF_PAX_RANDMMAP;
58938+#endif
58939+
58940+ return pax_flags;
58941+}
58942+
58943+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58944+{
58945+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58946+
58947+#ifdef CONFIG_PAX_EI_PAX
58948+
58949+#ifdef CONFIG_PAX_SOFTMODE
58950+ if (pax_softmode)
58951+ return pax_flags;
58952+#endif
58953+
58954+ pax_flags = 0UL;
58955+
58956+#ifdef CONFIG_PAX_PAGEEXEC
58957+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58958+ pax_flags |= MF_PAX_PAGEEXEC;
58959+#endif
58960+
58961+#ifdef CONFIG_PAX_SEGMEXEC
58962+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58963+ pax_flags |= MF_PAX_SEGMEXEC;
58964+#endif
58965+
58966+#ifdef CONFIG_PAX_EMUTRAMP
58967+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58968+ pax_flags |= MF_PAX_EMUTRAMP;
58969+#endif
58970+
58971+#ifdef CONFIG_PAX_MPROTECT
58972+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58973+ pax_flags |= MF_PAX_MPROTECT;
58974+#endif
58975+
58976+#ifdef CONFIG_PAX_ASLR
58977+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58978+ pax_flags |= MF_PAX_RANDMMAP;
58979+#endif
58980+
58981+#endif
58982+
58983+ return pax_flags;
58984+
58985+}
58986+
58987+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
58988+{
58989+
58990+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58991+ unsigned long i;
58992+
58993+ for (i = 0UL; i < elf_ex->e_phnum; i++)
58994+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
58995+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
58996+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
58997+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
58998+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
58999+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
59000+ return PAX_PARSE_FLAGS_FALLBACK;
59001+
59002+#ifdef CONFIG_PAX_SOFTMODE
59003+ if (pax_softmode)
59004+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
59005+ else
59006+#endif
59007+
59008+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
59009+ break;
59010+ }
59011+#endif
59012+
59013+ return PAX_PARSE_FLAGS_FALLBACK;
59014+}
59015+
59016+static unsigned long pax_parse_xattr_pax(struct file * const file)
59017+{
59018+
59019+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59020+ ssize_t xattr_size, i;
59021+ unsigned char xattr_value[sizeof("pemrs") - 1];
59022+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
59023+
59024+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
59025+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
59026+ return PAX_PARSE_FLAGS_FALLBACK;
59027+
59028+ for (i = 0; i < xattr_size; i++)
59029+ switch (xattr_value[i]) {
59030+ default:
59031+ return PAX_PARSE_FLAGS_FALLBACK;
59032+
59033+#define parse_flag(option1, option2, flag) \
59034+ case option1: \
59035+ if (pax_flags_hardmode & MF_PAX_##flag) \
59036+ return PAX_PARSE_FLAGS_FALLBACK;\
59037+ pax_flags_hardmode |= MF_PAX_##flag; \
59038+ break; \
59039+ case option2: \
59040+ if (pax_flags_softmode & MF_PAX_##flag) \
59041+ return PAX_PARSE_FLAGS_FALLBACK;\
59042+ pax_flags_softmode |= MF_PAX_##flag; \
59043+ break;
59044+
59045+ parse_flag('p', 'P', PAGEEXEC);
59046+ parse_flag('e', 'E', EMUTRAMP);
59047+ parse_flag('m', 'M', MPROTECT);
59048+ parse_flag('r', 'R', RANDMMAP);
59049+ parse_flag('s', 'S', SEGMEXEC);
59050+
59051+#undef parse_flag
59052+ }
59053+
59054+ if (pax_flags_hardmode & pax_flags_softmode)
59055+ return PAX_PARSE_FLAGS_FALLBACK;
59056+
59057+#ifdef CONFIG_PAX_SOFTMODE
59058+ if (pax_softmode)
59059+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
59060+ else
59061+#endif
59062+
59063+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
59064+#else
59065+ return PAX_PARSE_FLAGS_FALLBACK;
59066+#endif
59067+
59068+}
59069+
59070+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
59071+{
59072+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
59073+
59074+ pax_flags = pax_parse_defaults();
59075+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
59076+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
59077+ xattr_pax_flags = pax_parse_xattr_pax(file);
59078+
59079+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59080+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59081+ pt_pax_flags != xattr_pax_flags)
59082+ return -EINVAL;
59083+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59084+ pax_flags = xattr_pax_flags;
59085+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59086+ pax_flags = pt_pax_flags;
59087+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59088+ pax_flags = ei_pax_flags;
59089+
59090+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
59091+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59092+ if ((__supported_pte_mask & _PAGE_NX))
59093+ pax_flags &= ~MF_PAX_SEGMEXEC;
59094+ else
59095+ pax_flags &= ~MF_PAX_PAGEEXEC;
59096+ }
59097+#endif
59098+
59099+ if (0 > pax_check_flags(&pax_flags))
59100+ return -EINVAL;
59101+
59102+ current->mm->pax_flags = pax_flags;
59103+ return 0;
59104+}
59105+#endif
59106+
59107 /*
59108 * These are the functions used to load ELF style executables and shared
59109 * libraries. There is no binary dependent code anywhere else.
59110@@ -556,6 +917,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
59111 {
59112 unsigned int random_variable = 0;
59113
59114+#ifdef CONFIG_PAX_RANDUSTACK
59115+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
59116+ return stack_top - current->mm->delta_stack;
59117+#endif
59118+
59119 if ((current->flags & PF_RANDOMIZE) &&
59120 !(current->personality & ADDR_NO_RANDOMIZE)) {
59121 random_variable = get_random_int() & STACK_RND_MASK;
59122@@ -574,7 +940,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59123 unsigned long load_addr = 0, load_bias = 0;
59124 int load_addr_set = 0;
59125 char * elf_interpreter = NULL;
59126- unsigned long error;
59127+ unsigned long error = 0;
59128 struct elf_phdr *elf_ppnt, *elf_phdata;
59129 unsigned long elf_bss, elf_brk;
59130 int retval, i;
59131@@ -589,6 +955,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59132 struct elfhdr elf_ex;
59133 struct elfhdr interp_elf_ex;
59134 } *loc;
59135+ unsigned long pax_task_size;
59136
59137 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
59138 if (!loc) {
59139@@ -726,6 +1093,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
59140 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
59141 may depend on the personality. */
59142 SET_PERSONALITY(loc->elf_ex);
59143+
59144+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59145+ current->mm->pax_flags = 0UL;
59146+#endif
59147+
59148+#ifdef CONFIG_PAX_DLRESOLVE
59149+ current->mm->call_dl_resolve = 0UL;
59150+#endif
59151+
59152+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59153+ current->mm->call_syscall = 0UL;
59154+#endif
59155+
59156+#ifdef CONFIG_PAX_ASLR
59157+ current->mm->delta_mmap = 0UL;
59158+ current->mm->delta_stack = 0UL;
59159+#endif
59160+
59161+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59162+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
59163+ send_sig(SIGKILL, current, 0);
59164+ goto out_free_dentry;
59165+ }
59166+#endif
59167+
59168+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59169+ pax_set_initial_flags(bprm);
59170+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
59171+ if (pax_set_initial_flags_func)
59172+ (pax_set_initial_flags_func)(bprm);
59173+#endif
59174+
59175+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59176+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
59177+ current->mm->context.user_cs_limit = PAGE_SIZE;
59178+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
59179+ }
59180+#endif
59181+
59182+#ifdef CONFIG_PAX_SEGMEXEC
59183+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
59184+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
59185+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
59186+ pax_task_size = SEGMEXEC_TASK_SIZE;
59187+ current->mm->def_flags |= VM_NOHUGEPAGE;
59188+ } else
59189+#endif
59190+
59191+ pax_task_size = TASK_SIZE;
59192+
59193+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
59194+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59195+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
59196+ put_cpu();
59197+ }
59198+#endif
59199+
59200+#ifdef CONFIG_PAX_ASLR
59201+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59202+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
59203+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
59204+ }
59205+#endif
59206+
59207+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
59208+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59209+ executable_stack = EXSTACK_DISABLE_X;
59210+ current->personality &= ~READ_IMPLIES_EXEC;
59211+ } else
59212+#endif
59213+
59214 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
59215 current->personality |= READ_IMPLIES_EXEC;
59216
59217@@ -815,6 +1253,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
59218 #else
59219 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
59220 #endif
59221+
59222+#ifdef CONFIG_PAX_RANDMMAP
59223+ /* PaX: randomize base address at the default exe base if requested */
59224+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
59225+#ifdef CONFIG_SPARC64
59226+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
59227+#else
59228+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
59229+#endif
59230+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
59231+ elf_flags |= MAP_FIXED;
59232+ }
59233+#endif
59234+
59235 }
59236
59237 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
59238@@ -847,9 +1299,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
59239 * allowed task size. Note that p_filesz must always be
59240 * <= p_memsz so it is only necessary to check p_memsz.
59241 */
59242- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59243- elf_ppnt->p_memsz > TASK_SIZE ||
59244- TASK_SIZE - elf_ppnt->p_memsz < k) {
59245+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59246+ elf_ppnt->p_memsz > pax_task_size ||
59247+ pax_task_size - elf_ppnt->p_memsz < k) {
59248 /* set_brk can never work. Avoid overflows. */
59249 send_sig(SIGKILL, current, 0);
59250 retval = -EINVAL;
59251@@ -888,17 +1340,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
59252 goto out_free_dentry;
59253 }
59254 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
59255- send_sig(SIGSEGV, current, 0);
59256- retval = -EFAULT; /* Nobody gets to see this, but.. */
59257- goto out_free_dentry;
59258+ /*
59259+ * This bss-zeroing can fail if the ELF
59260+ * file specifies odd protections. So
59261+ * we don't check the return value
59262+ */
59263 }
59264
59265+#ifdef CONFIG_PAX_RANDMMAP
59266+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59267+ unsigned long start, size, flags;
59268+ vm_flags_t vm_flags;
59269+
59270+ start = ELF_PAGEALIGN(elf_brk);
59271+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
59272+ flags = MAP_FIXED | MAP_PRIVATE;
59273+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
59274+
59275+ down_write(&current->mm->mmap_sem);
59276+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
59277+ retval = -ENOMEM;
59278+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
59279+// if (current->personality & ADDR_NO_RANDOMIZE)
59280+// vm_flags |= VM_READ | VM_MAYREAD;
59281+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
59282+ retval = IS_ERR_VALUE(start) ? start : 0;
59283+ }
59284+ up_write(&current->mm->mmap_sem);
59285+ if (retval == 0)
59286+ retval = set_brk(start + size, start + size + PAGE_SIZE);
59287+ if (retval < 0) {
59288+ send_sig(SIGKILL, current, 0);
59289+ goto out_free_dentry;
59290+ }
59291+ }
59292+#endif
59293+
59294 if (elf_interpreter) {
59295- unsigned long interp_map_addr = 0;
59296-
59297 elf_entry = load_elf_interp(&loc->interp_elf_ex,
59298 interpreter,
59299- &interp_map_addr,
59300 load_bias);
59301 if (!IS_ERR((void *)elf_entry)) {
59302 /*
59303@@ -1130,7 +1610,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
59304 * Decide what to dump of a segment, part, all or none.
59305 */
59306 static unsigned long vma_dump_size(struct vm_area_struct *vma,
59307- unsigned long mm_flags)
59308+ unsigned long mm_flags, long signr)
59309 {
59310 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
59311
59312@@ -1168,7 +1648,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
59313 if (vma->vm_file == NULL)
59314 return 0;
59315
59316- if (FILTER(MAPPED_PRIVATE))
59317+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
59318 goto whole;
59319
59320 /*
59321@@ -1375,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
59322 {
59323 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
59324 int i = 0;
59325- do
59326+ do {
59327 i += 2;
59328- while (auxv[i - 2] != AT_NULL);
59329+ } while (auxv[i - 2] != AT_NULL);
59330 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
59331 }
59332
59333@@ -1386,7 +1866,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
59334 {
59335 mm_segment_t old_fs = get_fs();
59336 set_fs(KERNEL_DS);
59337- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
59338+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
59339 set_fs(old_fs);
59340 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
59341 }
59342@@ -2010,14 +2490,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
59343 }
59344
59345 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
59346- unsigned long mm_flags)
59347+ struct coredump_params *cprm)
59348 {
59349 struct vm_area_struct *vma;
59350 size_t size = 0;
59351
59352 for (vma = first_vma(current, gate_vma); vma != NULL;
59353 vma = next_vma(vma, gate_vma))
59354- size += vma_dump_size(vma, mm_flags);
59355+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59356 return size;
59357 }
59358
59359@@ -2108,7 +2588,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59360
59361 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
59362
59363- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
59364+ offset += elf_core_vma_data_size(gate_vma, cprm);
59365 offset += elf_core_extra_data_size();
59366 e_shoff = offset;
59367
59368@@ -2136,7 +2616,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59369 phdr.p_offset = offset;
59370 phdr.p_vaddr = vma->vm_start;
59371 phdr.p_paddr = 0;
59372- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
59373+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59374 phdr.p_memsz = vma->vm_end - vma->vm_start;
59375 offset += phdr.p_filesz;
59376 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
59377@@ -2169,7 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59378 unsigned long addr;
59379 unsigned long end;
59380
59381- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
59382+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59383
59384 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
59385 struct page *page;
59386@@ -2210,6 +2690,167 @@ out:
59387
59388 #endif /* CONFIG_ELF_CORE */
59389
59390+#ifdef CONFIG_PAX_MPROTECT
59391+/* PaX: non-PIC ELF libraries need relocations on their executable segments
59392+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
59393+ * we'll remove VM_MAYWRITE for good on RELRO segments.
59394+ *
59395+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
59396+ * basis because we want to allow the common case and not the special ones.
59397+ */
59398+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
59399+{
59400+ struct elfhdr elf_h;
59401+ struct elf_phdr elf_p;
59402+ unsigned long i;
59403+ unsigned long oldflags;
59404+ bool is_textrel_rw, is_textrel_rx, is_relro;
59405+
59406+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
59407+ return;
59408+
59409+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
59410+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
59411+
59412+#ifdef CONFIG_PAX_ELFRELOCS
59413+ /* possible TEXTREL */
59414+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
59415+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
59416+#else
59417+ is_textrel_rw = false;
59418+ is_textrel_rx = false;
59419+#endif
59420+
59421+ /* possible RELRO */
59422+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
59423+
59424+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
59425+ return;
59426+
59427+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59428+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59429+
59430+#ifdef CONFIG_PAX_ETEXECRELOCS
59431+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59432+#else
59433+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
59434+#endif
59435+
59436+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59437+ !elf_check_arch(&elf_h) ||
59438+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59439+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59440+ return;
59441+
59442+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59443+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59444+ return;
59445+ switch (elf_p.p_type) {
59446+ case PT_DYNAMIC:
59447+ if (!is_textrel_rw && !is_textrel_rx)
59448+ continue;
59449+ i = 0UL;
59450+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
59451+ elf_dyn dyn;
59452+
59453+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
59454+ break;
59455+ if (dyn.d_tag == DT_NULL)
59456+ break;
59457+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59458+ gr_log_textrel(vma);
59459+ if (is_textrel_rw)
59460+ vma->vm_flags |= VM_MAYWRITE;
59461+ else
59462+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
59463+ vma->vm_flags &= ~VM_MAYWRITE;
59464+ break;
59465+ }
59466+ i++;
59467+ }
59468+ is_textrel_rw = false;
59469+ is_textrel_rx = false;
59470+ continue;
59471+
59472+ case PT_GNU_RELRO:
59473+ if (!is_relro)
59474+ continue;
59475+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59476+ vma->vm_flags &= ~VM_MAYWRITE;
59477+ is_relro = false;
59478+ continue;
59479+
59480+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59481+ case PT_PAX_FLAGS: {
59482+ const char *msg_mprotect = "", *msg_emutramp = "";
59483+ char *buffer_lib, *buffer_exe;
59484+
59485+ if (elf_p.p_flags & PF_NOMPROTECT)
59486+ msg_mprotect = "MPROTECT disabled";
59487+
59488+#ifdef CONFIG_PAX_EMUTRAMP
59489+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59490+ msg_emutramp = "EMUTRAMP enabled";
59491+#endif
59492+
59493+ if (!msg_mprotect[0] && !msg_emutramp[0])
59494+ continue;
59495+
59496+ if (!printk_ratelimit())
59497+ continue;
59498+
59499+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59500+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59501+ if (buffer_lib && buffer_exe) {
59502+ char *path_lib, *path_exe;
59503+
59504+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59505+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59506+
59507+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59508+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59509+
59510+ }
59511+ free_page((unsigned long)buffer_exe);
59512+ free_page((unsigned long)buffer_lib);
59513+ continue;
59514+ }
59515+#endif
59516+
59517+ }
59518+ }
59519+}
59520+#endif
59521+
59522+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59523+
59524+extern int grsec_enable_log_rwxmaps;
59525+
59526+static void elf_handle_mmap(struct file *file)
59527+{
59528+ struct elfhdr elf_h;
59529+ struct elf_phdr elf_p;
59530+ unsigned long i;
59531+
59532+ if (!grsec_enable_log_rwxmaps)
59533+ return;
59534+
59535+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59536+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59537+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59538+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59539+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59540+ return;
59541+
59542+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59543+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59544+ return;
59545+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59546+ gr_log_ptgnustack(file);
59547+ }
59548+}
59549+#endif
59550+
59551 static int __init init_elf_binfmt(void)
59552 {
59553 register_binfmt(&elf_format);
59554diff --git a/fs/block_dev.c b/fs/block_dev.c
59555index 6d72746..536d1db 100644
59556--- a/fs/block_dev.c
59557+++ b/fs/block_dev.c
59558@@ -701,7 +701,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59559 else if (bdev->bd_contains == bdev)
59560 return true; /* is a whole device which isn't held */
59561
59562- else if (whole->bd_holder == bd_may_claim)
59563+ else if (whole->bd_holder == (void *)bd_may_claim)
59564 return true; /* is a partition of a device that is being partitioned */
59565 else if (whole->bd_holder != NULL)
59566 return false; /* is a partition of a held device */
59567diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59568index 8bbcc24..6f10d78 100644
59569--- a/fs/btrfs/ctree.c
59570+++ b/fs/btrfs/ctree.c
59571@@ -1174,9 +1174,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59572 free_extent_buffer(buf);
59573 add_root_to_dirty_list(root);
59574 } else {
59575- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59576- parent_start = parent->start;
59577- else
59578+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59579+ if (parent)
59580+ parent_start = parent->start;
59581+ else
59582+ parent_start = 0;
59583+ } else
59584 parent_start = 0;
59585
59586 WARN_ON(trans->transid != btrfs_header_generation(parent));
59587diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59588index a2e90f8..5135e5f 100644
59589--- a/fs/btrfs/delayed-inode.c
59590+++ b/fs/btrfs/delayed-inode.c
59591@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59592
59593 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59594 {
59595- int seq = atomic_inc_return(&delayed_root->items_seq);
59596+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59597 if ((atomic_dec_return(&delayed_root->items) <
59598 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59599 waitqueue_active(&delayed_root->wait))
59600@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59601
59602 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59603 {
59604- int val = atomic_read(&delayed_root->items_seq);
59605+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59606
59607 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59608 return 1;
59609@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59610 int seq;
59611 int ret;
59612
59613- seq = atomic_read(&delayed_root->items_seq);
59614+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59615
59616 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59617 if (ret)
59618diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59619index f70119f..ab5894d 100644
59620--- a/fs/btrfs/delayed-inode.h
59621+++ b/fs/btrfs/delayed-inode.h
59622@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59623 */
59624 struct list_head prepare_list;
59625 atomic_t items; /* for delayed items */
59626- atomic_t items_seq; /* for delayed items */
59627+ atomic_unchecked_t items_seq; /* for delayed items */
59628 int nodes; /* for delayed nodes */
59629 wait_queue_head_t wait;
59630 };
59631@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59632 struct btrfs_delayed_root *delayed_root)
59633 {
59634 atomic_set(&delayed_root->items, 0);
59635- atomic_set(&delayed_root->items_seq, 0);
59636+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59637 delayed_root->nodes = 0;
59638 spin_lock_init(&delayed_root->lock);
59639 init_waitqueue_head(&delayed_root->wait);
59640diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
59641index b765d41..5a8b0c3 100644
59642--- a/fs/btrfs/ioctl.c
59643+++ b/fs/btrfs/ioctl.c
59644@@ -3975,9 +3975,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
59645 for (i = 0; i < num_types; i++) {
59646 struct btrfs_space_info *tmp;
59647
59648+ /* Don't copy in more than we allocated */
59649 if (!slot_count)
59650 break;
59651
59652+ slot_count--;
59653+
59654 info = NULL;
59655 rcu_read_lock();
59656 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
59657@@ -3999,10 +4002,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
59658 memcpy(dest, &space, sizeof(space));
59659 dest++;
59660 space_args.total_spaces++;
59661- slot_count--;
59662 }
59663- if (!slot_count)
59664- break;
59665 }
59666 up_read(&info->groups_sem);
59667 }
59668diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59669index c4124de..d7613eb6 100644
59670--- a/fs/btrfs/super.c
59671+++ b/fs/btrfs/super.c
59672@@ -270,7 +270,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59673 function, line, errstr);
59674 return;
59675 }
59676- ACCESS_ONCE(trans->transaction->aborted) = errno;
59677+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59678 /* Wake up anybody who may be waiting on this transaction */
59679 wake_up(&root->fs_info->transaction_wait);
59680 wake_up(&root->fs_info->transaction_blocked_wait);
59681diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59682index 12e5355..cdf30c6 100644
59683--- a/fs/btrfs/sysfs.c
59684+++ b/fs/btrfs/sysfs.c
59685@@ -475,7 +475,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59686 for (set = 0; set < FEAT_MAX; set++) {
59687 int i;
59688 struct attribute *attrs[2];
59689- struct attribute_group agroup = {
59690+ attribute_group_no_const agroup = {
59691 .name = "features",
59692 .attrs = attrs,
59693 };
59694diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59695index e2e798a..f454c18 100644
59696--- a/fs/btrfs/tree-log.h
59697+++ b/fs/btrfs/tree-log.h
59698@@ -41,7 +41,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59699 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59700 struct btrfs_trans_handle *trans)
59701 {
59702- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59703+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59704 }
59705
59706 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59707diff --git a/fs/buffer.c b/fs/buffer.c
59708index 72daaa5..60ffeb9 100644
59709--- a/fs/buffer.c
59710+++ b/fs/buffer.c
59711@@ -3432,7 +3432,7 @@ void __init buffer_init(void)
59712 bh_cachep = kmem_cache_create("buffer_head",
59713 sizeof(struct buffer_head), 0,
59714 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59715- SLAB_MEM_SPREAD),
59716+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59717 NULL);
59718
59719 /*
59720diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59721index fbb08e9..0fda764 100644
59722--- a/fs/cachefiles/bind.c
59723+++ b/fs/cachefiles/bind.c
59724@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59725 args);
59726
59727 /* start by checking things over */
59728- ASSERT(cache->fstop_percent >= 0 &&
59729- cache->fstop_percent < cache->fcull_percent &&
59730+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59731 cache->fcull_percent < cache->frun_percent &&
59732 cache->frun_percent < 100);
59733
59734- ASSERT(cache->bstop_percent >= 0 &&
59735- cache->bstop_percent < cache->bcull_percent &&
59736+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59737 cache->bcull_percent < cache->brun_percent &&
59738 cache->brun_percent < 100);
59739
59740diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59741index ce1b115..4a6852c 100644
59742--- a/fs/cachefiles/daemon.c
59743+++ b/fs/cachefiles/daemon.c
59744@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59745 if (n > buflen)
59746 return -EMSGSIZE;
59747
59748- if (copy_to_user(_buffer, buffer, n) != 0)
59749+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59750 return -EFAULT;
59751
59752 return n;
59753@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59754 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59755 return -EIO;
59756
59757- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59758+ if (datalen > PAGE_SIZE - 1)
59759 return -EOPNOTSUPP;
59760
59761 /* drag the command string into the kernel so we can parse it */
59762@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59763 if (args[0] != '%' || args[1] != '\0')
59764 return -EINVAL;
59765
59766- if (fstop < 0 || fstop >= cache->fcull_percent)
59767+ if (fstop >= cache->fcull_percent)
59768 return cachefiles_daemon_range_error(cache, args);
59769
59770 cache->fstop_percent = fstop;
59771@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59772 if (args[0] != '%' || args[1] != '\0')
59773 return -EINVAL;
59774
59775- if (bstop < 0 || bstop >= cache->bcull_percent)
59776+ if (bstop >= cache->bcull_percent)
59777 return cachefiles_daemon_range_error(cache, args);
59778
59779 cache->bstop_percent = bstop;
59780diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59781index 8c52472..c4e3a69 100644
59782--- a/fs/cachefiles/internal.h
59783+++ b/fs/cachefiles/internal.h
59784@@ -66,7 +66,7 @@ struct cachefiles_cache {
59785 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59786 struct rb_root active_nodes; /* active nodes (can't be culled) */
59787 rwlock_t active_lock; /* lock for active_nodes */
59788- atomic_t gravecounter; /* graveyard uniquifier */
59789+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59790 unsigned frun_percent; /* when to stop culling (% files) */
59791 unsigned fcull_percent; /* when to start culling (% files) */
59792 unsigned fstop_percent; /* when to stop allocating (% files) */
59793@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59794 * proc.c
59795 */
59796 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59797-extern atomic_t cachefiles_lookup_histogram[HZ];
59798-extern atomic_t cachefiles_mkdir_histogram[HZ];
59799-extern atomic_t cachefiles_create_histogram[HZ];
59800+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59801+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59802+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59803
59804 extern int __init cachefiles_proc_init(void);
59805 extern void cachefiles_proc_cleanup(void);
59806 static inline
59807-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59808+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59809 {
59810 unsigned long jif = jiffies - start_jif;
59811 if (jif >= HZ)
59812 jif = HZ - 1;
59813- atomic_inc(&histogram[jif]);
59814+ atomic_inc_unchecked(&histogram[jif]);
59815 }
59816
59817 #else
59818diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59819index dad7d95..07475af 100644
59820--- a/fs/cachefiles/namei.c
59821+++ b/fs/cachefiles/namei.c
59822@@ -312,7 +312,7 @@ try_again:
59823 /* first step is to make up a grave dentry in the graveyard */
59824 sprintf(nbuffer, "%08x%08x",
59825 (uint32_t) get_seconds(),
59826- (uint32_t) atomic_inc_return(&cache->gravecounter));
59827+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59828
59829 /* do the multiway lock magic */
59830 trap = lock_rename(cache->graveyard, dir);
59831diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59832index eccd339..4c1d995 100644
59833--- a/fs/cachefiles/proc.c
59834+++ b/fs/cachefiles/proc.c
59835@@ -14,9 +14,9 @@
59836 #include <linux/seq_file.h>
59837 #include "internal.h"
59838
59839-atomic_t cachefiles_lookup_histogram[HZ];
59840-atomic_t cachefiles_mkdir_histogram[HZ];
59841-atomic_t cachefiles_create_histogram[HZ];
59842+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59843+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59844+atomic_unchecked_t cachefiles_create_histogram[HZ];
59845
59846 /*
59847 * display the latency histogram
59848@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59849 return 0;
59850 default:
59851 index = (unsigned long) v - 3;
59852- x = atomic_read(&cachefiles_lookup_histogram[index]);
59853- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59854- z = atomic_read(&cachefiles_create_histogram[index]);
59855+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59856+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59857+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59858 if (x == 0 && y == 0 && z == 0)
59859 return 0;
59860
59861diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
59862index 25e745b..220e604 100644
59863--- a/fs/cachefiles/rdwr.c
59864+++ b/fs/cachefiles/rdwr.c
59865@@ -937,7 +937,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
59866 old_fs = get_fs();
59867 set_fs(KERNEL_DS);
59868 ret = file->f_op->write(
59869- file, (const void __user *) data, len, &pos);
59870+ file, (const void __force_user *) data, len, &pos);
59871 set_fs(old_fs);
59872 kunmap(page);
59873 file_end_write(file);
59874diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59875index c29d6ae..719b9bb 100644
59876--- a/fs/ceph/dir.c
59877+++ b/fs/ceph/dir.c
59878@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59879 struct dentry *dentry, *last;
59880 struct ceph_dentry_info *di;
59881 int err = 0;
59882+ char d_name[DNAME_INLINE_LEN];
59883+ const unsigned char *name;
59884
59885 /* claim ref on last dentry we returned */
59886 last = fi->dentry;
59887@@ -192,7 +194,12 @@ more:
59888
59889 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
59890 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
59891- if (!dir_emit(ctx, dentry->d_name.name,
59892+ name = dentry->d_name.name;
59893+ if (name == dentry->d_iname) {
59894+ memcpy(d_name, name, dentry->d_name.len);
59895+ name = d_name;
59896+ }
59897+ if (!dir_emit(ctx, name,
59898 dentry->d_name.len,
59899 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59900 dentry->d_inode->i_mode >> 12)) {
59901@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59902 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59903 struct ceph_mds_client *mdsc = fsc->mdsc;
59904 unsigned frag = fpos_frag(ctx->pos);
59905- int off = fpos_off(ctx->pos);
59906+ unsigned int off = fpos_off(ctx->pos);
59907 int err;
59908 u32 ftype;
59909 struct ceph_mds_reply_info_parsed *rinfo;
59910diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
59911index a822a6e..4644256 100644
59912--- a/fs/ceph/ioctl.c
59913+++ b/fs/ceph/ioctl.c
59914@@ -41,7 +41,7 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
59915 /* validate striping parameters */
59916 if ((l->object_size & ~PAGE_MASK) ||
59917 (l->stripe_unit & ~PAGE_MASK) ||
59918- (l->stripe_unit != 0 &&
59919+ ((unsigned)l->stripe_unit != 0 &&
59920 ((unsigned)l->object_size % (unsigned)l->stripe_unit)))
59921 return -EINVAL;
59922
59923diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59924index f6e1237..796ffd1 100644
59925--- a/fs/ceph/super.c
59926+++ b/fs/ceph/super.c
59927@@ -895,7 +895,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59928 /*
59929 * construct our own bdi so we can control readahead, etc.
59930 */
59931-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59932+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59933
59934 static int ceph_register_bdi(struct super_block *sb,
59935 struct ceph_fs_client *fsc)
59936@@ -912,7 +912,7 @@ static int ceph_register_bdi(struct super_block *sb,
59937 default_backing_dev_info.ra_pages;
59938
59939 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59940- atomic_long_inc_return(&bdi_seq));
59941+ atomic_long_inc_return_unchecked(&bdi_seq));
59942 if (!err)
59943 sb->s_bdi = &fsc->backing_dev_info;
59944 return err;
59945diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59946index 44ec726..bcb06a3 100644
59947--- a/fs/cifs/cifs_debug.c
59948+++ b/fs/cifs/cifs_debug.c
59949@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59950
59951 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
59952 #ifdef CONFIG_CIFS_STATS2
59953- atomic_set(&totBufAllocCount, 0);
59954- atomic_set(&totSmBufAllocCount, 0);
59955+ atomic_set_unchecked(&totBufAllocCount, 0);
59956+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59957 #endif /* CONFIG_CIFS_STATS2 */
59958 spin_lock(&cifs_tcp_ses_lock);
59959 list_for_each(tmp1, &cifs_tcp_ses_list) {
59960@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59961 tcon = list_entry(tmp3,
59962 struct cifs_tcon,
59963 tcon_list);
59964- atomic_set(&tcon->num_smbs_sent, 0);
59965+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59966 if (server->ops->clear_stats)
59967 server->ops->clear_stats(tcon);
59968 }
59969@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59970 smBufAllocCount.counter, cifs_min_small);
59971 #ifdef CONFIG_CIFS_STATS2
59972 seq_printf(m, "Total Large %d Small %d Allocations\n",
59973- atomic_read(&totBufAllocCount),
59974- atomic_read(&totSmBufAllocCount));
59975+ atomic_read_unchecked(&totBufAllocCount),
59976+ atomic_read_unchecked(&totSmBufAllocCount));
59977 #endif /* CONFIG_CIFS_STATS2 */
59978
59979 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59980@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59981 if (tcon->need_reconnect)
59982 seq_puts(m, "\tDISCONNECTED ");
59983 seq_printf(m, "\nSMBs: %d",
59984- atomic_read(&tcon->num_smbs_sent));
59985+ atomic_read_unchecked(&tcon->num_smbs_sent));
59986 if (server->ops->print_stats)
59987 server->ops->print_stats(m, tcon);
59988 }
59989diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59990index 889b984..fcb8431 100644
59991--- a/fs/cifs/cifsfs.c
59992+++ b/fs/cifs/cifsfs.c
59993@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
59994 */
59995 cifs_req_cachep = kmem_cache_create("cifs_request",
59996 CIFSMaxBufSize + max_hdr_size, 0,
59997- SLAB_HWCACHE_ALIGN, NULL);
59998+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
59999 if (cifs_req_cachep == NULL)
60000 return -ENOMEM;
60001
60002@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
60003 efficient to alloc 1 per page off the slab compared to 17K (5page)
60004 alloc of large cifs buffers even when page debugging is on */
60005 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
60006- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
60007+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
60008 NULL);
60009 if (cifs_sm_req_cachep == NULL) {
60010 mempool_destroy(cifs_req_poolp);
60011@@ -1204,8 +1204,8 @@ init_cifs(void)
60012 atomic_set(&bufAllocCount, 0);
60013 atomic_set(&smBufAllocCount, 0);
60014 #ifdef CONFIG_CIFS_STATS2
60015- atomic_set(&totBufAllocCount, 0);
60016- atomic_set(&totSmBufAllocCount, 0);
60017+ atomic_set_unchecked(&totBufAllocCount, 0);
60018+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60019 #endif /* CONFIG_CIFS_STATS2 */
60020
60021 atomic_set(&midCount, 0);
60022diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
60023index 25b8392..01e46dc 100644
60024--- a/fs/cifs/cifsglob.h
60025+++ b/fs/cifs/cifsglob.h
60026@@ -821,35 +821,35 @@ struct cifs_tcon {
60027 __u16 Flags; /* optional support bits */
60028 enum statusEnum tidStatus;
60029 #ifdef CONFIG_CIFS_STATS
60030- atomic_t num_smbs_sent;
60031+ atomic_unchecked_t num_smbs_sent;
60032 union {
60033 struct {
60034- atomic_t num_writes;
60035- atomic_t num_reads;
60036- atomic_t num_flushes;
60037- atomic_t num_oplock_brks;
60038- atomic_t num_opens;
60039- atomic_t num_closes;
60040- atomic_t num_deletes;
60041- atomic_t num_mkdirs;
60042- atomic_t num_posixopens;
60043- atomic_t num_posixmkdirs;
60044- atomic_t num_rmdirs;
60045- atomic_t num_renames;
60046- atomic_t num_t2renames;
60047- atomic_t num_ffirst;
60048- atomic_t num_fnext;
60049- atomic_t num_fclose;
60050- atomic_t num_hardlinks;
60051- atomic_t num_symlinks;
60052- atomic_t num_locks;
60053- atomic_t num_acl_get;
60054- atomic_t num_acl_set;
60055+ atomic_unchecked_t num_writes;
60056+ atomic_unchecked_t num_reads;
60057+ atomic_unchecked_t num_flushes;
60058+ atomic_unchecked_t num_oplock_brks;
60059+ atomic_unchecked_t num_opens;
60060+ atomic_unchecked_t num_closes;
60061+ atomic_unchecked_t num_deletes;
60062+ atomic_unchecked_t num_mkdirs;
60063+ atomic_unchecked_t num_posixopens;
60064+ atomic_unchecked_t num_posixmkdirs;
60065+ atomic_unchecked_t num_rmdirs;
60066+ atomic_unchecked_t num_renames;
60067+ atomic_unchecked_t num_t2renames;
60068+ atomic_unchecked_t num_ffirst;
60069+ atomic_unchecked_t num_fnext;
60070+ atomic_unchecked_t num_fclose;
60071+ atomic_unchecked_t num_hardlinks;
60072+ atomic_unchecked_t num_symlinks;
60073+ atomic_unchecked_t num_locks;
60074+ atomic_unchecked_t num_acl_get;
60075+ atomic_unchecked_t num_acl_set;
60076 } cifs_stats;
60077 #ifdef CONFIG_CIFS_SMB2
60078 struct {
60079- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60080- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60081+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60082+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60083 } smb2_stats;
60084 #endif /* CONFIG_CIFS_SMB2 */
60085 } stats;
60086@@ -1190,7 +1190,7 @@ convert_delimiter(char *path, char delim)
60087 }
60088
60089 #ifdef CONFIG_CIFS_STATS
60090-#define cifs_stats_inc atomic_inc
60091+#define cifs_stats_inc atomic_inc_unchecked
60092
60093 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
60094 unsigned int bytes)
60095@@ -1557,8 +1557,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
60096 /* Various Debug counters */
60097 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
60098 #ifdef CONFIG_CIFS_STATS2
60099-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
60100-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
60101+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
60102+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
60103 #endif
60104 GLOBAL_EXTERN atomic_t smBufAllocCount;
60105 GLOBAL_EXTERN atomic_t midCount;
60106diff --git a/fs/cifs/file.c b/fs/cifs/file.c
60107index 5f29354..359bc0d 100644
60108--- a/fs/cifs/file.c
60109+++ b/fs/cifs/file.c
60110@@ -2056,10 +2056,14 @@ static int cifs_writepages(struct address_space *mapping,
60111 index = mapping->writeback_index; /* Start from prev offset */
60112 end = -1;
60113 } else {
60114- index = wbc->range_start >> PAGE_CACHE_SHIFT;
60115- end = wbc->range_end >> PAGE_CACHE_SHIFT;
60116- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
60117+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
60118 range_whole = true;
60119+ index = 0;
60120+ end = ULONG_MAX;
60121+ } else {
60122+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
60123+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
60124+ }
60125 scanned = true;
60126 }
60127 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
60128diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
60129index b7415d5..3984ec0 100644
60130--- a/fs/cifs/misc.c
60131+++ b/fs/cifs/misc.c
60132@@ -170,7 +170,7 @@ cifs_buf_get(void)
60133 memset(ret_buf, 0, buf_size + 3);
60134 atomic_inc(&bufAllocCount);
60135 #ifdef CONFIG_CIFS_STATS2
60136- atomic_inc(&totBufAllocCount);
60137+ atomic_inc_unchecked(&totBufAllocCount);
60138 #endif /* CONFIG_CIFS_STATS2 */
60139 }
60140
60141@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
60142 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
60143 atomic_inc(&smBufAllocCount);
60144 #ifdef CONFIG_CIFS_STATS2
60145- atomic_inc(&totSmBufAllocCount);
60146+ atomic_inc_unchecked(&totSmBufAllocCount);
60147 #endif /* CONFIG_CIFS_STATS2 */
60148
60149 }
60150diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
60151index 52131d8..fd79e97 100644
60152--- a/fs/cifs/smb1ops.c
60153+++ b/fs/cifs/smb1ops.c
60154@@ -626,27 +626,27 @@ static void
60155 cifs_clear_stats(struct cifs_tcon *tcon)
60156 {
60157 #ifdef CONFIG_CIFS_STATS
60158- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
60159- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
60160- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
60161- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60162- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
60163- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
60164- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60165- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
60166- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
60167- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
60168- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
60169- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
60170- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
60171- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
60172- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
60173- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
60174- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
60175- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
60176- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
60177- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
60178- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
60179+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
60180+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
60181+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
60182+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60183+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
60184+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
60185+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60186+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
60187+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
60188+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
60189+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
60190+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
60191+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
60192+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
60193+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
60194+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
60195+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
60196+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
60197+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
60198+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
60199+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
60200 #endif
60201 }
60202
60203@@ -655,36 +655,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
60204 {
60205 #ifdef CONFIG_CIFS_STATS
60206 seq_printf(m, " Oplocks breaks: %d",
60207- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
60208+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
60209 seq_printf(m, "\nReads: %d Bytes: %llu",
60210- atomic_read(&tcon->stats.cifs_stats.num_reads),
60211+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
60212 (long long)(tcon->bytes_read));
60213 seq_printf(m, "\nWrites: %d Bytes: %llu",
60214- atomic_read(&tcon->stats.cifs_stats.num_writes),
60215+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
60216 (long long)(tcon->bytes_written));
60217 seq_printf(m, "\nFlushes: %d",
60218- atomic_read(&tcon->stats.cifs_stats.num_flushes));
60219+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
60220 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
60221- atomic_read(&tcon->stats.cifs_stats.num_locks),
60222- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
60223- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
60224+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
60225+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
60226+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
60227 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
60228- atomic_read(&tcon->stats.cifs_stats.num_opens),
60229- atomic_read(&tcon->stats.cifs_stats.num_closes),
60230- atomic_read(&tcon->stats.cifs_stats.num_deletes));
60231+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
60232+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
60233+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
60234 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
60235- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
60236- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
60237+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
60238+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
60239 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
60240- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
60241- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
60242+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
60243+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
60244 seq_printf(m, "\nRenames: %d T2 Renames %d",
60245- atomic_read(&tcon->stats.cifs_stats.num_renames),
60246- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
60247+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
60248+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
60249 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
60250- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
60251- atomic_read(&tcon->stats.cifs_stats.num_fnext),
60252- atomic_read(&tcon->stats.cifs_stats.num_fclose));
60253+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
60254+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
60255+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
60256 #endif
60257 }
60258
60259diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
60260index f522193..586121b 100644
60261--- a/fs/cifs/smb2ops.c
60262+++ b/fs/cifs/smb2ops.c
60263@@ -414,8 +414,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
60264 #ifdef CONFIG_CIFS_STATS
60265 int i;
60266 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
60267- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
60268- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
60269+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
60270+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
60271 }
60272 #endif
60273 }
60274@@ -455,65 +455,65 @@ static void
60275 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
60276 {
60277 #ifdef CONFIG_CIFS_STATS
60278- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
60279- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
60280+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
60281+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
60282 seq_printf(m, "\nNegotiates: %d sent %d failed",
60283- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
60284- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
60285+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
60286+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
60287 seq_printf(m, "\nSessionSetups: %d sent %d failed",
60288- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
60289- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
60290+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
60291+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
60292 seq_printf(m, "\nLogoffs: %d sent %d failed",
60293- atomic_read(&sent[SMB2_LOGOFF_HE]),
60294- atomic_read(&failed[SMB2_LOGOFF_HE]));
60295+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
60296+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
60297 seq_printf(m, "\nTreeConnects: %d sent %d failed",
60298- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
60299- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
60300+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
60301+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
60302 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
60303- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
60304- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
60305+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
60306+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
60307 seq_printf(m, "\nCreates: %d sent %d failed",
60308- atomic_read(&sent[SMB2_CREATE_HE]),
60309- atomic_read(&failed[SMB2_CREATE_HE]));
60310+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
60311+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
60312 seq_printf(m, "\nCloses: %d sent %d failed",
60313- atomic_read(&sent[SMB2_CLOSE_HE]),
60314- atomic_read(&failed[SMB2_CLOSE_HE]));
60315+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
60316+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
60317 seq_printf(m, "\nFlushes: %d sent %d failed",
60318- atomic_read(&sent[SMB2_FLUSH_HE]),
60319- atomic_read(&failed[SMB2_FLUSH_HE]));
60320+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
60321+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
60322 seq_printf(m, "\nReads: %d sent %d failed",
60323- atomic_read(&sent[SMB2_READ_HE]),
60324- atomic_read(&failed[SMB2_READ_HE]));
60325+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
60326+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
60327 seq_printf(m, "\nWrites: %d sent %d failed",
60328- atomic_read(&sent[SMB2_WRITE_HE]),
60329- atomic_read(&failed[SMB2_WRITE_HE]));
60330+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
60331+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
60332 seq_printf(m, "\nLocks: %d sent %d failed",
60333- atomic_read(&sent[SMB2_LOCK_HE]),
60334- atomic_read(&failed[SMB2_LOCK_HE]));
60335+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
60336+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
60337 seq_printf(m, "\nIOCTLs: %d sent %d failed",
60338- atomic_read(&sent[SMB2_IOCTL_HE]),
60339- atomic_read(&failed[SMB2_IOCTL_HE]));
60340+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
60341+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
60342 seq_printf(m, "\nCancels: %d sent %d failed",
60343- atomic_read(&sent[SMB2_CANCEL_HE]),
60344- atomic_read(&failed[SMB2_CANCEL_HE]));
60345+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
60346+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
60347 seq_printf(m, "\nEchos: %d sent %d failed",
60348- atomic_read(&sent[SMB2_ECHO_HE]),
60349- atomic_read(&failed[SMB2_ECHO_HE]));
60350+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
60351+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
60352 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
60353- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
60354- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
60355+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
60356+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
60357 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
60358- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
60359- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
60360+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
60361+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
60362 seq_printf(m, "\nQueryInfos: %d sent %d failed",
60363- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
60364- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
60365+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
60366+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
60367 seq_printf(m, "\nSetInfos: %d sent %d failed",
60368- atomic_read(&sent[SMB2_SET_INFO_HE]),
60369- atomic_read(&failed[SMB2_SET_INFO_HE]));
60370+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
60371+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
60372 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
60373- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
60374- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
60375+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
60376+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
60377 #endif
60378 }
60379
60380diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
60381index 74b3a66..0c709f3 100644
60382--- a/fs/cifs/smb2pdu.c
60383+++ b/fs/cifs/smb2pdu.c
60384@@ -2143,8 +2143,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
60385 default:
60386 cifs_dbg(VFS, "info level %u isn't supported\n",
60387 srch_inf->info_level);
60388- rc = -EINVAL;
60389- goto qdir_exit;
60390+ return -EINVAL;
60391 }
60392
60393 req->FileIndex = cpu_to_le32(index);
60394diff --git a/fs/coda/cache.c b/fs/coda/cache.c
60395index 278f8fd..e69c52d 100644
60396--- a/fs/coda/cache.c
60397+++ b/fs/coda/cache.c
60398@@ -24,7 +24,7 @@
60399 #include "coda_linux.h"
60400 #include "coda_cache.h"
60401
60402-static atomic_t permission_epoch = ATOMIC_INIT(0);
60403+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
60404
60405 /* replace or extend an acl cache hit */
60406 void coda_cache_enter(struct inode *inode, int mask)
60407@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
60408 struct coda_inode_info *cii = ITOC(inode);
60409
60410 spin_lock(&cii->c_lock);
60411- cii->c_cached_epoch = atomic_read(&permission_epoch);
60412+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
60413 if (!uid_eq(cii->c_uid, current_fsuid())) {
60414 cii->c_uid = current_fsuid();
60415 cii->c_cached_perm = mask;
60416@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
60417 {
60418 struct coda_inode_info *cii = ITOC(inode);
60419 spin_lock(&cii->c_lock);
60420- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
60421+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
60422 spin_unlock(&cii->c_lock);
60423 }
60424
60425 /* remove all acl caches */
60426 void coda_cache_clear_all(struct super_block *sb)
60427 {
60428- atomic_inc(&permission_epoch);
60429+ atomic_inc_unchecked(&permission_epoch);
60430 }
60431
60432
60433@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
60434 spin_lock(&cii->c_lock);
60435 hit = (mask & cii->c_cached_perm) == mask &&
60436 uid_eq(cii->c_uid, current_fsuid()) &&
60437- cii->c_cached_epoch == atomic_read(&permission_epoch);
60438+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
60439 spin_unlock(&cii->c_lock);
60440
60441 return hit;
60442diff --git a/fs/compat.c b/fs/compat.c
60443index 66d3d3c..9c10175 100644
60444--- a/fs/compat.c
60445+++ b/fs/compat.c
60446@@ -54,7 +54,7 @@
60447 #include <asm/ioctls.h>
60448 #include "internal.h"
60449
60450-int compat_log = 1;
60451+int compat_log = 0;
60452
60453 int compat_printk(const char *fmt, ...)
60454 {
60455@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
60456
60457 set_fs(KERNEL_DS);
60458 /* The __user pointer cast is valid because of the set_fs() */
60459- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
60460+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
60461 set_fs(oldfs);
60462 /* truncating is ok because it's a user address */
60463 if (!ret)
60464@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
60465 goto out;
60466
60467 ret = -EINVAL;
60468- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60469+ if (nr_segs > UIO_MAXIOV)
60470 goto out;
60471 if (nr_segs > fast_segs) {
60472 ret = -ENOMEM;
60473@@ -850,6 +850,7 @@ struct compat_old_linux_dirent {
60474 struct compat_readdir_callback {
60475 struct dir_context ctx;
60476 struct compat_old_linux_dirent __user *dirent;
60477+ struct file * file;
60478 int result;
60479 };
60480
60481@@ -867,6 +868,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
60482 buf->result = -EOVERFLOW;
60483 return -EOVERFLOW;
60484 }
60485+
60486+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60487+ return 0;
60488+
60489 buf->result++;
60490 dirent = buf->dirent;
60491 if (!access_ok(VERIFY_WRITE, dirent,
60492@@ -898,6 +903,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60493 if (!f.file)
60494 return -EBADF;
60495
60496+ buf.file = f.file;
60497 error = iterate_dir(f.file, &buf.ctx);
60498 if (buf.result)
60499 error = buf.result;
60500@@ -917,6 +923,7 @@ struct compat_getdents_callback {
60501 struct dir_context ctx;
60502 struct compat_linux_dirent __user *current_dir;
60503 struct compat_linux_dirent __user *previous;
60504+ struct file * file;
60505 int count;
60506 int error;
60507 };
60508@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
60509 buf->error = -EOVERFLOW;
60510 return -EOVERFLOW;
60511 }
60512+
60513+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60514+ return 0;
60515+
60516 dirent = buf->previous;
60517 if (dirent) {
60518 if (__put_user(offset, &dirent->d_off))
60519@@ -983,6 +994,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60520 if (!f.file)
60521 return -EBADF;
60522
60523+ buf.file = f.file;
60524 error = iterate_dir(f.file, &buf.ctx);
60525 if (error >= 0)
60526 error = buf.error;
60527@@ -1003,6 +1015,7 @@ struct compat_getdents_callback64 {
60528 struct dir_context ctx;
60529 struct linux_dirent64 __user *current_dir;
60530 struct linux_dirent64 __user *previous;
60531+ struct file * file;
60532 int count;
60533 int error;
60534 };
60535@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
60536 buf->error = -EINVAL; /* only used if we fail.. */
60537 if (reclen > buf->count)
60538 return -EINVAL;
60539+
60540+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60541+ return 0;
60542+
60543 dirent = buf->previous;
60544
60545 if (dirent) {
60546@@ -1068,6 +1085,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60547 if (!f.file)
60548 return -EBADF;
60549
60550+ buf.file = f.file;
60551 error = iterate_dir(f.file, &buf.ctx);
60552 if (error >= 0)
60553 error = buf.error;
60554diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60555index 4d24d17..4f8c09e 100644
60556--- a/fs/compat_binfmt_elf.c
60557+++ b/fs/compat_binfmt_elf.c
60558@@ -30,11 +30,13 @@
60559 #undef elf_phdr
60560 #undef elf_shdr
60561 #undef elf_note
60562+#undef elf_dyn
60563 #undef elf_addr_t
60564 #define elfhdr elf32_hdr
60565 #define elf_phdr elf32_phdr
60566 #define elf_shdr elf32_shdr
60567 #define elf_note elf32_note
60568+#define elf_dyn Elf32_Dyn
60569 #define elf_addr_t Elf32_Addr
60570
60571 /*
60572diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60573index afec645..9c65620 100644
60574--- a/fs/compat_ioctl.c
60575+++ b/fs/compat_ioctl.c
60576@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60577 return -EFAULT;
60578 if (__get_user(udata, &ss32->iomem_base))
60579 return -EFAULT;
60580- ss.iomem_base = compat_ptr(udata);
60581+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60582 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60583 __get_user(ss.port_high, &ss32->port_high))
60584 return -EFAULT;
60585@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60586 for (i = 0; i < nmsgs; i++) {
60587 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60588 return -EFAULT;
60589- if (get_user(datap, &umsgs[i].buf) ||
60590- put_user(compat_ptr(datap), &tmsgs[i].buf))
60591+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60592+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60593 return -EFAULT;
60594 }
60595 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60596@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60597 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60598 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60599 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60600- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60601+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60602 return -EFAULT;
60603
60604 return ioctl_preallocate(file, p);
60605@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60606 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60607 {
60608 unsigned int a, b;
60609- a = *(unsigned int *)p;
60610- b = *(unsigned int *)q;
60611+ a = *(const unsigned int *)p;
60612+ b = *(const unsigned int *)q;
60613 if (a > b)
60614 return 1;
60615 if (a < b)
60616diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60617index 668dcab..daebcd6 100644
60618--- a/fs/configfs/dir.c
60619+++ b/fs/configfs/dir.c
60620@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60621 }
60622 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60623 struct configfs_dirent *next;
60624- const char *name;
60625+ const unsigned char * name;
60626+ char d_name[sizeof(next->s_dentry->d_iname)];
60627 int len;
60628 struct inode *inode = NULL;
60629
60630@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60631 continue;
60632
60633 name = configfs_get_name(next);
60634- len = strlen(name);
60635+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60636+ len = next->s_dentry->d_name.len;
60637+ memcpy(d_name, name, len);
60638+ name = d_name;
60639+ } else
60640+ len = strlen(name);
60641
60642 /*
60643 * We'll have a dentry and an inode for
60644diff --git a/fs/coredump.c b/fs/coredump.c
60645index a93f7e6..d58bcbe 100644
60646--- a/fs/coredump.c
60647+++ b/fs/coredump.c
60648@@ -442,8 +442,8 @@ static void wait_for_dump_helpers(struct file *file)
60649 struct pipe_inode_info *pipe = file->private_data;
60650
60651 pipe_lock(pipe);
60652- pipe->readers++;
60653- pipe->writers--;
60654+ atomic_inc(&pipe->readers);
60655+ atomic_dec(&pipe->writers);
60656 wake_up_interruptible_sync(&pipe->wait);
60657 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60658 pipe_unlock(pipe);
60659@@ -452,11 +452,11 @@ static void wait_for_dump_helpers(struct file *file)
60660 * We actually want wait_event_freezable() but then we need
60661 * to clear TIF_SIGPENDING and improve dump_interrupted().
60662 */
60663- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60664+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60665
60666 pipe_lock(pipe);
60667- pipe->readers--;
60668- pipe->writers++;
60669+ atomic_dec(&pipe->readers);
60670+ atomic_inc(&pipe->writers);
60671 pipe_unlock(pipe);
60672 }
60673
60674@@ -503,7 +503,9 @@ void do_coredump(const siginfo_t *siginfo)
60675 struct files_struct *displaced;
60676 bool need_nonrelative = false;
60677 bool core_dumped = false;
60678- static atomic_t core_dump_count = ATOMIC_INIT(0);
60679+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60680+ long signr = siginfo->si_signo;
60681+ int dumpable;
60682 struct coredump_params cprm = {
60683 .siginfo = siginfo,
60684 .regs = signal_pt_regs(),
60685@@ -516,12 +518,17 @@ void do_coredump(const siginfo_t *siginfo)
60686 .mm_flags = mm->flags,
60687 };
60688
60689- audit_core_dumps(siginfo->si_signo);
60690+ audit_core_dumps(signr);
60691+
60692+ dumpable = __get_dumpable(cprm.mm_flags);
60693+
60694+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60695+ gr_handle_brute_attach(dumpable);
60696
60697 binfmt = mm->binfmt;
60698 if (!binfmt || !binfmt->core_dump)
60699 goto fail;
60700- if (!__get_dumpable(cprm.mm_flags))
60701+ if (!dumpable)
60702 goto fail;
60703
60704 cred = prepare_creds();
60705@@ -540,7 +547,7 @@ void do_coredump(const siginfo_t *siginfo)
60706 need_nonrelative = true;
60707 }
60708
60709- retval = coredump_wait(siginfo->si_signo, &core_state);
60710+ retval = coredump_wait(signr, &core_state);
60711 if (retval < 0)
60712 goto fail_creds;
60713
60714@@ -583,7 +590,7 @@ void do_coredump(const siginfo_t *siginfo)
60715 }
60716 cprm.limit = RLIM_INFINITY;
60717
60718- dump_count = atomic_inc_return(&core_dump_count);
60719+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60720 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60721 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60722 task_tgid_vnr(current), current->comm);
60723@@ -615,6 +622,8 @@ void do_coredump(const siginfo_t *siginfo)
60724 } else {
60725 struct inode *inode;
60726
60727+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60728+
60729 if (cprm.limit < binfmt->min_coredump)
60730 goto fail_unlock;
60731
60732@@ -673,7 +682,7 @@ close_fail:
60733 filp_close(cprm.file, NULL);
60734 fail_dropcount:
60735 if (ispipe)
60736- atomic_dec(&core_dump_count);
60737+ atomic_dec_unchecked(&core_dump_count);
60738 fail_unlock:
60739 kfree(cn.corename);
60740 coredump_finish(mm, core_dumped);
60741@@ -694,6 +703,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60742 struct file *file = cprm->file;
60743 loff_t pos = file->f_pos;
60744 ssize_t n;
60745+
60746+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60747 if (cprm->written + nr > cprm->limit)
60748 return 0;
60749 while (nr) {
60750diff --git a/fs/dcache.c b/fs/dcache.c
60751index 34b40be8..2003532 100644
60752--- a/fs/dcache.c
60753+++ b/fs/dcache.c
60754@@ -478,7 +478,7 @@ static void __dentry_kill(struct dentry *dentry)
60755 * dentry_iput drops the locks, at which point nobody (except
60756 * transient RCU lookups) can reach this dentry.
60757 */
60758- BUG_ON((int)dentry->d_lockref.count > 0);
60759+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
60760 this_cpu_dec(nr_dentry);
60761 if (dentry->d_op && dentry->d_op->d_release)
60762 dentry->d_op->d_release(dentry);
60763@@ -531,7 +531,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60764 struct dentry *parent = dentry->d_parent;
60765 if (IS_ROOT(dentry))
60766 return NULL;
60767- if (unlikely((int)dentry->d_lockref.count < 0))
60768+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
60769 return NULL;
60770 if (likely(spin_trylock(&parent->d_lock)))
60771 return parent;
60772@@ -608,7 +608,7 @@ repeat:
60773 dentry->d_flags |= DCACHE_REFERENCED;
60774 dentry_lru_add(dentry);
60775
60776- dentry->d_lockref.count--;
60777+ __lockref_dec(&dentry->d_lockref);
60778 spin_unlock(&dentry->d_lock);
60779 return;
60780
60781@@ -663,7 +663,7 @@ int d_invalidate(struct dentry * dentry)
60782 * We also need to leave mountpoints alone,
60783 * directory or not.
60784 */
60785- if (dentry->d_lockref.count > 1 && dentry->d_inode) {
60786+ if (__lockref_read(&dentry->d_lockref) > 1 && dentry->d_inode) {
60787 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
60788 spin_unlock(&dentry->d_lock);
60789 return -EBUSY;
60790@@ -679,7 +679,7 @@ EXPORT_SYMBOL(d_invalidate);
60791 /* This must be called with d_lock held */
60792 static inline void __dget_dlock(struct dentry *dentry)
60793 {
60794- dentry->d_lockref.count++;
60795+ __lockref_inc(&dentry->d_lockref);
60796 }
60797
60798 static inline void __dget(struct dentry *dentry)
60799@@ -720,8 +720,8 @@ repeat:
60800 goto repeat;
60801 }
60802 rcu_read_unlock();
60803- BUG_ON(!ret->d_lockref.count);
60804- ret->d_lockref.count++;
60805+ BUG_ON(!__lockref_read(&ret->d_lockref));
60806+ __lockref_inc(&ret->d_lockref);
60807 spin_unlock(&ret->d_lock);
60808 return ret;
60809 }
60810@@ -798,7 +798,7 @@ restart:
60811 spin_lock(&inode->i_lock);
60812 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
60813 spin_lock(&dentry->d_lock);
60814- if (!dentry->d_lockref.count) {
60815+ if (!__lockref_read(&dentry->d_lockref)) {
60816 /*
60817 * inform the fs via d_prune that this dentry
60818 * is about to be unhashed and destroyed.
60819@@ -841,7 +841,7 @@ static void shrink_dentry_list(struct list_head *list)
60820 * We found an inuse dentry which was not removed from
60821 * the LRU because of laziness during lookup. Do not free it.
60822 */
60823- if ((int)dentry->d_lockref.count > 0) {
60824+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
60825 spin_unlock(&dentry->d_lock);
60826 if (parent)
60827 spin_unlock(&parent->d_lock);
60828@@ -879,8 +879,8 @@ static void shrink_dentry_list(struct list_head *list)
60829 dentry = parent;
60830 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60831 parent = lock_parent(dentry);
60832- if (dentry->d_lockref.count != 1) {
60833- dentry->d_lockref.count--;
60834+ if (__lockref_read(&dentry->d_lockref) != 1) {
60835+ __lockref_inc(&dentry->d_lockref);
60836 spin_unlock(&dentry->d_lock);
60837 if (parent)
60838 spin_unlock(&parent->d_lock);
60839@@ -920,7 +920,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
60840 * counts, just remove them from the LRU. Otherwise give them
60841 * another pass through the LRU.
60842 */
60843- if (dentry->d_lockref.count) {
60844+ if (__lockref_read(&dentry->d_lockref) > 0) {
60845 d_lru_isolate(dentry);
60846 spin_unlock(&dentry->d_lock);
60847 return LRU_REMOVED;
60848@@ -1149,6 +1149,7 @@ out_unlock:
60849 return;
60850
60851 rename_retry:
60852+ done_seqretry(&rename_lock, seq);
60853 if (!retry)
60854 return;
60855 seq = 1;
60856@@ -1255,7 +1256,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60857 } else {
60858 if (dentry->d_flags & DCACHE_LRU_LIST)
60859 d_lru_del(dentry);
60860- if (!dentry->d_lockref.count) {
60861+ if (!__lockref_read(&dentry->d_lockref)) {
60862 d_shrink_add(dentry, &data->dispose);
60863 data->found++;
60864 }
60865@@ -1303,7 +1304,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60866 return D_WALK_CONTINUE;
60867
60868 /* root with refcount 1 is fine */
60869- if (dentry == _data && dentry->d_lockref.count == 1)
60870+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60871 return D_WALK_CONTINUE;
60872
60873 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60874@@ -1312,7 +1313,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60875 dentry->d_inode ?
60876 dentry->d_inode->i_ino : 0UL,
60877 dentry,
60878- dentry->d_lockref.count,
60879+ __lockref_read(&dentry->d_lockref),
60880 dentry->d_sb->s_type->name,
60881 dentry->d_sb->s_id);
60882 WARN_ON(1);
60883@@ -1438,7 +1439,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60884 */
60885 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60886 if (name->len > DNAME_INLINE_LEN-1) {
60887- dname = kmalloc(name->len + 1, GFP_KERNEL);
60888+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
60889 if (!dname) {
60890 kmem_cache_free(dentry_cache, dentry);
60891 return NULL;
60892@@ -1456,7 +1457,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60893 smp_wmb();
60894 dentry->d_name.name = dname;
60895
60896- dentry->d_lockref.count = 1;
60897+ __lockref_set(&dentry->d_lockref, 1);
60898 dentry->d_flags = 0;
60899 spin_lock_init(&dentry->d_lock);
60900 seqcount_init(&dentry->d_seq);
60901@@ -2196,7 +2197,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60902 goto next;
60903 }
60904
60905- dentry->d_lockref.count++;
60906+ __lockref_inc(&dentry->d_lockref);
60907 found = dentry;
60908 spin_unlock(&dentry->d_lock);
60909 break;
60910@@ -2295,7 +2296,7 @@ again:
60911 spin_lock(&dentry->d_lock);
60912 inode = dentry->d_inode;
60913 isdir = S_ISDIR(inode->i_mode);
60914- if (dentry->d_lockref.count == 1) {
60915+ if (__lockref_read(&dentry->d_lockref) == 1) {
60916 if (!spin_trylock(&inode->i_lock)) {
60917 spin_unlock(&dentry->d_lock);
60918 cpu_relax();
60919@@ -3307,7 +3308,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60920
60921 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60922 dentry->d_flags |= DCACHE_GENOCIDE;
60923- dentry->d_lockref.count--;
60924+ __lockref_dec(&dentry->d_lockref);
60925 }
60926 }
60927 return D_WALK_CONTINUE;
60928@@ -3423,7 +3424,8 @@ void __init vfs_caches_init(unsigned long mempages)
60929 mempages -= reserve;
60930
60931 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60932- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60933+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60934+ SLAB_NO_SANITIZE, NULL);
60935
60936 dcache_init();
60937 inode_init();
60938diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60939index 1e3b99d..6512101 100644
60940--- a/fs/debugfs/inode.c
60941+++ b/fs/debugfs/inode.c
60942@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
60943 */
60944 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60945 {
60946+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60947+ return __create_file(name, S_IFDIR | S_IRWXU,
60948+#else
60949 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
60950+#endif
60951 parent, NULL, NULL);
60952 }
60953 EXPORT_SYMBOL_GPL(debugfs_create_dir);
60954diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60955index 57ee4c5..ecb13b0 100644
60956--- a/fs/ecryptfs/inode.c
60957+++ b/fs/ecryptfs/inode.c
60958@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60959 old_fs = get_fs();
60960 set_fs(get_ds());
60961 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60962- (char __user *)lower_buf,
60963+ (char __force_user *)lower_buf,
60964 PATH_MAX);
60965 set_fs(old_fs);
60966 if (rc < 0)
60967diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60968index e4141f2..d8263e8 100644
60969--- a/fs/ecryptfs/miscdev.c
60970+++ b/fs/ecryptfs/miscdev.c
60971@@ -304,7 +304,7 @@ check_list:
60972 goto out_unlock_msg_ctx;
60973 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60974 if (msg_ctx->msg) {
60975- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60976+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60977 goto out_unlock_msg_ctx;
60978 i += packet_length_size;
60979 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
60980diff --git a/fs/exec.c b/fs/exec.c
60981index a2b42a9..1e924b3 100644
60982--- a/fs/exec.c
60983+++ b/fs/exec.c
60984@@ -56,8 +56,20 @@
60985 #include <linux/pipe_fs_i.h>
60986 #include <linux/oom.h>
60987 #include <linux/compat.h>
60988+#include <linux/random.h>
60989+#include <linux/seq_file.h>
60990+#include <linux/coredump.h>
60991+#include <linux/mman.h>
60992+
60993+#ifdef CONFIG_PAX_REFCOUNT
60994+#include <linux/kallsyms.h>
60995+#include <linux/kdebug.h>
60996+#endif
60997+
60998+#include <trace/events/fs.h>
60999
61000 #include <asm/uaccess.h>
61001+#include <asm/sections.h>
61002 #include <asm/mmu_context.h>
61003 #include <asm/tlb.h>
61004
61005@@ -66,19 +78,34 @@
61006
61007 #include <trace/events/sched.h>
61008
61009+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61010+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
61011+{
61012+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
61013+}
61014+#endif
61015+
61016+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
61017+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
61018+EXPORT_SYMBOL(pax_set_initial_flags_func);
61019+#endif
61020+
61021 int suid_dumpable = 0;
61022
61023 static LIST_HEAD(formats);
61024 static DEFINE_RWLOCK(binfmt_lock);
61025
61026+extern int gr_process_kernel_exec_ban(void);
61027+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
61028+
61029 void __register_binfmt(struct linux_binfmt * fmt, int insert)
61030 {
61031 BUG_ON(!fmt);
61032 if (WARN_ON(!fmt->load_binary))
61033 return;
61034 write_lock(&binfmt_lock);
61035- insert ? list_add(&fmt->lh, &formats) :
61036- list_add_tail(&fmt->lh, &formats);
61037+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
61038+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
61039 write_unlock(&binfmt_lock);
61040 }
61041
61042@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
61043 void unregister_binfmt(struct linux_binfmt * fmt)
61044 {
61045 write_lock(&binfmt_lock);
61046- list_del(&fmt->lh);
61047+ pax_list_del((struct list_head *)&fmt->lh);
61048 write_unlock(&binfmt_lock);
61049 }
61050
61051@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61052 int write)
61053 {
61054 struct page *page;
61055- int ret;
61056
61057-#ifdef CONFIG_STACK_GROWSUP
61058- if (write) {
61059- ret = expand_downwards(bprm->vma, pos);
61060- if (ret < 0)
61061- return NULL;
61062- }
61063-#endif
61064- ret = get_user_pages(current, bprm->mm, pos,
61065- 1, write, 1, &page, NULL);
61066- if (ret <= 0)
61067+ if (0 > expand_downwards(bprm->vma, pos))
61068+ return NULL;
61069+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
61070 return NULL;
61071
61072 if (write) {
61073@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61074 if (size <= ARG_MAX)
61075 return page;
61076
61077+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61078+ // only allow 512KB for argv+env on suid/sgid binaries
61079+ // to prevent easy ASLR exhaustion
61080+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
61081+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
61082+ (size > (512 * 1024))) {
61083+ put_page(page);
61084+ return NULL;
61085+ }
61086+#endif
61087+
61088 /*
61089 * Limit to 1/4-th the stack size for the argv+env strings.
61090 * This ensures that:
61091@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61092 vma->vm_end = STACK_TOP_MAX;
61093 vma->vm_start = vma->vm_end - PAGE_SIZE;
61094 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
61095+
61096+#ifdef CONFIG_PAX_SEGMEXEC
61097+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61098+#endif
61099+
61100 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61101 INIT_LIST_HEAD(&vma->anon_vma_chain);
61102
61103@@ -279,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61104 mm->stack_vm = mm->total_vm = 1;
61105 up_write(&mm->mmap_sem);
61106 bprm->p = vma->vm_end - sizeof(void *);
61107+
61108+#ifdef CONFIG_PAX_RANDUSTACK
61109+ if (randomize_va_space)
61110+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
61111+#endif
61112+
61113 return 0;
61114 err:
61115 up_write(&mm->mmap_sem);
61116@@ -395,7 +436,7 @@ struct user_arg_ptr {
61117 } ptr;
61118 };
61119
61120-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61121+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61122 {
61123 const char __user *native;
61124
61125@@ -404,14 +445,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61126 compat_uptr_t compat;
61127
61128 if (get_user(compat, argv.ptr.compat + nr))
61129- return ERR_PTR(-EFAULT);
61130+ return (const char __force_user *)ERR_PTR(-EFAULT);
61131
61132 return compat_ptr(compat);
61133 }
61134 #endif
61135
61136 if (get_user(native, argv.ptr.native + nr))
61137- return ERR_PTR(-EFAULT);
61138+ return (const char __force_user *)ERR_PTR(-EFAULT);
61139
61140 return native;
61141 }
61142@@ -430,7 +471,7 @@ static int count(struct user_arg_ptr argv, int max)
61143 if (!p)
61144 break;
61145
61146- if (IS_ERR(p))
61147+ if (IS_ERR((const char __force_kernel *)p))
61148 return -EFAULT;
61149
61150 if (i >= max)
61151@@ -465,7 +506,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
61152
61153 ret = -EFAULT;
61154 str = get_user_arg_ptr(argv, argc);
61155- if (IS_ERR(str))
61156+ if (IS_ERR((const char __force_kernel *)str))
61157 goto out;
61158
61159 len = strnlen_user(str, MAX_ARG_STRLEN);
61160@@ -547,7 +588,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
61161 int r;
61162 mm_segment_t oldfs = get_fs();
61163 struct user_arg_ptr argv = {
61164- .ptr.native = (const char __user *const __user *)__argv,
61165+ .ptr.native = (const char __user * const __force_user *)__argv,
61166 };
61167
61168 set_fs(KERNEL_DS);
61169@@ -582,7 +623,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61170 unsigned long new_end = old_end - shift;
61171 struct mmu_gather tlb;
61172
61173- BUG_ON(new_start > new_end);
61174+ if (new_start >= new_end || new_start < mmap_min_addr)
61175+ return -ENOMEM;
61176
61177 /*
61178 * ensure there are no vmas between where we want to go
61179@@ -591,6 +633,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61180 if (vma != find_vma(mm, new_start))
61181 return -EFAULT;
61182
61183+#ifdef CONFIG_PAX_SEGMEXEC
61184+ BUG_ON(pax_find_mirror_vma(vma));
61185+#endif
61186+
61187 /*
61188 * cover the whole range: [new_start, old_end)
61189 */
61190@@ -671,10 +717,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61191 stack_top = arch_align_stack(stack_top);
61192 stack_top = PAGE_ALIGN(stack_top);
61193
61194- if (unlikely(stack_top < mmap_min_addr) ||
61195- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
61196- return -ENOMEM;
61197-
61198 stack_shift = vma->vm_end - stack_top;
61199
61200 bprm->p -= stack_shift;
61201@@ -686,8 +728,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
61202 bprm->exec -= stack_shift;
61203
61204 down_write(&mm->mmap_sem);
61205+
61206+ /* Move stack pages down in memory. */
61207+ if (stack_shift) {
61208+ ret = shift_arg_pages(vma, stack_shift);
61209+ if (ret)
61210+ goto out_unlock;
61211+ }
61212+
61213 vm_flags = VM_STACK_FLAGS;
61214
61215+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61216+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
61217+ vm_flags &= ~VM_EXEC;
61218+
61219+#ifdef CONFIG_PAX_MPROTECT
61220+ if (mm->pax_flags & MF_PAX_MPROTECT)
61221+ vm_flags &= ~VM_MAYEXEC;
61222+#endif
61223+
61224+ }
61225+#endif
61226+
61227 /*
61228 * Adjust stack execute permissions; explicitly enable for
61229 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
61230@@ -706,13 +768,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61231 goto out_unlock;
61232 BUG_ON(prev != vma);
61233
61234- /* Move stack pages down in memory. */
61235- if (stack_shift) {
61236- ret = shift_arg_pages(vma, stack_shift);
61237- if (ret)
61238- goto out_unlock;
61239- }
61240-
61241 /* mprotect_fixup is overkill to remove the temporary stack flags */
61242 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
61243
61244@@ -736,6 +791,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
61245 #endif
61246 current->mm->start_stack = bprm->p;
61247 ret = expand_stack(vma, stack_base);
61248+
61249+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
61250+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
61251+ unsigned long size;
61252+ vm_flags_t vm_flags;
61253+
61254+ size = STACK_TOP - vma->vm_end;
61255+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
61256+
61257+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
61258+
61259+#ifdef CONFIG_X86
61260+ if (!ret) {
61261+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
61262+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
61263+ }
61264+#endif
61265+
61266+ }
61267+#endif
61268+
61269 if (ret)
61270 ret = -EFAULT;
61271
61272@@ -771,6 +847,8 @@ static struct file *do_open_exec(struct filename *name)
61273
61274 fsnotify_open(file);
61275
61276+ trace_open_exec(name->name);
61277+
61278 err = deny_write_access(file);
61279 if (err)
61280 goto exit;
61281@@ -800,7 +878,7 @@ int kernel_read(struct file *file, loff_t offset,
61282 old_fs = get_fs();
61283 set_fs(get_ds());
61284 /* The cast to a user pointer is valid due to the set_fs() */
61285- result = vfs_read(file, (void __user *)addr, count, &pos);
61286+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
61287 set_fs(old_fs);
61288 return result;
61289 }
61290@@ -845,6 +923,7 @@ static int exec_mmap(struct mm_struct *mm)
61291 tsk->mm = mm;
61292 tsk->active_mm = mm;
61293 activate_mm(active_mm, mm);
61294+ populate_stack();
61295 tsk->mm->vmacache_seqnum = 0;
61296 vmacache_flush(tsk);
61297 task_unlock(tsk);
61298@@ -1243,7 +1322,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
61299 }
61300 rcu_read_unlock();
61301
61302- if (p->fs->users > n_fs)
61303+ if (atomic_read(&p->fs->users) > n_fs)
61304 bprm->unsafe |= LSM_UNSAFE_SHARE;
61305 else
61306 p->fs->in_exec = 1;
61307@@ -1419,6 +1498,31 @@ static int exec_binprm(struct linux_binprm *bprm)
61308 return ret;
61309 }
61310
61311+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61312+static DEFINE_PER_CPU(u64, exec_counter);
61313+static int __init init_exec_counters(void)
61314+{
61315+ unsigned int cpu;
61316+
61317+ for_each_possible_cpu(cpu) {
61318+ per_cpu(exec_counter, cpu) = (u64)cpu;
61319+ }
61320+
61321+ return 0;
61322+}
61323+early_initcall(init_exec_counters);
61324+static inline void increment_exec_counter(void)
61325+{
61326+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
61327+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
61328+}
61329+#else
61330+static inline void increment_exec_counter(void) {}
61331+#endif
61332+
61333+extern void gr_handle_exec_args(struct linux_binprm *bprm,
61334+ struct user_arg_ptr argv);
61335+
61336 /*
61337 * sys_execve() executes a new program.
61338 */
61339@@ -1426,6 +1530,11 @@ static int do_execve_common(struct filename *filename,
61340 struct user_arg_ptr argv,
61341 struct user_arg_ptr envp)
61342 {
61343+#ifdef CONFIG_GRKERNSEC
61344+ struct file *old_exec_file;
61345+ struct acl_subject_label *old_acl;
61346+ struct rlimit old_rlim[RLIM_NLIMITS];
61347+#endif
61348 struct linux_binprm *bprm;
61349 struct file *file;
61350 struct files_struct *displaced;
61351@@ -1434,6 +1543,8 @@ static int do_execve_common(struct filename *filename,
61352 if (IS_ERR(filename))
61353 return PTR_ERR(filename);
61354
61355+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
61356+
61357 /*
61358 * We move the actual failure in case of RLIMIT_NPROC excess from
61359 * set*uid() to execve() because too many poorly written programs
61360@@ -1471,11 +1582,21 @@ static int do_execve_common(struct filename *filename,
61361 if (IS_ERR(file))
61362 goto out_unmark;
61363
61364+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
61365+ retval = -EPERM;
61366+ goto out_unmark;
61367+ }
61368+
61369 sched_exec();
61370
61371 bprm->file = file;
61372 bprm->filename = bprm->interp = filename->name;
61373
61374+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
61375+ retval = -EACCES;
61376+ goto out_unmark;
61377+ }
61378+
61379 retval = bprm_mm_init(bprm);
61380 if (retval)
61381 goto out_unmark;
61382@@ -1492,24 +1613,70 @@ static int do_execve_common(struct filename *filename,
61383 if (retval < 0)
61384 goto out;
61385
61386+#ifdef CONFIG_GRKERNSEC
61387+ old_acl = current->acl;
61388+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
61389+ old_exec_file = current->exec_file;
61390+ get_file(file);
61391+ current->exec_file = file;
61392+#endif
61393+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61394+ /* limit suid stack to 8MB
61395+ * we saved the old limits above and will restore them if this exec fails
61396+ */
61397+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
61398+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
61399+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
61400+#endif
61401+
61402+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
61403+ retval = -EPERM;
61404+ goto out_fail;
61405+ }
61406+
61407+ if (!gr_tpe_allow(file)) {
61408+ retval = -EACCES;
61409+ goto out_fail;
61410+ }
61411+
61412+ if (gr_check_crash_exec(file)) {
61413+ retval = -EACCES;
61414+ goto out_fail;
61415+ }
61416+
61417+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
61418+ bprm->unsafe);
61419+ if (retval < 0)
61420+ goto out_fail;
61421+
61422 retval = copy_strings_kernel(1, &bprm->filename, bprm);
61423 if (retval < 0)
61424- goto out;
61425+ goto out_fail;
61426
61427 bprm->exec = bprm->p;
61428 retval = copy_strings(bprm->envc, envp, bprm);
61429 if (retval < 0)
61430- goto out;
61431+ goto out_fail;
61432
61433 retval = copy_strings(bprm->argc, argv, bprm);
61434 if (retval < 0)
61435- goto out;
61436+ goto out_fail;
61437+
61438+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
61439+
61440+ gr_handle_exec_args(bprm, argv);
61441
61442 retval = exec_binprm(bprm);
61443 if (retval < 0)
61444- goto out;
61445+ goto out_fail;
61446+#ifdef CONFIG_GRKERNSEC
61447+ if (old_exec_file)
61448+ fput(old_exec_file);
61449+#endif
61450
61451 /* execve succeeded */
61452+
61453+ increment_exec_counter();
61454 current->fs->in_exec = 0;
61455 current->in_execve = 0;
61456 acct_update_integrals(current);
61457@@ -1520,6 +1687,14 @@ static int do_execve_common(struct filename *filename,
61458 put_files_struct(displaced);
61459 return retval;
61460
61461+out_fail:
61462+#ifdef CONFIG_GRKERNSEC
61463+ current->acl = old_acl;
61464+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
61465+ fput(current->exec_file);
61466+ current->exec_file = old_exec_file;
61467+#endif
61468+
61469 out:
61470 if (bprm->mm) {
61471 acct_arg_size(bprm, 0);
61472@@ -1611,3 +1786,312 @@ COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
61473 return compat_do_execve(getname(filename), argv, envp);
61474 }
61475 #endif
61476+
61477+int pax_check_flags(unsigned long *flags)
61478+{
61479+ int retval = 0;
61480+
61481+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61482+ if (*flags & MF_PAX_SEGMEXEC)
61483+ {
61484+ *flags &= ~MF_PAX_SEGMEXEC;
61485+ retval = -EINVAL;
61486+ }
61487+#endif
61488+
61489+ if ((*flags & MF_PAX_PAGEEXEC)
61490+
61491+#ifdef CONFIG_PAX_PAGEEXEC
61492+ && (*flags & MF_PAX_SEGMEXEC)
61493+#endif
61494+
61495+ )
61496+ {
61497+ *flags &= ~MF_PAX_PAGEEXEC;
61498+ retval = -EINVAL;
61499+ }
61500+
61501+ if ((*flags & MF_PAX_MPROTECT)
61502+
61503+#ifdef CONFIG_PAX_MPROTECT
61504+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61505+#endif
61506+
61507+ )
61508+ {
61509+ *flags &= ~MF_PAX_MPROTECT;
61510+ retval = -EINVAL;
61511+ }
61512+
61513+ if ((*flags & MF_PAX_EMUTRAMP)
61514+
61515+#ifdef CONFIG_PAX_EMUTRAMP
61516+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61517+#endif
61518+
61519+ )
61520+ {
61521+ *flags &= ~MF_PAX_EMUTRAMP;
61522+ retval = -EINVAL;
61523+ }
61524+
61525+ return retval;
61526+}
61527+
61528+EXPORT_SYMBOL(pax_check_flags);
61529+
61530+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61531+char *pax_get_path(const struct path *path, char *buf, int buflen)
61532+{
61533+ char *pathname = d_path(path, buf, buflen);
61534+
61535+ if (IS_ERR(pathname))
61536+ goto toolong;
61537+
61538+ pathname = mangle_path(buf, pathname, "\t\n\\");
61539+ if (!pathname)
61540+ goto toolong;
61541+
61542+ *pathname = 0;
61543+ return buf;
61544+
61545+toolong:
61546+ return "<path too long>";
61547+}
61548+EXPORT_SYMBOL(pax_get_path);
61549+
61550+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61551+{
61552+ struct task_struct *tsk = current;
61553+ struct mm_struct *mm = current->mm;
61554+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61555+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61556+ char *path_exec = NULL;
61557+ char *path_fault = NULL;
61558+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61559+ siginfo_t info = { };
61560+
61561+ if (buffer_exec && buffer_fault) {
61562+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61563+
61564+ down_read(&mm->mmap_sem);
61565+ vma = mm->mmap;
61566+ while (vma && (!vma_exec || !vma_fault)) {
61567+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61568+ vma_exec = vma;
61569+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61570+ vma_fault = vma;
61571+ vma = vma->vm_next;
61572+ }
61573+ if (vma_exec)
61574+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61575+ if (vma_fault) {
61576+ start = vma_fault->vm_start;
61577+ end = vma_fault->vm_end;
61578+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61579+ if (vma_fault->vm_file)
61580+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61581+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61582+ path_fault = "<heap>";
61583+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61584+ path_fault = "<stack>";
61585+ else
61586+ path_fault = "<anonymous mapping>";
61587+ }
61588+ up_read(&mm->mmap_sem);
61589+ }
61590+ if (tsk->signal->curr_ip)
61591+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61592+ else
61593+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61594+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61595+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61596+ free_page((unsigned long)buffer_exec);
61597+ free_page((unsigned long)buffer_fault);
61598+ pax_report_insns(regs, pc, sp);
61599+ info.si_signo = SIGKILL;
61600+ info.si_errno = 0;
61601+ info.si_code = SI_KERNEL;
61602+ info.si_pid = 0;
61603+ info.si_uid = 0;
61604+ do_coredump(&info);
61605+}
61606+#endif
61607+
61608+#ifdef CONFIG_PAX_REFCOUNT
61609+void pax_report_refcount_overflow(struct pt_regs *regs)
61610+{
61611+ if (current->signal->curr_ip)
61612+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61613+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61614+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61615+ else
61616+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61617+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61618+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
61619+ preempt_disable();
61620+ show_regs(regs);
61621+ preempt_enable();
61622+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61623+}
61624+#endif
61625+
61626+#ifdef CONFIG_PAX_USERCOPY
61627+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61628+static noinline int check_stack_object(const void *obj, unsigned long len)
61629+{
61630+ const void * const stack = task_stack_page(current);
61631+ const void * const stackend = stack + THREAD_SIZE;
61632+
61633+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61634+ const void *frame = NULL;
61635+ const void *oldframe;
61636+#endif
61637+
61638+ if (obj + len < obj)
61639+ return -1;
61640+
61641+ if (obj + len <= stack || stackend <= obj)
61642+ return 0;
61643+
61644+ if (obj < stack || stackend < obj + len)
61645+ return -1;
61646+
61647+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61648+ oldframe = __builtin_frame_address(1);
61649+ if (oldframe)
61650+ frame = __builtin_frame_address(2);
61651+ /*
61652+ low ----------------------------------------------> high
61653+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61654+ ^----------------^
61655+ allow copies only within here
61656+ */
61657+ while (stack <= frame && frame < stackend) {
61658+ /* if obj + len extends past the last frame, this
61659+ check won't pass and the next frame will be 0,
61660+ causing us to bail out and correctly report
61661+ the copy as invalid
61662+ */
61663+ if (obj + len <= frame)
61664+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61665+ oldframe = frame;
61666+ frame = *(const void * const *)frame;
61667+ }
61668+ return -1;
61669+#else
61670+ return 1;
61671+#endif
61672+}
61673+
61674+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61675+{
61676+ if (current->signal->curr_ip)
61677+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61678+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61679+ else
61680+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61681+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61682+ dump_stack();
61683+ gr_handle_kernel_exploit();
61684+ do_group_exit(SIGKILL);
61685+}
61686+#endif
61687+
61688+#ifdef CONFIG_PAX_USERCOPY
61689+
61690+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61691+{
61692+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61693+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61694+#ifdef CONFIG_MODULES
61695+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61696+#else
61697+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61698+#endif
61699+
61700+#else
61701+ unsigned long textlow = (unsigned long)_stext;
61702+ unsigned long texthigh = (unsigned long)_etext;
61703+
61704+#ifdef CONFIG_X86_64
61705+ /* check against linear mapping as well */
61706+ if (high > (unsigned long)__va(__pa(textlow)) &&
61707+ low < (unsigned long)__va(__pa(texthigh)))
61708+ return true;
61709+#endif
61710+
61711+#endif
61712+
61713+ if (high <= textlow || low >= texthigh)
61714+ return false;
61715+ else
61716+ return true;
61717+}
61718+#endif
61719+
61720+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61721+{
61722+#ifdef CONFIG_PAX_USERCOPY
61723+ const char *type;
61724+#endif
61725+
61726+#ifndef CONFIG_STACK_GROWSUP
61727+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61728+ unsigned long currentsp = (unsigned long)&stackstart;
61729+ if (unlikely((currentsp < stackstart + 512 ||
61730+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61731+ BUG();
61732+#endif
61733+
61734+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61735+ if (const_size)
61736+ return;
61737+#endif
61738+
61739+#ifdef CONFIG_PAX_USERCOPY
61740+ if (!n)
61741+ return;
61742+
61743+ type = check_heap_object(ptr, n);
61744+ if (!type) {
61745+ int ret = check_stack_object(ptr, n);
61746+ if (ret == 1 || ret == 2)
61747+ return;
61748+ if (ret == 0) {
61749+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61750+ type = "<kernel text>";
61751+ else
61752+ return;
61753+ } else
61754+ type = "<process stack>";
61755+ }
61756+
61757+ pax_report_usercopy(ptr, n, to_user, type);
61758+#endif
61759+
61760+}
61761+EXPORT_SYMBOL(__check_object_size);
61762+
61763+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61764+void pax_track_stack(void)
61765+{
61766+ unsigned long sp = (unsigned long)&sp;
61767+ if (sp < current_thread_info()->lowest_stack &&
61768+ sp > (unsigned long)task_stack_page(current))
61769+ current_thread_info()->lowest_stack = sp;
61770+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61771+ BUG();
61772+}
61773+EXPORT_SYMBOL(pax_track_stack);
61774+#endif
61775+
61776+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61777+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61778+{
61779+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
61780+ dump_stack();
61781+ do_group_exit(SIGKILL);
61782+}
61783+EXPORT_SYMBOL(report_size_overflow);
61784+#endif
61785diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61786index 9f9992b..8b59411 100644
61787--- a/fs/ext2/balloc.c
61788+++ b/fs/ext2/balloc.c
61789@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61790
61791 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61792 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61793- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61794+ if (free_blocks < root_blocks + 1 &&
61795 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61796 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61797- !in_group_p (sbi->s_resgid))) {
61798+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61799 return 0;
61800 }
61801 return 1;
61802diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61803index 9142614..97484fa 100644
61804--- a/fs/ext2/xattr.c
61805+++ b/fs/ext2/xattr.c
61806@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61807 struct buffer_head *bh = NULL;
61808 struct ext2_xattr_entry *entry;
61809 char *end;
61810- size_t rest = buffer_size;
61811+ size_t rest = buffer_size, total_size = 0;
61812 int error;
61813
61814 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61815@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61816 buffer += size;
61817 }
61818 rest -= size;
61819+ total_size += size;
61820 }
61821 }
61822- error = buffer_size - rest; /* total size */
61823+ error = total_size;
61824
61825 cleanup:
61826 brelse(bh);
61827diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61828index 158b5d4..2432610 100644
61829--- a/fs/ext3/balloc.c
61830+++ b/fs/ext3/balloc.c
61831@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61832
61833 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61834 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61835- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61836+ if (free_blocks < root_blocks + 1 &&
61837 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61838 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61839- !in_group_p (sbi->s_resgid))) {
61840+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61841 return 0;
61842 }
61843 return 1;
61844diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61845index c6874be..f8a6ae8 100644
61846--- a/fs/ext3/xattr.c
61847+++ b/fs/ext3/xattr.c
61848@@ -330,7 +330,7 @@ static int
61849 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61850 char *buffer, size_t buffer_size)
61851 {
61852- size_t rest = buffer_size;
61853+ size_t rest = buffer_size, total_size = 0;
61854
61855 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61856 const struct xattr_handler *handler =
61857@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61858 buffer += size;
61859 }
61860 rest -= size;
61861+ total_size += size;
61862 }
61863 }
61864- return buffer_size - rest;
61865+ return total_size;
61866 }
61867
61868 static int
61869diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61870index e069155..b825b08 100644
61871--- a/fs/ext4/balloc.c
61872+++ b/fs/ext4/balloc.c
61873@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61874 /* Hm, nope. Are (enough) root reserved clusters available? */
61875 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61876 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61877- capable(CAP_SYS_RESOURCE) ||
61878- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61879+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61880+ capable_nolog(CAP_SYS_RESOURCE)) {
61881
61882 if (free_clusters >= (nclusters + dirty_clusters +
61883 resv_clusters))
61884diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61885index 96ac9d3..1c30e7e6 100644
61886--- a/fs/ext4/ext4.h
61887+++ b/fs/ext4/ext4.h
61888@@ -1275,19 +1275,19 @@ struct ext4_sb_info {
61889 unsigned long s_mb_last_start;
61890
61891 /* stats for buddy allocator */
61892- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61893- atomic_t s_bal_success; /* we found long enough chunks */
61894- atomic_t s_bal_allocated; /* in blocks */
61895- atomic_t s_bal_ex_scanned; /* total extents scanned */
61896- atomic_t s_bal_goals; /* goal hits */
61897- atomic_t s_bal_breaks; /* too long searches */
61898- atomic_t s_bal_2orders; /* 2^order hits */
61899+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61900+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61901+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61902+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61903+ atomic_unchecked_t s_bal_goals; /* goal hits */
61904+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61905+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61906 spinlock_t s_bal_lock;
61907 unsigned long s_mb_buddies_generated;
61908 unsigned long long s_mb_generation_time;
61909- atomic_t s_mb_lost_chunks;
61910- atomic_t s_mb_preallocated;
61911- atomic_t s_mb_discarded;
61912+ atomic_unchecked_t s_mb_lost_chunks;
61913+ atomic_unchecked_t s_mb_preallocated;
61914+ atomic_unchecked_t s_mb_discarded;
61915 atomic_t s_lock_busy;
61916
61917 /* locality groups */
61918diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61919index 8b0f9ef..cb9f620 100644
61920--- a/fs/ext4/mballoc.c
61921+++ b/fs/ext4/mballoc.c
61922@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
61923 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
61924
61925 if (EXT4_SB(sb)->s_mb_stats)
61926- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
61927+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
61928
61929 break;
61930 }
61931@@ -2211,7 +2211,7 @@ repeat:
61932 ac->ac_status = AC_STATUS_CONTINUE;
61933 ac->ac_flags |= EXT4_MB_HINT_FIRST;
61934 cr = 3;
61935- atomic_inc(&sbi->s_mb_lost_chunks);
61936+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
61937 goto repeat;
61938 }
61939 }
61940@@ -2717,25 +2717,25 @@ int ext4_mb_release(struct super_block *sb)
61941 if (sbi->s_mb_stats) {
61942 ext4_msg(sb, KERN_INFO,
61943 "mballoc: %u blocks %u reqs (%u success)",
61944- atomic_read(&sbi->s_bal_allocated),
61945- atomic_read(&sbi->s_bal_reqs),
61946- atomic_read(&sbi->s_bal_success));
61947+ atomic_read_unchecked(&sbi->s_bal_allocated),
61948+ atomic_read_unchecked(&sbi->s_bal_reqs),
61949+ atomic_read_unchecked(&sbi->s_bal_success));
61950 ext4_msg(sb, KERN_INFO,
61951 "mballoc: %u extents scanned, %u goal hits, "
61952 "%u 2^N hits, %u breaks, %u lost",
61953- atomic_read(&sbi->s_bal_ex_scanned),
61954- atomic_read(&sbi->s_bal_goals),
61955- atomic_read(&sbi->s_bal_2orders),
61956- atomic_read(&sbi->s_bal_breaks),
61957- atomic_read(&sbi->s_mb_lost_chunks));
61958+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
61959+ atomic_read_unchecked(&sbi->s_bal_goals),
61960+ atomic_read_unchecked(&sbi->s_bal_2orders),
61961+ atomic_read_unchecked(&sbi->s_bal_breaks),
61962+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
61963 ext4_msg(sb, KERN_INFO,
61964 "mballoc: %lu generated and it took %Lu",
61965 sbi->s_mb_buddies_generated,
61966 sbi->s_mb_generation_time);
61967 ext4_msg(sb, KERN_INFO,
61968 "mballoc: %u preallocated, %u discarded",
61969- atomic_read(&sbi->s_mb_preallocated),
61970- atomic_read(&sbi->s_mb_discarded));
61971+ atomic_read_unchecked(&sbi->s_mb_preallocated),
61972+ atomic_read_unchecked(&sbi->s_mb_discarded));
61973 }
61974
61975 free_percpu(sbi->s_locality_groups);
61976@@ -3192,16 +3192,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
61977 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
61978
61979 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
61980- atomic_inc(&sbi->s_bal_reqs);
61981- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61982+ atomic_inc_unchecked(&sbi->s_bal_reqs);
61983+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61984 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
61985- atomic_inc(&sbi->s_bal_success);
61986- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
61987+ atomic_inc_unchecked(&sbi->s_bal_success);
61988+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
61989 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
61990 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
61991- atomic_inc(&sbi->s_bal_goals);
61992+ atomic_inc_unchecked(&sbi->s_bal_goals);
61993 if (ac->ac_found > sbi->s_mb_max_to_scan)
61994- atomic_inc(&sbi->s_bal_breaks);
61995+ atomic_inc_unchecked(&sbi->s_bal_breaks);
61996 }
61997
61998 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
61999@@ -3628,7 +3628,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
62000 trace_ext4_mb_new_inode_pa(ac, pa);
62001
62002 ext4_mb_use_inode_pa(ac, pa);
62003- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
62004+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
62005
62006 ei = EXT4_I(ac->ac_inode);
62007 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62008@@ -3688,7 +3688,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
62009 trace_ext4_mb_new_group_pa(ac, pa);
62010
62011 ext4_mb_use_group_pa(ac, pa);
62012- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62013+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62014
62015 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62016 lg = ac->ac_lg;
62017@@ -3777,7 +3777,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
62018 * from the bitmap and continue.
62019 */
62020 }
62021- atomic_add(free, &sbi->s_mb_discarded);
62022+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
62023
62024 return err;
62025 }
62026@@ -3795,7 +3795,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
62027 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
62028 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
62029 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
62030- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62031+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62032 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
62033
62034 return 0;
62035diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
62036index 8313ca3..8a37d08 100644
62037--- a/fs/ext4/mmp.c
62038+++ b/fs/ext4/mmp.c
62039@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
62040 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
62041 const char *function, unsigned int line, const char *msg)
62042 {
62043- __ext4_warning(sb, function, line, msg);
62044+ __ext4_warning(sb, function, line, "%s", msg);
62045 __ext4_warning(sb, function, line,
62046 "MMP failure info: last update time: %llu, last update "
62047 "node: %s, last update device: %s\n",
62048diff --git a/fs/ext4/super.c b/fs/ext4/super.c
62049index b1f0ac7..77e9a05 100644
62050--- a/fs/ext4/super.c
62051+++ b/fs/ext4/super.c
62052@@ -1274,7 +1274,7 @@ static ext4_fsblk_t get_sb_block(void **data)
62053 }
62054
62055 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
62056-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62057+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62058 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
62059
62060 #ifdef CONFIG_QUOTA
62061@@ -2454,7 +2454,7 @@ struct ext4_attr {
62062 int offset;
62063 int deprecated_val;
62064 } u;
62065-};
62066+} __do_const;
62067
62068 static int parse_strtoull(const char *buf,
62069 unsigned long long max, unsigned long long *value)
62070diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
62071index 2d1e5803..1b082d415 100644
62072--- a/fs/ext4/xattr.c
62073+++ b/fs/ext4/xattr.c
62074@@ -399,7 +399,7 @@ static int
62075 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62076 char *buffer, size_t buffer_size)
62077 {
62078- size_t rest = buffer_size;
62079+ size_t rest = buffer_size, total_size = 0;
62080
62081 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
62082 const struct xattr_handler *handler =
62083@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62084 buffer += size;
62085 }
62086 rest -= size;
62087+ total_size += size;
62088 }
62089 }
62090- return buffer_size - rest;
62091+ return total_size;
62092 }
62093
62094 static int
62095diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
62096index 6df8d3d..b8b92c2 100644
62097--- a/fs/fat/namei_vfat.c
62098+++ b/fs/fat/namei_vfat.c
62099@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
62100 }
62101
62102 alias = d_find_alias(inode);
62103- if (alias && !vfat_d_anon_disconn(alias)) {
62104+ /*
62105+ * Checking "alias->d_parent == dentry->d_parent" to make sure
62106+ * FS is not corrupted (especially double linked dir).
62107+ */
62108+ if (alias && alias->d_parent == dentry->d_parent &&
62109+ !vfat_d_anon_disconn(alias)) {
62110 /*
62111 * This inode has non anonymous-DCACHE_DISCONNECTED
62112 * dentry. This means, the user did ->lookup() by an
62113@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
62114
62115 out:
62116 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62117- dentry->d_time = dentry->d_parent->d_inode->i_version;
62118- dentry = d_splice_alias(inode, dentry);
62119- if (dentry)
62120- dentry->d_time = dentry->d_parent->d_inode->i_version;
62121- return dentry;
62122-
62123+ if (!inode)
62124+ dentry->d_time = dir->i_version;
62125+ return d_splice_alias(inode, dentry);
62126 error:
62127 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62128 return ERR_PTR(err);
62129@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
62130 inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
62131 /* timestamp is already written, so mark_inode_dirty() is unneeded. */
62132
62133- dentry->d_time = dentry->d_parent->d_inode->i_version;
62134 d_instantiate(dentry, inode);
62135 out:
62136 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62137@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
62138 clear_nlink(inode);
62139 inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
62140 fat_detach(inode);
62141+ dentry->d_time = dir->i_version;
62142 out:
62143 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62144
62145@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
62146 clear_nlink(inode);
62147 inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
62148 fat_detach(inode);
62149+ dentry->d_time = dir->i_version;
62150 out:
62151 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62152
62153@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
62154 inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
62155 /* timestamp is already written, so mark_inode_dirty() is unneeded. */
62156
62157- dentry->d_time = dentry->d_parent->d_inode->i_version;
62158 d_instantiate(dentry, inode);
62159
62160 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62161diff --git a/fs/fcntl.c b/fs/fcntl.c
62162index 22d1c3d..600cf7e 100644
62163--- a/fs/fcntl.c
62164+++ b/fs/fcntl.c
62165@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
62166 if (err)
62167 return err;
62168
62169+ if (gr_handle_chroot_fowner(pid, type))
62170+ return -ENOENT;
62171+ if (gr_check_protected_task_fowner(pid, type))
62172+ return -EACCES;
62173+
62174 f_modown(filp, pid, type, force);
62175 return 0;
62176 }
62177diff --git a/fs/fhandle.c b/fs/fhandle.c
62178index 999ff5c..ac037c9 100644
62179--- a/fs/fhandle.c
62180+++ b/fs/fhandle.c
62181@@ -8,6 +8,7 @@
62182 #include <linux/fs_struct.h>
62183 #include <linux/fsnotify.h>
62184 #include <linux/personality.h>
62185+#include <linux/grsecurity.h>
62186 #include <asm/uaccess.h>
62187 #include "internal.h"
62188 #include "mount.h"
62189@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
62190 } else
62191 retval = 0;
62192 /* copy the mount id */
62193- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
62194- sizeof(*mnt_id)) ||
62195+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
62196 copy_to_user(ufh, handle,
62197 sizeof(struct file_handle) + handle_bytes))
62198 retval = -EFAULT;
62199@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
62200 * the directory. Ideally we would like CAP_DAC_SEARCH.
62201 * But we don't have that
62202 */
62203- if (!capable(CAP_DAC_READ_SEARCH)) {
62204+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
62205 retval = -EPERM;
62206 goto out_err;
62207 }
62208diff --git a/fs/file.c b/fs/file.c
62209index 66923fe..2849783 100644
62210--- a/fs/file.c
62211+++ b/fs/file.c
62212@@ -16,6 +16,7 @@
62213 #include <linux/slab.h>
62214 #include <linux/vmalloc.h>
62215 #include <linux/file.h>
62216+#include <linux/security.h>
62217 #include <linux/fdtable.h>
62218 #include <linux/bitops.h>
62219 #include <linux/interrupt.h>
62220@@ -139,7 +140,7 @@ out:
62221 * Return <0 error code on error; 1 on successful completion.
62222 * The files->file_lock should be held on entry, and will be held on exit.
62223 */
62224-static int expand_fdtable(struct files_struct *files, int nr)
62225+static int expand_fdtable(struct files_struct *files, unsigned int nr)
62226 __releases(files->file_lock)
62227 __acquires(files->file_lock)
62228 {
62229@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
62230 * expanded and execution may have blocked.
62231 * The files->file_lock should be held on entry, and will be held on exit.
62232 */
62233-static int expand_files(struct files_struct *files, int nr)
62234+static int expand_files(struct files_struct *files, unsigned int nr)
62235 {
62236 struct fdtable *fdt;
62237
62238@@ -799,6 +800,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
62239 if (!file)
62240 return __close_fd(files, fd);
62241
62242+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
62243 if (fd >= rlimit(RLIMIT_NOFILE))
62244 return -EBADF;
62245
62246@@ -825,6 +827,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
62247 if (unlikely(oldfd == newfd))
62248 return -EINVAL;
62249
62250+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
62251 if (newfd >= rlimit(RLIMIT_NOFILE))
62252 return -EBADF;
62253
62254@@ -880,6 +883,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
62255 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
62256 {
62257 int err;
62258+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
62259 if (from >= rlimit(RLIMIT_NOFILE))
62260 return -EINVAL;
62261 err = alloc_fd(from, flags);
62262diff --git a/fs/filesystems.c b/fs/filesystems.c
62263index 5797d45..7d7d79a 100644
62264--- a/fs/filesystems.c
62265+++ b/fs/filesystems.c
62266@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
62267 int len = dot ? dot - name : strlen(name);
62268
62269 fs = __get_fs_type(name, len);
62270+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62271+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
62272+#else
62273 if (!fs && (request_module("fs-%.*s", len, name) == 0))
62274+#endif
62275 fs = __get_fs_type(name, len);
62276
62277 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
62278diff --git a/fs/fs_struct.c b/fs/fs_struct.c
62279index 7dca743..543d620 100644
62280--- a/fs/fs_struct.c
62281+++ b/fs/fs_struct.c
62282@@ -4,6 +4,7 @@
62283 #include <linux/path.h>
62284 #include <linux/slab.h>
62285 #include <linux/fs_struct.h>
62286+#include <linux/grsecurity.h>
62287 #include "internal.h"
62288
62289 /*
62290@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
62291 write_seqcount_begin(&fs->seq);
62292 old_root = fs->root;
62293 fs->root = *path;
62294+ gr_set_chroot_entries(current, path);
62295 write_seqcount_end(&fs->seq);
62296 spin_unlock(&fs->lock);
62297 if (old_root.dentry)
62298@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62299 int hits = 0;
62300 spin_lock(&fs->lock);
62301 write_seqcount_begin(&fs->seq);
62302+ /* this root replacement is only done by pivot_root,
62303+ leave grsec's chroot tagging alone for this task
62304+ so that a pivoted root isn't treated as a chroot
62305+ */
62306 hits += replace_path(&fs->root, old_root, new_root);
62307 hits += replace_path(&fs->pwd, old_root, new_root);
62308 write_seqcount_end(&fs->seq);
62309@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
62310 task_lock(tsk);
62311 spin_lock(&fs->lock);
62312 tsk->fs = NULL;
62313- kill = !--fs->users;
62314+ gr_clear_chroot_entries(tsk);
62315+ kill = !atomic_dec_return(&fs->users);
62316 spin_unlock(&fs->lock);
62317 task_unlock(tsk);
62318 if (kill)
62319@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62320 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
62321 /* We don't need to lock fs - think why ;-) */
62322 if (fs) {
62323- fs->users = 1;
62324+ atomic_set(&fs->users, 1);
62325 fs->in_exec = 0;
62326 spin_lock_init(&fs->lock);
62327 seqcount_init(&fs->seq);
62328@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62329 spin_lock(&old->lock);
62330 fs->root = old->root;
62331 path_get(&fs->root);
62332+ /* instead of calling gr_set_chroot_entries here,
62333+ we call it from every caller of this function
62334+ */
62335 fs->pwd = old->pwd;
62336 path_get(&fs->pwd);
62337 spin_unlock(&old->lock);
62338@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
62339
62340 task_lock(current);
62341 spin_lock(&fs->lock);
62342- kill = !--fs->users;
62343+ kill = !atomic_dec_return(&fs->users);
62344 current->fs = new_fs;
62345+ gr_set_chroot_entries(current, &new_fs->root);
62346 spin_unlock(&fs->lock);
62347 task_unlock(current);
62348
62349@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
62350
62351 int current_umask(void)
62352 {
62353- return current->fs->umask;
62354+ return current->fs->umask | gr_acl_umask();
62355 }
62356 EXPORT_SYMBOL(current_umask);
62357
62358 /* to be mentioned only in INIT_TASK */
62359 struct fs_struct init_fs = {
62360- .users = 1,
62361+ .users = ATOMIC_INIT(1),
62362 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
62363 .seq = SEQCNT_ZERO(init_fs.seq),
62364 .umask = 0022,
62365diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
62366index 89acec7..a575262 100644
62367--- a/fs/fscache/cookie.c
62368+++ b/fs/fscache/cookie.c
62369@@ -19,7 +19,7 @@
62370
62371 struct kmem_cache *fscache_cookie_jar;
62372
62373-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
62374+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
62375
62376 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
62377 static int fscache_alloc_object(struct fscache_cache *cache,
62378@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
62379 parent ? (char *) parent->def->name : "<no-parent>",
62380 def->name, netfs_data, enable);
62381
62382- fscache_stat(&fscache_n_acquires);
62383+ fscache_stat_unchecked(&fscache_n_acquires);
62384
62385 /* if there's no parent cookie, then we don't create one here either */
62386 if (!parent) {
62387- fscache_stat(&fscache_n_acquires_null);
62388+ fscache_stat_unchecked(&fscache_n_acquires_null);
62389 _leave(" [no parent]");
62390 return NULL;
62391 }
62392@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62393 /* allocate and initialise a cookie */
62394 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
62395 if (!cookie) {
62396- fscache_stat(&fscache_n_acquires_oom);
62397+ fscache_stat_unchecked(&fscache_n_acquires_oom);
62398 _leave(" [ENOMEM]");
62399 return NULL;
62400 }
62401@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
62402
62403 switch (cookie->def->type) {
62404 case FSCACHE_COOKIE_TYPE_INDEX:
62405- fscache_stat(&fscache_n_cookie_index);
62406+ fscache_stat_unchecked(&fscache_n_cookie_index);
62407 break;
62408 case FSCACHE_COOKIE_TYPE_DATAFILE:
62409- fscache_stat(&fscache_n_cookie_data);
62410+ fscache_stat_unchecked(&fscache_n_cookie_data);
62411 break;
62412 default:
62413- fscache_stat(&fscache_n_cookie_special);
62414+ fscache_stat_unchecked(&fscache_n_cookie_special);
62415 break;
62416 }
62417
62418@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62419 } else {
62420 atomic_dec(&parent->n_children);
62421 __fscache_cookie_put(cookie);
62422- fscache_stat(&fscache_n_acquires_nobufs);
62423+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62424 _leave(" = NULL");
62425 return NULL;
62426 }
62427@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62428 }
62429 }
62430
62431- fscache_stat(&fscache_n_acquires_ok);
62432+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62433 _leave(" = %p", cookie);
62434 return cookie;
62435 }
62436@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62437 cache = fscache_select_cache_for_object(cookie->parent);
62438 if (!cache) {
62439 up_read(&fscache_addremove_sem);
62440- fscache_stat(&fscache_n_acquires_no_cache);
62441+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62442 _leave(" = -ENOMEDIUM [no cache]");
62443 return -ENOMEDIUM;
62444 }
62445@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62446 object = cache->ops->alloc_object(cache, cookie);
62447 fscache_stat_d(&fscache_n_cop_alloc_object);
62448 if (IS_ERR(object)) {
62449- fscache_stat(&fscache_n_object_no_alloc);
62450+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62451 ret = PTR_ERR(object);
62452 goto error;
62453 }
62454
62455- fscache_stat(&fscache_n_object_alloc);
62456+ fscache_stat_unchecked(&fscache_n_object_alloc);
62457
62458- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62459+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62460
62461 _debug("ALLOC OBJ%x: %s {%lx}",
62462 object->debug_id, cookie->def->name, object->events);
62463@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62464
62465 _enter("{%s}", cookie->def->name);
62466
62467- fscache_stat(&fscache_n_invalidates);
62468+ fscache_stat_unchecked(&fscache_n_invalidates);
62469
62470 /* Only permit invalidation of data files. Invalidating an index will
62471 * require the caller to release all its attachments to the tree rooted
62472@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62473 {
62474 struct fscache_object *object;
62475
62476- fscache_stat(&fscache_n_updates);
62477+ fscache_stat_unchecked(&fscache_n_updates);
62478
62479 if (!cookie) {
62480- fscache_stat(&fscache_n_updates_null);
62481+ fscache_stat_unchecked(&fscache_n_updates_null);
62482 _leave(" [no cookie]");
62483 return;
62484 }
62485@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62486 */
62487 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62488 {
62489- fscache_stat(&fscache_n_relinquishes);
62490+ fscache_stat_unchecked(&fscache_n_relinquishes);
62491 if (retire)
62492- fscache_stat(&fscache_n_relinquishes_retire);
62493+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62494
62495 if (!cookie) {
62496- fscache_stat(&fscache_n_relinquishes_null);
62497+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62498 _leave(" [no cookie]");
62499 return;
62500 }
62501@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62502 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62503 goto inconsistent;
62504
62505- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62506+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62507
62508 __fscache_use_cookie(cookie);
62509 if (fscache_submit_op(object, op) < 0)
62510diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62511index 7872a62..d91b19f 100644
62512--- a/fs/fscache/internal.h
62513+++ b/fs/fscache/internal.h
62514@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62515 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62516 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62517 struct fscache_operation *,
62518- atomic_t *,
62519- atomic_t *,
62520+ atomic_unchecked_t *,
62521+ atomic_unchecked_t *,
62522 void (*)(struct fscache_operation *));
62523 extern void fscache_invalidate_writes(struct fscache_cookie *);
62524
62525@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62526 * stats.c
62527 */
62528 #ifdef CONFIG_FSCACHE_STATS
62529-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62530-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62531+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62532+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62533
62534-extern atomic_t fscache_n_op_pend;
62535-extern atomic_t fscache_n_op_run;
62536-extern atomic_t fscache_n_op_enqueue;
62537-extern atomic_t fscache_n_op_deferred_release;
62538-extern atomic_t fscache_n_op_release;
62539-extern atomic_t fscache_n_op_gc;
62540-extern atomic_t fscache_n_op_cancelled;
62541-extern atomic_t fscache_n_op_rejected;
62542+extern atomic_unchecked_t fscache_n_op_pend;
62543+extern atomic_unchecked_t fscache_n_op_run;
62544+extern atomic_unchecked_t fscache_n_op_enqueue;
62545+extern atomic_unchecked_t fscache_n_op_deferred_release;
62546+extern atomic_unchecked_t fscache_n_op_release;
62547+extern atomic_unchecked_t fscache_n_op_gc;
62548+extern atomic_unchecked_t fscache_n_op_cancelled;
62549+extern atomic_unchecked_t fscache_n_op_rejected;
62550
62551-extern atomic_t fscache_n_attr_changed;
62552-extern atomic_t fscache_n_attr_changed_ok;
62553-extern atomic_t fscache_n_attr_changed_nobufs;
62554-extern atomic_t fscache_n_attr_changed_nomem;
62555-extern atomic_t fscache_n_attr_changed_calls;
62556+extern atomic_unchecked_t fscache_n_attr_changed;
62557+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62558+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62559+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62560+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62561
62562-extern atomic_t fscache_n_allocs;
62563-extern atomic_t fscache_n_allocs_ok;
62564-extern atomic_t fscache_n_allocs_wait;
62565-extern atomic_t fscache_n_allocs_nobufs;
62566-extern atomic_t fscache_n_allocs_intr;
62567-extern atomic_t fscache_n_allocs_object_dead;
62568-extern atomic_t fscache_n_alloc_ops;
62569-extern atomic_t fscache_n_alloc_op_waits;
62570+extern atomic_unchecked_t fscache_n_allocs;
62571+extern atomic_unchecked_t fscache_n_allocs_ok;
62572+extern atomic_unchecked_t fscache_n_allocs_wait;
62573+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62574+extern atomic_unchecked_t fscache_n_allocs_intr;
62575+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62576+extern atomic_unchecked_t fscache_n_alloc_ops;
62577+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62578
62579-extern atomic_t fscache_n_retrievals;
62580-extern atomic_t fscache_n_retrievals_ok;
62581-extern atomic_t fscache_n_retrievals_wait;
62582-extern atomic_t fscache_n_retrievals_nodata;
62583-extern atomic_t fscache_n_retrievals_nobufs;
62584-extern atomic_t fscache_n_retrievals_intr;
62585-extern atomic_t fscache_n_retrievals_nomem;
62586-extern atomic_t fscache_n_retrievals_object_dead;
62587-extern atomic_t fscache_n_retrieval_ops;
62588-extern atomic_t fscache_n_retrieval_op_waits;
62589+extern atomic_unchecked_t fscache_n_retrievals;
62590+extern atomic_unchecked_t fscache_n_retrievals_ok;
62591+extern atomic_unchecked_t fscache_n_retrievals_wait;
62592+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62593+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62594+extern atomic_unchecked_t fscache_n_retrievals_intr;
62595+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62596+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62597+extern atomic_unchecked_t fscache_n_retrieval_ops;
62598+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62599
62600-extern atomic_t fscache_n_stores;
62601-extern atomic_t fscache_n_stores_ok;
62602-extern atomic_t fscache_n_stores_again;
62603-extern atomic_t fscache_n_stores_nobufs;
62604-extern atomic_t fscache_n_stores_oom;
62605-extern atomic_t fscache_n_store_ops;
62606-extern atomic_t fscache_n_store_calls;
62607-extern atomic_t fscache_n_store_pages;
62608-extern atomic_t fscache_n_store_radix_deletes;
62609-extern atomic_t fscache_n_store_pages_over_limit;
62610+extern atomic_unchecked_t fscache_n_stores;
62611+extern atomic_unchecked_t fscache_n_stores_ok;
62612+extern atomic_unchecked_t fscache_n_stores_again;
62613+extern atomic_unchecked_t fscache_n_stores_nobufs;
62614+extern atomic_unchecked_t fscache_n_stores_oom;
62615+extern atomic_unchecked_t fscache_n_store_ops;
62616+extern atomic_unchecked_t fscache_n_store_calls;
62617+extern atomic_unchecked_t fscache_n_store_pages;
62618+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62619+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62620
62621-extern atomic_t fscache_n_store_vmscan_not_storing;
62622-extern atomic_t fscache_n_store_vmscan_gone;
62623-extern atomic_t fscache_n_store_vmscan_busy;
62624-extern atomic_t fscache_n_store_vmscan_cancelled;
62625-extern atomic_t fscache_n_store_vmscan_wait;
62626+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62627+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62628+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62629+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62630+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62631
62632-extern atomic_t fscache_n_marks;
62633-extern atomic_t fscache_n_uncaches;
62634+extern atomic_unchecked_t fscache_n_marks;
62635+extern atomic_unchecked_t fscache_n_uncaches;
62636
62637-extern atomic_t fscache_n_acquires;
62638-extern atomic_t fscache_n_acquires_null;
62639-extern atomic_t fscache_n_acquires_no_cache;
62640-extern atomic_t fscache_n_acquires_ok;
62641-extern atomic_t fscache_n_acquires_nobufs;
62642-extern atomic_t fscache_n_acquires_oom;
62643+extern atomic_unchecked_t fscache_n_acquires;
62644+extern atomic_unchecked_t fscache_n_acquires_null;
62645+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62646+extern atomic_unchecked_t fscache_n_acquires_ok;
62647+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62648+extern atomic_unchecked_t fscache_n_acquires_oom;
62649
62650-extern atomic_t fscache_n_invalidates;
62651-extern atomic_t fscache_n_invalidates_run;
62652+extern atomic_unchecked_t fscache_n_invalidates;
62653+extern atomic_unchecked_t fscache_n_invalidates_run;
62654
62655-extern atomic_t fscache_n_updates;
62656-extern atomic_t fscache_n_updates_null;
62657-extern atomic_t fscache_n_updates_run;
62658+extern atomic_unchecked_t fscache_n_updates;
62659+extern atomic_unchecked_t fscache_n_updates_null;
62660+extern atomic_unchecked_t fscache_n_updates_run;
62661
62662-extern atomic_t fscache_n_relinquishes;
62663-extern atomic_t fscache_n_relinquishes_null;
62664-extern atomic_t fscache_n_relinquishes_waitcrt;
62665-extern atomic_t fscache_n_relinquishes_retire;
62666+extern atomic_unchecked_t fscache_n_relinquishes;
62667+extern atomic_unchecked_t fscache_n_relinquishes_null;
62668+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62669+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62670
62671-extern atomic_t fscache_n_cookie_index;
62672-extern atomic_t fscache_n_cookie_data;
62673-extern atomic_t fscache_n_cookie_special;
62674+extern atomic_unchecked_t fscache_n_cookie_index;
62675+extern atomic_unchecked_t fscache_n_cookie_data;
62676+extern atomic_unchecked_t fscache_n_cookie_special;
62677
62678-extern atomic_t fscache_n_object_alloc;
62679-extern atomic_t fscache_n_object_no_alloc;
62680-extern atomic_t fscache_n_object_lookups;
62681-extern atomic_t fscache_n_object_lookups_negative;
62682-extern atomic_t fscache_n_object_lookups_positive;
62683-extern atomic_t fscache_n_object_lookups_timed_out;
62684-extern atomic_t fscache_n_object_created;
62685-extern atomic_t fscache_n_object_avail;
62686-extern atomic_t fscache_n_object_dead;
62687+extern atomic_unchecked_t fscache_n_object_alloc;
62688+extern atomic_unchecked_t fscache_n_object_no_alloc;
62689+extern atomic_unchecked_t fscache_n_object_lookups;
62690+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62691+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62692+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62693+extern atomic_unchecked_t fscache_n_object_created;
62694+extern atomic_unchecked_t fscache_n_object_avail;
62695+extern atomic_unchecked_t fscache_n_object_dead;
62696
62697-extern atomic_t fscache_n_checkaux_none;
62698-extern atomic_t fscache_n_checkaux_okay;
62699-extern atomic_t fscache_n_checkaux_update;
62700-extern atomic_t fscache_n_checkaux_obsolete;
62701+extern atomic_unchecked_t fscache_n_checkaux_none;
62702+extern atomic_unchecked_t fscache_n_checkaux_okay;
62703+extern atomic_unchecked_t fscache_n_checkaux_update;
62704+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62705
62706 extern atomic_t fscache_n_cop_alloc_object;
62707 extern atomic_t fscache_n_cop_lookup_object;
62708@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62709 atomic_inc(stat);
62710 }
62711
62712+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62713+{
62714+ atomic_inc_unchecked(stat);
62715+}
62716+
62717 static inline void fscache_stat_d(atomic_t *stat)
62718 {
62719 atomic_dec(stat);
62720@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62721
62722 #define __fscache_stat(stat) (NULL)
62723 #define fscache_stat(stat) do {} while (0)
62724+#define fscache_stat_unchecked(stat) do {} while (0)
62725 #define fscache_stat_d(stat) do {} while (0)
62726 #endif
62727
62728diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62729index da032da..0076ce7 100644
62730--- a/fs/fscache/object.c
62731+++ b/fs/fscache/object.c
62732@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62733 _debug("LOOKUP \"%s\" in \"%s\"",
62734 cookie->def->name, object->cache->tag->name);
62735
62736- fscache_stat(&fscache_n_object_lookups);
62737+ fscache_stat_unchecked(&fscache_n_object_lookups);
62738 fscache_stat(&fscache_n_cop_lookup_object);
62739 ret = object->cache->ops->lookup_object(object);
62740 fscache_stat_d(&fscache_n_cop_lookup_object);
62741@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62742 if (ret == -ETIMEDOUT) {
62743 /* probably stuck behind another object, so move this one to
62744 * the back of the queue */
62745- fscache_stat(&fscache_n_object_lookups_timed_out);
62746+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62747 _leave(" [timeout]");
62748 return NO_TRANSIT;
62749 }
62750@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62751 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62752
62753 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62754- fscache_stat(&fscache_n_object_lookups_negative);
62755+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62756
62757 /* Allow write requests to begin stacking up and read requests to begin
62758 * returning ENODATA.
62759@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62760 /* if we were still looking up, then we must have a positive lookup
62761 * result, in which case there may be data available */
62762 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62763- fscache_stat(&fscache_n_object_lookups_positive);
62764+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62765
62766 /* We do (presumably) have data */
62767 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62768@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62769 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62770 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62771 } else {
62772- fscache_stat(&fscache_n_object_created);
62773+ fscache_stat_unchecked(&fscache_n_object_created);
62774 }
62775
62776 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62777@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62778 fscache_stat_d(&fscache_n_cop_lookup_complete);
62779
62780 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62781- fscache_stat(&fscache_n_object_avail);
62782+ fscache_stat_unchecked(&fscache_n_object_avail);
62783
62784 _leave("");
62785 return transit_to(JUMPSTART_DEPS);
62786@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62787
62788 /* this just shifts the object release to the work processor */
62789 fscache_put_object(object);
62790- fscache_stat(&fscache_n_object_dead);
62791+ fscache_stat_unchecked(&fscache_n_object_dead);
62792
62793 _leave("");
62794 return transit_to(OBJECT_DEAD);
62795@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62796 enum fscache_checkaux result;
62797
62798 if (!object->cookie->def->check_aux) {
62799- fscache_stat(&fscache_n_checkaux_none);
62800+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62801 return FSCACHE_CHECKAUX_OKAY;
62802 }
62803
62804@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62805 switch (result) {
62806 /* entry okay as is */
62807 case FSCACHE_CHECKAUX_OKAY:
62808- fscache_stat(&fscache_n_checkaux_okay);
62809+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62810 break;
62811
62812 /* entry requires update */
62813 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62814- fscache_stat(&fscache_n_checkaux_update);
62815+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62816 break;
62817
62818 /* entry requires deletion */
62819 case FSCACHE_CHECKAUX_OBSOLETE:
62820- fscache_stat(&fscache_n_checkaux_obsolete);
62821+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62822 break;
62823
62824 default:
62825@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62826 {
62827 const struct fscache_state *s;
62828
62829- fscache_stat(&fscache_n_invalidates_run);
62830+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62831 fscache_stat(&fscache_n_cop_invalidate_object);
62832 s = _fscache_invalidate_object(object, event);
62833 fscache_stat_d(&fscache_n_cop_invalidate_object);
62834@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62835 {
62836 _enter("{OBJ%x},%d", object->debug_id, event);
62837
62838- fscache_stat(&fscache_n_updates_run);
62839+ fscache_stat_unchecked(&fscache_n_updates_run);
62840 fscache_stat(&fscache_n_cop_update_object);
62841 object->cache->ops->update_object(object);
62842 fscache_stat_d(&fscache_n_cop_update_object);
62843diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62844index e7b87a0..a85d47a 100644
62845--- a/fs/fscache/operation.c
62846+++ b/fs/fscache/operation.c
62847@@ -17,7 +17,7 @@
62848 #include <linux/slab.h>
62849 #include "internal.h"
62850
62851-atomic_t fscache_op_debug_id;
62852+atomic_unchecked_t fscache_op_debug_id;
62853 EXPORT_SYMBOL(fscache_op_debug_id);
62854
62855 /**
62856@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62857 ASSERTCMP(atomic_read(&op->usage), >, 0);
62858 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62859
62860- fscache_stat(&fscache_n_op_enqueue);
62861+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62862 switch (op->flags & FSCACHE_OP_TYPE) {
62863 case FSCACHE_OP_ASYNC:
62864 _debug("queue async");
62865@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62866 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62867 if (op->processor)
62868 fscache_enqueue_operation(op);
62869- fscache_stat(&fscache_n_op_run);
62870+ fscache_stat_unchecked(&fscache_n_op_run);
62871 }
62872
62873 /*
62874@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62875 if (object->n_in_progress > 0) {
62876 atomic_inc(&op->usage);
62877 list_add_tail(&op->pend_link, &object->pending_ops);
62878- fscache_stat(&fscache_n_op_pend);
62879+ fscache_stat_unchecked(&fscache_n_op_pend);
62880 } else if (!list_empty(&object->pending_ops)) {
62881 atomic_inc(&op->usage);
62882 list_add_tail(&op->pend_link, &object->pending_ops);
62883- fscache_stat(&fscache_n_op_pend);
62884+ fscache_stat_unchecked(&fscache_n_op_pend);
62885 fscache_start_operations(object);
62886 } else {
62887 ASSERTCMP(object->n_in_progress, ==, 0);
62888@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62889 object->n_exclusive++; /* reads and writes must wait */
62890 atomic_inc(&op->usage);
62891 list_add_tail(&op->pend_link, &object->pending_ops);
62892- fscache_stat(&fscache_n_op_pend);
62893+ fscache_stat_unchecked(&fscache_n_op_pend);
62894 ret = 0;
62895 } else {
62896 /* If we're in any other state, there must have been an I/O
62897@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62898 if (object->n_exclusive > 0) {
62899 atomic_inc(&op->usage);
62900 list_add_tail(&op->pend_link, &object->pending_ops);
62901- fscache_stat(&fscache_n_op_pend);
62902+ fscache_stat_unchecked(&fscache_n_op_pend);
62903 } else if (!list_empty(&object->pending_ops)) {
62904 atomic_inc(&op->usage);
62905 list_add_tail(&op->pend_link, &object->pending_ops);
62906- fscache_stat(&fscache_n_op_pend);
62907+ fscache_stat_unchecked(&fscache_n_op_pend);
62908 fscache_start_operations(object);
62909 } else {
62910 ASSERTCMP(object->n_exclusive, ==, 0);
62911@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
62912 object->n_ops++;
62913 atomic_inc(&op->usage);
62914 list_add_tail(&op->pend_link, &object->pending_ops);
62915- fscache_stat(&fscache_n_op_pend);
62916+ fscache_stat_unchecked(&fscache_n_op_pend);
62917 ret = 0;
62918 } else if (fscache_object_is_dying(object)) {
62919- fscache_stat(&fscache_n_op_rejected);
62920+ fscache_stat_unchecked(&fscache_n_op_rejected);
62921 op->state = FSCACHE_OP_ST_CANCELLED;
62922 ret = -ENOBUFS;
62923 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
62924@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
62925 ret = -EBUSY;
62926 if (op->state == FSCACHE_OP_ST_PENDING) {
62927 ASSERT(!list_empty(&op->pend_link));
62928- fscache_stat(&fscache_n_op_cancelled);
62929+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62930 list_del_init(&op->pend_link);
62931 if (do_cancel)
62932 do_cancel(op);
62933@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
62934 while (!list_empty(&object->pending_ops)) {
62935 op = list_entry(object->pending_ops.next,
62936 struct fscache_operation, pend_link);
62937- fscache_stat(&fscache_n_op_cancelled);
62938+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62939 list_del_init(&op->pend_link);
62940
62941 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
62942@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
62943 op->state, ==, FSCACHE_OP_ST_CANCELLED);
62944 op->state = FSCACHE_OP_ST_DEAD;
62945
62946- fscache_stat(&fscache_n_op_release);
62947+ fscache_stat_unchecked(&fscache_n_op_release);
62948
62949 if (op->release) {
62950 op->release(op);
62951@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
62952 * lock, and defer it otherwise */
62953 if (!spin_trylock(&object->lock)) {
62954 _debug("defer put");
62955- fscache_stat(&fscache_n_op_deferred_release);
62956+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
62957
62958 cache = object->cache;
62959 spin_lock(&cache->op_gc_list_lock);
62960@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
62961
62962 _debug("GC DEFERRED REL OBJ%x OP%x",
62963 object->debug_id, op->debug_id);
62964- fscache_stat(&fscache_n_op_gc);
62965+ fscache_stat_unchecked(&fscache_n_op_gc);
62966
62967 ASSERTCMP(atomic_read(&op->usage), ==, 0);
62968 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
62969diff --git a/fs/fscache/page.c b/fs/fscache/page.c
62970index de33b3f..8be4d29 100644
62971--- a/fs/fscache/page.c
62972+++ b/fs/fscache/page.c
62973@@ -74,7 +74,7 @@ try_again:
62974 val = radix_tree_lookup(&cookie->stores, page->index);
62975 if (!val) {
62976 rcu_read_unlock();
62977- fscache_stat(&fscache_n_store_vmscan_not_storing);
62978+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
62979 __fscache_uncache_page(cookie, page);
62980 return true;
62981 }
62982@@ -104,11 +104,11 @@ try_again:
62983 spin_unlock(&cookie->stores_lock);
62984
62985 if (xpage) {
62986- fscache_stat(&fscache_n_store_vmscan_cancelled);
62987- fscache_stat(&fscache_n_store_radix_deletes);
62988+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
62989+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62990 ASSERTCMP(xpage, ==, page);
62991 } else {
62992- fscache_stat(&fscache_n_store_vmscan_gone);
62993+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
62994 }
62995
62996 wake_up_bit(&cookie->flags, 0);
62997@@ -123,11 +123,11 @@ page_busy:
62998 * sleeping on memory allocation, so we may need to impose a timeout
62999 * too. */
63000 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
63001- fscache_stat(&fscache_n_store_vmscan_busy);
63002+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
63003 return false;
63004 }
63005
63006- fscache_stat(&fscache_n_store_vmscan_wait);
63007+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
63008 if (!release_page_wait_timeout(cookie, page))
63009 _debug("fscache writeout timeout page: %p{%lx}",
63010 page, page->index);
63011@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
63012 FSCACHE_COOKIE_STORING_TAG);
63013 if (!radix_tree_tag_get(&cookie->stores, page->index,
63014 FSCACHE_COOKIE_PENDING_TAG)) {
63015- fscache_stat(&fscache_n_store_radix_deletes);
63016+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63017 xpage = radix_tree_delete(&cookie->stores, page->index);
63018 }
63019 spin_unlock(&cookie->stores_lock);
63020@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
63021
63022 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
63023
63024- fscache_stat(&fscache_n_attr_changed_calls);
63025+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
63026
63027 if (fscache_object_is_active(object)) {
63028 fscache_stat(&fscache_n_cop_attr_changed);
63029@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63030
63031 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63032
63033- fscache_stat(&fscache_n_attr_changed);
63034+ fscache_stat_unchecked(&fscache_n_attr_changed);
63035
63036 op = kzalloc(sizeof(*op), GFP_KERNEL);
63037 if (!op) {
63038- fscache_stat(&fscache_n_attr_changed_nomem);
63039+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
63040 _leave(" = -ENOMEM");
63041 return -ENOMEM;
63042 }
63043@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63044 if (fscache_submit_exclusive_op(object, op) < 0)
63045 goto nobufs_dec;
63046 spin_unlock(&cookie->lock);
63047- fscache_stat(&fscache_n_attr_changed_ok);
63048+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
63049 fscache_put_operation(op);
63050 _leave(" = 0");
63051 return 0;
63052@@ -242,7 +242,7 @@ nobufs:
63053 kfree(op);
63054 if (wake_cookie)
63055 __fscache_wake_unused_cookie(cookie);
63056- fscache_stat(&fscache_n_attr_changed_nobufs);
63057+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
63058 _leave(" = %d", -ENOBUFS);
63059 return -ENOBUFS;
63060 }
63061@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
63062 /* allocate a retrieval operation and attempt to submit it */
63063 op = kzalloc(sizeof(*op), GFP_NOIO);
63064 if (!op) {
63065- fscache_stat(&fscache_n_retrievals_nomem);
63066+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63067 return NULL;
63068 }
63069
63070@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
63071 return 0;
63072 }
63073
63074- fscache_stat(&fscache_n_retrievals_wait);
63075+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
63076
63077 jif = jiffies;
63078 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
63079 TASK_INTERRUPTIBLE) != 0) {
63080- fscache_stat(&fscache_n_retrievals_intr);
63081+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63082 _leave(" = -ERESTARTSYS");
63083 return -ERESTARTSYS;
63084 }
63085@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
63086 */
63087 int fscache_wait_for_operation_activation(struct fscache_object *object,
63088 struct fscache_operation *op,
63089- atomic_t *stat_op_waits,
63090- atomic_t *stat_object_dead,
63091+ atomic_unchecked_t *stat_op_waits,
63092+ atomic_unchecked_t *stat_object_dead,
63093 void (*do_cancel)(struct fscache_operation *))
63094 {
63095 int ret;
63096@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63097
63098 _debug(">>> WT");
63099 if (stat_op_waits)
63100- fscache_stat(stat_op_waits);
63101+ fscache_stat_unchecked(stat_op_waits);
63102 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
63103 TASK_INTERRUPTIBLE) != 0) {
63104 ret = fscache_cancel_op(op, do_cancel);
63105@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63106 check_if_dead:
63107 if (op->state == FSCACHE_OP_ST_CANCELLED) {
63108 if (stat_object_dead)
63109- fscache_stat(stat_object_dead);
63110+ fscache_stat_unchecked(stat_object_dead);
63111 _leave(" = -ENOBUFS [cancelled]");
63112 return -ENOBUFS;
63113 }
63114@@ -381,7 +381,7 @@ check_if_dead:
63115 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
63116 fscache_cancel_op(op, do_cancel);
63117 if (stat_object_dead)
63118- fscache_stat(stat_object_dead);
63119+ fscache_stat_unchecked(stat_object_dead);
63120 return -ENOBUFS;
63121 }
63122 return 0;
63123@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63124
63125 _enter("%p,%p,,,", cookie, page);
63126
63127- fscache_stat(&fscache_n_retrievals);
63128+ fscache_stat_unchecked(&fscache_n_retrievals);
63129
63130 if (hlist_empty(&cookie->backing_objects))
63131 goto nobufs;
63132@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63133 goto nobufs_unlock_dec;
63134 spin_unlock(&cookie->lock);
63135
63136- fscache_stat(&fscache_n_retrieval_ops);
63137+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63138
63139 /* pin the netfs read context in case we need to do the actual netfs
63140 * read because we've encountered a cache read failure */
63141@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63142
63143 error:
63144 if (ret == -ENOMEM)
63145- fscache_stat(&fscache_n_retrievals_nomem);
63146+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63147 else if (ret == -ERESTARTSYS)
63148- fscache_stat(&fscache_n_retrievals_intr);
63149+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63150 else if (ret == -ENODATA)
63151- fscache_stat(&fscache_n_retrievals_nodata);
63152+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63153 else if (ret < 0)
63154- fscache_stat(&fscache_n_retrievals_nobufs);
63155+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63156 else
63157- fscache_stat(&fscache_n_retrievals_ok);
63158+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63159
63160 fscache_put_retrieval(op);
63161 _leave(" = %d", ret);
63162@@ -505,7 +505,7 @@ nobufs_unlock:
63163 __fscache_wake_unused_cookie(cookie);
63164 kfree(op);
63165 nobufs:
63166- fscache_stat(&fscache_n_retrievals_nobufs);
63167+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63168 _leave(" = -ENOBUFS");
63169 return -ENOBUFS;
63170 }
63171@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63172
63173 _enter("%p,,%d,,,", cookie, *nr_pages);
63174
63175- fscache_stat(&fscache_n_retrievals);
63176+ fscache_stat_unchecked(&fscache_n_retrievals);
63177
63178 if (hlist_empty(&cookie->backing_objects))
63179 goto nobufs;
63180@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63181 goto nobufs_unlock_dec;
63182 spin_unlock(&cookie->lock);
63183
63184- fscache_stat(&fscache_n_retrieval_ops);
63185+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63186
63187 /* pin the netfs read context in case we need to do the actual netfs
63188 * read because we've encountered a cache read failure */
63189@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63190
63191 error:
63192 if (ret == -ENOMEM)
63193- fscache_stat(&fscache_n_retrievals_nomem);
63194+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63195 else if (ret == -ERESTARTSYS)
63196- fscache_stat(&fscache_n_retrievals_intr);
63197+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63198 else if (ret == -ENODATA)
63199- fscache_stat(&fscache_n_retrievals_nodata);
63200+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63201 else if (ret < 0)
63202- fscache_stat(&fscache_n_retrievals_nobufs);
63203+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63204 else
63205- fscache_stat(&fscache_n_retrievals_ok);
63206+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63207
63208 fscache_put_retrieval(op);
63209 _leave(" = %d", ret);
63210@@ -636,7 +636,7 @@ nobufs_unlock:
63211 if (wake_cookie)
63212 __fscache_wake_unused_cookie(cookie);
63213 nobufs:
63214- fscache_stat(&fscache_n_retrievals_nobufs);
63215+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63216 _leave(" = -ENOBUFS");
63217 return -ENOBUFS;
63218 }
63219@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63220
63221 _enter("%p,%p,,,", cookie, page);
63222
63223- fscache_stat(&fscache_n_allocs);
63224+ fscache_stat_unchecked(&fscache_n_allocs);
63225
63226 if (hlist_empty(&cookie->backing_objects))
63227 goto nobufs;
63228@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63229 goto nobufs_unlock_dec;
63230 spin_unlock(&cookie->lock);
63231
63232- fscache_stat(&fscache_n_alloc_ops);
63233+ fscache_stat_unchecked(&fscache_n_alloc_ops);
63234
63235 ret = fscache_wait_for_operation_activation(
63236 object, &op->op,
63237@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63238
63239 error:
63240 if (ret == -ERESTARTSYS)
63241- fscache_stat(&fscache_n_allocs_intr);
63242+ fscache_stat_unchecked(&fscache_n_allocs_intr);
63243 else if (ret < 0)
63244- fscache_stat(&fscache_n_allocs_nobufs);
63245+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63246 else
63247- fscache_stat(&fscache_n_allocs_ok);
63248+ fscache_stat_unchecked(&fscache_n_allocs_ok);
63249
63250 fscache_put_retrieval(op);
63251 _leave(" = %d", ret);
63252@@ -730,7 +730,7 @@ nobufs_unlock:
63253 if (wake_cookie)
63254 __fscache_wake_unused_cookie(cookie);
63255 nobufs:
63256- fscache_stat(&fscache_n_allocs_nobufs);
63257+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63258 _leave(" = -ENOBUFS");
63259 return -ENOBUFS;
63260 }
63261@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63262
63263 spin_lock(&cookie->stores_lock);
63264
63265- fscache_stat(&fscache_n_store_calls);
63266+ fscache_stat_unchecked(&fscache_n_store_calls);
63267
63268 /* find a page to store */
63269 page = NULL;
63270@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63271 page = results[0];
63272 _debug("gang %d [%lx]", n, page->index);
63273 if (page->index > op->store_limit) {
63274- fscache_stat(&fscache_n_store_pages_over_limit);
63275+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
63276 goto superseded;
63277 }
63278
63279@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63280 spin_unlock(&cookie->stores_lock);
63281 spin_unlock(&object->lock);
63282
63283- fscache_stat(&fscache_n_store_pages);
63284+ fscache_stat_unchecked(&fscache_n_store_pages);
63285 fscache_stat(&fscache_n_cop_write_page);
63286 ret = object->cache->ops->write_page(op, page);
63287 fscache_stat_d(&fscache_n_cop_write_page);
63288@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63289 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63290 ASSERT(PageFsCache(page));
63291
63292- fscache_stat(&fscache_n_stores);
63293+ fscache_stat_unchecked(&fscache_n_stores);
63294
63295 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
63296 _leave(" = -ENOBUFS [invalidating]");
63297@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63298 spin_unlock(&cookie->stores_lock);
63299 spin_unlock(&object->lock);
63300
63301- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
63302+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63303 op->store_limit = object->store_limit;
63304
63305 __fscache_use_cookie(cookie);
63306@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63307
63308 spin_unlock(&cookie->lock);
63309 radix_tree_preload_end();
63310- fscache_stat(&fscache_n_store_ops);
63311- fscache_stat(&fscache_n_stores_ok);
63312+ fscache_stat_unchecked(&fscache_n_store_ops);
63313+ fscache_stat_unchecked(&fscache_n_stores_ok);
63314
63315 /* the work queue now carries its own ref on the object */
63316 fscache_put_operation(&op->op);
63317@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63318 return 0;
63319
63320 already_queued:
63321- fscache_stat(&fscache_n_stores_again);
63322+ fscache_stat_unchecked(&fscache_n_stores_again);
63323 already_pending:
63324 spin_unlock(&cookie->stores_lock);
63325 spin_unlock(&object->lock);
63326 spin_unlock(&cookie->lock);
63327 radix_tree_preload_end();
63328 kfree(op);
63329- fscache_stat(&fscache_n_stores_ok);
63330+ fscache_stat_unchecked(&fscache_n_stores_ok);
63331 _leave(" = 0");
63332 return 0;
63333
63334@@ -1039,14 +1039,14 @@ nobufs:
63335 kfree(op);
63336 if (wake_cookie)
63337 __fscache_wake_unused_cookie(cookie);
63338- fscache_stat(&fscache_n_stores_nobufs);
63339+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
63340 _leave(" = -ENOBUFS");
63341 return -ENOBUFS;
63342
63343 nomem_free:
63344 kfree(op);
63345 nomem:
63346- fscache_stat(&fscache_n_stores_oom);
63347+ fscache_stat_unchecked(&fscache_n_stores_oom);
63348 _leave(" = -ENOMEM");
63349 return -ENOMEM;
63350 }
63351@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
63352 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63353 ASSERTCMP(page, !=, NULL);
63354
63355- fscache_stat(&fscache_n_uncaches);
63356+ fscache_stat_unchecked(&fscache_n_uncaches);
63357
63358 /* cache withdrawal may beat us to it */
63359 if (!PageFsCache(page))
63360@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
63361 struct fscache_cookie *cookie = op->op.object->cookie;
63362
63363 #ifdef CONFIG_FSCACHE_STATS
63364- atomic_inc(&fscache_n_marks);
63365+ atomic_inc_unchecked(&fscache_n_marks);
63366 #endif
63367
63368 _debug("- mark %p{%lx}", page, page->index);
63369diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
63370index 40d13c7..ddf52b9 100644
63371--- a/fs/fscache/stats.c
63372+++ b/fs/fscache/stats.c
63373@@ -18,99 +18,99 @@
63374 /*
63375 * operation counters
63376 */
63377-atomic_t fscache_n_op_pend;
63378-atomic_t fscache_n_op_run;
63379-atomic_t fscache_n_op_enqueue;
63380-atomic_t fscache_n_op_requeue;
63381-atomic_t fscache_n_op_deferred_release;
63382-atomic_t fscache_n_op_release;
63383-atomic_t fscache_n_op_gc;
63384-atomic_t fscache_n_op_cancelled;
63385-atomic_t fscache_n_op_rejected;
63386+atomic_unchecked_t fscache_n_op_pend;
63387+atomic_unchecked_t fscache_n_op_run;
63388+atomic_unchecked_t fscache_n_op_enqueue;
63389+atomic_unchecked_t fscache_n_op_requeue;
63390+atomic_unchecked_t fscache_n_op_deferred_release;
63391+atomic_unchecked_t fscache_n_op_release;
63392+atomic_unchecked_t fscache_n_op_gc;
63393+atomic_unchecked_t fscache_n_op_cancelled;
63394+atomic_unchecked_t fscache_n_op_rejected;
63395
63396-atomic_t fscache_n_attr_changed;
63397-atomic_t fscache_n_attr_changed_ok;
63398-atomic_t fscache_n_attr_changed_nobufs;
63399-atomic_t fscache_n_attr_changed_nomem;
63400-atomic_t fscache_n_attr_changed_calls;
63401+atomic_unchecked_t fscache_n_attr_changed;
63402+atomic_unchecked_t fscache_n_attr_changed_ok;
63403+atomic_unchecked_t fscache_n_attr_changed_nobufs;
63404+atomic_unchecked_t fscache_n_attr_changed_nomem;
63405+atomic_unchecked_t fscache_n_attr_changed_calls;
63406
63407-atomic_t fscache_n_allocs;
63408-atomic_t fscache_n_allocs_ok;
63409-atomic_t fscache_n_allocs_wait;
63410-atomic_t fscache_n_allocs_nobufs;
63411-atomic_t fscache_n_allocs_intr;
63412-atomic_t fscache_n_allocs_object_dead;
63413-atomic_t fscache_n_alloc_ops;
63414-atomic_t fscache_n_alloc_op_waits;
63415+atomic_unchecked_t fscache_n_allocs;
63416+atomic_unchecked_t fscache_n_allocs_ok;
63417+atomic_unchecked_t fscache_n_allocs_wait;
63418+atomic_unchecked_t fscache_n_allocs_nobufs;
63419+atomic_unchecked_t fscache_n_allocs_intr;
63420+atomic_unchecked_t fscache_n_allocs_object_dead;
63421+atomic_unchecked_t fscache_n_alloc_ops;
63422+atomic_unchecked_t fscache_n_alloc_op_waits;
63423
63424-atomic_t fscache_n_retrievals;
63425-atomic_t fscache_n_retrievals_ok;
63426-atomic_t fscache_n_retrievals_wait;
63427-atomic_t fscache_n_retrievals_nodata;
63428-atomic_t fscache_n_retrievals_nobufs;
63429-atomic_t fscache_n_retrievals_intr;
63430-atomic_t fscache_n_retrievals_nomem;
63431-atomic_t fscache_n_retrievals_object_dead;
63432-atomic_t fscache_n_retrieval_ops;
63433-atomic_t fscache_n_retrieval_op_waits;
63434+atomic_unchecked_t fscache_n_retrievals;
63435+atomic_unchecked_t fscache_n_retrievals_ok;
63436+atomic_unchecked_t fscache_n_retrievals_wait;
63437+atomic_unchecked_t fscache_n_retrievals_nodata;
63438+atomic_unchecked_t fscache_n_retrievals_nobufs;
63439+atomic_unchecked_t fscache_n_retrievals_intr;
63440+atomic_unchecked_t fscache_n_retrievals_nomem;
63441+atomic_unchecked_t fscache_n_retrievals_object_dead;
63442+atomic_unchecked_t fscache_n_retrieval_ops;
63443+atomic_unchecked_t fscache_n_retrieval_op_waits;
63444
63445-atomic_t fscache_n_stores;
63446-atomic_t fscache_n_stores_ok;
63447-atomic_t fscache_n_stores_again;
63448-atomic_t fscache_n_stores_nobufs;
63449-atomic_t fscache_n_stores_oom;
63450-atomic_t fscache_n_store_ops;
63451-atomic_t fscache_n_store_calls;
63452-atomic_t fscache_n_store_pages;
63453-atomic_t fscache_n_store_radix_deletes;
63454-atomic_t fscache_n_store_pages_over_limit;
63455+atomic_unchecked_t fscache_n_stores;
63456+atomic_unchecked_t fscache_n_stores_ok;
63457+atomic_unchecked_t fscache_n_stores_again;
63458+atomic_unchecked_t fscache_n_stores_nobufs;
63459+atomic_unchecked_t fscache_n_stores_oom;
63460+atomic_unchecked_t fscache_n_store_ops;
63461+atomic_unchecked_t fscache_n_store_calls;
63462+atomic_unchecked_t fscache_n_store_pages;
63463+atomic_unchecked_t fscache_n_store_radix_deletes;
63464+atomic_unchecked_t fscache_n_store_pages_over_limit;
63465
63466-atomic_t fscache_n_store_vmscan_not_storing;
63467-atomic_t fscache_n_store_vmscan_gone;
63468-atomic_t fscache_n_store_vmscan_busy;
63469-atomic_t fscache_n_store_vmscan_cancelled;
63470-atomic_t fscache_n_store_vmscan_wait;
63471+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63472+atomic_unchecked_t fscache_n_store_vmscan_gone;
63473+atomic_unchecked_t fscache_n_store_vmscan_busy;
63474+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63475+atomic_unchecked_t fscache_n_store_vmscan_wait;
63476
63477-atomic_t fscache_n_marks;
63478-atomic_t fscache_n_uncaches;
63479+atomic_unchecked_t fscache_n_marks;
63480+atomic_unchecked_t fscache_n_uncaches;
63481
63482-atomic_t fscache_n_acquires;
63483-atomic_t fscache_n_acquires_null;
63484-atomic_t fscache_n_acquires_no_cache;
63485-atomic_t fscache_n_acquires_ok;
63486-atomic_t fscache_n_acquires_nobufs;
63487-atomic_t fscache_n_acquires_oom;
63488+atomic_unchecked_t fscache_n_acquires;
63489+atomic_unchecked_t fscache_n_acquires_null;
63490+atomic_unchecked_t fscache_n_acquires_no_cache;
63491+atomic_unchecked_t fscache_n_acquires_ok;
63492+atomic_unchecked_t fscache_n_acquires_nobufs;
63493+atomic_unchecked_t fscache_n_acquires_oom;
63494
63495-atomic_t fscache_n_invalidates;
63496-atomic_t fscache_n_invalidates_run;
63497+atomic_unchecked_t fscache_n_invalidates;
63498+atomic_unchecked_t fscache_n_invalidates_run;
63499
63500-atomic_t fscache_n_updates;
63501-atomic_t fscache_n_updates_null;
63502-atomic_t fscache_n_updates_run;
63503+atomic_unchecked_t fscache_n_updates;
63504+atomic_unchecked_t fscache_n_updates_null;
63505+atomic_unchecked_t fscache_n_updates_run;
63506
63507-atomic_t fscache_n_relinquishes;
63508-atomic_t fscache_n_relinquishes_null;
63509-atomic_t fscache_n_relinquishes_waitcrt;
63510-atomic_t fscache_n_relinquishes_retire;
63511+atomic_unchecked_t fscache_n_relinquishes;
63512+atomic_unchecked_t fscache_n_relinquishes_null;
63513+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63514+atomic_unchecked_t fscache_n_relinquishes_retire;
63515
63516-atomic_t fscache_n_cookie_index;
63517-atomic_t fscache_n_cookie_data;
63518-atomic_t fscache_n_cookie_special;
63519+atomic_unchecked_t fscache_n_cookie_index;
63520+atomic_unchecked_t fscache_n_cookie_data;
63521+atomic_unchecked_t fscache_n_cookie_special;
63522
63523-atomic_t fscache_n_object_alloc;
63524-atomic_t fscache_n_object_no_alloc;
63525-atomic_t fscache_n_object_lookups;
63526-atomic_t fscache_n_object_lookups_negative;
63527-atomic_t fscache_n_object_lookups_positive;
63528-atomic_t fscache_n_object_lookups_timed_out;
63529-atomic_t fscache_n_object_created;
63530-atomic_t fscache_n_object_avail;
63531-atomic_t fscache_n_object_dead;
63532+atomic_unchecked_t fscache_n_object_alloc;
63533+atomic_unchecked_t fscache_n_object_no_alloc;
63534+atomic_unchecked_t fscache_n_object_lookups;
63535+atomic_unchecked_t fscache_n_object_lookups_negative;
63536+atomic_unchecked_t fscache_n_object_lookups_positive;
63537+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63538+atomic_unchecked_t fscache_n_object_created;
63539+atomic_unchecked_t fscache_n_object_avail;
63540+atomic_unchecked_t fscache_n_object_dead;
63541
63542-atomic_t fscache_n_checkaux_none;
63543-atomic_t fscache_n_checkaux_okay;
63544-atomic_t fscache_n_checkaux_update;
63545-atomic_t fscache_n_checkaux_obsolete;
63546+atomic_unchecked_t fscache_n_checkaux_none;
63547+atomic_unchecked_t fscache_n_checkaux_okay;
63548+atomic_unchecked_t fscache_n_checkaux_update;
63549+atomic_unchecked_t fscache_n_checkaux_obsolete;
63550
63551 atomic_t fscache_n_cop_alloc_object;
63552 atomic_t fscache_n_cop_lookup_object;
63553@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63554 seq_puts(m, "FS-Cache statistics\n");
63555
63556 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63557- atomic_read(&fscache_n_cookie_index),
63558- atomic_read(&fscache_n_cookie_data),
63559- atomic_read(&fscache_n_cookie_special));
63560+ atomic_read_unchecked(&fscache_n_cookie_index),
63561+ atomic_read_unchecked(&fscache_n_cookie_data),
63562+ atomic_read_unchecked(&fscache_n_cookie_special));
63563
63564 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63565- atomic_read(&fscache_n_object_alloc),
63566- atomic_read(&fscache_n_object_no_alloc),
63567- atomic_read(&fscache_n_object_avail),
63568- atomic_read(&fscache_n_object_dead));
63569+ atomic_read_unchecked(&fscache_n_object_alloc),
63570+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63571+ atomic_read_unchecked(&fscache_n_object_avail),
63572+ atomic_read_unchecked(&fscache_n_object_dead));
63573 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63574- atomic_read(&fscache_n_checkaux_none),
63575- atomic_read(&fscache_n_checkaux_okay),
63576- atomic_read(&fscache_n_checkaux_update),
63577- atomic_read(&fscache_n_checkaux_obsolete));
63578+ atomic_read_unchecked(&fscache_n_checkaux_none),
63579+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63580+ atomic_read_unchecked(&fscache_n_checkaux_update),
63581+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63582
63583 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63584- atomic_read(&fscache_n_marks),
63585- atomic_read(&fscache_n_uncaches));
63586+ atomic_read_unchecked(&fscache_n_marks),
63587+ atomic_read_unchecked(&fscache_n_uncaches));
63588
63589 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63590 " oom=%u\n",
63591- atomic_read(&fscache_n_acquires),
63592- atomic_read(&fscache_n_acquires_null),
63593- atomic_read(&fscache_n_acquires_no_cache),
63594- atomic_read(&fscache_n_acquires_ok),
63595- atomic_read(&fscache_n_acquires_nobufs),
63596- atomic_read(&fscache_n_acquires_oom));
63597+ atomic_read_unchecked(&fscache_n_acquires),
63598+ atomic_read_unchecked(&fscache_n_acquires_null),
63599+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63600+ atomic_read_unchecked(&fscache_n_acquires_ok),
63601+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63602+ atomic_read_unchecked(&fscache_n_acquires_oom));
63603
63604 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63605- atomic_read(&fscache_n_object_lookups),
63606- atomic_read(&fscache_n_object_lookups_negative),
63607- atomic_read(&fscache_n_object_lookups_positive),
63608- atomic_read(&fscache_n_object_created),
63609- atomic_read(&fscache_n_object_lookups_timed_out));
63610+ atomic_read_unchecked(&fscache_n_object_lookups),
63611+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63612+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63613+ atomic_read_unchecked(&fscache_n_object_created),
63614+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63615
63616 seq_printf(m, "Invals : n=%u run=%u\n",
63617- atomic_read(&fscache_n_invalidates),
63618- atomic_read(&fscache_n_invalidates_run));
63619+ atomic_read_unchecked(&fscache_n_invalidates),
63620+ atomic_read_unchecked(&fscache_n_invalidates_run));
63621
63622 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63623- atomic_read(&fscache_n_updates),
63624- atomic_read(&fscache_n_updates_null),
63625- atomic_read(&fscache_n_updates_run));
63626+ atomic_read_unchecked(&fscache_n_updates),
63627+ atomic_read_unchecked(&fscache_n_updates_null),
63628+ atomic_read_unchecked(&fscache_n_updates_run));
63629
63630 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63631- atomic_read(&fscache_n_relinquishes),
63632- atomic_read(&fscache_n_relinquishes_null),
63633- atomic_read(&fscache_n_relinquishes_waitcrt),
63634- atomic_read(&fscache_n_relinquishes_retire));
63635+ atomic_read_unchecked(&fscache_n_relinquishes),
63636+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63637+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63638+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63639
63640 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63641- atomic_read(&fscache_n_attr_changed),
63642- atomic_read(&fscache_n_attr_changed_ok),
63643- atomic_read(&fscache_n_attr_changed_nobufs),
63644- atomic_read(&fscache_n_attr_changed_nomem),
63645- atomic_read(&fscache_n_attr_changed_calls));
63646+ atomic_read_unchecked(&fscache_n_attr_changed),
63647+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63648+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63649+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63650+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63651
63652 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63653- atomic_read(&fscache_n_allocs),
63654- atomic_read(&fscache_n_allocs_ok),
63655- atomic_read(&fscache_n_allocs_wait),
63656- atomic_read(&fscache_n_allocs_nobufs),
63657- atomic_read(&fscache_n_allocs_intr));
63658+ atomic_read_unchecked(&fscache_n_allocs),
63659+ atomic_read_unchecked(&fscache_n_allocs_ok),
63660+ atomic_read_unchecked(&fscache_n_allocs_wait),
63661+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63662+ atomic_read_unchecked(&fscache_n_allocs_intr));
63663 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63664- atomic_read(&fscache_n_alloc_ops),
63665- atomic_read(&fscache_n_alloc_op_waits),
63666- atomic_read(&fscache_n_allocs_object_dead));
63667+ atomic_read_unchecked(&fscache_n_alloc_ops),
63668+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63669+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63670
63671 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63672 " int=%u oom=%u\n",
63673- atomic_read(&fscache_n_retrievals),
63674- atomic_read(&fscache_n_retrievals_ok),
63675- atomic_read(&fscache_n_retrievals_wait),
63676- atomic_read(&fscache_n_retrievals_nodata),
63677- atomic_read(&fscache_n_retrievals_nobufs),
63678- atomic_read(&fscache_n_retrievals_intr),
63679- atomic_read(&fscache_n_retrievals_nomem));
63680+ atomic_read_unchecked(&fscache_n_retrievals),
63681+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63682+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63683+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63684+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63685+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63686+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63687 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63688- atomic_read(&fscache_n_retrieval_ops),
63689- atomic_read(&fscache_n_retrieval_op_waits),
63690- atomic_read(&fscache_n_retrievals_object_dead));
63691+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63692+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63693+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63694
63695 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63696- atomic_read(&fscache_n_stores),
63697- atomic_read(&fscache_n_stores_ok),
63698- atomic_read(&fscache_n_stores_again),
63699- atomic_read(&fscache_n_stores_nobufs),
63700- atomic_read(&fscache_n_stores_oom));
63701+ atomic_read_unchecked(&fscache_n_stores),
63702+ atomic_read_unchecked(&fscache_n_stores_ok),
63703+ atomic_read_unchecked(&fscache_n_stores_again),
63704+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63705+ atomic_read_unchecked(&fscache_n_stores_oom));
63706 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63707- atomic_read(&fscache_n_store_ops),
63708- atomic_read(&fscache_n_store_calls),
63709- atomic_read(&fscache_n_store_pages),
63710- atomic_read(&fscache_n_store_radix_deletes),
63711- atomic_read(&fscache_n_store_pages_over_limit));
63712+ atomic_read_unchecked(&fscache_n_store_ops),
63713+ atomic_read_unchecked(&fscache_n_store_calls),
63714+ atomic_read_unchecked(&fscache_n_store_pages),
63715+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63716+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63717
63718 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63719- atomic_read(&fscache_n_store_vmscan_not_storing),
63720- atomic_read(&fscache_n_store_vmscan_gone),
63721- atomic_read(&fscache_n_store_vmscan_busy),
63722- atomic_read(&fscache_n_store_vmscan_cancelled),
63723- atomic_read(&fscache_n_store_vmscan_wait));
63724+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63725+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63726+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63727+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63728+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63729
63730 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63731- atomic_read(&fscache_n_op_pend),
63732- atomic_read(&fscache_n_op_run),
63733- atomic_read(&fscache_n_op_enqueue),
63734- atomic_read(&fscache_n_op_cancelled),
63735- atomic_read(&fscache_n_op_rejected));
63736+ atomic_read_unchecked(&fscache_n_op_pend),
63737+ atomic_read_unchecked(&fscache_n_op_run),
63738+ atomic_read_unchecked(&fscache_n_op_enqueue),
63739+ atomic_read_unchecked(&fscache_n_op_cancelled),
63740+ atomic_read_unchecked(&fscache_n_op_rejected));
63741 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63742- atomic_read(&fscache_n_op_deferred_release),
63743- atomic_read(&fscache_n_op_release),
63744- atomic_read(&fscache_n_op_gc));
63745+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63746+ atomic_read_unchecked(&fscache_n_op_release),
63747+ atomic_read_unchecked(&fscache_n_op_gc));
63748
63749 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63750 atomic_read(&fscache_n_cop_alloc_object),
63751diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63752index 966ace8..030a03a 100644
63753--- a/fs/fuse/cuse.c
63754+++ b/fs/fuse/cuse.c
63755@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63756 INIT_LIST_HEAD(&cuse_conntbl[i]);
63757
63758 /* inherit and extend fuse_dev_operations */
63759- cuse_channel_fops = fuse_dev_operations;
63760- cuse_channel_fops.owner = THIS_MODULE;
63761- cuse_channel_fops.open = cuse_channel_open;
63762- cuse_channel_fops.release = cuse_channel_release;
63763+ pax_open_kernel();
63764+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63765+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63766+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63767+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63768+ pax_close_kernel();
63769
63770 cuse_class = class_create(THIS_MODULE, "cuse");
63771 if (IS_ERR(cuse_class))
63772diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63773index ca88731..8e9c55d 100644
63774--- a/fs/fuse/dev.c
63775+++ b/fs/fuse/dev.c
63776@@ -1318,7 +1318,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63777 ret = 0;
63778 pipe_lock(pipe);
63779
63780- if (!pipe->readers) {
63781+ if (!atomic_read(&pipe->readers)) {
63782 send_sig(SIGPIPE, current, 0);
63783 if (!ret)
63784 ret = -EPIPE;
63785@@ -1347,7 +1347,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63786 page_nr++;
63787 ret += buf->len;
63788
63789- if (pipe->files)
63790+ if (atomic_read(&pipe->files))
63791 do_wakeup = 1;
63792 }
63793
63794diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63795index de1d84a..fd69c0c 100644
63796--- a/fs/fuse/dir.c
63797+++ b/fs/fuse/dir.c
63798@@ -1479,7 +1479,7 @@ static char *read_link(struct dentry *dentry)
63799 return link;
63800 }
63801
63802-static void free_link(char *link)
63803+static void free_link(const char *link)
63804 {
63805 if (!IS_ERR(link))
63806 free_page((unsigned long) link);
63807diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
63808index fd62cae..3494dfa 100644
63809--- a/fs/hostfs/hostfs_kern.c
63810+++ b/fs/hostfs/hostfs_kern.c
63811@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63812
63813 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
63814 {
63815- char *s = nd_get_link(nd);
63816+ const char *s = nd_get_link(nd);
63817 if (!IS_ERR(s))
63818 __putname(s);
63819 }
63820diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
63821index 1e2872b..7aea000 100644
63822--- a/fs/hugetlbfs/inode.c
63823+++ b/fs/hugetlbfs/inode.c
63824@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63825 struct mm_struct *mm = current->mm;
63826 struct vm_area_struct *vma;
63827 struct hstate *h = hstate_file(file);
63828+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
63829 struct vm_unmapped_area_info info;
63830
63831 if (len & ~huge_page_mask(h))
63832@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63833 return addr;
63834 }
63835
63836+#ifdef CONFIG_PAX_RANDMMAP
63837+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
63838+#endif
63839+
63840 if (addr) {
63841 addr = ALIGN(addr, huge_page_size(h));
63842 vma = find_vma(mm, addr);
63843- if (TASK_SIZE - len >= addr &&
63844- (!vma || addr + len <= vma->vm_start))
63845+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
63846 return addr;
63847 }
63848
63849 info.flags = 0;
63850 info.length = len;
63851 info.low_limit = TASK_UNMAPPED_BASE;
63852+
63853+#ifdef CONFIG_PAX_RANDMMAP
63854+ if (mm->pax_flags & MF_PAX_RANDMMAP)
63855+ info.low_limit += mm->delta_mmap;
63856+#endif
63857+
63858 info.high_limit = TASK_SIZE;
63859 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
63860 info.align_offset = 0;
63861@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
63862 };
63863 MODULE_ALIAS_FS("hugetlbfs");
63864
63865-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63866+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63867
63868 static int can_do_hugetlb_shm(void)
63869 {
63870diff --git a/fs/inode.c b/fs/inode.c
63871index 26753ba..d19eb34 100644
63872--- a/fs/inode.c
63873+++ b/fs/inode.c
63874@@ -840,16 +840,20 @@ unsigned int get_next_ino(void)
63875 unsigned int *p = &get_cpu_var(last_ino);
63876 unsigned int res = *p;
63877
63878+start:
63879+
63880 #ifdef CONFIG_SMP
63881 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
63882- static atomic_t shared_last_ino;
63883- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
63884+ static atomic_unchecked_t shared_last_ino;
63885+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
63886
63887 res = next - LAST_INO_BATCH;
63888 }
63889 #endif
63890
63891- *p = ++res;
63892+ if (unlikely(!++res))
63893+ goto start; /* never zero */
63894+ *p = res;
63895 put_cpu_var(last_ino);
63896 return res;
63897 }
63898diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
63899index 4a6cf28..d3a29d3 100644
63900--- a/fs/jffs2/erase.c
63901+++ b/fs/jffs2/erase.c
63902@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
63903 struct jffs2_unknown_node marker = {
63904 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
63905 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63906- .totlen = cpu_to_je32(c->cleanmarker_size)
63907+ .totlen = cpu_to_je32(c->cleanmarker_size),
63908+ .hdr_crc = cpu_to_je32(0)
63909 };
63910
63911 jffs2_prealloc_raw_node_refs(c, jeb, 1);
63912diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
63913index 09ed551..45684f8 100644
63914--- a/fs/jffs2/wbuf.c
63915+++ b/fs/jffs2/wbuf.c
63916@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
63917 {
63918 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
63919 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63920- .totlen = constant_cpu_to_je32(8)
63921+ .totlen = constant_cpu_to_je32(8),
63922+ .hdr_crc = constant_cpu_to_je32(0)
63923 };
63924
63925 /*
63926diff --git a/fs/jfs/super.c b/fs/jfs/super.c
63927index adf8cb0..bb935fa 100644
63928--- a/fs/jfs/super.c
63929+++ b/fs/jfs/super.c
63930@@ -893,7 +893,7 @@ static int __init init_jfs_fs(void)
63931
63932 jfs_inode_cachep =
63933 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
63934- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
63935+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
63936 init_once);
63937 if (jfs_inode_cachep == NULL)
63938 return -ENOMEM;
63939diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
63940index a693f5b..82276a1 100644
63941--- a/fs/kernfs/dir.c
63942+++ b/fs/kernfs/dir.c
63943@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
63944 *
63945 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63946 */
63947-static unsigned int kernfs_name_hash(const char *name, const void *ns)
63948+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
63949 {
63950 unsigned long hash = init_name_hash();
63951 unsigned int len = strlen(name);
63952diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
63953index 4429d6d..9831f52 100644
63954--- a/fs/kernfs/file.c
63955+++ b/fs/kernfs/file.c
63956@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
63957
63958 struct kernfs_open_node {
63959 atomic_t refcnt;
63960- atomic_t event;
63961+ atomic_unchecked_t event;
63962 wait_queue_head_t poll;
63963 struct list_head files; /* goes through kernfs_open_file.list */
63964 };
63965@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
63966 {
63967 struct kernfs_open_file *of = sf->private;
63968
63969- of->event = atomic_read(&of->kn->attr.open->event);
63970+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
63971
63972 return of->kn->attr.ops->seq_show(sf, v);
63973 }
63974@@ -375,12 +375,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
63975 return ret;
63976 }
63977
63978-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63979- void *buf, int len, int write)
63980+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63981+ void *buf, size_t len, int write)
63982 {
63983 struct file *file = vma->vm_file;
63984 struct kernfs_open_file *of = kernfs_of(file);
63985- int ret;
63986+ ssize_t ret;
63987
63988 if (!of->vm_ops)
63989 return -EINVAL;
63990@@ -581,7 +581,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
63991 return -ENOMEM;
63992
63993 atomic_set(&new_on->refcnt, 0);
63994- atomic_set(&new_on->event, 1);
63995+ atomic_set_unchecked(&new_on->event, 1);
63996 init_waitqueue_head(&new_on->poll);
63997 INIT_LIST_HEAD(&new_on->files);
63998 goto retry;
63999@@ -787,7 +787,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
64000
64001 kernfs_put_active(kn);
64002
64003- if (of->event != atomic_read(&on->event))
64004+ if (of->event != atomic_read_unchecked(&on->event))
64005 goto trigger;
64006
64007 return DEFAULT_POLLMASK;
64008@@ -818,7 +818,7 @@ repeat:
64009
64010 on = kn->attr.open;
64011 if (on) {
64012- atomic_inc(&on->event);
64013+ atomic_inc_unchecked(&on->event);
64014 wake_up_interruptible(&on->poll);
64015 }
64016
64017diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
64018index 8a19889..4c3069a 100644
64019--- a/fs/kernfs/symlink.c
64020+++ b/fs/kernfs/symlink.c
64021@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
64022 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
64023 void *cookie)
64024 {
64025- char *page = nd_get_link(nd);
64026+ const char *page = nd_get_link(nd);
64027 if (!IS_ERR(page))
64028 free_page((unsigned long)page);
64029 }
64030diff --git a/fs/libfs.c b/fs/libfs.c
64031index 88e3e00..979c262 100644
64032--- a/fs/libfs.c
64033+++ b/fs/libfs.c
64034@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64035
64036 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
64037 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
64038+ char d_name[sizeof(next->d_iname)];
64039+ const unsigned char *name;
64040+
64041 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
64042 if (!simple_positive(next)) {
64043 spin_unlock(&next->d_lock);
64044@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64045
64046 spin_unlock(&next->d_lock);
64047 spin_unlock(&dentry->d_lock);
64048- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
64049+ name = next->d_name.name;
64050+ if (name == next->d_iname) {
64051+ memcpy(d_name, name, next->d_name.len);
64052+ name = d_name;
64053+ }
64054+ if (!dir_emit(ctx, name, next->d_name.len,
64055 next->d_inode->i_ino, dt_type(next->d_inode)))
64056 return 0;
64057 spin_lock(&dentry->d_lock);
64058@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
64059 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
64060 void *cookie)
64061 {
64062- char *s = nd_get_link(nd);
64063+ const char *s = nd_get_link(nd);
64064 if (!IS_ERR(s))
64065 kfree(s);
64066 }
64067diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
64068index acd3947..1f896e2 100644
64069--- a/fs/lockd/clntproc.c
64070+++ b/fs/lockd/clntproc.c
64071@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
64072 /*
64073 * Cookie counter for NLM requests
64074 */
64075-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
64076+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
64077
64078 void nlmclnt_next_cookie(struct nlm_cookie *c)
64079 {
64080- u32 cookie = atomic_inc_return(&nlm_cookie);
64081+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
64082
64083 memcpy(c->data, &cookie, 4);
64084 c->len=4;
64085diff --git a/fs/locks.c b/fs/locks.c
64086index bb08857..f65e8bf 100644
64087--- a/fs/locks.c
64088+++ b/fs/locks.c
64089@@ -2350,7 +2350,7 @@ void locks_remove_file(struct file *filp)
64090 locks_remove_posix(filp, filp);
64091
64092 if (filp->f_op->flock) {
64093- struct file_lock fl = {
64094+ struct file_lock flock = {
64095 .fl_owner = filp,
64096 .fl_pid = current->tgid,
64097 .fl_file = filp,
64098@@ -2358,9 +2358,9 @@ void locks_remove_file(struct file *filp)
64099 .fl_type = F_UNLCK,
64100 .fl_end = OFFSET_MAX,
64101 };
64102- filp->f_op->flock(filp, F_SETLKW, &fl);
64103- if (fl.fl_ops && fl.fl_ops->fl_release_private)
64104- fl.fl_ops->fl_release_private(&fl);
64105+ filp->f_op->flock(filp, F_SETLKW, &flock);
64106+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
64107+ flock.fl_ops->fl_release_private(&flock);
64108 }
64109
64110 spin_lock(&inode->i_lock);
64111diff --git a/fs/mount.h b/fs/mount.h
64112index 6740a62..ccb472f 100644
64113--- a/fs/mount.h
64114+++ b/fs/mount.h
64115@@ -11,7 +11,7 @@ struct mnt_namespace {
64116 u64 seq; /* Sequence number to prevent loops */
64117 wait_queue_head_t poll;
64118 u64 event;
64119-};
64120+} __randomize_layout;
64121
64122 struct mnt_pcp {
64123 int mnt_count;
64124@@ -57,7 +57,7 @@ struct mount {
64125 int mnt_expiry_mark; /* true if marked for expiry */
64126 struct hlist_head mnt_pins;
64127 struct path mnt_ex_mountpoint;
64128-};
64129+} __randomize_layout;
64130
64131 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
64132
64133diff --git a/fs/namei.c b/fs/namei.c
64134index bb02687..79cba2c 100644
64135--- a/fs/namei.c
64136+++ b/fs/namei.c
64137@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
64138 if (ret != -EACCES)
64139 return ret;
64140
64141+#ifdef CONFIG_GRKERNSEC
64142+ /* we'll block if we have to log due to a denied capability use */
64143+ if (mask & MAY_NOT_BLOCK)
64144+ return -ECHILD;
64145+#endif
64146+
64147 if (S_ISDIR(inode->i_mode)) {
64148 /* DACs are overridable for directories */
64149- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64150- return 0;
64151 if (!(mask & MAY_WRITE))
64152- if (capable_wrt_inode_uidgid(inode,
64153- CAP_DAC_READ_SEARCH))
64154+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64155+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64156 return 0;
64157+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64158+ return 0;
64159 return -EACCES;
64160 }
64161 /*
64162+ * Searching includes executable on directories, else just read.
64163+ */
64164+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64165+ if (mask == MAY_READ)
64166+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64167+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64168+ return 0;
64169+
64170+ /*
64171 * Read/write DACs are always overridable.
64172 * Executable DACs are overridable when there is
64173 * at least one exec bit set.
64174@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
64175 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64176 return 0;
64177
64178- /*
64179- * Searching includes executable on directories, else just read.
64180- */
64181- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64182- if (mask == MAY_READ)
64183- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64184- return 0;
64185-
64186 return -EACCES;
64187 }
64188 EXPORT_SYMBOL(generic_permission);
64189@@ -823,7 +830,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64190 {
64191 struct dentry *dentry = link->dentry;
64192 int error;
64193- char *s;
64194+ const char *s;
64195
64196 BUG_ON(nd->flags & LOOKUP_RCU);
64197
64198@@ -844,6 +851,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64199 if (error)
64200 goto out_put_nd_path;
64201
64202+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
64203+ dentry->d_inode, dentry, nd->path.mnt)) {
64204+ error = -EACCES;
64205+ goto out_put_nd_path;
64206+ }
64207+
64208 nd->last_type = LAST_BIND;
64209 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
64210 error = PTR_ERR(*p);
64211@@ -1607,6 +1620,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
64212 if (res)
64213 break;
64214 res = walk_component(nd, path, LOOKUP_FOLLOW);
64215+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
64216+ res = -EACCES;
64217 put_link(nd, &link, cookie);
64218 } while (res > 0);
64219
64220@@ -1679,7 +1694,7 @@ EXPORT_SYMBOL(full_name_hash);
64221 static inline u64 hash_name(const char *name)
64222 {
64223 unsigned long a, b, adata, bdata, mask, hash, len;
64224- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64225+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64226
64227 hash = a = 0;
64228 len = -sizeof(unsigned long);
64229@@ -1968,6 +1983,8 @@ static int path_lookupat(int dfd, const char *name,
64230 if (err)
64231 break;
64232 err = lookup_last(nd, &path);
64233+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
64234+ err = -EACCES;
64235 put_link(nd, &link, cookie);
64236 }
64237 }
64238@@ -1975,6 +1992,13 @@ static int path_lookupat(int dfd, const char *name,
64239 if (!err)
64240 err = complete_walk(nd);
64241
64242+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
64243+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64244+ path_put(&nd->path);
64245+ err = -ENOENT;
64246+ }
64247+ }
64248+
64249 if (!err && nd->flags & LOOKUP_DIRECTORY) {
64250 if (!d_can_lookup(nd->path.dentry)) {
64251 path_put(&nd->path);
64252@@ -2002,8 +2026,15 @@ static int filename_lookup(int dfd, struct filename *name,
64253 retval = path_lookupat(dfd, name->name,
64254 flags | LOOKUP_REVAL, nd);
64255
64256- if (likely(!retval))
64257+ if (likely(!retval)) {
64258 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
64259+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
64260+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
64261+ path_put(&nd->path);
64262+ return -ENOENT;
64263+ }
64264+ }
64265+ }
64266 return retval;
64267 }
64268
64269@@ -2585,6 +2616,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
64270 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
64271 return -EPERM;
64272
64273+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
64274+ return -EPERM;
64275+ if (gr_handle_rawio(inode))
64276+ return -EPERM;
64277+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
64278+ return -EACCES;
64279+
64280 return 0;
64281 }
64282
64283@@ -2816,7 +2854,7 @@ looked_up:
64284 * cleared otherwise prior to returning.
64285 */
64286 static int lookup_open(struct nameidata *nd, struct path *path,
64287- struct file *file,
64288+ struct path *link, struct file *file,
64289 const struct open_flags *op,
64290 bool got_write, int *opened)
64291 {
64292@@ -2851,6 +2889,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64293 /* Negative dentry, just create the file */
64294 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
64295 umode_t mode = op->mode;
64296+
64297+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
64298+ error = -EACCES;
64299+ goto out_dput;
64300+ }
64301+
64302+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
64303+ error = -EACCES;
64304+ goto out_dput;
64305+ }
64306+
64307 if (!IS_POSIXACL(dir->d_inode))
64308 mode &= ~current_umask();
64309 /*
64310@@ -2872,6 +2921,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64311 nd->flags & LOOKUP_EXCL);
64312 if (error)
64313 goto out_dput;
64314+ else
64315+ gr_handle_create(dentry, nd->path.mnt);
64316 }
64317 out_no_open:
64318 path->dentry = dentry;
64319@@ -2886,7 +2937,7 @@ out_dput:
64320 /*
64321 * Handle the last step of open()
64322 */
64323-static int do_last(struct nameidata *nd, struct path *path,
64324+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
64325 struct file *file, const struct open_flags *op,
64326 int *opened, struct filename *name)
64327 {
64328@@ -2936,6 +2987,15 @@ static int do_last(struct nameidata *nd, struct path *path,
64329 if (error)
64330 return error;
64331
64332+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
64333+ error = -ENOENT;
64334+ goto out;
64335+ }
64336+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64337+ error = -EACCES;
64338+ goto out;
64339+ }
64340+
64341 audit_inode(name, dir, LOOKUP_PARENT);
64342 error = -EISDIR;
64343 /* trailing slashes? */
64344@@ -2955,7 +3015,7 @@ retry_lookup:
64345 */
64346 }
64347 mutex_lock(&dir->d_inode->i_mutex);
64348- error = lookup_open(nd, path, file, op, got_write, opened);
64349+ error = lookup_open(nd, path, link, file, op, got_write, opened);
64350 mutex_unlock(&dir->d_inode->i_mutex);
64351
64352 if (error <= 0) {
64353@@ -2979,11 +3039,28 @@ retry_lookup:
64354 goto finish_open_created;
64355 }
64356
64357+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
64358+ error = -ENOENT;
64359+ goto exit_dput;
64360+ }
64361+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64362+ error = -EACCES;
64363+ goto exit_dput;
64364+ }
64365+
64366 /*
64367 * create/update audit record if it already exists.
64368 */
64369- if (d_is_positive(path->dentry))
64370+ if (d_is_positive(path->dentry)) {
64371+ /* only check if O_CREAT is specified, all other checks need to go
64372+ into may_open */
64373+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64374+ error = -EACCES;
64375+ goto exit_dput;
64376+ }
64377+
64378 audit_inode(name, path->dentry, 0);
64379+ }
64380
64381 /*
64382 * If atomic_open() acquired write access it is dropped now due to
64383@@ -3024,6 +3101,11 @@ finish_lookup:
64384 }
64385 }
64386 BUG_ON(inode != path->dentry->d_inode);
64387+ /* if we're resolving a symlink to another symlink */
64388+ if (link && gr_handle_symlink_owner(link, inode)) {
64389+ error = -EACCES;
64390+ goto out;
64391+ }
64392 return 1;
64393 }
64394
64395@@ -3033,7 +3115,6 @@ finish_lookup:
64396 save_parent.dentry = nd->path.dentry;
64397 save_parent.mnt = mntget(path->mnt);
64398 nd->path.dentry = path->dentry;
64399-
64400 }
64401 nd->inode = inode;
64402 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
64403@@ -3043,7 +3124,18 @@ finish_open:
64404 path_put(&save_parent);
64405 return error;
64406 }
64407+
64408+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64409+ error = -ENOENT;
64410+ goto out;
64411+ }
64412+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64413+ error = -EACCES;
64414+ goto out;
64415+ }
64416+
64417 audit_inode(name, nd->path.dentry, 0);
64418+
64419 error = -EISDIR;
64420 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64421 goto out;
64422@@ -3207,7 +3299,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64423 if (unlikely(error))
64424 goto out;
64425
64426- error = do_last(nd, &path, file, op, &opened, pathname);
64427+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64428 while (unlikely(error > 0)) { /* trailing symlink */
64429 struct path link = path;
64430 void *cookie;
64431@@ -3225,7 +3317,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64432 error = follow_link(&link, nd, &cookie);
64433 if (unlikely(error))
64434 break;
64435- error = do_last(nd, &path, file, op, &opened, pathname);
64436+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64437 put_link(nd, &link, cookie);
64438 }
64439 out:
64440@@ -3325,9 +3417,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
64441 goto unlock;
64442
64443 error = -EEXIST;
64444- if (d_is_positive(dentry))
64445+ if (d_is_positive(dentry)) {
64446+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64447+ error = -ENOENT;
64448 goto fail;
64449-
64450+ }
64451 /*
64452 * Special case - lookup gave negative, but... we had foo/bar/
64453 * From the vfs_mknod() POV we just have a negative dentry -
64454@@ -3379,6 +3473,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64455 }
64456 EXPORT_SYMBOL(user_path_create);
64457
64458+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64459+{
64460+ struct filename *tmp = getname(pathname);
64461+ struct dentry *res;
64462+ if (IS_ERR(tmp))
64463+ return ERR_CAST(tmp);
64464+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64465+ if (IS_ERR(res))
64466+ putname(tmp);
64467+ else
64468+ *to = tmp;
64469+ return res;
64470+}
64471+
64472 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64473 {
64474 int error = may_create(dir, dentry);
64475@@ -3442,6 +3550,17 @@ retry:
64476
64477 if (!IS_POSIXACL(path.dentry->d_inode))
64478 mode &= ~current_umask();
64479+
64480+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64481+ error = -EPERM;
64482+ goto out;
64483+ }
64484+
64485+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64486+ error = -EACCES;
64487+ goto out;
64488+ }
64489+
64490 error = security_path_mknod(&path, dentry, mode, dev);
64491 if (error)
64492 goto out;
64493@@ -3457,6 +3576,8 @@ retry:
64494 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64495 break;
64496 }
64497+ if (!error)
64498+ gr_handle_create(dentry, path.mnt);
64499 out:
64500 done_path_create(&path, dentry);
64501 if (retry_estale(error, lookup_flags)) {
64502@@ -3511,9 +3632,16 @@ retry:
64503
64504 if (!IS_POSIXACL(path.dentry->d_inode))
64505 mode &= ~current_umask();
64506+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64507+ error = -EACCES;
64508+ goto out;
64509+ }
64510 error = security_path_mkdir(&path, dentry, mode);
64511 if (!error)
64512 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64513+ if (!error)
64514+ gr_handle_create(dentry, path.mnt);
64515+out:
64516 done_path_create(&path, dentry);
64517 if (retry_estale(error, lookup_flags)) {
64518 lookup_flags |= LOOKUP_REVAL;
64519@@ -3596,6 +3724,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64520 struct filename *name;
64521 struct dentry *dentry;
64522 struct nameidata nd;
64523+ ino_t saved_ino = 0;
64524+ dev_t saved_dev = 0;
64525 unsigned int lookup_flags = 0;
64526 retry:
64527 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64528@@ -3628,10 +3758,21 @@ retry:
64529 error = -ENOENT;
64530 goto exit3;
64531 }
64532+
64533+ saved_ino = dentry->d_inode->i_ino;
64534+ saved_dev = gr_get_dev_from_dentry(dentry);
64535+
64536+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64537+ error = -EACCES;
64538+ goto exit3;
64539+ }
64540+
64541 error = security_path_rmdir(&nd.path, dentry);
64542 if (error)
64543 goto exit3;
64544 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64545+ if (!error && (saved_dev || saved_ino))
64546+ gr_handle_delete(saved_ino, saved_dev);
64547 exit3:
64548 dput(dentry);
64549 exit2:
64550@@ -3722,6 +3863,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64551 struct nameidata nd;
64552 struct inode *inode = NULL;
64553 struct inode *delegated_inode = NULL;
64554+ ino_t saved_ino = 0;
64555+ dev_t saved_dev = 0;
64556 unsigned int lookup_flags = 0;
64557 retry:
64558 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64559@@ -3748,10 +3891,22 @@ retry_deleg:
64560 if (d_is_negative(dentry))
64561 goto slashes;
64562 ihold(inode);
64563+
64564+ if (inode->i_nlink <= 1) {
64565+ saved_ino = inode->i_ino;
64566+ saved_dev = gr_get_dev_from_dentry(dentry);
64567+ }
64568+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64569+ error = -EACCES;
64570+ goto exit2;
64571+ }
64572+
64573 error = security_path_unlink(&nd.path, dentry);
64574 if (error)
64575 goto exit2;
64576 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64577+ if (!error && (saved_ino || saved_dev))
64578+ gr_handle_delete(saved_ino, saved_dev);
64579 exit2:
64580 dput(dentry);
64581 }
64582@@ -3840,9 +3995,17 @@ retry:
64583 if (IS_ERR(dentry))
64584 goto out_putname;
64585
64586+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64587+ error = -EACCES;
64588+ goto out;
64589+ }
64590+
64591 error = security_path_symlink(&path, dentry, from->name);
64592 if (!error)
64593 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64594+ if (!error)
64595+ gr_handle_create(dentry, path.mnt);
64596+out:
64597 done_path_create(&path, dentry);
64598 if (retry_estale(error, lookup_flags)) {
64599 lookup_flags |= LOOKUP_REVAL;
64600@@ -3946,6 +4109,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64601 struct dentry *new_dentry;
64602 struct path old_path, new_path;
64603 struct inode *delegated_inode = NULL;
64604+ struct filename *to = NULL;
64605 int how = 0;
64606 int error;
64607
64608@@ -3969,7 +4133,7 @@ retry:
64609 if (error)
64610 return error;
64611
64612- new_dentry = user_path_create(newdfd, newname, &new_path,
64613+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64614 (how & LOOKUP_REVAL));
64615 error = PTR_ERR(new_dentry);
64616 if (IS_ERR(new_dentry))
64617@@ -3981,11 +4145,28 @@ retry:
64618 error = may_linkat(&old_path);
64619 if (unlikely(error))
64620 goto out_dput;
64621+
64622+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64623+ old_path.dentry->d_inode,
64624+ old_path.dentry->d_inode->i_mode, to)) {
64625+ error = -EACCES;
64626+ goto out_dput;
64627+ }
64628+
64629+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64630+ old_path.dentry, old_path.mnt, to)) {
64631+ error = -EACCES;
64632+ goto out_dput;
64633+ }
64634+
64635 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64636 if (error)
64637 goto out_dput;
64638 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64639+ if (!error)
64640+ gr_handle_create(new_dentry, new_path.mnt);
64641 out_dput:
64642+ putname(to);
64643 done_path_create(&new_path, new_dentry);
64644 if (delegated_inode) {
64645 error = break_deleg_wait(&delegated_inode);
64646@@ -4296,6 +4477,12 @@ retry_deleg:
64647 if (new_dentry == trap)
64648 goto exit5;
64649
64650+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64651+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64652+ to, flags);
64653+ if (error)
64654+ goto exit5;
64655+
64656 error = security_path_rename(&oldnd.path, old_dentry,
64657 &newnd.path, new_dentry, flags);
64658 if (error)
64659@@ -4303,6 +4490,9 @@ retry_deleg:
64660 error = vfs_rename(old_dir->d_inode, old_dentry,
64661 new_dir->d_inode, new_dentry,
64662 &delegated_inode, flags);
64663+ if (!error)
64664+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64665+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64666 exit5:
64667 dput(new_dentry);
64668 exit4:
64669@@ -4345,14 +4535,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
64670
64671 int readlink_copy(char __user *buffer, int buflen, const char *link)
64672 {
64673+ char tmpbuf[64];
64674+ const char *newlink;
64675 int len = PTR_ERR(link);
64676+
64677 if (IS_ERR(link))
64678 goto out;
64679
64680 len = strlen(link);
64681 if (len > (unsigned) buflen)
64682 len = buflen;
64683- if (copy_to_user(buffer, link, len))
64684+
64685+ if (len < sizeof(tmpbuf)) {
64686+ memcpy(tmpbuf, link, len);
64687+ newlink = tmpbuf;
64688+ } else
64689+ newlink = link;
64690+
64691+ if (copy_to_user(buffer, newlink, len))
64692 len = -EFAULT;
64693 out:
64694 return len;
64695diff --git a/fs/namespace.c b/fs/namespace.c
64696index 550dbff..c4ad324 100644
64697--- a/fs/namespace.c
64698+++ b/fs/namespace.c
64699@@ -1362,6 +1362,9 @@ static int do_umount(struct mount *mnt, int flags)
64700 if (!(sb->s_flags & MS_RDONLY))
64701 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64702 up_write(&sb->s_umount);
64703+
64704+ gr_log_remount(mnt->mnt_devname, retval);
64705+
64706 return retval;
64707 }
64708
64709@@ -1384,6 +1387,9 @@ static int do_umount(struct mount *mnt, int flags)
64710 }
64711 unlock_mount_hash();
64712 namespace_unlock();
64713+
64714+ gr_log_unmount(mnt->mnt_devname, retval);
64715+
64716 return retval;
64717 }
64718
64719@@ -1403,7 +1409,7 @@ static inline bool may_mount(void)
64720 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64721 */
64722
64723-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64724+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64725 {
64726 struct path path;
64727 struct mount *mnt;
64728@@ -1445,7 +1451,7 @@ out:
64729 /*
64730 * The 2.0 compatible umount. No flags.
64731 */
64732-SYSCALL_DEFINE1(oldumount, char __user *, name)
64733+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64734 {
64735 return sys_umount(name, 0);
64736 }
64737@@ -2494,6 +2500,16 @@ long do_mount(const char *dev_name, const char *dir_name,
64738 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64739 MS_STRICTATIME);
64740
64741+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
64742+ retval = -EPERM;
64743+ goto dput_out;
64744+ }
64745+
64746+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
64747+ retval = -EPERM;
64748+ goto dput_out;
64749+ }
64750+
64751 if (flags & MS_REMOUNT)
64752 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
64753 data_page);
64754@@ -2508,6 +2524,9 @@ long do_mount(const char *dev_name, const char *dir_name,
64755 dev_name, data_page);
64756 dput_out:
64757 path_put(&path);
64758+
64759+ gr_log_mount(dev_name, dir_name, retval);
64760+
64761 return retval;
64762 }
64763
64764@@ -2525,7 +2544,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
64765 * number incrementing at 10Ghz will take 12,427 years to wrap which
64766 * is effectively never, so we can ignore the possibility.
64767 */
64768-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
64769+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
64770
64771 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64772 {
64773@@ -2540,7 +2559,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64774 kfree(new_ns);
64775 return ERR_PTR(ret);
64776 }
64777- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
64778+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
64779 atomic_set(&new_ns->count, 1);
64780 new_ns->root = NULL;
64781 INIT_LIST_HEAD(&new_ns->list);
64782@@ -2550,7 +2569,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64783 return new_ns;
64784 }
64785
64786-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64787+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64788 struct user_namespace *user_ns, struct fs_struct *new_fs)
64789 {
64790 struct mnt_namespace *new_ns;
64791@@ -2671,8 +2690,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
64792 }
64793 EXPORT_SYMBOL(mount_subtree);
64794
64795-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
64796- char __user *, type, unsigned long, flags, void __user *, data)
64797+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
64798+ const char __user *, type, unsigned long, flags, void __user *, data)
64799 {
64800 int ret;
64801 char *kernel_type;
64802@@ -2785,6 +2804,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
64803 if (error)
64804 goto out2;
64805
64806+ if (gr_handle_chroot_pivot()) {
64807+ error = -EPERM;
64808+ goto out2;
64809+ }
64810+
64811 get_fs_root(current->fs, &root);
64812 old_mp = lock_mount(&old);
64813 error = PTR_ERR(old_mp);
64814@@ -3056,7 +3080,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
64815 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
64816 return -EPERM;
64817
64818- if (fs->users != 1)
64819+ if (atomic_read(&fs->users) != 1)
64820 return -EINVAL;
64821
64822 get_mnt_ns(mnt_ns);
64823diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
64824index f4ccfe6..a5cf064 100644
64825--- a/fs/nfs/callback_xdr.c
64826+++ b/fs/nfs/callback_xdr.c
64827@@ -51,7 +51,7 @@ struct callback_op {
64828 callback_decode_arg_t decode_args;
64829 callback_encode_res_t encode_res;
64830 long res_maxsize;
64831-};
64832+} __do_const;
64833
64834 static struct callback_op callback_ops[];
64835
64836diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
64837index 0689aa5..299386e 100644
64838--- a/fs/nfs/inode.c
64839+++ b/fs/nfs/inode.c
64840@@ -1228,16 +1228,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
64841 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
64842 }
64843
64844-static atomic_long_t nfs_attr_generation_counter;
64845+static atomic_long_unchecked_t nfs_attr_generation_counter;
64846
64847 static unsigned long nfs_read_attr_generation_counter(void)
64848 {
64849- return atomic_long_read(&nfs_attr_generation_counter);
64850+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
64851 }
64852
64853 unsigned long nfs_inc_attr_generation_counter(void)
64854 {
64855- return atomic_long_inc_return(&nfs_attr_generation_counter);
64856+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
64857 }
64858
64859 void nfs_fattr_init(struct nfs_fattr *fattr)
64860diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
64861index 1d3cb47..2b8ed89 100644
64862--- a/fs/nfsd/nfs4proc.c
64863+++ b/fs/nfsd/nfs4proc.c
64864@@ -1155,7 +1155,7 @@ struct nfsd4_operation {
64865 nfsd4op_rsize op_rsize_bop;
64866 stateid_getter op_get_currentstateid;
64867 stateid_setter op_set_currentstateid;
64868-};
64869+} __do_const;
64870
64871 static struct nfsd4_operation nfsd4_ops[];
64872
64873diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
64874index 353aac8..32035ee 100644
64875--- a/fs/nfsd/nfs4xdr.c
64876+++ b/fs/nfsd/nfs4xdr.c
64877@@ -1534,7 +1534,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
64878
64879 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
64880
64881-static nfsd4_dec nfsd4_dec_ops[] = {
64882+static const nfsd4_dec nfsd4_dec_ops[] = {
64883 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
64884 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
64885 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
64886diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
64887index ff95676..96cf3f62 100644
64888--- a/fs/nfsd/nfscache.c
64889+++ b/fs/nfsd/nfscache.c
64890@@ -527,17 +527,20 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64891 {
64892 struct svc_cacherep *rp = rqstp->rq_cacherep;
64893 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
64894- int len;
64895+ long len;
64896 size_t bufsize = 0;
64897
64898 if (!rp)
64899 return;
64900
64901- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
64902- len >>= 2;
64903+ if (statp) {
64904+ len = (char*)statp - (char*)resv->iov_base;
64905+ len = resv->iov_len - len;
64906+ len >>= 2;
64907+ }
64908
64909 /* Don't cache excessive amounts of data and XDR failures */
64910- if (!statp || len > (256 >> 2)) {
64911+ if (!statp || len > (256 >> 2) || len < 0) {
64912 nfsd_reply_cache_free(rp);
64913 return;
64914 }
64915@@ -545,7 +548,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64916 switch (cachetype) {
64917 case RC_REPLSTAT:
64918 if (len != 1)
64919- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
64920+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
64921 rp->c_replstat = *statp;
64922 break;
64923 case RC_REPLBUFF:
64924diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
64925index 6ab077b..5ac7f0b 100644
64926--- a/fs/nfsd/vfs.c
64927+++ b/fs/nfsd/vfs.c
64928@@ -855,7 +855,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
64929
64930 oldfs = get_fs();
64931 set_fs(KERNEL_DS);
64932- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
64933+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
64934 set_fs(oldfs);
64935 return nfsd_finish_read(file, count, host_err);
64936 }
64937@@ -943,7 +943,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
64938
64939 /* Write the data. */
64940 oldfs = get_fs(); set_fs(KERNEL_DS);
64941- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
64942+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
64943 set_fs(oldfs);
64944 if (host_err < 0)
64945 goto out_nfserr;
64946@@ -1485,7 +1485,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
64947 */
64948
64949 oldfs = get_fs(); set_fs(KERNEL_DS);
64950- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
64951+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
64952 set_fs(oldfs);
64953
64954 if (host_err < 0)
64955diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
64956index 52ccd34..7a6b202 100644
64957--- a/fs/nls/nls_base.c
64958+++ b/fs/nls/nls_base.c
64959@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
64960
64961 int __register_nls(struct nls_table *nls, struct module *owner)
64962 {
64963- struct nls_table ** tmp = &tables;
64964+ struct nls_table *tmp = tables;
64965
64966 if (nls->next)
64967 return -EBUSY;
64968
64969- nls->owner = owner;
64970+ pax_open_kernel();
64971+ *(void **)&nls->owner = owner;
64972+ pax_close_kernel();
64973 spin_lock(&nls_lock);
64974- while (*tmp) {
64975- if (nls == *tmp) {
64976+ while (tmp) {
64977+ if (nls == tmp) {
64978 spin_unlock(&nls_lock);
64979 return -EBUSY;
64980 }
64981- tmp = &(*tmp)->next;
64982+ tmp = tmp->next;
64983 }
64984- nls->next = tables;
64985+ pax_open_kernel();
64986+ *(struct nls_table **)&nls->next = tables;
64987+ pax_close_kernel();
64988 tables = nls;
64989 spin_unlock(&nls_lock);
64990 return 0;
64991@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
64992
64993 int unregister_nls(struct nls_table * nls)
64994 {
64995- struct nls_table ** tmp = &tables;
64996+ struct nls_table * const * tmp = &tables;
64997
64998 spin_lock(&nls_lock);
64999 while (*tmp) {
65000 if (nls == *tmp) {
65001- *tmp = nls->next;
65002+ pax_open_kernel();
65003+ *(struct nls_table **)tmp = nls->next;
65004+ pax_close_kernel();
65005 spin_unlock(&nls_lock);
65006 return 0;
65007 }
65008@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
65009 return -EINVAL;
65010 }
65011
65012-static struct nls_table *find_nls(char *charset)
65013+static struct nls_table *find_nls(const char *charset)
65014 {
65015 struct nls_table *nls;
65016 spin_lock(&nls_lock);
65017@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
65018 return nls;
65019 }
65020
65021-struct nls_table *load_nls(char *charset)
65022+struct nls_table *load_nls(const char *charset)
65023 {
65024 return try_then_request_module(find_nls(charset), "nls_%s", charset);
65025 }
65026diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
65027index 162b3f1..6076a7c 100644
65028--- a/fs/nls/nls_euc-jp.c
65029+++ b/fs/nls/nls_euc-jp.c
65030@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
65031 p_nls = load_nls("cp932");
65032
65033 if (p_nls) {
65034- table.charset2upper = p_nls->charset2upper;
65035- table.charset2lower = p_nls->charset2lower;
65036+ pax_open_kernel();
65037+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65038+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65039+ pax_close_kernel();
65040 return register_nls(&table);
65041 }
65042
65043diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
65044index a80a741..7b96e1b 100644
65045--- a/fs/nls/nls_koi8-ru.c
65046+++ b/fs/nls/nls_koi8-ru.c
65047@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
65048 p_nls = load_nls("koi8-u");
65049
65050 if (p_nls) {
65051- table.charset2upper = p_nls->charset2upper;
65052- table.charset2lower = p_nls->charset2lower;
65053+ pax_open_kernel();
65054+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65055+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65056+ pax_close_kernel();
65057 return register_nls(&table);
65058 }
65059
65060diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
65061index c991616..5ae51af 100644
65062--- a/fs/notify/fanotify/fanotify_user.c
65063+++ b/fs/notify/fanotify/fanotify_user.c
65064@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
65065
65066 fd = fanotify_event_metadata.fd;
65067 ret = -EFAULT;
65068- if (copy_to_user(buf, &fanotify_event_metadata,
65069- fanotify_event_metadata.event_len))
65070+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
65071+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
65072 goto out_close_fd;
65073
65074 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
65075diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
65076index 0f88bc0..7d888d7 100644
65077--- a/fs/notify/inotify/inotify_fsnotify.c
65078+++ b/fs/notify/inotify/inotify_fsnotify.c
65079@@ -165,8 +165,10 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
65080 /* ideally the idr is empty and we won't hit the BUG in the callback */
65081 idr_for_each(&group->inotify_data.idr, idr_callback, group);
65082 idr_destroy(&group->inotify_data.idr);
65083- atomic_dec(&group->inotify_data.user->inotify_devs);
65084- free_uid(group->inotify_data.user);
65085+ if (group->inotify_data.user) {
65086+ atomic_dec(&group->inotify_data.user->inotify_devs);
65087+ free_uid(group->inotify_data.user);
65088+ }
65089 }
65090
65091 static void inotify_free_event(struct fsnotify_event *fsn_event)
65092diff --git a/fs/notify/notification.c b/fs/notify/notification.c
65093index a95d8e0..a91a5fd 100644
65094--- a/fs/notify/notification.c
65095+++ b/fs/notify/notification.c
65096@@ -48,7 +48,7 @@
65097 #include <linux/fsnotify_backend.h>
65098 #include "fsnotify.h"
65099
65100-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65101+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65102
65103 /**
65104 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
65105@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65106 */
65107 u32 fsnotify_get_cookie(void)
65108 {
65109- return atomic_inc_return(&fsnotify_sync_cookie);
65110+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
65111 }
65112 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
65113
65114diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
65115index 9e38daf..5727cae 100644
65116--- a/fs/ntfs/dir.c
65117+++ b/fs/ntfs/dir.c
65118@@ -1310,7 +1310,7 @@ find_next_index_buffer:
65119 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
65120 ~(s64)(ndir->itype.index.block_size - 1)));
65121 /* Bounds checks. */
65122- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65123+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65124 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
65125 "inode 0x%lx or driver bug.", vdir->i_ino);
65126 goto err_out;
65127diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
65128index f5ec1ce..807fd78 100644
65129--- a/fs/ntfs/file.c
65130+++ b/fs/ntfs/file.c
65131@@ -1279,7 +1279,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
65132 char *addr;
65133 size_t total = 0;
65134 unsigned len;
65135- int left;
65136+ unsigned left;
65137
65138 do {
65139 len = PAGE_CACHE_SIZE - ofs;
65140diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
65141index 6c3296e..c0b99f0 100644
65142--- a/fs/ntfs/super.c
65143+++ b/fs/ntfs/super.c
65144@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65145 if (!silent)
65146 ntfs_error(sb, "Primary boot sector is invalid.");
65147 } else if (!silent)
65148- ntfs_error(sb, read_err_str, "primary");
65149+ ntfs_error(sb, read_err_str, "%s", "primary");
65150 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
65151 if (bh_primary)
65152 brelse(bh_primary);
65153@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65154 goto hotfix_primary_boot_sector;
65155 brelse(bh_backup);
65156 } else if (!silent)
65157- ntfs_error(sb, read_err_str, "backup");
65158+ ntfs_error(sb, read_err_str, "%s", "backup");
65159 /* Try to read NT3.51- backup boot sector. */
65160 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
65161 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
65162@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65163 "sector.");
65164 brelse(bh_backup);
65165 } else if (!silent)
65166- ntfs_error(sb, read_err_str, "backup");
65167+ ntfs_error(sb, read_err_str, "%s", "backup");
65168 /* We failed. Cleanup and return. */
65169 if (bh_primary)
65170 brelse(bh_primary);
65171diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
65172index 0440134..d52c93a 100644
65173--- a/fs/ocfs2/localalloc.c
65174+++ b/fs/ocfs2/localalloc.c
65175@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
65176 goto bail;
65177 }
65178
65179- atomic_inc(&osb->alloc_stats.moves);
65180+ atomic_inc_unchecked(&osb->alloc_stats.moves);
65181
65182 bail:
65183 if (handle)
65184diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
65185index 8add6f1..b931e04 100644
65186--- a/fs/ocfs2/namei.c
65187+++ b/fs/ocfs2/namei.c
65188@@ -158,7 +158,7 @@ bail_add:
65189 * NOTE: This dentry already has ->d_op set from
65190 * ocfs2_get_parent() and ocfs2_get_dentry()
65191 */
65192- if (ret)
65193+ if (!IS_ERR_OR_NULL(ret))
65194 dentry = ret;
65195
65196 status = ocfs2_dentry_attach_lock(dentry, inode,
65197diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
65198index bbec539..7b266d5 100644
65199--- a/fs/ocfs2/ocfs2.h
65200+++ b/fs/ocfs2/ocfs2.h
65201@@ -236,11 +236,11 @@ enum ocfs2_vol_state
65202
65203 struct ocfs2_alloc_stats
65204 {
65205- atomic_t moves;
65206- atomic_t local_data;
65207- atomic_t bitmap_data;
65208- atomic_t bg_allocs;
65209- atomic_t bg_extends;
65210+ atomic_unchecked_t moves;
65211+ atomic_unchecked_t local_data;
65212+ atomic_unchecked_t bitmap_data;
65213+ atomic_unchecked_t bg_allocs;
65214+ atomic_unchecked_t bg_extends;
65215 };
65216
65217 enum ocfs2_local_alloc_state
65218diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
65219index 0cb889a..6a26b24 100644
65220--- a/fs/ocfs2/suballoc.c
65221+++ b/fs/ocfs2/suballoc.c
65222@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
65223 mlog_errno(status);
65224 goto bail;
65225 }
65226- atomic_inc(&osb->alloc_stats.bg_extends);
65227+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
65228
65229 /* You should never ask for this much metadata */
65230 BUG_ON(bits_wanted >
65231@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
65232 mlog_errno(status);
65233 goto bail;
65234 }
65235- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65236+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65237
65238 *suballoc_loc = res.sr_bg_blkno;
65239 *suballoc_bit_start = res.sr_bit_offset;
65240@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
65241 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
65242 res->sr_bits);
65243
65244- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65245+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65246
65247 BUG_ON(res->sr_bits != 1);
65248
65249@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
65250 mlog_errno(status);
65251 goto bail;
65252 }
65253- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65254+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65255
65256 BUG_ON(res.sr_bits != 1);
65257
65258@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65259 cluster_start,
65260 num_clusters);
65261 if (!status)
65262- atomic_inc(&osb->alloc_stats.local_data);
65263+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
65264 } else {
65265 if (min_clusters > (osb->bitmap_cpg - 1)) {
65266 /* The only paths asking for contiguousness
65267@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65268 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
65269 res.sr_bg_blkno,
65270 res.sr_bit_offset);
65271- atomic_inc(&osb->alloc_stats.bitmap_data);
65272+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
65273 *num_clusters = res.sr_bits;
65274 }
65275 }
65276diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
65277index 4142546..69375a9 100644
65278--- a/fs/ocfs2/super.c
65279+++ b/fs/ocfs2/super.c
65280@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
65281 "%10s => GlobalAllocs: %d LocalAllocs: %d "
65282 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
65283 "Stats",
65284- atomic_read(&osb->alloc_stats.bitmap_data),
65285- atomic_read(&osb->alloc_stats.local_data),
65286- atomic_read(&osb->alloc_stats.bg_allocs),
65287- atomic_read(&osb->alloc_stats.moves),
65288- atomic_read(&osb->alloc_stats.bg_extends));
65289+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
65290+ atomic_read_unchecked(&osb->alloc_stats.local_data),
65291+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
65292+ atomic_read_unchecked(&osb->alloc_stats.moves),
65293+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
65294
65295 out += snprintf(buf + out, len - out,
65296 "%10s => State: %u Descriptor: %llu Size: %u bits "
65297@@ -2100,11 +2100,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
65298
65299 mutex_init(&osb->system_file_mutex);
65300
65301- atomic_set(&osb->alloc_stats.moves, 0);
65302- atomic_set(&osb->alloc_stats.local_data, 0);
65303- atomic_set(&osb->alloc_stats.bitmap_data, 0);
65304- atomic_set(&osb->alloc_stats.bg_allocs, 0);
65305- atomic_set(&osb->alloc_stats.bg_extends, 0);
65306+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
65307+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
65308+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
65309+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
65310+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
65311
65312 /* Copy the blockcheck stats from the superblock probe */
65313 osb->osb_ecc_stats = *stats;
65314diff --git a/fs/open.c b/fs/open.c
65315index d6fd3ac..6ccf474 100644
65316--- a/fs/open.c
65317+++ b/fs/open.c
65318@@ -32,6 +32,8 @@
65319 #include <linux/dnotify.h>
65320 #include <linux/compat.h>
65321
65322+#define CREATE_TRACE_POINTS
65323+#include <trace/events/fs.h>
65324 #include "internal.h"
65325
65326 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
65327@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
65328 error = locks_verify_truncate(inode, NULL, length);
65329 if (!error)
65330 error = security_path_truncate(path);
65331+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
65332+ error = -EACCES;
65333 if (!error)
65334 error = do_truncate(path->dentry, length, 0, NULL);
65335
65336@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
65337 error = locks_verify_truncate(inode, f.file, length);
65338 if (!error)
65339 error = security_path_truncate(&f.file->f_path);
65340+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
65341+ error = -EACCES;
65342 if (!error)
65343 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
65344 sb_end_write(inode->i_sb);
65345@@ -380,6 +386,9 @@ retry:
65346 if (__mnt_is_readonly(path.mnt))
65347 res = -EROFS;
65348
65349+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
65350+ res = -EACCES;
65351+
65352 out_path_release:
65353 path_put(&path);
65354 if (retry_estale(res, lookup_flags)) {
65355@@ -411,6 +420,8 @@ retry:
65356 if (error)
65357 goto dput_and_out;
65358
65359+ gr_log_chdir(path.dentry, path.mnt);
65360+
65361 set_fs_pwd(current->fs, &path);
65362
65363 dput_and_out:
65364@@ -440,6 +451,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
65365 goto out_putf;
65366
65367 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
65368+
65369+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
65370+ error = -EPERM;
65371+
65372+ if (!error)
65373+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
65374+
65375 if (!error)
65376 set_fs_pwd(current->fs, &f.file->f_path);
65377 out_putf:
65378@@ -469,7 +487,13 @@ retry:
65379 if (error)
65380 goto dput_and_out;
65381
65382+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
65383+ goto dput_and_out;
65384+
65385 set_fs_root(current->fs, &path);
65386+
65387+ gr_handle_chroot_chdir(&path);
65388+
65389 error = 0;
65390 dput_and_out:
65391 path_put(&path);
65392@@ -493,6 +517,16 @@ static int chmod_common(struct path *path, umode_t mode)
65393 return error;
65394 retry_deleg:
65395 mutex_lock(&inode->i_mutex);
65396+
65397+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65398+ error = -EACCES;
65399+ goto out_unlock;
65400+ }
65401+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65402+ error = -EACCES;
65403+ goto out_unlock;
65404+ }
65405+
65406 error = security_path_chmod(path, mode);
65407 if (error)
65408 goto out_unlock;
65409@@ -558,6 +592,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65410 uid = make_kuid(current_user_ns(), user);
65411 gid = make_kgid(current_user_ns(), group);
65412
65413+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65414+ return -EACCES;
65415+
65416 newattrs.ia_valid = ATTR_CTIME;
65417 if (user != (uid_t) -1) {
65418 if (!uid_valid(uid))
65419@@ -983,6 +1020,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65420 } else {
65421 fsnotify_open(f);
65422 fd_install(fd, f);
65423+ trace_do_sys_open(tmp->name, flags, mode);
65424 }
65425 }
65426 putname(tmp);
65427diff --git a/fs/pipe.c b/fs/pipe.c
65428index 21981e5..3d5f55c 100644
65429--- a/fs/pipe.c
65430+++ b/fs/pipe.c
65431@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65432
65433 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65434 {
65435- if (pipe->files)
65436+ if (atomic_read(&pipe->files))
65437 mutex_lock_nested(&pipe->mutex, subclass);
65438 }
65439
65440@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65441
65442 void pipe_unlock(struct pipe_inode_info *pipe)
65443 {
65444- if (pipe->files)
65445+ if (atomic_read(&pipe->files))
65446 mutex_unlock(&pipe->mutex);
65447 }
65448 EXPORT_SYMBOL(pipe_unlock);
65449@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65450 }
65451 if (bufs) /* More to do? */
65452 continue;
65453- if (!pipe->writers)
65454+ if (!atomic_read(&pipe->writers))
65455 break;
65456- if (!pipe->waiting_writers) {
65457+ if (!atomic_read(&pipe->waiting_writers)) {
65458 /* syscall merging: Usually we must not sleep
65459 * if O_NONBLOCK is set, or if we got some data.
65460 * But if a writer sleeps in kernel space, then
65461@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65462
65463 __pipe_lock(pipe);
65464
65465- if (!pipe->readers) {
65466+ if (!atomic_read(&pipe->readers)) {
65467 send_sig(SIGPIPE, current, 0);
65468 ret = -EPIPE;
65469 goto out;
65470@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65471 for (;;) {
65472 int bufs;
65473
65474- if (!pipe->readers) {
65475+ if (!atomic_read(&pipe->readers)) {
65476 send_sig(SIGPIPE, current, 0);
65477 if (!ret)
65478 ret = -EPIPE;
65479@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65480 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65481 do_wakeup = 0;
65482 }
65483- pipe->waiting_writers++;
65484+ atomic_inc(&pipe->waiting_writers);
65485 pipe_wait(pipe);
65486- pipe->waiting_writers--;
65487+ atomic_dec(&pipe->waiting_writers);
65488 }
65489 out:
65490 __pipe_unlock(pipe);
65491@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65492 mask = 0;
65493 if (filp->f_mode & FMODE_READ) {
65494 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65495- if (!pipe->writers && filp->f_version != pipe->w_counter)
65496+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65497 mask |= POLLHUP;
65498 }
65499
65500@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65501 * Most Unices do not set POLLERR for FIFOs but on Linux they
65502 * behave exactly like pipes for poll().
65503 */
65504- if (!pipe->readers)
65505+ if (!atomic_read(&pipe->readers))
65506 mask |= POLLERR;
65507 }
65508
65509@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65510 int kill = 0;
65511
65512 spin_lock(&inode->i_lock);
65513- if (!--pipe->files) {
65514+ if (atomic_dec_and_test(&pipe->files)) {
65515 inode->i_pipe = NULL;
65516 kill = 1;
65517 }
65518@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65519
65520 __pipe_lock(pipe);
65521 if (file->f_mode & FMODE_READ)
65522- pipe->readers--;
65523+ atomic_dec(&pipe->readers);
65524 if (file->f_mode & FMODE_WRITE)
65525- pipe->writers--;
65526+ atomic_dec(&pipe->writers);
65527
65528- if (pipe->readers || pipe->writers) {
65529+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65530 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65531 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65532 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65533@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65534 kfree(pipe);
65535 }
65536
65537-static struct vfsmount *pipe_mnt __read_mostly;
65538+struct vfsmount *pipe_mnt __read_mostly;
65539
65540 /*
65541 * pipefs_dname() is called from d_path().
65542@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65543 goto fail_iput;
65544
65545 inode->i_pipe = pipe;
65546- pipe->files = 2;
65547- pipe->readers = pipe->writers = 1;
65548+ atomic_set(&pipe->files, 2);
65549+ atomic_set(&pipe->readers, 1);
65550+ atomic_set(&pipe->writers, 1);
65551 inode->i_fop = &pipefifo_fops;
65552
65553 /*
65554@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65555 spin_lock(&inode->i_lock);
65556 if (inode->i_pipe) {
65557 pipe = inode->i_pipe;
65558- pipe->files++;
65559+ atomic_inc(&pipe->files);
65560 spin_unlock(&inode->i_lock);
65561 } else {
65562 spin_unlock(&inode->i_lock);
65563 pipe = alloc_pipe_info();
65564 if (!pipe)
65565 return -ENOMEM;
65566- pipe->files = 1;
65567+ atomic_set(&pipe->files, 1);
65568 spin_lock(&inode->i_lock);
65569 if (unlikely(inode->i_pipe)) {
65570- inode->i_pipe->files++;
65571+ atomic_inc(&inode->i_pipe->files);
65572 spin_unlock(&inode->i_lock);
65573 free_pipe_info(pipe);
65574 pipe = inode->i_pipe;
65575@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65576 * opened, even when there is no process writing the FIFO.
65577 */
65578 pipe->r_counter++;
65579- if (pipe->readers++ == 0)
65580+ if (atomic_inc_return(&pipe->readers) == 1)
65581 wake_up_partner(pipe);
65582
65583- if (!is_pipe && !pipe->writers) {
65584+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65585 if ((filp->f_flags & O_NONBLOCK)) {
65586 /* suppress POLLHUP until we have
65587 * seen a writer */
65588@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65589 * errno=ENXIO when there is no process reading the FIFO.
65590 */
65591 ret = -ENXIO;
65592- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65593+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65594 goto err;
65595
65596 pipe->w_counter++;
65597- if (!pipe->writers++)
65598+ if (atomic_inc_return(&pipe->writers) == 1)
65599 wake_up_partner(pipe);
65600
65601- if (!is_pipe && !pipe->readers) {
65602+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65603 if (wait_for_partner(pipe, &pipe->r_counter))
65604 goto err_wr;
65605 }
65606@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65607 * the process can at least talk to itself.
65608 */
65609
65610- pipe->readers++;
65611- pipe->writers++;
65612+ atomic_inc(&pipe->readers);
65613+ atomic_inc(&pipe->writers);
65614 pipe->r_counter++;
65615 pipe->w_counter++;
65616- if (pipe->readers == 1 || pipe->writers == 1)
65617+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65618 wake_up_partner(pipe);
65619 break;
65620
65621@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65622 return 0;
65623
65624 err_rd:
65625- if (!--pipe->readers)
65626+ if (atomic_dec_and_test(&pipe->readers))
65627 wake_up_interruptible(&pipe->wait);
65628 ret = -ERESTARTSYS;
65629 goto err;
65630
65631 err_wr:
65632- if (!--pipe->writers)
65633+ if (atomic_dec_and_test(&pipe->writers))
65634 wake_up_interruptible(&pipe->wait);
65635 ret = -ERESTARTSYS;
65636 goto err;
65637diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65638index 0855f77..6787d50 100644
65639--- a/fs/posix_acl.c
65640+++ b/fs/posix_acl.c
65641@@ -20,6 +20,7 @@
65642 #include <linux/xattr.h>
65643 #include <linux/export.h>
65644 #include <linux/user_namespace.h>
65645+#include <linux/grsecurity.h>
65646
65647 struct posix_acl **acl_by_type(struct inode *inode, int type)
65648 {
65649@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65650 }
65651 }
65652 if (mode_p)
65653- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65654+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65655 return not_equiv;
65656 }
65657 EXPORT_SYMBOL(posix_acl_equiv_mode);
65658@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65659 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65660 }
65661
65662- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65663+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65664 return not_equiv;
65665 }
65666
65667@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65668 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65669 int err = -ENOMEM;
65670 if (clone) {
65671+ *mode_p &= ~gr_acl_umask();
65672+
65673 err = posix_acl_create_masq(clone, mode_p);
65674 if (err < 0) {
65675 posix_acl_release(clone);
65676@@ -659,11 +662,12 @@ struct posix_acl *
65677 posix_acl_from_xattr(struct user_namespace *user_ns,
65678 const void *value, size_t size)
65679 {
65680- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65681- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65682+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65683+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65684 int count;
65685 struct posix_acl *acl;
65686 struct posix_acl_entry *acl_e;
65687+ umode_t umask = gr_acl_umask();
65688
65689 if (!value)
65690 return NULL;
65691@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65692
65693 switch(acl_e->e_tag) {
65694 case ACL_USER_OBJ:
65695+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65696+ break;
65697 case ACL_GROUP_OBJ:
65698 case ACL_MASK:
65699+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65700+ break;
65701 case ACL_OTHER:
65702+ acl_e->e_perm &= ~(umask & S_IRWXO);
65703 break;
65704
65705 case ACL_USER:
65706+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65707 acl_e->e_uid =
65708 make_kuid(user_ns,
65709 le32_to_cpu(entry->e_id));
65710@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65711 goto fail;
65712 break;
65713 case ACL_GROUP:
65714+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65715 acl_e->e_gid =
65716 make_kgid(user_ns,
65717 le32_to_cpu(entry->e_id));
65718diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65719index 2183fcf..3c32a98 100644
65720--- a/fs/proc/Kconfig
65721+++ b/fs/proc/Kconfig
65722@@ -30,7 +30,7 @@ config PROC_FS
65723
65724 config PROC_KCORE
65725 bool "/proc/kcore support" if !ARM
65726- depends on PROC_FS && MMU
65727+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65728 help
65729 Provides a virtual ELF core file of the live kernel. This can
65730 be read with gdb and other ELF tools. No modifications can be
65731@@ -38,8 +38,8 @@ config PROC_KCORE
65732
65733 config PROC_VMCORE
65734 bool "/proc/vmcore support"
65735- depends on PROC_FS && CRASH_DUMP
65736- default y
65737+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
65738+ default n
65739 help
65740 Exports the dump image of crashed kernel in ELF format.
65741
65742@@ -63,8 +63,8 @@ config PROC_SYSCTL
65743 limited in memory.
65744
65745 config PROC_PAGE_MONITOR
65746- default y
65747- depends on PROC_FS && MMU
65748+ default n
65749+ depends on PROC_FS && MMU && !GRKERNSEC
65750 bool "Enable /proc page monitoring" if EXPERT
65751 help
65752 Various /proc files exist to monitor process memory utilization:
65753diff --git a/fs/proc/array.c b/fs/proc/array.c
65754index cd3653e..9b9b79a 100644
65755--- a/fs/proc/array.c
65756+++ b/fs/proc/array.c
65757@@ -60,6 +60,7 @@
65758 #include <linux/tty.h>
65759 #include <linux/string.h>
65760 #include <linux/mman.h>
65761+#include <linux/grsecurity.h>
65762 #include <linux/proc_fs.h>
65763 #include <linux/ioport.h>
65764 #include <linux/uaccess.h>
65765@@ -347,6 +348,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
65766 seq_putc(m, '\n');
65767 }
65768
65769+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65770+static inline void task_pax(struct seq_file *m, struct task_struct *p)
65771+{
65772+ if (p->mm)
65773+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
65774+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
65775+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
65776+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
65777+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
65778+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
65779+ else
65780+ seq_printf(m, "PaX:\t-----\n");
65781+}
65782+#endif
65783+
65784 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65785 struct pid *pid, struct task_struct *task)
65786 {
65787@@ -365,9 +381,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65788 task_cpus_allowed(m, task);
65789 cpuset_task_status_allowed(m, task);
65790 task_context_switch_counts(m, task);
65791+
65792+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65793+ task_pax(m, task);
65794+#endif
65795+
65796+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
65797+ task_grsec_rbac(m, task);
65798+#endif
65799+
65800 return 0;
65801 }
65802
65803+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65804+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65805+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65806+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65807+#endif
65808+
65809 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65810 struct pid *pid, struct task_struct *task, int whole)
65811 {
65812@@ -389,6 +420,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65813 char tcomm[sizeof(task->comm)];
65814 unsigned long flags;
65815
65816+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65817+ if (current->exec_id != m->exec_id) {
65818+ gr_log_badprocpid("stat");
65819+ return 0;
65820+ }
65821+#endif
65822+
65823 state = *get_task_state(task);
65824 vsize = eip = esp = 0;
65825 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65826@@ -459,6 +497,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65827 gtime = task_gtime(task);
65828 }
65829
65830+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65831+ if (PAX_RAND_FLAGS(mm)) {
65832+ eip = 0;
65833+ esp = 0;
65834+ wchan = 0;
65835+ }
65836+#endif
65837+#ifdef CONFIG_GRKERNSEC_HIDESYM
65838+ wchan = 0;
65839+ eip =0;
65840+ esp =0;
65841+#endif
65842+
65843 /* scale priority and nice values from timeslices to -20..20 */
65844 /* to make it look like a "normal" Unix priority/nice value */
65845 priority = task_prio(task);
65846@@ -490,9 +541,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65847 seq_put_decimal_ull(m, ' ', vsize);
65848 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
65849 seq_put_decimal_ull(m, ' ', rsslim);
65850+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65851+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
65852+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
65853+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
65854+#else
65855 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
65856 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
65857 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
65858+#endif
65859 seq_put_decimal_ull(m, ' ', esp);
65860 seq_put_decimal_ull(m, ' ', eip);
65861 /* The signal information here is obsolete.
65862@@ -514,7 +571,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65863 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
65864 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
65865
65866- if (mm && permitted) {
65867+ if (mm && permitted
65868+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65869+ && !PAX_RAND_FLAGS(mm)
65870+#endif
65871+ ) {
65872 seq_put_decimal_ull(m, ' ', mm->start_data);
65873 seq_put_decimal_ull(m, ' ', mm->end_data);
65874 seq_put_decimal_ull(m, ' ', mm->start_brk);
65875@@ -552,8 +613,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65876 struct pid *pid, struct task_struct *task)
65877 {
65878 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
65879- struct mm_struct *mm = get_task_mm(task);
65880+ struct mm_struct *mm;
65881
65882+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65883+ if (current->exec_id != m->exec_id) {
65884+ gr_log_badprocpid("statm");
65885+ return 0;
65886+ }
65887+#endif
65888+ mm = get_task_mm(task);
65889 if (mm) {
65890 size = task_statm(mm, &shared, &text, &data, &resident);
65891 mmput(mm);
65892@@ -576,6 +644,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65893 return 0;
65894 }
65895
65896+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65897+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
65898+{
65899+ unsigned long flags;
65900+ u32 curr_ip = 0;
65901+
65902+ if (lock_task_sighand(task, &flags)) {
65903+ curr_ip = task->signal->curr_ip;
65904+ unlock_task_sighand(task, &flags);
65905+ }
65906+ return seq_printf(m, "%pI4\n", &curr_ip);
65907+}
65908+#endif
65909+
65910 #ifdef CONFIG_CHECKPOINT_RESTORE
65911 static struct pid *
65912 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
65913diff --git a/fs/proc/base.c b/fs/proc/base.c
65914index baf852b..03fe930 100644
65915--- a/fs/proc/base.c
65916+++ b/fs/proc/base.c
65917@@ -113,6 +113,14 @@ struct pid_entry {
65918 union proc_op op;
65919 };
65920
65921+struct getdents_callback {
65922+ struct linux_dirent __user * current_dir;
65923+ struct linux_dirent __user * previous;
65924+ struct file * file;
65925+ int count;
65926+ int error;
65927+};
65928+
65929 #define NOD(NAME, MODE, IOP, FOP, OP) { \
65930 .name = (NAME), \
65931 .len = sizeof(NAME) - 1, \
65932@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
65933 return 0;
65934 }
65935
65936+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65937+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65938+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65939+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65940+#endif
65941+
65942 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65943 struct pid *pid, struct task_struct *task)
65944 {
65945 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
65946 if (mm && !IS_ERR(mm)) {
65947 unsigned int nwords = 0;
65948+
65949+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65950+ /* allow if we're currently ptracing this task */
65951+ if (PAX_RAND_FLAGS(mm) &&
65952+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
65953+ mmput(mm);
65954+ return 0;
65955+ }
65956+#endif
65957+
65958 do {
65959 nwords += 2;
65960 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
65961@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65962 }
65963
65964
65965-#ifdef CONFIG_KALLSYMS
65966+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65967 /*
65968 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
65969 * Returns the resolved symbol. If that fails, simply return the address.
65970@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
65971 mutex_unlock(&task->signal->cred_guard_mutex);
65972 }
65973
65974-#ifdef CONFIG_STACKTRACE
65975+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65976
65977 #define MAX_STACK_TRACE_DEPTH 64
65978
65979@@ -487,7 +511,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
65980 return 0;
65981 }
65982
65983-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65984+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65985 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65986 struct pid *pid, struct task_struct *task)
65987 {
65988@@ -517,7 +541,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65989 /************************************************************************/
65990
65991 /* permission checks */
65992-static int proc_fd_access_allowed(struct inode *inode)
65993+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
65994 {
65995 struct task_struct *task;
65996 int allowed = 0;
65997@@ -527,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
65998 */
65999 task = get_proc_task(inode);
66000 if (task) {
66001- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66002+ if (log)
66003+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66004+ else
66005+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66006 put_task_struct(task);
66007 }
66008 return allowed;
66009@@ -558,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
66010 struct task_struct *task,
66011 int hide_pid_min)
66012 {
66013+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66014+ return false;
66015+
66016+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66017+ rcu_read_lock();
66018+ {
66019+ const struct cred *tmpcred = current_cred();
66020+ const struct cred *cred = __task_cred(task);
66021+
66022+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
66023+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66024+ || in_group_p(grsec_proc_gid)
66025+#endif
66026+ ) {
66027+ rcu_read_unlock();
66028+ return true;
66029+ }
66030+ }
66031+ rcu_read_unlock();
66032+
66033+ if (!pid->hide_pid)
66034+ return false;
66035+#endif
66036+
66037 if (pid->hide_pid < hide_pid_min)
66038 return true;
66039 if (in_group_p(pid->pid_gid))
66040 return true;
66041+
66042 return ptrace_may_access(task, PTRACE_MODE_READ);
66043 }
66044
66045@@ -579,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
66046 put_task_struct(task);
66047
66048 if (!has_perms) {
66049+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66050+ {
66051+#else
66052 if (pid->hide_pid == 2) {
66053+#endif
66054 /*
66055 * Let's make getdents(), stat(), and open()
66056 * consistent with each other. If a process
66057@@ -640,6 +696,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66058 if (!task)
66059 return -ESRCH;
66060
66061+ if (gr_acl_handle_procpidmem(task)) {
66062+ put_task_struct(task);
66063+ return -EPERM;
66064+ }
66065+
66066 mm = mm_access(task, mode);
66067 put_task_struct(task);
66068
66069@@ -655,6 +716,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66070
66071 file->private_data = mm;
66072
66073+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66074+ file->f_version = current->exec_id;
66075+#endif
66076+
66077 return 0;
66078 }
66079
66080@@ -676,6 +741,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66081 ssize_t copied;
66082 char *page;
66083
66084+#ifdef CONFIG_GRKERNSEC
66085+ if (write)
66086+ return -EPERM;
66087+#endif
66088+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66089+ if (file->f_version != current->exec_id) {
66090+ gr_log_badprocpid("mem");
66091+ return 0;
66092+ }
66093+#endif
66094+
66095 if (!mm)
66096 return 0;
66097
66098@@ -688,7 +764,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66099 goto free;
66100
66101 while (count > 0) {
66102- int this_len = min_t(int, count, PAGE_SIZE);
66103+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
66104
66105 if (write && copy_from_user(page, buf, this_len)) {
66106 copied = -EFAULT;
66107@@ -780,6 +856,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66108 if (!mm)
66109 return 0;
66110
66111+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66112+ if (file->f_version != current->exec_id) {
66113+ gr_log_badprocpid("environ");
66114+ return 0;
66115+ }
66116+#endif
66117+
66118 page = (char *)__get_free_page(GFP_TEMPORARY);
66119 if (!page)
66120 return -ENOMEM;
66121@@ -789,7 +872,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66122 goto free;
66123 while (count > 0) {
66124 size_t this_len, max_len;
66125- int retval;
66126+ ssize_t retval;
66127
66128 if (src >= (mm->env_end - mm->env_start))
66129 break;
66130@@ -1403,7 +1486,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
66131 int error = -EACCES;
66132
66133 /* Are we allowed to snoop on the tasks file descriptors? */
66134- if (!proc_fd_access_allowed(inode))
66135+ if (!proc_fd_access_allowed(inode, 0))
66136 goto out;
66137
66138 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66139@@ -1447,8 +1530,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
66140 struct path path;
66141
66142 /* Are we allowed to snoop on the tasks file descriptors? */
66143- if (!proc_fd_access_allowed(inode))
66144- goto out;
66145+ /* logging this is needed for learning on chromium to work properly,
66146+ but we don't want to flood the logs from 'ps' which does a readlink
66147+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
66148+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
66149+ */
66150+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
66151+ if (!proc_fd_access_allowed(inode,0))
66152+ goto out;
66153+ } else {
66154+ if (!proc_fd_access_allowed(inode,1))
66155+ goto out;
66156+ }
66157
66158 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66159 if (error)
66160@@ -1498,7 +1591,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
66161 rcu_read_lock();
66162 cred = __task_cred(task);
66163 inode->i_uid = cred->euid;
66164+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66165+ inode->i_gid = grsec_proc_gid;
66166+#else
66167 inode->i_gid = cred->egid;
66168+#endif
66169 rcu_read_unlock();
66170 }
66171 security_task_to_inode(task, inode);
66172@@ -1534,10 +1631,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
66173 return -ENOENT;
66174 }
66175 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66176+#ifdef CONFIG_GRKERNSEC_PROC_USER
66177+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66178+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66179+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66180+#endif
66181 task_dumpable(task)) {
66182 cred = __task_cred(task);
66183 stat->uid = cred->euid;
66184+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66185+ stat->gid = grsec_proc_gid;
66186+#else
66187 stat->gid = cred->egid;
66188+#endif
66189 }
66190 }
66191 rcu_read_unlock();
66192@@ -1575,11 +1681,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
66193
66194 if (task) {
66195 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66196+#ifdef CONFIG_GRKERNSEC_PROC_USER
66197+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66198+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66199+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66200+#endif
66201 task_dumpable(task)) {
66202 rcu_read_lock();
66203 cred = __task_cred(task);
66204 inode->i_uid = cred->euid;
66205+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66206+ inode->i_gid = grsec_proc_gid;
66207+#else
66208 inode->i_gid = cred->egid;
66209+#endif
66210 rcu_read_unlock();
66211 } else {
66212 inode->i_uid = GLOBAL_ROOT_UID;
66213@@ -2114,6 +2229,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
66214 if (!task)
66215 goto out_no_task;
66216
66217+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66218+ goto out;
66219+
66220 /*
66221 * Yes, it does not scale. And it should not. Don't add
66222 * new entries into /proc/<tgid>/ without very good reasons.
66223@@ -2144,6 +2262,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
66224 if (!task)
66225 return -ENOENT;
66226
66227+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66228+ goto out;
66229+
66230 if (!dir_emit_dots(file, ctx))
66231 goto out;
66232
66233@@ -2535,7 +2656,7 @@ static const struct pid_entry tgid_base_stuff[] = {
66234 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
66235 #endif
66236 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66237-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66238+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66239 ONE("syscall", S_IRUSR, proc_pid_syscall),
66240 #endif
66241 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66242@@ -2560,10 +2681,10 @@ static const struct pid_entry tgid_base_stuff[] = {
66243 #ifdef CONFIG_SECURITY
66244 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66245 #endif
66246-#ifdef CONFIG_KALLSYMS
66247+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66248 ONE("wchan", S_IRUGO, proc_pid_wchan),
66249 #endif
66250-#ifdef CONFIG_STACKTRACE
66251+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66252 ONE("stack", S_IRUSR, proc_pid_stack),
66253 #endif
66254 #ifdef CONFIG_SCHEDSTATS
66255@@ -2597,6 +2718,9 @@ static const struct pid_entry tgid_base_stuff[] = {
66256 #ifdef CONFIG_HARDWALL
66257 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
66258 #endif
66259+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66260+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
66261+#endif
66262 #ifdef CONFIG_USER_NS
66263 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
66264 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
66265@@ -2727,7 +2851,14 @@ static int proc_pid_instantiate(struct inode *dir,
66266 if (!inode)
66267 goto out;
66268
66269+#ifdef CONFIG_GRKERNSEC_PROC_USER
66270+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
66271+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66272+ inode->i_gid = grsec_proc_gid;
66273+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
66274+#else
66275 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
66276+#endif
66277 inode->i_op = &proc_tgid_base_inode_operations;
66278 inode->i_fop = &proc_tgid_base_operations;
66279 inode->i_flags|=S_IMMUTABLE;
66280@@ -2765,7 +2896,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
66281 if (!task)
66282 goto out;
66283
66284+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66285+ goto out_put_task;
66286+
66287 result = proc_pid_instantiate(dir, dentry, task, NULL);
66288+out_put_task:
66289 put_task_struct(task);
66290 out:
66291 return ERR_PTR(result);
66292@@ -2879,7 +3014,7 @@ static const struct pid_entry tid_base_stuff[] = {
66293 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
66294 #endif
66295 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66296-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66297+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66298 ONE("syscall", S_IRUSR, proc_pid_syscall),
66299 #endif
66300 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66301@@ -2906,10 +3041,10 @@ static const struct pid_entry tid_base_stuff[] = {
66302 #ifdef CONFIG_SECURITY
66303 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66304 #endif
66305-#ifdef CONFIG_KALLSYMS
66306+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66307 ONE("wchan", S_IRUGO, proc_pid_wchan),
66308 #endif
66309-#ifdef CONFIG_STACKTRACE
66310+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66311 ONE("stack", S_IRUSR, proc_pid_stack),
66312 #endif
66313 #ifdef CONFIG_SCHEDSTATS
66314diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
66315index cbd82df..c0407d2 100644
66316--- a/fs/proc/cmdline.c
66317+++ b/fs/proc/cmdline.c
66318@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
66319
66320 static int __init proc_cmdline_init(void)
66321 {
66322+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66323+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
66324+#else
66325 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
66326+#endif
66327 return 0;
66328 }
66329 fs_initcall(proc_cmdline_init);
66330diff --git a/fs/proc/devices.c b/fs/proc/devices.c
66331index 50493ed..248166b 100644
66332--- a/fs/proc/devices.c
66333+++ b/fs/proc/devices.c
66334@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
66335
66336 static int __init proc_devices_init(void)
66337 {
66338+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66339+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
66340+#else
66341 proc_create("devices", 0, NULL, &proc_devinfo_operations);
66342+#endif
66343 return 0;
66344 }
66345 fs_initcall(proc_devices_init);
66346diff --git a/fs/proc/fd.c b/fs/proc/fd.c
66347index 955bb55..71948bd 100644
66348--- a/fs/proc/fd.c
66349+++ b/fs/proc/fd.c
66350@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
66351 if (!task)
66352 return -ENOENT;
66353
66354- files = get_files_struct(task);
66355+ if (!gr_acl_handle_procpidmem(task))
66356+ files = get_files_struct(task);
66357 put_task_struct(task);
66358
66359 if (files) {
66360@@ -285,11 +286,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
66361 */
66362 int proc_fd_permission(struct inode *inode, int mask)
66363 {
66364+ struct task_struct *task;
66365 int rv = generic_permission(inode, mask);
66366- if (rv == 0)
66367- return 0;
66368+
66369 if (task_tgid(current) == proc_pid(inode))
66370 rv = 0;
66371+
66372+ task = get_proc_task(inode);
66373+ if (task == NULL)
66374+ return rv;
66375+
66376+ if (gr_acl_handle_procpidmem(task))
66377+ rv = -EACCES;
66378+
66379+ put_task_struct(task);
66380+
66381 return rv;
66382 }
66383
66384diff --git a/fs/proc/generic.c b/fs/proc/generic.c
66385index 317b726..e329aed 100644
66386--- a/fs/proc/generic.c
66387+++ b/fs/proc/generic.c
66388@@ -23,6 +23,7 @@
66389 #include <linux/bitops.h>
66390 #include <linux/spinlock.h>
66391 #include <linux/completion.h>
66392+#include <linux/grsecurity.h>
66393 #include <asm/uaccess.h>
66394
66395 #include "internal.h"
66396@@ -207,6 +208,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66397 return proc_lookup_de(PDE(dir), dir, dentry);
66398 }
66399
66400+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66401+ unsigned int flags)
66402+{
66403+ if (gr_proc_is_restricted())
66404+ return ERR_PTR(-EACCES);
66405+
66406+ return proc_lookup_de(PDE(dir), dir, dentry);
66407+}
66408+
66409 /*
66410 * This returns non-zero if at EOF, so that the /proc
66411 * root directory can use this and check if it should
66412@@ -264,6 +274,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66413 return proc_readdir_de(PDE(inode), file, ctx);
66414 }
66415
66416+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66417+{
66418+ struct inode *inode = file_inode(file);
66419+
66420+ if (gr_proc_is_restricted())
66421+ return -EACCES;
66422+
66423+ return proc_readdir_de(PDE(inode), file, ctx);
66424+}
66425+
66426 /*
66427 * These are the generic /proc directory operations. They
66428 * use the in-memory "struct proc_dir_entry" tree to parse
66429@@ -275,6 +295,12 @@ static const struct file_operations proc_dir_operations = {
66430 .iterate = proc_readdir,
66431 };
66432
66433+static const struct file_operations proc_dir_restricted_operations = {
66434+ .llseek = generic_file_llseek,
66435+ .read = generic_read_dir,
66436+ .iterate = proc_readdir_restrict,
66437+};
66438+
66439 /*
66440 * proc directories can do almost nothing..
66441 */
66442@@ -284,6 +310,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66443 .setattr = proc_notify_change,
66444 };
66445
66446+static const struct inode_operations proc_dir_restricted_inode_operations = {
66447+ .lookup = proc_lookup_restrict,
66448+ .getattr = proc_getattr,
66449+ .setattr = proc_notify_change,
66450+};
66451+
66452 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66453 {
66454 struct proc_dir_entry *tmp;
66455@@ -294,8 +326,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
66456 return ret;
66457
66458 if (S_ISDIR(dp->mode)) {
66459- dp->proc_fops = &proc_dir_operations;
66460- dp->proc_iops = &proc_dir_inode_operations;
66461+ if (dp->restricted) {
66462+ dp->proc_fops = &proc_dir_restricted_operations;
66463+ dp->proc_iops = &proc_dir_restricted_inode_operations;
66464+ } else {
66465+ dp->proc_fops = &proc_dir_operations;
66466+ dp->proc_iops = &proc_dir_inode_operations;
66467+ }
66468 dir->nlink++;
66469 } else if (S_ISLNK(dp->mode)) {
66470 dp->proc_iops = &proc_link_inode_operations;
66471@@ -407,6 +444,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66472 }
66473 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66474
66475+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66476+ struct proc_dir_entry *parent, void *data)
66477+{
66478+ struct proc_dir_entry *ent;
66479+
66480+ if (mode == 0)
66481+ mode = S_IRUGO | S_IXUGO;
66482+
66483+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66484+ if (ent) {
66485+ ent->data = data;
66486+ ent->restricted = 1;
66487+ if (proc_register(parent, ent) < 0) {
66488+ kfree(ent);
66489+ ent = NULL;
66490+ }
66491+ }
66492+ return ent;
66493+}
66494+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66495+
66496 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66497 struct proc_dir_entry *parent)
66498 {
66499@@ -421,6 +479,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66500 }
66501 EXPORT_SYMBOL(proc_mkdir);
66502
66503+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66504+ struct proc_dir_entry *parent)
66505+{
66506+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66507+}
66508+EXPORT_SYMBOL(proc_mkdir_restrict);
66509+
66510 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66511 struct proc_dir_entry *parent,
66512 const struct file_operations *proc_fops,
66513diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66514index 333080d..0a35ec4 100644
66515--- a/fs/proc/inode.c
66516+++ b/fs/proc/inode.c
66517@@ -23,11 +23,17 @@
66518 #include <linux/slab.h>
66519 #include <linux/mount.h>
66520 #include <linux/magic.h>
66521+#include <linux/grsecurity.h>
66522
66523 #include <asm/uaccess.h>
66524
66525 #include "internal.h"
66526
66527+#ifdef CONFIG_PROC_SYSCTL
66528+extern const struct inode_operations proc_sys_inode_operations;
66529+extern const struct inode_operations proc_sys_dir_operations;
66530+#endif
66531+
66532 static void proc_evict_inode(struct inode *inode)
66533 {
66534 struct proc_dir_entry *de;
66535@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
66536 ns = PROC_I(inode)->ns.ns;
66537 if (ns_ops && ns)
66538 ns_ops->put(ns);
66539+
66540+#ifdef CONFIG_PROC_SYSCTL
66541+ if (inode->i_op == &proc_sys_inode_operations ||
66542+ inode->i_op == &proc_sys_dir_operations)
66543+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66544+#endif
66545+
66546 }
66547
66548 static struct kmem_cache * proc_inode_cachep;
66549@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66550 if (de->mode) {
66551 inode->i_mode = de->mode;
66552 inode->i_uid = de->uid;
66553+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66554+ inode->i_gid = grsec_proc_gid;
66555+#else
66556 inode->i_gid = de->gid;
66557+#endif
66558 }
66559 if (de->size)
66560 inode->i_size = de->size;
66561diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66562index 7da13e4..68d0981 100644
66563--- a/fs/proc/internal.h
66564+++ b/fs/proc/internal.h
66565@@ -46,9 +46,10 @@ struct proc_dir_entry {
66566 struct completion *pde_unload_completion;
66567 struct list_head pde_openers; /* who did ->open, but not ->release */
66568 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66569+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66570 u8 namelen;
66571 char name[];
66572-};
66573+} __randomize_layout;
66574
66575 union proc_op {
66576 int (*proc_get_link)(struct dentry *, struct path *);
66577@@ -66,7 +67,7 @@ struct proc_inode {
66578 struct ctl_table *sysctl_entry;
66579 struct proc_ns ns;
66580 struct inode vfs_inode;
66581-};
66582+} __randomize_layout;
66583
66584 /*
66585 * General functions
66586@@ -154,6 +155,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66587 struct pid *, struct task_struct *);
66588 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66589 struct pid *, struct task_struct *);
66590+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66591+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66592+ struct pid *, struct task_struct *);
66593+#endif
66594
66595 /*
66596 * base.c
66597@@ -178,9 +183,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66598 * generic.c
66599 */
66600 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66601+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66602 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66603 struct dentry *);
66604 extern int proc_readdir(struct file *, struct dir_context *);
66605+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66606 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66607
66608 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
66609diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66610index a352d57..cb94a5c 100644
66611--- a/fs/proc/interrupts.c
66612+++ b/fs/proc/interrupts.c
66613@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66614
66615 static int __init proc_interrupts_init(void)
66616 {
66617+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66618+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66619+#else
66620 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66621+#endif
66622 return 0;
66623 }
66624 fs_initcall(proc_interrupts_init);
66625diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66626index 6df8d07..3321060 100644
66627--- a/fs/proc/kcore.c
66628+++ b/fs/proc/kcore.c
66629@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66630 * the addresses in the elf_phdr on our list.
66631 */
66632 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66633- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66634+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66635+ if (tsz > buflen)
66636 tsz = buflen;
66637-
66638+
66639 while (buflen) {
66640 struct kcore_list *m;
66641
66642@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66643 kfree(elf_buf);
66644 } else {
66645 if (kern_addr_valid(start)) {
66646- unsigned long n;
66647+ char *elf_buf;
66648+ mm_segment_t oldfs;
66649
66650- n = copy_to_user(buffer, (char *)start, tsz);
66651- /*
66652- * We cannot distinguish between fault on source
66653- * and fault on destination. When this happens
66654- * we clear too and hope it will trigger the
66655- * EFAULT again.
66656- */
66657- if (n) {
66658- if (clear_user(buffer + tsz - n,
66659- n))
66660+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66661+ if (!elf_buf)
66662+ return -ENOMEM;
66663+ oldfs = get_fs();
66664+ set_fs(KERNEL_DS);
66665+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66666+ set_fs(oldfs);
66667+ if (copy_to_user(buffer, elf_buf, tsz)) {
66668+ kfree(elf_buf);
66669 return -EFAULT;
66670+ }
66671 }
66672+ set_fs(oldfs);
66673+ kfree(elf_buf);
66674 } else {
66675 if (clear_user(buffer, tsz))
66676 return -EFAULT;
66677@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66678
66679 static int open_kcore(struct inode *inode, struct file *filp)
66680 {
66681+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66682+ return -EPERM;
66683+#endif
66684 if (!capable(CAP_SYS_RAWIO))
66685 return -EPERM;
66686 if (kcore_need_update)
66687diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66688index aa1eee0..03dda72 100644
66689--- a/fs/proc/meminfo.c
66690+++ b/fs/proc/meminfo.c
66691@@ -187,7 +187,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66692 vmi.used >> 10,
66693 vmi.largest_chunk >> 10
66694 #ifdef CONFIG_MEMORY_FAILURE
66695- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66696+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66697 #endif
66698 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66699 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66700diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66701index d4a3574..b421ce9 100644
66702--- a/fs/proc/nommu.c
66703+++ b/fs/proc/nommu.c
66704@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66705
66706 if (file) {
66707 seq_pad(m, ' ');
66708- seq_path(m, &file->f_path, "");
66709+ seq_path(m, &file->f_path, "\n\\");
66710 }
66711
66712 seq_putc(m, '\n');
66713diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66714index a63af3e..b4f262a 100644
66715--- a/fs/proc/proc_net.c
66716+++ b/fs/proc/proc_net.c
66717@@ -23,9 +23,27 @@
66718 #include <linux/nsproxy.h>
66719 #include <net/net_namespace.h>
66720 #include <linux/seq_file.h>
66721+#include <linux/grsecurity.h>
66722
66723 #include "internal.h"
66724
66725+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66726+static struct seq_operations *ipv6_seq_ops_addr;
66727+
66728+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66729+{
66730+ ipv6_seq_ops_addr = addr;
66731+}
66732+
66733+void unregister_ipv6_seq_ops_addr(void)
66734+{
66735+ ipv6_seq_ops_addr = NULL;
66736+}
66737+
66738+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
66739+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
66740+#endif
66741+
66742 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
66743 {
66744 return pde->parent->data;
66745@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
66746 return maybe_get_net(PDE_NET(PDE(inode)));
66747 }
66748
66749+extern const struct seq_operations dev_seq_ops;
66750+
66751 int seq_open_net(struct inode *ino, struct file *f,
66752 const struct seq_operations *ops, int size)
66753 {
66754@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
66755
66756 BUG_ON(size < sizeof(*p));
66757
66758+ /* only permit access to /proc/net/dev */
66759+ if (
66760+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66761+ ops != ipv6_seq_ops_addr &&
66762+#endif
66763+ ops != &dev_seq_ops && gr_proc_is_restricted())
66764+ return -EACCES;
66765+
66766 net = get_proc_net(ino);
66767 if (net == NULL)
66768 return -ENXIO;
66769@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
66770 int err;
66771 struct net *net;
66772
66773+ if (gr_proc_is_restricted())
66774+ return -EACCES;
66775+
66776 err = -ENXIO;
66777 net = get_proc_net(inode);
66778 if (net == NULL)
66779diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
66780index f92d5dd..26398ac 100644
66781--- a/fs/proc/proc_sysctl.c
66782+++ b/fs/proc/proc_sysctl.c
66783@@ -11,13 +11,21 @@
66784 #include <linux/namei.h>
66785 #include <linux/mm.h>
66786 #include <linux/module.h>
66787+#include <linux/nsproxy.h>
66788+#ifdef CONFIG_GRKERNSEC
66789+#include <net/net_namespace.h>
66790+#endif
66791 #include "internal.h"
66792
66793+extern int gr_handle_chroot_sysctl(const int op);
66794+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66795+ const int op);
66796+
66797 static const struct dentry_operations proc_sys_dentry_operations;
66798 static const struct file_operations proc_sys_file_operations;
66799-static const struct inode_operations proc_sys_inode_operations;
66800+const struct inode_operations proc_sys_inode_operations;
66801 static const struct file_operations proc_sys_dir_file_operations;
66802-static const struct inode_operations proc_sys_dir_operations;
66803+const struct inode_operations proc_sys_dir_operations;
66804
66805 void proc_sys_poll_notify(struct ctl_table_poll *poll)
66806 {
66807@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
66808
66809 err = NULL;
66810 d_set_d_op(dentry, &proc_sys_dentry_operations);
66811+
66812+ gr_handle_proc_create(dentry, inode);
66813+
66814 d_add(dentry, inode);
66815
66816 out:
66817@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66818 struct inode *inode = file_inode(filp);
66819 struct ctl_table_header *head = grab_header(inode);
66820 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
66821+ int op = write ? MAY_WRITE : MAY_READ;
66822 ssize_t error;
66823 size_t res;
66824
66825@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66826 * and won't be until we finish.
66827 */
66828 error = -EPERM;
66829- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
66830+ if (sysctl_perm(head, table, op))
66831 goto out;
66832
66833 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
66834@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66835 if (!table->proc_handler)
66836 goto out;
66837
66838+#ifdef CONFIG_GRKERNSEC
66839+ error = -EPERM;
66840+ if (gr_handle_chroot_sysctl(op))
66841+ goto out;
66842+ dget(filp->f_path.dentry);
66843+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
66844+ dput(filp->f_path.dentry);
66845+ goto out;
66846+ }
66847+ dput(filp->f_path.dentry);
66848+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
66849+ goto out;
66850+ if (write) {
66851+ if (current->nsproxy->net_ns != table->extra2) {
66852+ if (!capable(CAP_SYS_ADMIN))
66853+ goto out;
66854+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
66855+ goto out;
66856+ }
66857+#endif
66858+
66859 /* careful: calling conventions are nasty here */
66860 res = count;
66861 error = table->proc_handler(table, write, buf, &res, ppos);
66862@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
66863 return false;
66864 } else {
66865 d_set_d_op(child, &proc_sys_dentry_operations);
66866+
66867+ gr_handle_proc_create(child, inode);
66868+
66869 d_add(child, inode);
66870 }
66871 } else {
66872@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
66873 if ((*pos)++ < ctx->pos)
66874 return true;
66875
66876+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
66877+ return 0;
66878+
66879 if (unlikely(S_ISLNK(table->mode)))
66880 res = proc_sys_link_fill_cache(file, ctx, head, table);
66881 else
66882@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
66883 if (IS_ERR(head))
66884 return PTR_ERR(head);
66885
66886+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
66887+ return -ENOENT;
66888+
66889 generic_fillattr(inode, stat);
66890 if (table)
66891 stat->mode = (stat->mode & S_IFMT) | table->mode;
66892@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
66893 .llseek = generic_file_llseek,
66894 };
66895
66896-static const struct inode_operations proc_sys_inode_operations = {
66897+const struct inode_operations proc_sys_inode_operations = {
66898 .permission = proc_sys_permission,
66899 .setattr = proc_sys_setattr,
66900 .getattr = proc_sys_getattr,
66901 };
66902
66903-static const struct inode_operations proc_sys_dir_operations = {
66904+const struct inode_operations proc_sys_dir_operations = {
66905 .lookup = proc_sys_lookup,
66906 .permission = proc_sys_permission,
66907 .setattr = proc_sys_setattr,
66908@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
66909 static struct ctl_dir *new_dir(struct ctl_table_set *set,
66910 const char *name, int namelen)
66911 {
66912- struct ctl_table *table;
66913+ ctl_table_no_const *table;
66914 struct ctl_dir *new;
66915 struct ctl_node *node;
66916 char *new_name;
66917@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
66918 return NULL;
66919
66920 node = (struct ctl_node *)(new + 1);
66921- table = (struct ctl_table *)(node + 1);
66922+ table = (ctl_table_no_const *)(node + 1);
66923 new_name = (char *)(table + 2);
66924 memcpy(new_name, name, namelen);
66925 new_name[namelen] = '\0';
66926@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
66927 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
66928 struct ctl_table_root *link_root)
66929 {
66930- struct ctl_table *link_table, *entry, *link;
66931+ ctl_table_no_const *link_table, *link;
66932+ struct ctl_table *entry;
66933 struct ctl_table_header *links;
66934 struct ctl_node *node;
66935 char *link_name;
66936@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
66937 return NULL;
66938
66939 node = (struct ctl_node *)(links + 1);
66940- link_table = (struct ctl_table *)(node + nr_entries);
66941+ link_table = (ctl_table_no_const *)(node + nr_entries);
66942 link_name = (char *)&link_table[nr_entries + 1];
66943
66944 for (link = link_table, entry = table; entry->procname; link++, entry++) {
66945@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66946 struct ctl_table_header ***subheader, struct ctl_table_set *set,
66947 struct ctl_table *table)
66948 {
66949- struct ctl_table *ctl_table_arg = NULL;
66950- struct ctl_table *entry, *files;
66951+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
66952+ struct ctl_table *entry;
66953 int nr_files = 0;
66954 int nr_dirs = 0;
66955 int err = -ENOMEM;
66956@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66957 nr_files++;
66958 }
66959
66960- files = table;
66961 /* If there are mixed files and directories we need a new table */
66962 if (nr_dirs && nr_files) {
66963- struct ctl_table *new;
66964+ ctl_table_no_const *new;
66965 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
66966 GFP_KERNEL);
66967 if (!files)
66968@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66969 /* Register everything except a directory full of subdirectories */
66970 if (nr_files || !nr_dirs) {
66971 struct ctl_table_header *header;
66972- header = __register_sysctl_table(set, path, files);
66973+ header = __register_sysctl_table(set, path, files ? files : table);
66974 if (!header) {
66975 kfree(ctl_table_arg);
66976 goto out;
66977diff --git a/fs/proc/root.c b/fs/proc/root.c
66978index 094e44d..085a877 100644
66979--- a/fs/proc/root.c
66980+++ b/fs/proc/root.c
66981@@ -188,7 +188,15 @@ void __init proc_root_init(void)
66982 proc_mkdir("openprom", NULL);
66983 #endif
66984 proc_tty_init();
66985+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66986+#ifdef CONFIG_GRKERNSEC_PROC_USER
66987+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
66988+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66989+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
66990+#endif
66991+#else
66992 proc_mkdir("bus", NULL);
66993+#endif
66994 proc_sys_init();
66995 }
66996
66997diff --git a/fs/proc/stat.c b/fs/proc/stat.c
66998index bf2d03f..f058f9c 100644
66999--- a/fs/proc/stat.c
67000+++ b/fs/proc/stat.c
67001@@ -11,6 +11,7 @@
67002 #include <linux/irqnr.h>
67003 #include <linux/cputime.h>
67004 #include <linux/tick.h>
67005+#include <linux/grsecurity.h>
67006
67007 #ifndef arch_irq_stat_cpu
67008 #define arch_irq_stat_cpu(cpu) 0
67009@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
67010 u64 sum_softirq = 0;
67011 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
67012 struct timespec boottime;
67013+ int unrestricted = 1;
67014+
67015+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67016+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67017+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
67018+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67019+ && !in_group_p(grsec_proc_gid)
67020+#endif
67021+ )
67022+ unrestricted = 0;
67023+#endif
67024+#endif
67025
67026 user = nice = system = idle = iowait =
67027 irq = softirq = steal = 0;
67028@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
67029 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67030 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67031 idle += get_idle_time(i);
67032- iowait += get_iowait_time(i);
67033- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67034- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67035- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67036- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67037- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67038- sum += kstat_cpu_irqs_sum(i);
67039- sum += arch_irq_stat_cpu(i);
67040+ if (unrestricted) {
67041+ iowait += get_iowait_time(i);
67042+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67043+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67044+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67045+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67046+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67047+ sum += kstat_cpu_irqs_sum(i);
67048+ sum += arch_irq_stat_cpu(i);
67049+ for (j = 0; j < NR_SOFTIRQS; j++) {
67050+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67051
67052- for (j = 0; j < NR_SOFTIRQS; j++) {
67053- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67054-
67055- per_softirq_sums[j] += softirq_stat;
67056- sum_softirq += softirq_stat;
67057+ per_softirq_sums[j] += softirq_stat;
67058+ sum_softirq += softirq_stat;
67059+ }
67060 }
67061 }
67062- sum += arch_irq_stat();
67063+ if (unrestricted)
67064+ sum += arch_irq_stat();
67065
67066 seq_puts(p, "cpu ");
67067 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67068@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
67069 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67070 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67071 idle = get_idle_time(i);
67072- iowait = get_iowait_time(i);
67073- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67074- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67075- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67076- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67077- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67078+ if (unrestricted) {
67079+ iowait = get_iowait_time(i);
67080+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67081+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67082+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67083+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67084+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67085+ }
67086 seq_printf(p, "cpu%d", i);
67087 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67088 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
67089@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
67090
67091 /* sum again ? it could be updated? */
67092 for_each_irq_nr(j)
67093- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
67094+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
67095
67096 seq_printf(p,
67097 "\nctxt %llu\n"
67098@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
67099 "processes %lu\n"
67100 "procs_running %lu\n"
67101 "procs_blocked %lu\n",
67102- nr_context_switches(),
67103+ unrestricted ? nr_context_switches() : 0ULL,
67104 (unsigned long)jif,
67105- total_forks,
67106- nr_running(),
67107- nr_iowait());
67108+ unrestricted ? total_forks : 0UL,
67109+ unrestricted ? nr_running() : 0UL,
67110+ unrestricted ? nr_iowait() : 0UL);
67111
67112 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
67113
67114diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
67115index c341568..75852a2 100644
67116--- a/fs/proc/task_mmu.c
67117+++ b/fs/proc/task_mmu.c
67118@@ -13,12 +13,19 @@
67119 #include <linux/swap.h>
67120 #include <linux/swapops.h>
67121 #include <linux/mmu_notifier.h>
67122+#include <linux/grsecurity.h>
67123
67124 #include <asm/elf.h>
67125 #include <asm/uaccess.h>
67126 #include <asm/tlbflush.h>
67127 #include "internal.h"
67128
67129+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67130+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67131+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67132+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67133+#endif
67134+
67135 void task_mem(struct seq_file *m, struct mm_struct *mm)
67136 {
67137 unsigned long data, text, lib, swap;
67138@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67139 "VmExe:\t%8lu kB\n"
67140 "VmLib:\t%8lu kB\n"
67141 "VmPTE:\t%8lu kB\n"
67142- "VmSwap:\t%8lu kB\n",
67143- hiwater_vm << (PAGE_SHIFT-10),
67144+ "VmSwap:\t%8lu kB\n"
67145+
67146+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67147+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
67148+#endif
67149+
67150+ ,hiwater_vm << (PAGE_SHIFT-10),
67151 total_vm << (PAGE_SHIFT-10),
67152 mm->locked_vm << (PAGE_SHIFT-10),
67153 mm->pinned_vm << (PAGE_SHIFT-10),
67154@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67155 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
67156 (PTRS_PER_PTE * sizeof(pte_t) *
67157 atomic_long_read(&mm->nr_ptes)) >> 10,
67158- swap << (PAGE_SHIFT-10));
67159+ swap << (PAGE_SHIFT-10)
67160+
67161+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67162+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67163+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
67164+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
67165+#else
67166+ , mm->context.user_cs_base
67167+ , mm->context.user_cs_limit
67168+#endif
67169+#endif
67170+
67171+ );
67172 }
67173
67174 unsigned long task_vsize(struct mm_struct *mm)
67175@@ -271,13 +295,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67176 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
67177 }
67178
67179- /* We don't show the stack guard page in /proc/maps */
67180+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67181+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
67182+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
67183+#else
67184 start = vma->vm_start;
67185- if (stack_guard_page_start(vma, start))
67186- start += PAGE_SIZE;
67187 end = vma->vm_end;
67188- if (stack_guard_page_end(vma, end))
67189- end -= PAGE_SIZE;
67190+#endif
67191
67192 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
67193 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
67194@@ -287,7 +311,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67195 flags & VM_WRITE ? 'w' : '-',
67196 flags & VM_EXEC ? 'x' : '-',
67197 flags & VM_MAYSHARE ? 's' : 'p',
67198+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67199+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
67200+#else
67201 pgoff,
67202+#endif
67203 MAJOR(dev), MINOR(dev), ino);
67204
67205 /*
67206@@ -296,7 +324,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67207 */
67208 if (file) {
67209 seq_pad(m, ' ');
67210- seq_path(m, &file->f_path, "\n");
67211+ seq_path(m, &file->f_path, "\n\\");
67212 goto done;
67213 }
67214
67215@@ -328,8 +356,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67216 * Thread stack in /proc/PID/task/TID/maps or
67217 * the main process stack.
67218 */
67219- if (!is_pid || (vma->vm_start <= mm->start_stack &&
67220- vma->vm_end >= mm->start_stack)) {
67221+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
67222+ (vma->vm_start <= mm->start_stack &&
67223+ vma->vm_end >= mm->start_stack)) {
67224 name = "[stack]";
67225 } else {
67226 /* Thread stack in /proc/PID/maps */
67227@@ -353,6 +382,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
67228 struct proc_maps_private *priv = m->private;
67229 struct task_struct *task = priv->task;
67230
67231+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67232+ if (current->exec_id != m->exec_id) {
67233+ gr_log_badprocpid("maps");
67234+ return 0;
67235+ }
67236+#endif
67237+
67238 show_map_vma(m, vma, is_pid);
67239
67240 if (m->count < m->size) /* vma is copied successfully */
67241@@ -593,12 +629,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67242 .private = &mss,
67243 };
67244
67245+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67246+ if (current->exec_id != m->exec_id) {
67247+ gr_log_badprocpid("smaps");
67248+ return 0;
67249+ }
67250+#endif
67251 memset(&mss, 0, sizeof mss);
67252- mss.vma = vma;
67253- /* mmap_sem is held in m_start */
67254- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67255- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67256-
67257+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67258+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
67259+#endif
67260+ mss.vma = vma;
67261+ /* mmap_sem is held in m_start */
67262+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67263+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67264+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67265+ }
67266+#endif
67267 show_map_vma(m, vma, is_pid);
67268
67269 seq_printf(m,
67270@@ -616,7 +663,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67271 "KernelPageSize: %8lu kB\n"
67272 "MMUPageSize: %8lu kB\n"
67273 "Locked: %8lu kB\n",
67274+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67275+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
67276+#else
67277 (vma->vm_end - vma->vm_start) >> 10,
67278+#endif
67279 mss.resident >> 10,
67280 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
67281 mss.shared_clean >> 10,
67282@@ -1422,6 +1473,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67283 char buffer[64];
67284 int nid;
67285
67286+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67287+ if (current->exec_id != m->exec_id) {
67288+ gr_log_badprocpid("numa_maps");
67289+ return 0;
67290+ }
67291+#endif
67292+
67293 if (!mm)
67294 return 0;
67295
67296@@ -1439,11 +1497,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67297 mpol_to_str(buffer, sizeof(buffer), pol);
67298 mpol_cond_put(pol);
67299
67300+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67301+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
67302+#else
67303 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
67304+#endif
67305
67306 if (file) {
67307 seq_puts(m, " file=");
67308- seq_path(m, &file->f_path, "\n\t= ");
67309+ seq_path(m, &file->f_path, "\n\t\\= ");
67310 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67311 seq_puts(m, " heap");
67312 } else {
67313diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
67314index 678455d..ebd3245 100644
67315--- a/fs/proc/task_nommu.c
67316+++ b/fs/proc/task_nommu.c
67317@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67318 else
67319 bytes += kobjsize(mm);
67320
67321- if (current->fs && current->fs->users > 1)
67322+ if (current->fs && atomic_read(&current->fs->users) > 1)
67323 sbytes += kobjsize(current->fs);
67324 else
67325 bytes += kobjsize(current->fs);
67326@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
67327
67328 if (file) {
67329 seq_pad(m, ' ');
67330- seq_path(m, &file->f_path, "");
67331+ seq_path(m, &file->f_path, "\n\\");
67332 } else if (mm) {
67333 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
67334
67335diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
67336index a90d6d35..d08047c 100644
67337--- a/fs/proc/vmcore.c
67338+++ b/fs/proc/vmcore.c
67339@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
67340 nr_bytes = count;
67341
67342 /* If pfn is not ram, return zeros for sparse dump files */
67343- if (pfn_is_ram(pfn) == 0)
67344- memset(buf, 0, nr_bytes);
67345- else {
67346+ if (pfn_is_ram(pfn) == 0) {
67347+ if (userbuf) {
67348+ if (clear_user((char __force_user *)buf, nr_bytes))
67349+ return -EFAULT;
67350+ } else
67351+ memset(buf, 0, nr_bytes);
67352+ } else {
67353 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
67354 offset, userbuf);
67355 if (tmp < 0)
67356@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
67357 static int copy_to(void *target, void *src, size_t size, int userbuf)
67358 {
67359 if (userbuf) {
67360- if (copy_to_user((char __user *) target, src, size))
67361+ if (copy_to_user((char __force_user *) target, src, size))
67362 return -EFAULT;
67363 } else {
67364 memcpy(target, src, size);
67365@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67366 if (*fpos < m->offset + m->size) {
67367 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
67368 start = m->paddr + *fpos - m->offset;
67369- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
67370+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
67371 if (tmp < 0)
67372 return tmp;
67373 buflen -= tsz;
67374@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67375 static ssize_t read_vmcore(struct file *file, char __user *buffer,
67376 size_t buflen, loff_t *fpos)
67377 {
67378- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
67379+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
67380 }
67381
67382 /*
67383diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
67384index d3fb2b6..43a8140 100644
67385--- a/fs/qnx6/qnx6.h
67386+++ b/fs/qnx6/qnx6.h
67387@@ -74,7 +74,7 @@ enum {
67388 BYTESEX_BE,
67389 };
67390
67391-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67392+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67393 {
67394 if (sbi->s_bytesex == BYTESEX_LE)
67395 return le64_to_cpu((__force __le64)n);
67396@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67397 return (__force __fs64)cpu_to_be64(n);
67398 }
67399
67400-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67401+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67402 {
67403 if (sbi->s_bytesex == BYTESEX_LE)
67404 return le32_to_cpu((__force __le32)n);
67405diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67406index bb2869f..d34ada8 100644
67407--- a/fs/quota/netlink.c
67408+++ b/fs/quota/netlink.c
67409@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67410 void quota_send_warning(struct kqid qid, dev_t dev,
67411 const char warntype)
67412 {
67413- static atomic_t seq;
67414+ static atomic_unchecked_t seq;
67415 struct sk_buff *skb;
67416 void *msg_head;
67417 int ret;
67418@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67419 "VFS: Not enough memory to send quota warning.\n");
67420 return;
67421 }
67422- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67423+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67424 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67425 if (!msg_head) {
67426 printk(KERN_ERR
67427diff --git a/fs/read_write.c b/fs/read_write.c
67428index 009d854..16ce214 100644
67429--- a/fs/read_write.c
67430+++ b/fs/read_write.c
67431@@ -495,7 +495,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67432
67433 old_fs = get_fs();
67434 set_fs(get_ds());
67435- p = (__force const char __user *)buf;
67436+ p = (const char __force_user *)buf;
67437 if (count > MAX_RW_COUNT)
67438 count = MAX_RW_COUNT;
67439 if (file->f_op->write)
67440diff --git a/fs/readdir.c b/fs/readdir.c
67441index 33fd922..e0d6094 100644
67442--- a/fs/readdir.c
67443+++ b/fs/readdir.c
67444@@ -18,6 +18,7 @@
67445 #include <linux/security.h>
67446 #include <linux/syscalls.h>
67447 #include <linux/unistd.h>
67448+#include <linux/namei.h>
67449
67450 #include <asm/uaccess.h>
67451
67452@@ -71,6 +72,7 @@ struct old_linux_dirent {
67453 struct readdir_callback {
67454 struct dir_context ctx;
67455 struct old_linux_dirent __user * dirent;
67456+ struct file * file;
67457 int result;
67458 };
67459
67460@@ -88,6 +90,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
67461 buf->result = -EOVERFLOW;
67462 return -EOVERFLOW;
67463 }
67464+
67465+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67466+ return 0;
67467+
67468 buf->result++;
67469 dirent = buf->dirent;
67470 if (!access_ok(VERIFY_WRITE, dirent,
67471@@ -119,6 +125,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67472 if (!f.file)
67473 return -EBADF;
67474
67475+ buf.file = f.file;
67476 error = iterate_dir(f.file, &buf.ctx);
67477 if (buf.result)
67478 error = buf.result;
67479@@ -144,6 +151,7 @@ struct getdents_callback {
67480 struct dir_context ctx;
67481 struct linux_dirent __user * current_dir;
67482 struct linux_dirent __user * previous;
67483+ struct file * file;
67484 int count;
67485 int error;
67486 };
67487@@ -165,6 +173,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
67488 buf->error = -EOVERFLOW;
67489 return -EOVERFLOW;
67490 }
67491+
67492+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67493+ return 0;
67494+
67495 dirent = buf->previous;
67496 if (dirent) {
67497 if (__put_user(offset, &dirent->d_off))
67498@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67499 if (!f.file)
67500 return -EBADF;
67501
67502+ buf.file = f.file;
67503 error = iterate_dir(f.file, &buf.ctx);
67504 if (error >= 0)
67505 error = buf.error;
67506@@ -228,6 +241,7 @@ struct getdents_callback64 {
67507 struct dir_context ctx;
67508 struct linux_dirent64 __user * current_dir;
67509 struct linux_dirent64 __user * previous;
67510+ struct file *file;
67511 int count;
67512 int error;
67513 };
67514@@ -243,6 +257,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
67515 buf->error = -EINVAL; /* only used if we fail.. */
67516 if (reclen > buf->count)
67517 return -EINVAL;
67518+
67519+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67520+ return 0;
67521+
67522 dirent = buf->previous;
67523 if (dirent) {
67524 if (__put_user(offset, &dirent->d_off))
67525@@ -290,6 +308,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67526 if (!f.file)
67527 return -EBADF;
67528
67529+ buf.file = f.file;
67530 error = iterate_dir(f.file, &buf.ctx);
67531 if (error >= 0)
67532 error = buf.error;
67533diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67534index 9c02d96..6562c10 100644
67535--- a/fs/reiserfs/do_balan.c
67536+++ b/fs/reiserfs/do_balan.c
67537@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67538 return;
67539 }
67540
67541- atomic_inc(&fs_generation(tb->tb_sb));
67542+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67543 do_balance_starts(tb);
67544
67545 /*
67546diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67547index aca73dd..e3c558d 100644
67548--- a/fs/reiserfs/item_ops.c
67549+++ b/fs/reiserfs/item_ops.c
67550@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67551 }
67552
67553 static struct item_operations errcatch_ops = {
67554- errcatch_bytes_number,
67555- errcatch_decrement_key,
67556- errcatch_is_left_mergeable,
67557- errcatch_print_item,
67558- errcatch_check_item,
67559+ .bytes_number = errcatch_bytes_number,
67560+ .decrement_key = errcatch_decrement_key,
67561+ .is_left_mergeable = errcatch_is_left_mergeable,
67562+ .print_item = errcatch_print_item,
67563+ .check_item = errcatch_check_item,
67564
67565- errcatch_create_vi,
67566- errcatch_check_left,
67567- errcatch_check_right,
67568- errcatch_part_size,
67569- errcatch_unit_num,
67570- errcatch_print_vi
67571+ .create_vi = errcatch_create_vi,
67572+ .check_left = errcatch_check_left,
67573+ .check_right = errcatch_check_right,
67574+ .part_size = errcatch_part_size,
67575+ .unit_num = errcatch_unit_num,
67576+ .print_vi = errcatch_print_vi
67577 };
67578
67579 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
67580diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67581index 621b9f3..af527fd 100644
67582--- a/fs/reiserfs/procfs.c
67583+++ b/fs/reiserfs/procfs.c
67584@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67585 "SMALL_TAILS " : "NO_TAILS ",
67586 replay_only(sb) ? "REPLAY_ONLY " : "",
67587 convert_reiserfs(sb) ? "CONV " : "",
67588- atomic_read(&r->s_generation_counter),
67589+ atomic_read_unchecked(&r->s_generation_counter),
67590 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67591 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67592 SF(s_good_search_by_key_reada), SF(s_bmaps),
67593diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67594index 735c2c2..81b91af 100644
67595--- a/fs/reiserfs/reiserfs.h
67596+++ b/fs/reiserfs/reiserfs.h
67597@@ -573,7 +573,7 @@ struct reiserfs_sb_info {
67598 /* Comment? -Hans */
67599 wait_queue_head_t s_wait;
67600 /* increased by one every time the tree gets re-balanced */
67601- atomic_t s_generation_counter;
67602+ atomic_unchecked_t s_generation_counter;
67603
67604 /* File system properties. Currently holds on-disk FS format */
67605 unsigned long s_properties;
67606@@ -2294,7 +2294,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67607 #define REISERFS_USER_MEM 1 /* user memory mode */
67608
67609 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67610-#define get_generation(s) atomic_read (&fs_generation(s))
67611+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67612 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67613 #define __fs_changed(gen,s) (gen != get_generation (s))
67614 #define fs_changed(gen,s) \
67615diff --git a/fs/select.c b/fs/select.c
67616index 467bb1c..cf9d65a 100644
67617--- a/fs/select.c
67618+++ b/fs/select.c
67619@@ -20,6 +20,7 @@
67620 #include <linux/export.h>
67621 #include <linux/slab.h>
67622 #include <linux/poll.h>
67623+#include <linux/security.h>
67624 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67625 #include <linux/file.h>
67626 #include <linux/fdtable.h>
67627@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67628 struct poll_list *walk = head;
67629 unsigned long todo = nfds;
67630
67631+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67632 if (nfds > rlimit(RLIMIT_NOFILE))
67633 return -EINVAL;
67634
67635diff --git a/fs/seq_file.c b/fs/seq_file.c
67636index 3857b72..0b7281e 100644
67637--- a/fs/seq_file.c
67638+++ b/fs/seq_file.c
67639@@ -12,6 +12,8 @@
67640 #include <linux/slab.h>
67641 #include <linux/cred.h>
67642 #include <linux/mm.h>
67643+#include <linux/sched.h>
67644+#include <linux/grsecurity.h>
67645
67646 #include <asm/uaccess.h>
67647 #include <asm/page.h>
67648@@ -34,12 +36,7 @@ static void seq_set_overflow(struct seq_file *m)
67649
67650 static void *seq_buf_alloc(unsigned long size)
67651 {
67652- void *buf;
67653-
67654- buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
67655- if (!buf && size > PAGE_SIZE)
67656- buf = vmalloc(size);
67657- return buf;
67658+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67659 }
67660
67661 /**
67662@@ -72,6 +69,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67663 #ifdef CONFIG_USER_NS
67664 p->user_ns = file->f_cred->user_ns;
67665 #endif
67666+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67667+ p->exec_id = current->exec_id;
67668+#endif
67669
67670 /*
67671 * Wrappers around seq_open(e.g. swaps_open) need to be
67672@@ -94,6 +94,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67673 }
67674 EXPORT_SYMBOL(seq_open);
67675
67676+
67677+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67678+{
67679+ if (gr_proc_is_restricted())
67680+ return -EACCES;
67681+
67682+ return seq_open(file, op);
67683+}
67684+EXPORT_SYMBOL(seq_open_restrict);
67685+
67686 static int traverse(struct seq_file *m, loff_t offset)
67687 {
67688 loff_t pos = 0, index;
67689@@ -165,7 +175,7 @@ Eoverflow:
67690 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67691 {
67692 struct seq_file *m = file->private_data;
67693- size_t copied = 0;
67694+ ssize_t copied = 0;
67695 loff_t pos;
67696 size_t n;
67697 void *p;
67698@@ -596,7 +606,7 @@ static void single_stop(struct seq_file *p, void *v)
67699 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67700 void *data)
67701 {
67702- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67703+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67704 int res = -ENOMEM;
67705
67706 if (op) {
67707@@ -632,6 +642,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67708 }
67709 EXPORT_SYMBOL(single_open_size);
67710
67711+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67712+ void *data)
67713+{
67714+ if (gr_proc_is_restricted())
67715+ return -EACCES;
67716+
67717+ return single_open(file, show, data);
67718+}
67719+EXPORT_SYMBOL(single_open_restrict);
67720+
67721+
67722 int single_release(struct inode *inode, struct file *file)
67723 {
67724 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
67725diff --git a/fs/splice.c b/fs/splice.c
67726index f5cb9ba..8ddb1e9 100644
67727--- a/fs/splice.c
67728+++ b/fs/splice.c
67729@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67730 pipe_lock(pipe);
67731
67732 for (;;) {
67733- if (!pipe->readers) {
67734+ if (!atomic_read(&pipe->readers)) {
67735 send_sig(SIGPIPE, current, 0);
67736 if (!ret)
67737 ret = -EPIPE;
67738@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67739 page_nr++;
67740 ret += buf->len;
67741
67742- if (pipe->files)
67743+ if (atomic_read(&pipe->files))
67744 do_wakeup = 1;
67745
67746 if (!--spd->nr_pages)
67747@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67748 do_wakeup = 0;
67749 }
67750
67751- pipe->waiting_writers++;
67752+ atomic_inc(&pipe->waiting_writers);
67753 pipe_wait(pipe);
67754- pipe->waiting_writers--;
67755+ atomic_dec(&pipe->waiting_writers);
67756 }
67757
67758 pipe_unlock(pipe);
67759@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
67760 old_fs = get_fs();
67761 set_fs(get_ds());
67762 /* The cast to a user pointer is valid due to the set_fs() */
67763- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
67764+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
67765 set_fs(old_fs);
67766
67767 return res;
67768@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
67769 old_fs = get_fs();
67770 set_fs(get_ds());
67771 /* The cast to a user pointer is valid due to the set_fs() */
67772- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
67773+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
67774 set_fs(old_fs);
67775
67776 return res;
67777@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
67778 goto err;
67779
67780 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
67781- vec[i].iov_base = (void __user *) page_address(page);
67782+ vec[i].iov_base = (void __force_user *) page_address(page);
67783 vec[i].iov_len = this_len;
67784 spd.pages[i] = page;
67785 spd.nr_pages++;
67786@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67787 ops->release(pipe, buf);
67788 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67789 pipe->nrbufs--;
67790- if (pipe->files)
67791+ if (atomic_read(&pipe->files))
67792 sd->need_wakeup = true;
67793 }
67794
67795@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67796 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
67797 {
67798 while (!pipe->nrbufs) {
67799- if (!pipe->writers)
67800+ if (!atomic_read(&pipe->writers))
67801 return 0;
67802
67803- if (!pipe->waiting_writers && sd->num_spliced)
67804+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
67805 return 0;
67806
67807 if (sd->flags & SPLICE_F_NONBLOCK)
67808@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
67809 ops->release(pipe, buf);
67810 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67811 pipe->nrbufs--;
67812- if (pipe->files)
67813+ if (atomic_read(&pipe->files))
67814 sd.need_wakeup = true;
67815 } else {
67816 buf->offset += ret;
67817@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
67818 * out of the pipe right after the splice_to_pipe(). So set
67819 * PIPE_READERS appropriately.
67820 */
67821- pipe->readers = 1;
67822+ atomic_set(&pipe->readers, 1);
67823
67824 current->splice_pipe = pipe;
67825 }
67826@@ -1496,6 +1496,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
67827
67828 partial[buffers].offset = off;
67829 partial[buffers].len = plen;
67830+ partial[buffers].private = 0;
67831
67832 off = 0;
67833 len -= plen;
67834@@ -1732,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67835 ret = -ERESTARTSYS;
67836 break;
67837 }
67838- if (!pipe->writers)
67839+ if (!atomic_read(&pipe->writers))
67840 break;
67841- if (!pipe->waiting_writers) {
67842+ if (!atomic_read(&pipe->waiting_writers)) {
67843 if (flags & SPLICE_F_NONBLOCK) {
67844 ret = -EAGAIN;
67845 break;
67846@@ -1766,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67847 pipe_lock(pipe);
67848
67849 while (pipe->nrbufs >= pipe->buffers) {
67850- if (!pipe->readers) {
67851+ if (!atomic_read(&pipe->readers)) {
67852 send_sig(SIGPIPE, current, 0);
67853 ret = -EPIPE;
67854 break;
67855@@ -1779,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67856 ret = -ERESTARTSYS;
67857 break;
67858 }
67859- pipe->waiting_writers++;
67860+ atomic_inc(&pipe->waiting_writers);
67861 pipe_wait(pipe);
67862- pipe->waiting_writers--;
67863+ atomic_dec(&pipe->waiting_writers);
67864 }
67865
67866 pipe_unlock(pipe);
67867@@ -1817,14 +1818,14 @@ retry:
67868 pipe_double_lock(ipipe, opipe);
67869
67870 do {
67871- if (!opipe->readers) {
67872+ if (!atomic_read(&opipe->readers)) {
67873 send_sig(SIGPIPE, current, 0);
67874 if (!ret)
67875 ret = -EPIPE;
67876 break;
67877 }
67878
67879- if (!ipipe->nrbufs && !ipipe->writers)
67880+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
67881 break;
67882
67883 /*
67884@@ -1921,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67885 pipe_double_lock(ipipe, opipe);
67886
67887 do {
67888- if (!opipe->readers) {
67889+ if (!atomic_read(&opipe->readers)) {
67890 send_sig(SIGPIPE, current, 0);
67891 if (!ret)
67892 ret = -EPIPE;
67893@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67894 * return EAGAIN if we have the potential of some data in the
67895 * future, otherwise just return 0
67896 */
67897- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
67898+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
67899 ret = -EAGAIN;
67900
67901 pipe_unlock(ipipe);
67902diff --git a/fs/stat.c b/fs/stat.c
67903index ae0c3ce..9ee641c 100644
67904--- a/fs/stat.c
67905+++ b/fs/stat.c
67906@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
67907 stat->gid = inode->i_gid;
67908 stat->rdev = inode->i_rdev;
67909 stat->size = i_size_read(inode);
67910- stat->atime = inode->i_atime;
67911- stat->mtime = inode->i_mtime;
67912+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67913+ stat->atime = inode->i_ctime;
67914+ stat->mtime = inode->i_ctime;
67915+ } else {
67916+ stat->atime = inode->i_atime;
67917+ stat->mtime = inode->i_mtime;
67918+ }
67919 stat->ctime = inode->i_ctime;
67920 stat->blksize = (1 << inode->i_blkbits);
67921 stat->blocks = inode->i_blocks;
67922@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
67923 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
67924 {
67925 struct inode *inode = path->dentry->d_inode;
67926+ int retval;
67927
67928- if (inode->i_op->getattr)
67929- return inode->i_op->getattr(path->mnt, path->dentry, stat);
67930+ if (inode->i_op->getattr) {
67931+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
67932+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67933+ stat->atime = stat->ctime;
67934+ stat->mtime = stat->ctime;
67935+ }
67936+ return retval;
67937+ }
67938
67939 generic_fillattr(inode, stat);
67940 return 0;
67941diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
67942index 0b45ff4..847de5b 100644
67943--- a/fs/sysfs/dir.c
67944+++ b/fs/sysfs/dir.c
67945@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67946 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67947 {
67948 struct kernfs_node *parent, *kn;
67949+ const char *name;
67950+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
67951+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67952+ const char *parent_name;
67953+#endif
67954
67955 BUG_ON(!kobj);
67956
67957+ name = kobject_name(kobj);
67958+
67959 if (kobj->parent)
67960 parent = kobj->parent->sd;
67961 else
67962@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67963 if (!parent)
67964 return -ENOENT;
67965
67966- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
67967- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
67968+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67969+ parent_name = parent->name;
67970+ mode = S_IRWXU;
67971+
67972+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
67973+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
67974+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
67975+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
67976+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67977+#endif
67978+
67979+ kn = kernfs_create_dir_ns(parent, name,
67980+ mode, kobj, ns);
67981 if (IS_ERR(kn)) {
67982 if (PTR_ERR(kn) == -EEXIST)
67983- sysfs_warn_dup(parent, kobject_name(kobj));
67984+ sysfs_warn_dup(parent, name);
67985 return PTR_ERR(kn);
67986 }
67987
67988diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
67989index 69d4889..a810bd4 100644
67990--- a/fs/sysv/sysv.h
67991+++ b/fs/sysv/sysv.h
67992@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
67993 #endif
67994 }
67995
67996-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67997+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
67998 {
67999 if (sbi->s_bytesex == BYTESEX_PDP)
68000 return PDP_swab((__force __u32)n);
68001diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
68002index fb08b0c..65fcc7e 100644
68003--- a/fs/ubifs/io.c
68004+++ b/fs/ubifs/io.c
68005@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
68006 return err;
68007 }
68008
68009-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68010+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68011 {
68012 int err;
68013
68014diff --git a/fs/udf/misc.c b/fs/udf/misc.c
68015index c175b4d..8f36a16 100644
68016--- a/fs/udf/misc.c
68017+++ b/fs/udf/misc.c
68018@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
68019
68020 u8 udf_tag_checksum(const struct tag *t)
68021 {
68022- u8 *data = (u8 *)t;
68023+ const u8 *data = (const u8 *)t;
68024 u8 checksum = 0;
68025 int i;
68026 for (i = 0; i < sizeof(struct tag); ++i)
68027diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
68028index 8d974c4..b82f6ec 100644
68029--- a/fs/ufs/swab.h
68030+++ b/fs/ufs/swab.h
68031@@ -22,7 +22,7 @@ enum {
68032 BYTESEX_BE
68033 };
68034
68035-static inline u64
68036+static inline u64 __intentional_overflow(-1)
68037 fs64_to_cpu(struct super_block *sbp, __fs64 n)
68038 {
68039 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68040@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
68041 return (__force __fs64)cpu_to_be64(n);
68042 }
68043
68044-static inline u32
68045+static inline u32 __intentional_overflow(-1)
68046 fs32_to_cpu(struct super_block *sbp, __fs32 n)
68047 {
68048 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68049diff --git a/fs/utimes.c b/fs/utimes.c
68050index aa138d6..5f3a811 100644
68051--- a/fs/utimes.c
68052+++ b/fs/utimes.c
68053@@ -1,6 +1,7 @@
68054 #include <linux/compiler.h>
68055 #include <linux/file.h>
68056 #include <linux/fs.h>
68057+#include <linux/security.h>
68058 #include <linux/linkage.h>
68059 #include <linux/mount.h>
68060 #include <linux/namei.h>
68061@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
68062 }
68063 }
68064 retry_deleg:
68065+
68066+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
68067+ error = -EACCES;
68068+ goto mnt_drop_write_and_out;
68069+ }
68070+
68071 mutex_lock(&inode->i_mutex);
68072 error = notify_change(path->dentry, &newattrs, &delegated_inode);
68073 mutex_unlock(&inode->i_mutex);
68074diff --git a/fs/xattr.c b/fs/xattr.c
68075index c69e6d4..cc56af5 100644
68076--- a/fs/xattr.c
68077+++ b/fs/xattr.c
68078@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
68079 return rc;
68080 }
68081
68082+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
68083+ssize_t
68084+pax_getxattr(struct dentry *dentry, void *value, size_t size)
68085+{
68086+ struct inode *inode = dentry->d_inode;
68087+ ssize_t error;
68088+
68089+ error = inode_permission(inode, MAY_EXEC);
68090+ if (error)
68091+ return error;
68092+
68093+ if (inode->i_op->getxattr)
68094+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
68095+ else
68096+ error = -EOPNOTSUPP;
68097+
68098+ return error;
68099+}
68100+EXPORT_SYMBOL(pax_getxattr);
68101+#endif
68102+
68103 ssize_t
68104 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
68105 {
68106@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
68107 * Extended attribute SET operations
68108 */
68109 static long
68110-setxattr(struct dentry *d, const char __user *name, const void __user *value,
68111+setxattr(struct path *path, const char __user *name, const void __user *value,
68112 size_t size, int flags)
68113 {
68114 int error;
68115@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
68116 posix_acl_fix_xattr_from_user(kvalue, size);
68117 }
68118
68119- error = vfs_setxattr(d, kname, kvalue, size, flags);
68120+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
68121+ error = -EACCES;
68122+ goto out;
68123+ }
68124+
68125+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
68126 out:
68127 if (vvalue)
68128 vfree(vvalue);
68129@@ -377,7 +403,7 @@ retry:
68130 return error;
68131 error = mnt_want_write(path.mnt);
68132 if (!error) {
68133- error = setxattr(path.dentry, name, value, size, flags);
68134+ error = setxattr(&path, name, value, size, flags);
68135 mnt_drop_write(path.mnt);
68136 }
68137 path_put(&path);
68138@@ -401,7 +427,7 @@ retry:
68139 return error;
68140 error = mnt_want_write(path.mnt);
68141 if (!error) {
68142- error = setxattr(path.dentry, name, value, size, flags);
68143+ error = setxattr(&path, name, value, size, flags);
68144 mnt_drop_write(path.mnt);
68145 }
68146 path_put(&path);
68147@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
68148 const void __user *,value, size_t, size, int, flags)
68149 {
68150 struct fd f = fdget(fd);
68151- struct dentry *dentry;
68152 int error = -EBADF;
68153
68154 if (!f.file)
68155 return error;
68156- dentry = f.file->f_path.dentry;
68157- audit_inode(NULL, dentry, 0);
68158+ audit_inode(NULL, f.file->f_path.dentry, 0);
68159 error = mnt_want_write_file(f.file);
68160 if (!error) {
68161- error = setxattr(dentry, name, value, size, flags);
68162+ error = setxattr(&f.file->f_path, name, value, size, flags);
68163 mnt_drop_write_file(f.file);
68164 }
68165 fdput(f);
68166@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
68167 * Extended attribute REMOVE operations
68168 */
68169 static long
68170-removexattr(struct dentry *d, const char __user *name)
68171+removexattr(struct path *path, const char __user *name)
68172 {
68173 int error;
68174 char kname[XATTR_NAME_MAX + 1];
68175@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
68176 if (error < 0)
68177 return error;
68178
68179- return vfs_removexattr(d, kname);
68180+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
68181+ return -EACCES;
68182+
68183+ return vfs_removexattr(path->dentry, kname);
68184 }
68185
68186 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
68187@@ -652,7 +679,7 @@ retry:
68188 return error;
68189 error = mnt_want_write(path.mnt);
68190 if (!error) {
68191- error = removexattr(path.dentry, name);
68192+ error = removexattr(&path, name);
68193 mnt_drop_write(path.mnt);
68194 }
68195 path_put(&path);
68196@@ -675,7 +702,7 @@ retry:
68197 return error;
68198 error = mnt_want_write(path.mnt);
68199 if (!error) {
68200- error = removexattr(path.dentry, name);
68201+ error = removexattr(&path, name);
68202 mnt_drop_write(path.mnt);
68203 }
68204 path_put(&path);
68205@@ -689,16 +716,16 @@ retry:
68206 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
68207 {
68208 struct fd f = fdget(fd);
68209- struct dentry *dentry;
68210+ struct path *path;
68211 int error = -EBADF;
68212
68213 if (!f.file)
68214 return error;
68215- dentry = f.file->f_path.dentry;
68216- audit_inode(NULL, dentry, 0);
68217+ path = &f.file->f_path;
68218+ audit_inode(NULL, path->dentry, 0);
68219 error = mnt_want_write_file(f.file);
68220 if (!error) {
68221- error = removexattr(dentry, name);
68222+ error = removexattr(path, name);
68223 mnt_drop_write_file(f.file);
68224 }
68225 fdput(f);
68226diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
68227index 86df952..ac430d6 100644
68228--- a/fs/xfs/libxfs/xfs_bmap.c
68229+++ b/fs/xfs/libxfs/xfs_bmap.c
68230@@ -583,7 +583,7 @@ xfs_bmap_validate_ret(
68231
68232 #else
68233 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
68234-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
68235+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
68236 #endif /* DEBUG */
68237
68238 /*
68239diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
68240index f1b69ed..3d0222f 100644
68241--- a/fs/xfs/xfs_dir2_readdir.c
68242+++ b/fs/xfs/xfs_dir2_readdir.c
68243@@ -159,7 +159,12 @@ xfs_dir2_sf_getdents(
68244 ino = dp->d_ops->sf_get_ino(sfp, sfep);
68245 filetype = dp->d_ops->sf_get_ftype(sfep);
68246 ctx->pos = off & 0x7fffffff;
68247- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68248+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
68249+ char name[sfep->namelen];
68250+ memcpy(name, sfep->name, sfep->namelen);
68251+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
68252+ return 0;
68253+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68254 xfs_dir3_get_dtype(dp->i_mount, filetype)))
68255 return 0;
68256 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
68257diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
68258index 3799695..0ddc953 100644
68259--- a/fs/xfs/xfs_ioctl.c
68260+++ b/fs/xfs/xfs_ioctl.c
68261@@ -122,7 +122,7 @@ xfs_find_handle(
68262 }
68263
68264 error = -EFAULT;
68265- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
68266+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
68267 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
68268 goto out_put;
68269
68270diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
68271index d10dc8f..56b3430 100644
68272--- a/fs/xfs/xfs_linux.h
68273+++ b/fs/xfs/xfs_linux.h
68274@@ -230,7 +230,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
68275 * of the compiler which do not like us using do_div in the middle
68276 * of large functions.
68277 */
68278-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68279+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68280 {
68281 __u32 mod;
68282
68283@@ -286,7 +286,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
68284 return 0;
68285 }
68286 #else
68287-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68288+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68289 {
68290 __u32 mod;
68291
68292diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
68293new file mode 100644
68294index 0000000..f27264e
68295--- /dev/null
68296+++ b/grsecurity/Kconfig
68297@@ -0,0 +1,1166 @@
68298+#
68299+# grecurity configuration
68300+#
68301+menu "Memory Protections"
68302+depends on GRKERNSEC
68303+
68304+config GRKERNSEC_KMEM
68305+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
68306+ default y if GRKERNSEC_CONFIG_AUTO
68307+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
68308+ help
68309+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
68310+ be written to or read from to modify or leak the contents of the running
68311+ kernel. /dev/port will also not be allowed to be opened, writing to
68312+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
68313+ If you have module support disabled, enabling this will close up several
68314+ ways that are currently used to insert malicious code into the running
68315+ kernel.
68316+
68317+ Even with this feature enabled, we still highly recommend that
68318+ you use the RBAC system, as it is still possible for an attacker to
68319+ modify the running kernel through other more obscure methods.
68320+
68321+ It is highly recommended that you say Y here if you meet all the
68322+ conditions above.
68323+
68324+config GRKERNSEC_VM86
68325+ bool "Restrict VM86 mode"
68326+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68327+ depends on X86_32
68328+
68329+ help
68330+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
68331+ make use of a special execution mode on 32bit x86 processors called
68332+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
68333+ video cards and will still work with this option enabled. The purpose
68334+ of the option is to prevent exploitation of emulation errors in
68335+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
68336+ Nearly all users should be able to enable this option.
68337+
68338+config GRKERNSEC_IO
68339+ bool "Disable privileged I/O"
68340+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68341+ depends on X86
68342+ select RTC_CLASS
68343+ select RTC_INTF_DEV
68344+ select RTC_DRV_CMOS
68345+
68346+ help
68347+ If you say Y here, all ioperm and iopl calls will return an error.
68348+ Ioperm and iopl can be used to modify the running kernel.
68349+ Unfortunately, some programs need this access to operate properly,
68350+ the most notable of which are XFree86 and hwclock. hwclock can be
68351+ remedied by having RTC support in the kernel, so real-time
68352+ clock support is enabled if this option is enabled, to ensure
68353+ that hwclock operates correctly. If hwclock still does not work,
68354+ either update udev or symlink /dev/rtc to /dev/rtc0.
68355+
68356+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
68357+ you may not be able to boot into a graphical environment with this
68358+ option enabled. In this case, you should use the RBAC system instead.
68359+
68360+config GRKERNSEC_BPF_HARDEN
68361+ bool "Harden BPF interpreter"
68362+ default y if GRKERNSEC_CONFIG_AUTO
68363+ help
68364+ Unlike previous versions of grsecurity that hardened both the BPF
68365+ interpreted code against corruption at rest as well as the JIT code
68366+ against JIT-spray attacks and attacker-controlled immediate values
68367+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
68368+ and will ensure the interpreted code is read-only at rest. This feature
68369+ may be removed at a later time when eBPF stabilizes to entirely revert
68370+ back to the more secure pre-3.16 BPF interpreter/JIT.
68371+
68372+ If you're using KERNEXEC, it's recommended that you enable this option
68373+ to supplement the hardening of the kernel.
68374+
68375+config GRKERNSEC_PERF_HARDEN
68376+ bool "Disable unprivileged PERF_EVENTS usage by default"
68377+ default y if GRKERNSEC_CONFIG_AUTO
68378+ depends on PERF_EVENTS
68379+ help
68380+ If you say Y here, the range of acceptable values for the
68381+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
68382+ default to a new value: 3. When the sysctl is set to this value, no
68383+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
68384+
68385+ Though PERF_EVENTS can be used legitimately for performance monitoring
68386+ and low-level application profiling, it is forced on regardless of
68387+ configuration, has been at fault for several vulnerabilities, and
68388+ creates new opportunities for side channels and other information leaks.
68389+
68390+ This feature puts PERF_EVENTS into a secure default state and permits
68391+ the administrator to change out of it temporarily if unprivileged
68392+ application profiling is needed.
68393+
68394+config GRKERNSEC_RAND_THREADSTACK
68395+ bool "Insert random gaps between thread stacks"
68396+ default y if GRKERNSEC_CONFIG_AUTO
68397+ depends on PAX_RANDMMAP && !PPC
68398+ help
68399+ If you say Y here, a random-sized gap will be enforced between allocated
68400+ thread stacks. Glibc's NPTL and other threading libraries that
68401+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68402+ The implementation currently provides 8 bits of entropy for the gap.
68403+
68404+ Many distributions do not compile threaded remote services with the
68405+ -fstack-check argument to GCC, causing the variable-sized stack-based
68406+ allocator, alloca(), to not probe the stack on allocation. This
68407+ permits an unbounded alloca() to skip over any guard page and potentially
68408+ modify another thread's stack reliably. An enforced random gap
68409+ reduces the reliability of such an attack and increases the chance
68410+ that such a read/write to another thread's stack instead lands in
68411+ an unmapped area, causing a crash and triggering grsecurity's
68412+ anti-bruteforcing logic.
68413+
68414+config GRKERNSEC_PROC_MEMMAP
68415+ bool "Harden ASLR against information leaks and entropy reduction"
68416+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68417+ depends on PAX_NOEXEC || PAX_ASLR
68418+ help
68419+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68420+ give no information about the addresses of its mappings if
68421+ PaX features that rely on random addresses are enabled on the task.
 68422+ In addition to sanitizing this information and disabling other
 68423+ dangerous sources of information, this option denies reads of sensitive
 68424+ /proc/<pid> entries where the file descriptor was opened in a different
 68425+ task than the one performing the read. Such attempts are logged.
68426+ This option also limits argv/env strings for suid/sgid binaries
68427+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68428+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68429+ binaries to prevent alternative mmap layouts from being abused.
68430+
68431+ If you use PaX it is essential that you say Y here as it closes up
68432+ several holes that make full ASLR useless locally.
68433+
68434+
68435+config GRKERNSEC_KSTACKOVERFLOW
68436+ bool "Prevent kernel stack overflows"
68437+ default y if GRKERNSEC_CONFIG_AUTO
68438+ depends on !IA64 && 64BIT
68439+ help
68440+ If you say Y here, the kernel's process stacks will be allocated
68441+ with vmalloc instead of the kernel's default allocator. This
68442+ introduces guard pages that in combination with the alloca checking
68443+ of the STACKLEAK feature prevents all forms of kernel process stack
68444+ overflow abuse. Note that this is different from kernel stack
68445+ buffer overflows.
68446+
68447+config GRKERNSEC_BRUTE
68448+ bool "Deter exploit bruteforcing"
68449+ default y if GRKERNSEC_CONFIG_AUTO
68450+ help
68451+ If you say Y here, attempts to bruteforce exploits against forking
68452+ daemons such as apache or sshd, as well as against suid/sgid binaries
68453+ will be deterred. When a child of a forking daemon is killed by PaX
68454+ or crashes due to an illegal instruction or other suspicious signal,
68455+ the parent process will be delayed 30 seconds upon every subsequent
68456+ fork until the administrator is able to assess the situation and
68457+ restart the daemon.
68458+ In the suid/sgid case, the attempt is logged, the user has all their
68459+ existing instances of the suid/sgid binary terminated and will
68460+ be unable to execute any suid/sgid binaries for 15 minutes.
68461+
68462+ It is recommended that you also enable signal logging in the auditing
68463+ section so that logs are generated when a process triggers a suspicious
68464+ signal.
68465+ If the sysctl option is enabled, a sysctl option with name
68466+ "deter_bruteforce" is created.
68467+
68468+config GRKERNSEC_MODHARDEN
68469+ bool "Harden module auto-loading"
68470+ default y if GRKERNSEC_CONFIG_AUTO
68471+ depends on MODULES
68472+ help
68473+ If you say Y here, module auto-loading in response to use of some
68474+ feature implemented by an unloaded module will be restricted to
68475+ root users. Enabling this option helps defend against attacks
68476+ by unprivileged users who abuse the auto-loading behavior to
68477+ cause a vulnerable module to load that is then exploited.
68478+
68479+ If this option prevents a legitimate use of auto-loading for a
68480+ non-root user, the administrator can execute modprobe manually
68481+ with the exact name of the module mentioned in the alert log.
68482+ Alternatively, the administrator can add the module to the list
68483+ of modules loaded at boot by modifying init scripts.
68484+
68485+ Modification of init scripts will most likely be needed on
68486+ Ubuntu servers with encrypted home directory support enabled,
68487+ as the first non-root user logging in will cause the ecb(aes),
68488+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68489+
68490+config GRKERNSEC_HIDESYM
68491+ bool "Hide kernel symbols"
68492+ default y if GRKERNSEC_CONFIG_AUTO
68493+ select PAX_USERCOPY_SLABS
68494+ help
68495+ If you say Y here, getting information on loaded modules, and
68496+ displaying all kernel symbols through a syscall will be restricted
68497+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68498+ /proc/kallsyms will be restricted to the root user. The RBAC
68499+ system can hide that entry even from root.
68500+
68501+ This option also prevents leaking of kernel addresses through
68502+ several /proc entries.
68503+
68504+ Note that this option is only effective provided the following
68505+ conditions are met:
68506+ 1) The kernel using grsecurity is not precompiled by some distribution
68507+ 2) You have also enabled GRKERNSEC_DMESG
68508+ 3) You are using the RBAC system and hiding other files such as your
68509+ kernel image and System.map. Alternatively, enabling this option
68510+ causes the permissions on /boot, /lib/modules, and the kernel
68511+ source directory to change at compile time to prevent
68512+ reading by non-root users.
68513+ If the above conditions are met, this option will aid in providing a
68514+ useful protection against local kernel exploitation of overflows
68515+ and arbitrary read/write vulnerabilities.
68516+
68517+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68518+ in addition to this feature.
68519+
68520+config GRKERNSEC_RANDSTRUCT
68521+ bool "Randomize layout of sensitive kernel structures"
68522+ default y if GRKERNSEC_CONFIG_AUTO
68523+ select GRKERNSEC_HIDESYM
68524+ select MODVERSIONS if MODULES
68525+ help
68526+ If you say Y here, the layouts of a number of sensitive kernel
68527+ structures (task, fs, cred, etc) and all structures composed entirely
68528+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68529+ This can introduce the requirement of an additional infoleak
68530+ vulnerability for exploits targeting these structure types.
68531+
68532+ Enabling this feature will introduce some performance impact, slightly
68533+ increase memory usage, and prevent the use of forensic tools like
68534+ Volatility against the system (unless the kernel source tree isn't
68535+ cleaned after kernel installation).
68536+
68537+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68538+ It remains after a make clean to allow for external modules to be compiled
68539+ with the existing seed and will be removed by a make mrproper or
68540+ make distclean.
68541+
 68542+ Note that the implementation requires gcc 4.6.4 or newer. You may need
68543+ to install the supporting headers explicitly in addition to the normal
68544+ gcc package.
68545+
68546+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68547+ bool "Use cacheline-aware structure randomization"
68548+ depends on GRKERNSEC_RANDSTRUCT
68549+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68550+ help
68551+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68552+ at restricting randomization to cacheline-sized groups of elements. It
68553+ will further not randomize bitfields in structures. This reduces the
68554+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68555+
68556+config GRKERNSEC_KERN_LOCKOUT
68557+ bool "Active kernel exploit response"
68558+ default y if GRKERNSEC_CONFIG_AUTO
68559+ depends on X86 || ARM || PPC || SPARC
68560+ help
68561+ If you say Y here, when a PaX alert is triggered due to suspicious
68562+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68563+ or an OOPS occurs due to bad memory accesses, instead of just
68564+ terminating the offending process (and potentially allowing
68565+ a subsequent exploit from the same user), we will take one of two
68566+ actions:
68567+ If the user was root, we will panic the system
68568+ If the user was non-root, we will log the attempt, terminate
68569+ all processes owned by the user, then prevent them from creating
68570+ any new processes until the system is restarted
68571+ This deters repeated kernel exploitation/bruteforcing attempts
68572+ and is useful for later forensics.
68573+
68574+config GRKERNSEC_OLD_ARM_USERLAND
68575+ bool "Old ARM userland compatibility"
68576+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68577+ help
68578+ If you say Y here, stubs of executable code to perform such operations
68579+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68580+ table. This is unfortunately needed for old ARM userland meant to run
68581+ across a wide range of processors. Without this option enabled,
68582+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68583+ which is enough for Linaro userlands or other userlands designed for v6
68584+ and newer ARM CPUs. It's recommended that you try without this option enabled
68585+ first, and only enable it if your userland does not boot (it will likely fail
68586+ at init time).
68587+
68588+endmenu
68589+menu "Role Based Access Control Options"
68590+depends on GRKERNSEC
68591+
68592+config GRKERNSEC_RBAC_DEBUG
68593+ bool
68594+
68595+config GRKERNSEC_NO_RBAC
68596+ bool "Disable RBAC system"
68597+ help
68598+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68599+ preventing the RBAC system from being enabled. You should only say Y
68600+ here if you have no intention of using the RBAC system, so as to prevent
68601+ an attacker with root access from misusing the RBAC system to hide files
68602+ and processes when loadable module support and /dev/[k]mem have been
68603+ locked down.
68604+
68605+config GRKERNSEC_ACL_HIDEKERN
68606+ bool "Hide kernel processes"
68607+ help
68608+ If you say Y here, all kernel threads will be hidden to all
68609+ processes but those whose subject has the "view hidden processes"
68610+ flag.
68611+
68612+config GRKERNSEC_ACL_MAXTRIES
68613+ int "Maximum tries before password lockout"
68614+ default 3
68615+ help
68616+ This option enforces the maximum number of times a user can attempt
68617+ to authorize themselves with the grsecurity RBAC system before being
68618+ denied the ability to attempt authorization again for a specified time.
68619+ The lower the number, the harder it will be to brute-force a password.
68620+
68621+config GRKERNSEC_ACL_TIMEOUT
68622+ int "Time to wait after max password tries, in seconds"
68623+ default 30
68624+ help
68625+ This option specifies the time the user must wait after attempting to
68626+ authorize to the RBAC system with the maximum number of invalid
68627+ passwords. The higher the number, the harder it will be to brute-force
68628+ a password.
68629+
68630+endmenu
68631+menu "Filesystem Protections"
68632+depends on GRKERNSEC
68633+
68634+config GRKERNSEC_PROC
68635+ bool "Proc restrictions"
68636+ default y if GRKERNSEC_CONFIG_AUTO
68637+ help
68638+ If you say Y here, the permissions of the /proc filesystem
68639+ will be altered to enhance system security and privacy. You MUST
68640+ choose either a user only restriction or a user and group restriction.
68641+ Depending upon the option you choose, you can either restrict users to
68642+ see only the processes they themselves run, or choose a group that can
68643+ view all processes and files normally restricted to root if you choose
68644+ the "restrict to user only" option. NOTE: If you're running identd or
68645+ ntpd as a non-root user, you will have to run it as the group you
68646+ specify here.
68647+
68648+config GRKERNSEC_PROC_USER
68649+ bool "Restrict /proc to user only"
68650+ depends on GRKERNSEC_PROC
68651+ help
68652+ If you say Y here, non-root users will only be able to view their own
68653+ processes, and restricts them from viewing network-related information,
68654+ and viewing kernel symbol and module information.
68655+
68656+config GRKERNSEC_PROC_USERGROUP
68657+ bool "Allow special group"
68658+ default y if GRKERNSEC_CONFIG_AUTO
68659+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68660+ help
68661+ If you say Y here, you will be able to select a group that will be
68662+ able to view all processes and network-related information. If you've
68663+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68664+ remain hidden. This option is useful if you want to run identd as
68665+ a non-root user. The group you select may also be chosen at boot time
68666+ via "grsec_proc_gid=" on the kernel commandline.
68667+
68668+config GRKERNSEC_PROC_GID
68669+ int "GID for special group"
68670+ depends on GRKERNSEC_PROC_USERGROUP
68671+ default 1001
68672+
68673+config GRKERNSEC_PROC_ADD
68674+ bool "Additional restrictions"
68675+ default y if GRKERNSEC_CONFIG_AUTO
68676+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68677+ help
68678+ If you say Y here, additional restrictions will be placed on
68679+ /proc that keep normal users from viewing device information and
68680+ slabinfo information that could be useful for exploits.
68681+
68682+config GRKERNSEC_LINK
68683+ bool "Linking restrictions"
68684+ default y if GRKERNSEC_CONFIG_AUTO
68685+ help
68686+ If you say Y here, /tmp race exploits will be prevented, since users
68687+ will no longer be able to follow symlinks owned by other users in
68688+ world-writable +t directories (e.g. /tmp), unless the owner of the
 68689+ symlink is the owner of the directory. Users will also not be
68690+ able to hardlink to files they do not own. If the sysctl option is
68691+ enabled, a sysctl option with name "linking_restrictions" is created.
68692+
68693+config GRKERNSEC_SYMLINKOWN
68694+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68695+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68696+ help
68697+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68698+ that prevents it from being used as a security feature. As Apache
68699+ verifies the symlink by performing a stat() against the target of
68700+ the symlink before it is followed, an attacker can setup a symlink
68701+ to point to a same-owned file, then replace the symlink with one
68702+ that targets another user's file just after Apache "validates" the
68703+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68704+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68705+ will be in place for the group you specify. If the sysctl option
68706+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68707+ created.
68708+
68709+config GRKERNSEC_SYMLINKOWN_GID
68710+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68711+ depends on GRKERNSEC_SYMLINKOWN
68712+ default 1006
68713+ help
68714+ Setting this GID determines what group kernel-enforced
68715+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68716+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68717+
68718+config GRKERNSEC_FIFO
68719+ bool "FIFO restrictions"
68720+ default y if GRKERNSEC_CONFIG_AUTO
68721+ help
68722+ If you say Y here, users will not be able to write to FIFOs they don't
68723+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68724+ the FIFO is the same owner of the directory it's held in. If the sysctl
68725+ option is enabled, a sysctl option with name "fifo_restrictions" is
68726+ created.
68727+
68728+config GRKERNSEC_SYSFS_RESTRICT
68729+ bool "Sysfs/debugfs restriction"
68730+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68731+ depends on SYSFS
68732+ help
68733+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68734+ any filesystem normally mounted under it (e.g. debugfs) will be
68735+ mostly accessible only by root. These filesystems generally provide access
68736+ to hardware and debug information that isn't appropriate for unprivileged
68737+ users of the system. Sysfs and debugfs have also become a large source
68738+ of new vulnerabilities, ranging from infoleaks to local compromise.
68739+ There has been very little oversight with an eye toward security involved
68740+ in adding new exporters of information to these filesystems, so their
68741+ use is discouraged.
68742+ For reasons of compatibility, a few directories have been whitelisted
68743+ for access by non-root users:
68744+ /sys/fs/selinux
68745+ /sys/fs/fuse
68746+ /sys/devices/system/cpu
68747+
68748+config GRKERNSEC_ROFS
68749+ bool "Runtime read-only mount protection"
68750+ depends on SYSCTL
68751+ help
68752+ If you say Y here, a sysctl option with name "romount_protect" will
68753+ be created. By setting this option to 1 at runtime, filesystems
68754+ will be protected in the following ways:
68755+ * No new writable mounts will be allowed
68756+ * Existing read-only mounts won't be able to be remounted read/write
68757+ * Write operations will be denied on all block devices
68758+ This option acts independently of grsec_lock: once it is set to 1,
68759+ it cannot be turned off. Therefore, please be mindful of the resulting
68760+ behavior if this option is enabled in an init script on a read-only
68761+ filesystem.
68762+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
68763+ and GRKERNSEC_IO should be enabled and module loading disabled via
68764+ config or at runtime.
68765+ This feature is mainly intended for secure embedded systems.
68766+
68767+
68768+config GRKERNSEC_DEVICE_SIDECHANNEL
68769+ bool "Eliminate stat/notify-based device sidechannels"
68770+ default y if GRKERNSEC_CONFIG_AUTO
68771+ help
68772+ If you say Y here, timing analyses on block or character
68773+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
68774+ will be thwarted for unprivileged users. If a process without
68775+ CAP_MKNOD stats such a device, the last access and last modify times
68776+ will match the device's create time. No access or modify events
68777+ will be triggered through inotify/dnotify/fanotify for such devices.
68778+ This feature will prevent attacks that may at a minimum
68779+ allow an attacker to determine the administrator's password length.
68780+
68781+config GRKERNSEC_CHROOT
68782+ bool "Chroot jail restrictions"
68783+ default y if GRKERNSEC_CONFIG_AUTO
68784+ help
68785+ If you say Y here, you will be able to choose several options that will
68786+ make breaking out of a chrooted jail much more difficult. If you
68787+ encounter no software incompatibilities with the following options, it
68788+ is recommended that you enable each one.
68789+
68790+ Note that the chroot restrictions are not intended to apply to "chroots"
68791+ to directories that are simple bind mounts of the global root filesystem.
68792+ For several other reasons, a user shouldn't expect any significant
68793+ security by performing such a chroot.
68794+
68795+config GRKERNSEC_CHROOT_MOUNT
68796+ bool "Deny mounts"
68797+ default y if GRKERNSEC_CONFIG_AUTO
68798+ depends on GRKERNSEC_CHROOT
68799+ help
68800+ If you say Y here, processes inside a chroot will not be able to
68801+ mount or remount filesystems. If the sysctl option is enabled, a
68802+ sysctl option with name "chroot_deny_mount" is created.
68803+
68804+config GRKERNSEC_CHROOT_DOUBLE
68805+ bool "Deny double-chroots"
68806+ default y if GRKERNSEC_CONFIG_AUTO
68807+ depends on GRKERNSEC_CHROOT
68808+ help
68809+ If you say Y here, processes inside a chroot will not be able to chroot
68810+ again outside the chroot. This is a widely used method of breaking
68811+ out of a chroot jail and should not be allowed. If the sysctl
68812+ option is enabled, a sysctl option with name
68813+ "chroot_deny_chroot" is created.
68814+
68815+config GRKERNSEC_CHROOT_PIVOT
68816+ bool "Deny pivot_root in chroot"
68817+ default y if GRKERNSEC_CONFIG_AUTO
68818+ depends on GRKERNSEC_CHROOT
68819+ help
68820+ If you say Y here, processes inside a chroot will not be able to use
68821+ a function called pivot_root() that was introduced in Linux 2.3.41. It
68822+ works similar to chroot in that it changes the root filesystem. This
68823+ function could be misused in a chrooted process to attempt to break out
68824+ of the chroot, and therefore should not be allowed. If the sysctl
68825+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
68826+ created.
68827+
68828+config GRKERNSEC_CHROOT_CHDIR
68829+ bool "Enforce chdir(\"/\") on all chroots"
68830+ default y if GRKERNSEC_CONFIG_AUTO
68831+ depends on GRKERNSEC_CHROOT
68832+ help
68833+ If you say Y here, the current working directory of all newly-chrooted
 68834+ applications will be set to the root directory of the chroot.
68835+ The man page on chroot(2) states:
68836+ Note that this call does not change the current working
68837+ directory, so that `.' can be outside the tree rooted at
68838+ `/'. In particular, the super-user can escape from a
68839+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
68840+
68841+ It is recommended that you say Y here, since it's not known to break
68842+ any software. If the sysctl option is enabled, a sysctl option with
68843+ name "chroot_enforce_chdir" is created.
68844+
68845+config GRKERNSEC_CHROOT_CHMOD
68846+ bool "Deny (f)chmod +s"
68847+ default y if GRKERNSEC_CONFIG_AUTO
68848+ depends on GRKERNSEC_CHROOT
68849+ help
68850+ If you say Y here, processes inside a chroot will not be able to chmod
68851+ or fchmod files to make them have suid or sgid bits. This protects
68852+ against another published method of breaking a chroot. If the sysctl
68853+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
68854+ created.
68855+
68856+config GRKERNSEC_CHROOT_FCHDIR
68857+ bool "Deny fchdir and fhandle out of chroot"
68858+ default y if GRKERNSEC_CONFIG_AUTO
68859+ depends on GRKERNSEC_CHROOT
68860+ help
68861+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
68862+ to a file descriptor of the chrooting process that points to a directory
68863+ outside the filesystem will be stopped. Additionally, this option prevents
68864+ use of the recently-created syscall for opening files by a guessable "file
68865+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
68866+ with name "chroot_deny_fchdir" is created.
68867+
68868+config GRKERNSEC_CHROOT_MKNOD
68869+ bool "Deny mknod"
68870+ default y if GRKERNSEC_CONFIG_AUTO
68871+ depends on GRKERNSEC_CHROOT
68872+ help
68873+ If you say Y here, processes inside a chroot will not be allowed to
68874+ mknod. The problem with using mknod inside a chroot is that it
68875+ would allow an attacker to create a device entry that is the same
68876+ as one on the physical root of your system, which could range from
68877+ anything from the console device to a device for your harddrive (which
68878+ they could then use to wipe the drive or steal data). It is recommended
68879+ that you say Y here, unless you run into software incompatibilities.
68880+ If the sysctl option is enabled, a sysctl option with name
68881+ "chroot_deny_mknod" is created.
68882+
68883+config GRKERNSEC_CHROOT_SHMAT
68884+ bool "Deny shmat() out of chroot"
68885+ default y if GRKERNSEC_CONFIG_AUTO
68886+ depends on GRKERNSEC_CHROOT
68887+ help
68888+ If you say Y here, processes inside a chroot will not be able to attach
68889+ to shared memory segments that were created outside of the chroot jail.
68890+ It is recommended that you say Y here. If the sysctl option is enabled,
68891+ a sysctl option with name "chroot_deny_shmat" is created.
68892+
68893+config GRKERNSEC_CHROOT_UNIX
68894+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
68895+ default y if GRKERNSEC_CONFIG_AUTO
68896+ depends on GRKERNSEC_CHROOT
68897+ help
68898+ If you say Y here, processes inside a chroot will not be able to
68899+ connect to abstract (meaning not belonging to a filesystem) Unix
68900+ domain sockets that were bound outside of a chroot. It is recommended
68901+ that you say Y here. If the sysctl option is enabled, a sysctl option
68902+ with name "chroot_deny_unix" is created.
68903+
68904+config GRKERNSEC_CHROOT_FINDTASK
68905+ bool "Protect outside processes"
68906+ default y if GRKERNSEC_CONFIG_AUTO
68907+ depends on GRKERNSEC_CHROOT
68908+ help
68909+ If you say Y here, processes inside a chroot will not be able to
68910+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
68911+ getsid, or view any process outside of the chroot. If the sysctl
68912+ option is enabled, a sysctl option with name "chroot_findtask" is
68913+ created.
68914+
68915+config GRKERNSEC_CHROOT_NICE
68916+ bool "Restrict priority changes"
68917+ default y if GRKERNSEC_CONFIG_AUTO
68918+ depends on GRKERNSEC_CHROOT
68919+ help
68920+ If you say Y here, processes inside a chroot will not be able to raise
68921+ the priority of processes in the chroot, or alter the priority of
68922+ processes outside the chroot. This provides more security than simply
68923+ removing CAP_SYS_NICE from the process' capability set. If the
68924+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
68925+ is created.
68926+
68927+config GRKERNSEC_CHROOT_SYSCTL
68928+ bool "Deny sysctl writes"
68929+ default y if GRKERNSEC_CONFIG_AUTO
68930+ depends on GRKERNSEC_CHROOT
68931+ help
68932+ If you say Y here, an attacker in a chroot will not be able to
68933+ write to sysctl entries, either by sysctl(2) or through a /proc
68934+ interface. It is strongly recommended that you say Y here. If the
68935+ sysctl option is enabled, a sysctl option with name
68936+ "chroot_deny_sysctl" is created.
68937+
68938+config GRKERNSEC_CHROOT_CAPS
68939+ bool "Capability restrictions"
68940+ default y if GRKERNSEC_CONFIG_AUTO
68941+ depends on GRKERNSEC_CHROOT
68942+ help
68943+ If you say Y here, the capabilities on all processes within a
68944+ chroot jail will be lowered to stop module insertion, raw i/o,
68945+ system and net admin tasks, rebooting the system, modifying immutable
68946+ files, modifying IPC owned by another, and changing the system time.
68947+ This is left an option because it can break some apps. Disable this
68948+ if your chrooted apps are having problems performing those kinds of
68949+ tasks. If the sysctl option is enabled, a sysctl option with
68950+ name "chroot_caps" is created.
68951+
68952+config GRKERNSEC_CHROOT_INITRD
68953+ bool "Exempt initrd tasks from restrictions"
68954+ default y if GRKERNSEC_CONFIG_AUTO
68955+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
68956+ help
68957+ If you say Y here, tasks started prior to init will be exempted from
68958+ grsecurity's chroot restrictions. This option is mainly meant to
68959+ resolve Plymouth's performing privileged operations unnecessarily
68960+ in a chroot.
68961+
68962+endmenu
68963+menu "Kernel Auditing"
68964+depends on GRKERNSEC
68965+
68966+config GRKERNSEC_AUDIT_GROUP
68967+ bool "Single group for auditing"
68968+ help
68969+ If you say Y here, the exec and chdir logging features will only operate
68970+ on a group you specify. This option is recommended if you only want to
68971+ watch certain users instead of having a large amount of logs from the
68972+ entire system. If the sysctl option is enabled, a sysctl option with
68973+ name "audit_group" is created.
68974+
68975+config GRKERNSEC_AUDIT_GID
68976+ int "GID for auditing"
68977+ depends on GRKERNSEC_AUDIT_GROUP
68978+ default 1007
68979+
68980+config GRKERNSEC_EXECLOG
68981+ bool "Exec logging"
68982+ help
68983+ If you say Y here, all execve() calls will be logged (since the
68984+ other exec*() calls are frontends to execve(), all execution
68985+ will be logged). Useful for shell-servers that like to keep track
68986+ of their users. If the sysctl option is enabled, a sysctl option with
68987+ name "exec_logging" is created.
68988+ WARNING: This option when enabled will produce a LOT of logs, especially
68989+ on an active system.
68990+
68991+config GRKERNSEC_RESLOG
68992+ bool "Resource logging"
68993+ default y if GRKERNSEC_CONFIG_AUTO
68994+ help
68995+ If you say Y here, all attempts to overstep resource limits will
68996+ be logged with the resource name, the requested size, and the current
68997+ limit. It is highly recommended that you say Y here. If the sysctl
68998+ option is enabled, a sysctl option with name "resource_logging" is
68999+ created. If the RBAC system is enabled, the sysctl value is ignored.
69000+
69001+config GRKERNSEC_CHROOT_EXECLOG
69002+ bool "Log execs within chroot"
69003+ help
69004+ If you say Y here, all executions inside a chroot jail will be logged
69005+ to syslog. This can cause a large amount of logs if certain
69006+ applications (eg. djb's daemontools) are installed on the system, and
69007+ is therefore left as an option. If the sysctl option is enabled, a
69008+ sysctl option with name "chroot_execlog" is created.
69009+
69010+config GRKERNSEC_AUDIT_PTRACE
69011+ bool "Ptrace logging"
69012+ help
69013+ If you say Y here, all attempts to attach to a process via ptrace
69014+ will be logged. If the sysctl option is enabled, a sysctl option
69015+ with name "audit_ptrace" is created.
69016+
69017+config GRKERNSEC_AUDIT_CHDIR
69018+ bool "Chdir logging"
69019+ help
69020+ If you say Y here, all chdir() calls will be logged. If the sysctl
69021+ option is enabled, a sysctl option with name "audit_chdir" is created.
69022+
69023+config GRKERNSEC_AUDIT_MOUNT
69024+ bool "(Un)Mount logging"
69025+ help
69026+ If you say Y here, all mounts and unmounts will be logged. If the
69027+ sysctl option is enabled, a sysctl option with name "audit_mount" is
69028+ created.
69029+
69030+config GRKERNSEC_SIGNAL
69031+ bool "Signal logging"
69032+ default y if GRKERNSEC_CONFIG_AUTO
69033+ help
69034+ If you say Y here, certain important signals will be logged, such as
 69035+ SIGSEGV, which will as a result inform you when an error in a program
69036+ occurred, which in some cases could mean a possible exploit attempt.
69037+ If the sysctl option is enabled, a sysctl option with name
69038+ "signal_logging" is created.
69039+
69040+config GRKERNSEC_FORKFAIL
69041+ bool "Fork failure logging"
69042+ help
69043+ If you say Y here, all failed fork() attempts will be logged.
69044+ This could suggest a fork bomb, or someone attempting to overstep
69045+ their process limit. If the sysctl option is enabled, a sysctl option
69046+ with name "forkfail_logging" is created.
69047+
69048+config GRKERNSEC_TIME
69049+ bool "Time change logging"
69050+ default y if GRKERNSEC_CONFIG_AUTO
69051+ help
69052+ If you say Y here, any changes of the system clock will be logged.
69053+ If the sysctl option is enabled, a sysctl option with name
69054+ "timechange_logging" is created.
69055+
69056+config GRKERNSEC_PROC_IPADDR
69057+ bool "/proc/<pid>/ipaddr support"
69058+ default y if GRKERNSEC_CONFIG_AUTO
69059+ help
69060+ If you say Y here, a new entry will be added to each /proc/<pid>
69061+ directory that contains the IP address of the person using the task.
69062+ The IP is carried across local TCP and AF_UNIX stream sockets.
69063+ This information can be useful for IDS/IPSes to perform remote response
69064+ to a local attack. The entry is readable by only the owner of the
69065+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
69066+ the RBAC system), and thus does not create privacy concerns.
69067+
69068+config GRKERNSEC_RWXMAP_LOG
69069+ bool 'Denied RWX mmap/mprotect logging'
69070+ default y if GRKERNSEC_CONFIG_AUTO
69071+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
69072+ help
69073+ If you say Y here, calls to mmap() and mprotect() with explicit
69074+ usage of PROT_WRITE and PROT_EXEC together will be logged when
69075+ denied by the PAX_MPROTECT feature. This feature will also
69076+ log other problematic scenarios that can occur when PAX_MPROTECT
69077+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
69078+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
69079+ is created.
69080+
69081+endmenu
69082+
69083+menu "Executable Protections"
69084+depends on GRKERNSEC
69085+
69086+config GRKERNSEC_DMESG
69087+ bool "Dmesg(8) restriction"
69088+ default y if GRKERNSEC_CONFIG_AUTO
69089+ help
69090+ If you say Y here, non-root users will not be able to use dmesg(8)
69091+ to view the contents of the kernel's circular log buffer.
69092+ The kernel's log buffer often contains kernel addresses and other
69093+ identifying information useful to an attacker in fingerprinting a
69094+ system for a targeted exploit.
69095+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
69096+ created.
69097+
69098+config GRKERNSEC_HARDEN_PTRACE
69099+ bool "Deter ptrace-based process snooping"
69100+ default y if GRKERNSEC_CONFIG_AUTO
69101+ help
69102+ If you say Y here, TTY sniffers and other malicious monitoring
69103+ programs implemented through ptrace will be defeated. If you
69104+ have been using the RBAC system, this option has already been
69105+ enabled for several years for all users, with the ability to make
69106+ fine-grained exceptions.
69107+
69108+ This option only affects the ability of non-root users to ptrace
69109+ processes that are not a descendent of the ptracing process.
69110+ This means that strace ./binary and gdb ./binary will still work,
69111+ but attaching to arbitrary processes will not. If the sysctl
69112+ option is enabled, a sysctl option with name "harden_ptrace" is
69113+ created.
69114+
69115+config GRKERNSEC_PTRACE_READEXEC
69116+ bool "Require read access to ptrace sensitive binaries"
69117+ default y if GRKERNSEC_CONFIG_AUTO
69118+ help
69119+ If you say Y here, unprivileged users will not be able to ptrace unreadable
69120+ binaries. This option is useful in environments that
69121+ remove the read bits (e.g. file mode 4711) from suid binaries to
69122+ prevent infoleaking of their contents. This option adds
69123+ consistency to the use of that file mode, as the binary could normally
69124+ be read out when run without privileges while ptracing.
69125+
69126+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
69127+ is created.
69128+
69129+config GRKERNSEC_SETXID
69130+ bool "Enforce consistent multithreaded privileges"
69131+ default y if GRKERNSEC_CONFIG_AUTO
69132+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
69133+ help
69134+ If you say Y here, a change from a root uid to a non-root uid
69135+ in a multithreaded application will cause the resulting uids,
69136+ gids, supplementary groups, and capabilities in that thread
69137+ to be propagated to the other threads of the process. In most
69138+ cases this is unnecessary, as glibc will emulate this behavior
69139+ on behalf of the application. Other libcs do not act in the
69140+ same way, allowing the other threads of the process to continue
69141+ running with root privileges. If the sysctl option is enabled,
69142+ a sysctl option with name "consistent_setxid" is created.
69143+
69144+config GRKERNSEC_HARDEN_IPC
69145+ bool "Disallow access to overly-permissive IPC objects"
69146+ default y if GRKERNSEC_CONFIG_AUTO
69147+ depends on SYSVIPC
69148+ help
69149+ If you say Y here, access to overly-permissive IPC objects (shared
69150+ memory, message queues, and semaphores) will be denied for processes
69151+ given the following criteria beyond normal permission checks:
69152+ 1) If the IPC object is world-accessible and the euid doesn't match
69153+ that of the creator or current uid for the IPC object
69154+ 2) If the IPC object is group-accessible and the egid doesn't
69155+ match that of the creator or current gid for the IPC object
69156+ It's a common error to grant too much permission to these objects,
69157+ with impact ranging from denial of service and information leaking to
69158+ privilege escalation. This feature was developed in response to
69159+ research by Tim Brown:
69160+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
69161+ who found hundreds of such insecure usages. Processes with
69162+ CAP_IPC_OWNER are still permitted to access these IPC objects.
69163+ If the sysctl option is enabled, a sysctl option with name
69164+ "harden_ipc" is created.
69165+
69166+config GRKERNSEC_TPE
69167+ bool "Trusted Path Execution (TPE)"
69168+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69169+ help
69170+ If you say Y here, you will be able to choose a gid to add to the
69171+ supplementary groups of users you want to mark as "untrusted."
69172+ These users will not be able to execute any files that are not in
69173+ root-owned directories writable only by root. If the sysctl option
69174+ is enabled, a sysctl option with name "tpe" is created.
69175+
69176+config GRKERNSEC_TPE_ALL
69177+ bool "Partially restrict all non-root users"
69178+ depends on GRKERNSEC_TPE
69179+ help
69180+ If you say Y here, all non-root users will be covered under
69181+ a weaker TPE restriction. This is separate from, and in addition to,
69182+ the main TPE options that you have selected elsewhere. Thus, if a
69183+ "trusted" GID is chosen, this restriction applies to even that GID.
69184+ Under this restriction, all non-root users will only be allowed to
69185+ execute files in directories they own that are not group or
69186+ world-writable, or in directories owned by root and writable only by
69187+ root. If the sysctl option is enabled, a sysctl option with name
69188+ "tpe_restrict_all" is created.
69189+
69190+config GRKERNSEC_TPE_INVERT
69191+ bool "Invert GID option"
69192+ depends on GRKERNSEC_TPE
69193+ help
69194+ If you say Y here, the group you specify in the TPE configuration will
69195+ decide what group TPE restrictions will be *disabled* for. This
69196+ option is useful if you want TPE restrictions to be applied to most
69197+ users on the system. If the sysctl option is enabled, a sysctl option
69198+ with name "tpe_invert" is created. Unlike other sysctl options, this
69199+ entry will default to on for backward-compatibility.
69200+
69201+config GRKERNSEC_TPE_GID
69202+ int
69203+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
69204+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
69205+
69206+config GRKERNSEC_TPE_UNTRUSTED_GID
69207+ int "GID for TPE-untrusted users"
69208+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
69209+ default 1005
69210+ help
69211+ Setting this GID determines what group TPE restrictions will be
69212+ *enabled* for. If the sysctl option is enabled, a sysctl option
69213+ with name "tpe_gid" is created.
69214+
69215+config GRKERNSEC_TPE_TRUSTED_GID
69216+ int "GID for TPE-trusted users"
69217+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
69218+ default 1005
69219+ help
69220+ Setting this GID determines what group TPE restrictions will be
69221+ *disabled* for. If the sysctl option is enabled, a sysctl option
69222+ with name "tpe_gid" is created.
69223+
69224+endmenu
69225+menu "Network Protections"
69226+depends on GRKERNSEC
69227+
69228+config GRKERNSEC_BLACKHOLE
69229+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
69230+ default y if GRKERNSEC_CONFIG_AUTO
69231+ depends on NET
69232+ help
69233+ If you say Y here, neither TCP resets nor ICMP
69234+ destination-unreachable packets will be sent in response to packets
69235+ sent to ports for which no associated listening process exists.
69236+ It will also prevent the sending of ICMP protocol unreachable packets
69237+ in response to packets with unknown protocols.
69238+ This feature supports both IPV4 and IPV6 and exempts the
69239+ loopback interface from blackholing. Enabling this feature
69240+ makes a host more resilient to DoS attacks and reduces network
69241+ visibility against scanners.
69242+
69243+ The blackhole feature as-implemented is equivalent to the FreeBSD
69244+ blackhole feature, as it prevents RST responses to all packets, not
69245+ just SYNs. Under most application behavior this causes no
69246+ problems, but applications (like haproxy) may not close certain
69247+ connections in a way that cleanly terminates them on the remote
69248+ end, leaving the remote host in LAST_ACK state. Because of this
69249+ side-effect and to prevent intentional LAST_ACK DoSes, this
69250+ feature also adds automatic mitigation against such attacks.
69251+ The mitigation drastically reduces the amount of time a socket
69252+ can spend in LAST_ACK state. If you're using haproxy and not
69253+ all servers it connects to have this option enabled, consider
69254+ disabling this feature on the haproxy host.
69255+
69256+ If the sysctl option is enabled, two sysctl options with names
69257+ "ip_blackhole" and "lastack_retries" will be created.
69258+ While "ip_blackhole" takes the standard zero/non-zero on/off
69259+ toggle, "lastack_retries" uses the same kinds of values as
69260+ "tcp_retries1" and "tcp_retries2". The default value of 4
69261+ prevents a socket from lasting more than 45 seconds in LAST_ACK
69262+ state.
69263+
69264+config GRKERNSEC_NO_SIMULT_CONNECT
69265+ bool "Disable TCP Simultaneous Connect"
69266+ default y if GRKERNSEC_CONFIG_AUTO
69267+ depends on NET
69268+ help
69269+ If you say Y here, a feature by Willy Tarreau will be enabled that
69270+ removes a weakness in Linux's strict implementation of TCP that
69271+ allows two clients to connect to each other without either entering
69272+ a listening state. The weakness allows an attacker to easily prevent
69273+ a client from connecting to a known server provided the source port
69274+ for the connection is guessed correctly.
69275+
69276+ As the weakness could be used to prevent an antivirus or IPS from
69277+ fetching updates, or prevent an SSL gateway from fetching a CRL,
69278+ it should be eliminated by enabling this option. Though Linux is
69279+ one of few operating systems supporting simultaneous connect, it
69280+ has no legitimate use in practice and is rarely supported by firewalls.
69281+
69282+config GRKERNSEC_SOCKET
69283+ bool "Socket restrictions"
69284+ depends on NET
69285+ help
69286+ If you say Y here, you will be able to choose from several options.
69287+ If you assign a GID on your system and add it to the supplementary
69288+ groups of users you want to restrict socket access to, this patch
69289+ will perform up to three things, based on the option(s) you choose.
69290+
69291+config GRKERNSEC_SOCKET_ALL
69292+ bool "Deny any sockets to group"
69293+ depends on GRKERNSEC_SOCKET
69294+ help
69295+ If you say Y here, you will be able to choose a GID of whose users will
69296+ be unable to connect to other hosts from your machine or run server
69297+ applications from your machine. If the sysctl option is enabled, a
69298+ sysctl option with name "socket_all" is created.
69299+
69300+config GRKERNSEC_SOCKET_ALL_GID
69301+ int "GID to deny all sockets for"
69302+ depends on GRKERNSEC_SOCKET_ALL
69303+ default 1004
69304+ help
69305+ Here you can choose the GID to disable socket access for. Remember to
69306+ add the users you want socket access disabled for to the GID
69307+ specified here. If the sysctl option is enabled, a sysctl option
69308+ with name "socket_all_gid" is created.
69309+
69310+config GRKERNSEC_SOCKET_CLIENT
69311+ bool "Deny client sockets to group"
69312+ depends on GRKERNSEC_SOCKET
69313+ help
69314+ If you say Y here, you will be able to choose a GID of whose users will
69315+ be unable to connect to other hosts from your machine, but will be
69316+ able to run servers. If this option is enabled, all users in the group
69317+ you specify will have to use passive mode when initiating ftp transfers
69318+ from the shell on your machine. If the sysctl option is enabled, a
69319+ sysctl option with name "socket_client" is created.
69320+
69321+config GRKERNSEC_SOCKET_CLIENT_GID
69322+ int "GID to deny client sockets for"
69323+ depends on GRKERNSEC_SOCKET_CLIENT
69324+ default 1003
69325+ help
69326+ Here you can choose the GID to disable client socket access for.
69327+ Remember to add the users you want client socket access disabled for to
69328+ the GID specified here. If the sysctl option is enabled, a sysctl
69329+ option with name "socket_client_gid" is created.
69330+
69331+config GRKERNSEC_SOCKET_SERVER
69332+ bool "Deny server sockets to group"
69333+ depends on GRKERNSEC_SOCKET
69334+ help
69335+ If you say Y here, you will be able to choose a GID of whose users will
69336+ be unable to run server applications from your machine. If the sysctl
69337+ option is enabled, a sysctl option with name "socket_server" is created.
69338+
69339+config GRKERNSEC_SOCKET_SERVER_GID
69340+ int "GID to deny server sockets for"
69341+ depends on GRKERNSEC_SOCKET_SERVER
69342+ default 1002
69343+ help
69344+ Here you can choose the GID to disable server socket access for.
69345+ Remember to add the users you want server socket access disabled for to
69346+ the GID specified here. If the sysctl option is enabled, a sysctl
69347+ option with name "socket_server_gid" is created.
69348+
69349+endmenu
69350+
69351+menu "Physical Protections"
69352+depends on GRKERNSEC
69353+
69354+config GRKERNSEC_DENYUSB
69355+ bool "Deny new USB connections after toggle"
69356+ default y if GRKERNSEC_CONFIG_AUTO
69357+ depends on SYSCTL && USB_SUPPORT
69358+ help
69359+ If you say Y here, a new sysctl option with name "deny_new_usb"
69360+ will be created. Setting its value to 1 will prevent any new
69361+ USB devices from being recognized by the OS. Any attempted USB
69362+ device insertion will be logged. This option is intended to be
69363+ used against custom USB devices designed to exploit vulnerabilities
69364+ in various USB device drivers.
69365+
69366+ For greatest effectiveness, this sysctl should be set after any
69367+ relevant init scripts. This option is safe to enable in distros
69368+ as each user can choose whether or not to toggle the sysctl.
69369+
69370+config GRKERNSEC_DENYUSB_FORCE
69371+ bool "Reject all USB devices not connected at boot"
69372+ select USB
69373+ depends on GRKERNSEC_DENYUSB
69374+ help
69375+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
69376+ that doesn't involve a sysctl entry. This option should only be
69377+ enabled if you're sure you want to deny all new USB connections
69378+ at runtime and don't want to modify init scripts. This should not
69379+ be enabled by distros. It forces the core USB code to be built
69380+ into the kernel image so that all devices connected at boot time
69381+ can be recognized and new USB device connections can be prevented
69382+ prior to init running.
69383+
69384+endmenu
69385+
69386+menu "Sysctl Support"
69387+depends on GRKERNSEC && SYSCTL
69388+
69389+config GRKERNSEC_SYSCTL
69390+ bool "Sysctl support"
69391+ default y if GRKERNSEC_CONFIG_AUTO
69392+ help
69393+ If you say Y here, you will be able to change the options that
69394+ grsecurity runs with at bootup, without having to recompile your
69395+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69396+ to enable (1) or disable (0) various features. All the sysctl entries
69397+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69398+ All features enabled in the kernel configuration are disabled at boot
69399+ if you do not say Y to the "Turn on features by default" option.
69400+ All options should be set at startup, and the grsec_lock entry should
69401+ be set to a non-zero value after all the options are set.
69402+ *THIS IS EXTREMELY IMPORTANT*
69403+
69404+config GRKERNSEC_SYSCTL_DISTRO
69405+ bool "Extra sysctl support for distro makers (READ HELP)"
69406+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69407+ help
69408+ If you say Y here, additional sysctl options will be created
69409+ for features that affect processes running as root. Therefore,
69410+ it is critical when using this option that the grsec_lock entry be
69411+ enabled after boot. Only distros with prebuilt kernel packages
69412+ with this option enabled that can ensure grsec_lock is enabled
69413+ after boot should use this option.
69414+ *Failure to set grsec_lock after boot makes all grsec features
69415+ this option covers useless*
69416+
69417+ Currently this option creates the following sysctl entries:
69418+ "Disable Privileged I/O": "disable_priv_io"
69419+
69420+config GRKERNSEC_SYSCTL_ON
69421+ bool "Turn on features by default"
69422+ default y if GRKERNSEC_CONFIG_AUTO
69423+ depends on GRKERNSEC_SYSCTL
69424+ help
69425+ If you say Y here, instead of having all features enabled in the
69426+ kernel configuration disabled at boot time, the features will be
69427+ enabled at boot time. It is recommended you say Y here unless
69428+ there is some reason you would want all sysctl-tunable features to
69429+ be disabled by default. As mentioned elsewhere, it is important
69430+ to enable the grsec_lock entry once you have finished modifying
69431+ the sysctl entries.
69432+
69433+endmenu
69434+menu "Logging Options"
69435+depends on GRKERNSEC
69436+
69437+config GRKERNSEC_FLOODTIME
69438+ int "Seconds in between log messages (minimum)"
69439+ default 10
69440+ help
69441+ This option allows you to enforce the number of seconds between
69442+ grsecurity log messages. The default should be suitable for most
69443+ people, however, if you choose to change it, choose a value small enough
69444+ to allow informative logs to be produced, but large enough to
69445+ prevent flooding.
69446+
69447+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69448+ any rate limiting on grsecurity log messages.
69449+
69450+config GRKERNSEC_FLOODBURST
69451+ int "Number of messages in a burst (maximum)"
69452+ default 6
69453+ help
69454+ This option allows you to choose the maximum number of messages allowed
69455+ within the flood time interval you chose in a separate option. The
69456+ default should be suitable for most people, however if you find that
69457+ many of your logs are being interpreted as flooding, you may want to
69458+ raise this value.
69459+
69460+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69461+ any rate limiting on grsecurity log messages.
69462+
69463+endmenu
69464diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69465new file mode 100644
69466index 0000000..30ababb
69467--- /dev/null
69468+++ b/grsecurity/Makefile
69469@@ -0,0 +1,54 @@
69470+# grsecurity – access control and security hardening for Linux
69471+# All code in this directory and various hooks located throughout the Linux kernel are
69472+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69473+# http://www.grsecurity.net spender@grsecurity.net
69474+#
69475+# This program is free software; you can redistribute it and/or
69476+# modify it under the terms of the GNU General Public License version 2
69477+# as published by the Free Software Foundation.
69478+#
69479+# This program is distributed in the hope that it will be useful,
69480+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69481+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69482+# GNU General Public License for more details.
69483+#
69484+# You should have received a copy of the GNU General Public License
69485+# along with this program; if not, write to the Free Software
69486+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69487+
69488+KBUILD_CFLAGS += -Werror
69489+
69490+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69491+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69492+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69493+ grsec_usb.o grsec_ipc.o grsec_proc.o
69494+
69495+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69496+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69497+ gracl_learn.o grsec_log.o gracl_policy.o
69498+ifdef CONFIG_COMPAT
69499+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69500+endif
69501+
69502+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69503+
69504+ifdef CONFIG_NET
69505+obj-y += grsec_sock.o
69506+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69507+endif
69508+
69509+ifndef CONFIG_GRKERNSEC
69510+obj-y += grsec_disabled.o
69511+endif
69512+
69513+ifdef CONFIG_GRKERNSEC_HIDESYM
69514+extra-y := grsec_hidesym.o
69515+$(obj)/grsec_hidesym.o:
69516+ @-chmod -f 500 /boot
69517+ @-chmod -f 500 /lib/modules
69518+ @-chmod -f 500 /lib64/modules
69519+ @-chmod -f 500 /lib32/modules
69520+ @-chmod -f 700 .
69521+ @-chmod -f 700 $(objtree)
69522+ @echo ' grsec: protected kernel image paths'
69523+endif
69524diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69525new file mode 100644
69526index 0000000..6ae3aa0
69527--- /dev/null
69528+++ b/grsecurity/gracl.c
69529@@ -0,0 +1,2703 @@
69530+#include <linux/kernel.h>
69531+#include <linux/module.h>
69532+#include <linux/sched.h>
69533+#include <linux/mm.h>
69534+#include <linux/file.h>
69535+#include <linux/fs.h>
69536+#include <linux/namei.h>
69537+#include <linux/mount.h>
69538+#include <linux/tty.h>
69539+#include <linux/proc_fs.h>
69540+#include <linux/lglock.h>
69541+#include <linux/slab.h>
69542+#include <linux/vmalloc.h>
69543+#include <linux/types.h>
69544+#include <linux/sysctl.h>
69545+#include <linux/netdevice.h>
69546+#include <linux/ptrace.h>
69547+#include <linux/gracl.h>
69548+#include <linux/gralloc.h>
69549+#include <linux/security.h>
69550+#include <linux/grinternal.h>
69551+#include <linux/pid_namespace.h>
69552+#include <linux/stop_machine.h>
69553+#include <linux/fdtable.h>
69554+#include <linux/percpu.h>
69555+#include <linux/lglock.h>
69556+#include <linux/hugetlb.h>
69557+#include <linux/posix-timers.h>
69558+#include <linux/prefetch.h>
69559+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69560+#include <linux/magic.h>
69561+#include <linux/pagemap.h>
69562+#include "../fs/btrfs/async-thread.h"
69563+#include "../fs/btrfs/ctree.h"
69564+#include "../fs/btrfs/btrfs_inode.h"
69565+#endif
69566+#include "../fs/mount.h"
69567+
69568+#include <asm/uaccess.h>
69569+#include <asm/errno.h>
69570+#include <asm/mman.h>
69571+
69572+#define FOR_EACH_ROLE_START(role) \
69573+ role = running_polstate.role_list; \
69574+ while (role) {
69575+
69576+#define FOR_EACH_ROLE_END(role) \
69577+ role = role->prev; \
69578+ }
69579+
69580+extern struct path gr_real_root;
69581+
69582+static struct gr_policy_state running_polstate;
69583+struct gr_policy_state *polstate = &running_polstate;
69584+extern struct gr_alloc_state *current_alloc_state;
69585+
69586+extern char *gr_shared_page[4];
69587+DEFINE_RWLOCK(gr_inode_lock);
69588+
69589+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69590+
69591+#ifdef CONFIG_NET
69592+extern struct vfsmount *sock_mnt;
69593+#endif
69594+
69595+extern struct vfsmount *pipe_mnt;
69596+extern struct vfsmount *shm_mnt;
69597+
69598+#ifdef CONFIG_HUGETLBFS
69599+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69600+#endif
69601+
69602+extern u16 acl_sp_role_value;
69603+extern struct acl_object_label *fakefs_obj_rw;
69604+extern struct acl_object_label *fakefs_obj_rwx;
69605+
69606+int gr_acl_is_enabled(void)
69607+{
69608+ return (gr_status & GR_READY);
69609+}
69610+
69611+void gr_enable_rbac_system(void)
69612+{
69613+ pax_open_kernel();
69614+ gr_status |= GR_READY;
69615+ pax_close_kernel();
69616+}
69617+
69618+int gr_rbac_disable(void *unused)
69619+{
69620+ pax_open_kernel();
69621+ gr_status &= ~GR_READY;
69622+ pax_close_kernel();
69623+
69624+ return 0;
69625+}
69626+
69627+static inline dev_t __get_dev(const struct dentry *dentry)
69628+{
69629+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69630+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69631+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69632+ else
69633+#endif
69634+ return dentry->d_sb->s_dev;
69635+}
69636+
69637+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69638+{
69639+ return __get_dev(dentry);
69640+}
69641+
69642+static char gr_task_roletype_to_char(struct task_struct *task)
69643+{
69644+ switch (task->role->roletype &
69645+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69646+ GR_ROLE_SPECIAL)) {
69647+ case GR_ROLE_DEFAULT:
69648+ return 'D';
69649+ case GR_ROLE_USER:
69650+ return 'U';
69651+ case GR_ROLE_GROUP:
69652+ return 'G';
69653+ case GR_ROLE_SPECIAL:
69654+ return 'S';
69655+ }
69656+
69657+ return 'X';
69658+}
69659+
69660+char gr_roletype_to_char(void)
69661+{
69662+ return gr_task_roletype_to_char(current);
69663+}
69664+
69665+__inline__ int
69666+gr_acl_tpe_check(void)
69667+{
69668+ if (unlikely(!(gr_status & GR_READY)))
69669+ return 0;
69670+ if (current->role->roletype & GR_ROLE_TPE)
69671+ return 1;
69672+ else
69673+ return 0;
69674+}
69675+
69676+int
69677+gr_handle_rawio(const struct inode *inode)
69678+{
69679+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69680+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69681+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69682+ !capable(CAP_SYS_RAWIO))
69683+ return 1;
69684+#endif
69685+ return 0;
69686+}
69687+
69688+int
69689+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69690+{
69691+ if (likely(lena != lenb))
69692+ return 0;
69693+
69694+ return !memcmp(a, b, lena);
69695+}
69696+
69697+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69698+{
69699+ *buflen -= namelen;
69700+ if (*buflen < 0)
69701+ return -ENAMETOOLONG;
69702+ *buffer -= namelen;
69703+ memcpy(*buffer, str, namelen);
69704+ return 0;
69705+}
69706+
69707+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69708+{
69709+ return prepend(buffer, buflen, name->name, name->len);
69710+}
69711+
69712+static int prepend_path(const struct path *path, struct path *root,
69713+ char **buffer, int *buflen)
69714+{
69715+ struct dentry *dentry = path->dentry;
69716+ struct vfsmount *vfsmnt = path->mnt;
69717+ struct mount *mnt = real_mount(vfsmnt);
69718+ bool slash = false;
69719+ int error = 0;
69720+
69721+ while (dentry != root->dentry || vfsmnt != root->mnt) {
69722+ struct dentry * parent;
69723+
69724+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
69725+ /* Global root? */
69726+ if (!mnt_has_parent(mnt)) {
69727+ goto out;
69728+ }
69729+ dentry = mnt->mnt_mountpoint;
69730+ mnt = mnt->mnt_parent;
69731+ vfsmnt = &mnt->mnt;
69732+ continue;
69733+ }
69734+ parent = dentry->d_parent;
69735+ prefetch(parent);
69736+ spin_lock(&dentry->d_lock);
69737+ error = prepend_name(buffer, buflen, &dentry->d_name);
69738+ spin_unlock(&dentry->d_lock);
69739+ if (!error)
69740+ error = prepend(buffer, buflen, "/", 1);
69741+ if (error)
69742+ break;
69743+
69744+ slash = true;
69745+ dentry = parent;
69746+ }
69747+
69748+out:
69749+ if (!error && !slash)
69750+ error = prepend(buffer, buflen, "/", 1);
69751+
69752+ return error;
69753+}
69754+
69755+/* this must be called with mount_lock and rename_lock held */
69756+
69757+static char *__our_d_path(const struct path *path, struct path *root,
69758+ char *buf, int buflen)
69759+{
69760+ char *res = buf + buflen;
69761+ int error;
69762+
69763+ prepend(&res, &buflen, "\0", 1);
69764+ error = prepend_path(path, root, &res, &buflen);
69765+ if (error)
69766+ return ERR_PTR(error);
69767+
69768+ return res;
69769+}
69770+
69771+static char *
69772+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
69773+{
69774+ char *retval;
69775+
69776+ retval = __our_d_path(path, root, buf, buflen);
69777+ if (unlikely(IS_ERR(retval)))
69778+ retval = strcpy(buf, "<path too long>");
69779+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
69780+ retval[1] = '\0';
69781+
69782+ return retval;
69783+}
69784+
69785+static char *
69786+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69787+ char *buf, int buflen)
69788+{
69789+ struct path path;
69790+ char *res;
69791+
69792+ path.dentry = (struct dentry *)dentry;
69793+ path.mnt = (struct vfsmount *)vfsmnt;
69794+
69795+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
69796+ by the RBAC system */
69797+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
69798+
69799+ return res;
69800+}
69801+
69802+static char *
69803+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69804+ char *buf, int buflen)
69805+{
69806+ char *res;
69807+ struct path path;
69808+ struct path root;
69809+ struct task_struct *reaper = init_pid_ns.child_reaper;
69810+
69811+ path.dentry = (struct dentry *)dentry;
69812+ path.mnt = (struct vfsmount *)vfsmnt;
69813+
69814+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
69815+ get_fs_root(reaper->fs, &root);
69816+
69817+ read_seqlock_excl(&mount_lock);
69818+ write_seqlock(&rename_lock);
69819+ res = gen_full_path(&path, &root, buf, buflen);
69820+ write_sequnlock(&rename_lock);
69821+ read_sequnlock_excl(&mount_lock);
69822+
69823+ path_put(&root);
69824+ return res;
69825+}
69826+
69827+char *
69828+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69829+{
69830+ char *ret;
69831+ read_seqlock_excl(&mount_lock);
69832+ write_seqlock(&rename_lock);
69833+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69834+ PAGE_SIZE);
69835+ write_sequnlock(&rename_lock);
69836+ read_sequnlock_excl(&mount_lock);
69837+ return ret;
69838+}
69839+
69840+static char *
69841+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69842+{
69843+ char *ret;
69844+ char *buf;
69845+ int buflen;
69846+
69847+ read_seqlock_excl(&mount_lock);
69848+ write_seqlock(&rename_lock);
69849+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
69850+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
69851+ buflen = (int)(ret - buf);
69852+ if (buflen >= 5)
69853+ prepend(&ret, &buflen, "/proc", 5);
69854+ else
69855+ ret = strcpy(buf, "<path too long>");
69856+ write_sequnlock(&rename_lock);
69857+ read_sequnlock_excl(&mount_lock);
69858+ return ret;
69859+}
69860+
69861+char *
69862+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
69863+{
69864+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69865+ PAGE_SIZE);
69866+}
69867+
69868+char *
69869+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
69870+{
69871+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69872+ PAGE_SIZE);
69873+}
69874+
69875+char *
69876+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
69877+{
69878+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
69879+ PAGE_SIZE);
69880+}
69881+
69882+char *
69883+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
69884+{
69885+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
69886+ PAGE_SIZE);
69887+}
69888+
69889+char *
69890+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
69891+{
69892+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
69893+ PAGE_SIZE);
69894+}
69895+
69896+__inline__ __u32
69897+to_gr_audit(const __u32 reqmode)
69898+{
69899+ /* masks off auditable permission flags, then shifts them to create
69900+ auditing flags, and adds the special case of append auditing if
69901+ we're requesting write */
69902+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
69903+}
69904+
69905+struct acl_role_label *
69906+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
69907+ const gid_t gid)
69908+{
69909+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
69910+ struct acl_role_label *match;
69911+ struct role_allowed_ip *ipp;
69912+ unsigned int x;
69913+ u32 curr_ip = task->signal->saved_ip;
69914+
69915+ match = state->acl_role_set.r_hash[index];
69916+
69917+ while (match) {
69918+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
69919+ for (x = 0; x < match->domain_child_num; x++) {
69920+ if (match->domain_children[x] == uid)
69921+ goto found;
69922+ }
69923+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
69924+ break;
69925+ match = match->next;
69926+ }
69927+found:
69928+ if (match == NULL) {
69929+ try_group:
69930+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
69931+ match = state->acl_role_set.r_hash[index];
69932+
69933+ while (match) {
69934+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
69935+ for (x = 0; x < match->domain_child_num; x++) {
69936+ if (match->domain_children[x] == gid)
69937+ goto found2;
69938+ }
69939+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
69940+ break;
69941+ match = match->next;
69942+ }
69943+found2:
69944+ if (match == NULL)
69945+ match = state->default_role;
69946+ if (match->allowed_ips == NULL)
69947+ return match;
69948+ else {
69949+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69950+ if (likely
69951+ ((ntohl(curr_ip) & ipp->netmask) ==
69952+ (ntohl(ipp->addr) & ipp->netmask)))
69953+ return match;
69954+ }
69955+ match = state->default_role;
69956+ }
69957+ } else if (match->allowed_ips == NULL) {
69958+ return match;
69959+ } else {
69960+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69961+ if (likely
69962+ ((ntohl(curr_ip) & ipp->netmask) ==
69963+ (ntohl(ipp->addr) & ipp->netmask)))
69964+ return match;
69965+ }
69966+ goto try_group;
69967+ }
69968+
69969+ return match;
69970+}
69971+
69972+static struct acl_role_label *
69973+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
69974+ const gid_t gid)
69975+{
69976+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
69977+}
69978+
69979+struct acl_subject_label *
69980+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
69981+ const struct acl_role_label *role)
69982+{
69983+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69984+ struct acl_subject_label *match;
69985+
69986+ match = role->subj_hash[index];
69987+
69988+ while (match && (match->inode != ino || match->device != dev ||
69989+ (match->mode & GR_DELETED))) {
69990+ match = match->next;
69991+ }
69992+
69993+ if (match && !(match->mode & GR_DELETED))
69994+ return match;
69995+ else
69996+ return NULL;
69997+}
69998+
69999+struct acl_subject_label *
70000+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
70001+ const struct acl_role_label *role)
70002+{
70003+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70004+ struct acl_subject_label *match;
70005+
70006+ match = role->subj_hash[index];
70007+
70008+ while (match && (match->inode != ino || match->device != dev ||
70009+ !(match->mode & GR_DELETED))) {
70010+ match = match->next;
70011+ }
70012+
70013+ if (match && (match->mode & GR_DELETED))
70014+ return match;
70015+ else
70016+ return NULL;
70017+}
70018+
70019+static struct acl_object_label *
70020+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
70021+ const struct acl_subject_label *subj)
70022+{
70023+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70024+ struct acl_object_label *match;
70025+
70026+ match = subj->obj_hash[index];
70027+
70028+ while (match && (match->inode != ino || match->device != dev ||
70029+ (match->mode & GR_DELETED))) {
70030+ match = match->next;
70031+ }
70032+
70033+ if (match && !(match->mode & GR_DELETED))
70034+ return match;
70035+ else
70036+ return NULL;
70037+}
70038+
70039+static struct acl_object_label *
70040+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
70041+ const struct acl_subject_label *subj)
70042+{
70043+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70044+ struct acl_object_label *match;
70045+
70046+ match = subj->obj_hash[index];
70047+
70048+ while (match && (match->inode != ino || match->device != dev ||
70049+ !(match->mode & GR_DELETED))) {
70050+ match = match->next;
70051+ }
70052+
70053+ if (match && (match->mode & GR_DELETED))
70054+ return match;
70055+
70056+ match = subj->obj_hash[index];
70057+
70058+ while (match && (match->inode != ino || match->device != dev ||
70059+ (match->mode & GR_DELETED))) {
70060+ match = match->next;
70061+ }
70062+
70063+ if (match && !(match->mode & GR_DELETED))
70064+ return match;
70065+ else
70066+ return NULL;
70067+}
70068+
70069+struct name_entry *
70070+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
70071+{
70072+ unsigned int len = strlen(name);
70073+ unsigned int key = full_name_hash(name, len);
70074+ unsigned int index = key % state->name_set.n_size;
70075+ struct name_entry *match;
70076+
70077+ match = state->name_set.n_hash[index];
70078+
70079+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
70080+ match = match->next;
70081+
70082+ return match;
70083+}
70084+
70085+static struct name_entry *
70086+lookup_name_entry(const char *name)
70087+{
70088+ return __lookup_name_entry(&running_polstate, name);
70089+}
70090+
70091+static struct name_entry *
70092+lookup_name_entry_create(const char *name)
70093+{
70094+ unsigned int len = strlen(name);
70095+ unsigned int key = full_name_hash(name, len);
70096+ unsigned int index = key % running_polstate.name_set.n_size;
70097+ struct name_entry *match;
70098+
70099+ match = running_polstate.name_set.n_hash[index];
70100+
70101+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70102+ !match->deleted))
70103+ match = match->next;
70104+
70105+ if (match && match->deleted)
70106+ return match;
70107+
70108+ match = running_polstate.name_set.n_hash[index];
70109+
70110+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70111+ match->deleted))
70112+ match = match->next;
70113+
70114+ if (match && !match->deleted)
70115+ return match;
70116+ else
70117+ return NULL;
70118+}
70119+
70120+static struct inodev_entry *
70121+lookup_inodev_entry(const ino_t ino, const dev_t dev)
70122+{
70123+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
70124+ struct inodev_entry *match;
70125+
70126+ match = running_polstate.inodev_set.i_hash[index];
70127+
70128+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
70129+ match = match->next;
70130+
70131+ return match;
70132+}
70133+
70134+void
70135+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
70136+{
70137+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
70138+ state->inodev_set.i_size);
70139+ struct inodev_entry **curr;
70140+
70141+ entry->prev = NULL;
70142+
70143+ curr = &state->inodev_set.i_hash[index];
70144+ if (*curr != NULL)
70145+ (*curr)->prev = entry;
70146+
70147+ entry->next = *curr;
70148+ *curr = entry;
70149+
70150+ return;
70151+}
70152+
70153+static void
70154+insert_inodev_entry(struct inodev_entry *entry)
70155+{
70156+ __insert_inodev_entry(&running_polstate, entry);
70157+}
70158+
70159+void
70160+insert_acl_obj_label(struct acl_object_label *obj,
70161+ struct acl_subject_label *subj)
70162+{
70163+ unsigned int index =
70164+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
70165+ struct acl_object_label **curr;
70166+
70167+ obj->prev = NULL;
70168+
70169+ curr = &subj->obj_hash[index];
70170+ if (*curr != NULL)
70171+ (*curr)->prev = obj;
70172+
70173+ obj->next = *curr;
70174+ *curr = obj;
70175+
70176+ return;
70177+}
70178+
70179+void
70180+insert_acl_subj_label(struct acl_subject_label *obj,
70181+ struct acl_role_label *role)
70182+{
70183+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
70184+ struct acl_subject_label **curr;
70185+
70186+ obj->prev = NULL;
70187+
70188+ curr = &role->subj_hash[index];
70189+ if (*curr != NULL)
70190+ (*curr)->prev = obj;
70191+
70192+ obj->next = *curr;
70193+ *curr = obj;
70194+
70195+ return;
70196+}
70197+
70198+/* derived from glibc fnmatch() 0: match, 1: no match*/
70199+
70200+static int
70201+glob_match(const char *p, const char *n)
70202+{
70203+ char c;
70204+
70205+ while ((c = *p++) != '\0') {
70206+ switch (c) {
70207+ case '?':
70208+ if (*n == '\0')
70209+ return 1;
70210+ else if (*n == '/')
70211+ return 1;
70212+ break;
70213+ case '\\':
70214+ if (*n != c)
70215+ return 1;
70216+ break;
70217+ case '*':
70218+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
70219+ if (*n == '/')
70220+ return 1;
70221+ else if (c == '?') {
70222+ if (*n == '\0')
70223+ return 1;
70224+ else
70225+ ++n;
70226+ }
70227+ }
70228+ if (c == '\0') {
70229+ return 0;
70230+ } else {
70231+ const char *endp;
70232+
70233+ if ((endp = strchr(n, '/')) == NULL)
70234+ endp = n + strlen(n);
70235+
70236+ if (c == '[') {
70237+ for (--p; n < endp; ++n)
70238+ if (!glob_match(p, n))
70239+ return 0;
70240+ } else if (c == '/') {
70241+ while (*n != '\0' && *n != '/')
70242+ ++n;
70243+ if (*n == '/' && !glob_match(p, n + 1))
70244+ return 0;
70245+ } else {
70246+ for (--p; n < endp; ++n)
70247+ if (*n == c && !glob_match(p, n))
70248+ return 0;
70249+ }
70250+
70251+ return 1;
70252+ }
70253+ case '[':
70254+ {
70255+ int not;
70256+ char cold;
70257+
70258+ if (*n == '\0' || *n == '/')
70259+ return 1;
70260+
70261+ not = (*p == '!' || *p == '^');
70262+ if (not)
70263+ ++p;
70264+
70265+ c = *p++;
70266+ for (;;) {
70267+ unsigned char fn = (unsigned char)*n;
70268+
70269+ if (c == '\0')
70270+ return 1;
70271+ else {
70272+ if (c == fn)
70273+ goto matched;
70274+ cold = c;
70275+ c = *p++;
70276+
70277+ if (c == '-' && *p != ']') {
70278+ unsigned char cend = *p++;
70279+
70280+ if (cend == '\0')
70281+ return 1;
70282+
70283+ if (cold <= fn && fn <= cend)
70284+ goto matched;
70285+
70286+ c = *p++;
70287+ }
70288+ }
70289+
70290+ if (c == ']')
70291+ break;
70292+ }
70293+ if (!not)
70294+ return 1;
70295+ break;
70296+ matched:
70297+ while (c != ']') {
70298+ if (c == '\0')
70299+ return 1;
70300+
70301+ c = *p++;
70302+ }
70303+ if (not)
70304+ return 1;
70305+ }
70306+ break;
70307+ default:
70308+ if (c != *n)
70309+ return 1;
70310+ }
70311+
70312+ ++n;
70313+ }
70314+
70315+ if (*n == '\0')
70316+ return 0;
70317+
70318+ if (*n == '/')
70319+ return 0;
70320+
70321+ return 1;
70322+}
70323+
70324+static struct acl_object_label *
70325+chk_glob_label(struct acl_object_label *globbed,
70326+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
70327+{
70328+ struct acl_object_label *tmp;
70329+
70330+ if (*path == NULL)
70331+ *path = gr_to_filename_nolock(dentry, mnt);
70332+
70333+ tmp = globbed;
70334+
70335+ while (tmp) {
70336+ if (!glob_match(tmp->filename, *path))
70337+ return tmp;
70338+ tmp = tmp->next;
70339+ }
70340+
70341+ return NULL;
70342+}
70343+
70344+static struct acl_object_label *
70345+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70346+ const ino_t curr_ino, const dev_t curr_dev,
70347+ const struct acl_subject_label *subj, char **path, const int checkglob)
70348+{
70349+ struct acl_subject_label *tmpsubj;
70350+ struct acl_object_label *retval;
70351+ struct acl_object_label *retval2;
70352+
70353+ tmpsubj = (struct acl_subject_label *) subj;
70354+ read_lock(&gr_inode_lock);
70355+ do {
70356+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
70357+ if (retval) {
70358+ if (checkglob && retval->globbed) {
70359+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
70360+ if (retval2)
70361+ retval = retval2;
70362+ }
70363+ break;
70364+ }
70365+ } while ((tmpsubj = tmpsubj->parent_subject));
70366+ read_unlock(&gr_inode_lock);
70367+
70368+ return retval;
70369+}
70370+
70371+static __inline__ struct acl_object_label *
70372+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70373+ struct dentry *curr_dentry,
70374+ const struct acl_subject_label *subj, char **path, const int checkglob)
70375+{
70376+ int newglob = checkglob;
70377+ ino_t inode;
70378+ dev_t device;
70379+
70380+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
70381+ as we don't want a / * rule to match instead of the / object
70382+ don't do this for create lookups that call this function though, since they're looking up
70383+ on the parent and thus need globbing checks on all paths
70384+ */
70385+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70386+ newglob = GR_NO_GLOB;
70387+
70388+ spin_lock(&curr_dentry->d_lock);
70389+ inode = curr_dentry->d_inode->i_ino;
70390+ device = __get_dev(curr_dentry);
70391+ spin_unlock(&curr_dentry->d_lock);
70392+
70393+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70394+}
70395+
70396+#ifdef CONFIG_HUGETLBFS
70397+static inline bool
70398+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70399+{
70400+ int i;
70401+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70402+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70403+ return true;
70404+ }
70405+
70406+ return false;
70407+}
70408+#endif
70409+
70410+static struct acl_object_label *
70411+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70412+ const struct acl_subject_label *subj, char *path, const int checkglob)
70413+{
70414+ struct dentry *dentry = (struct dentry *) l_dentry;
70415+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70416+ struct mount *real_mnt = real_mount(mnt);
70417+ struct acl_object_label *retval;
70418+ struct dentry *parent;
70419+
70420+ read_seqlock_excl(&mount_lock);
70421+ write_seqlock(&rename_lock);
70422+
70423+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70424+#ifdef CONFIG_NET
70425+ mnt == sock_mnt ||
70426+#endif
70427+#ifdef CONFIG_HUGETLBFS
70428+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70429+#endif
70430+ /* ignore Eric Biederman */
70431+ IS_PRIVATE(l_dentry->d_inode))) {
70432+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70433+ goto out;
70434+ }
70435+
70436+ for (;;) {
70437+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70438+ break;
70439+
70440+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70441+ if (!mnt_has_parent(real_mnt))
70442+ break;
70443+
70444+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70445+ if (retval != NULL)
70446+ goto out;
70447+
70448+ dentry = real_mnt->mnt_mountpoint;
70449+ real_mnt = real_mnt->mnt_parent;
70450+ mnt = &real_mnt->mnt;
70451+ continue;
70452+ }
70453+
70454+ parent = dentry->d_parent;
70455+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70456+ if (retval != NULL)
70457+ goto out;
70458+
70459+ dentry = parent;
70460+ }
70461+
70462+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70463+
70464+ /* gr_real_root is pinned so we don't have to hold a reference */
70465+ if (retval == NULL)
70466+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70467+out:
70468+ write_sequnlock(&rename_lock);
70469+ read_sequnlock_excl(&mount_lock);
70470+
70471+ BUG_ON(retval == NULL);
70472+
70473+ return retval;
70474+}
70475+
70476+static __inline__ struct acl_object_label *
70477+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70478+ const struct acl_subject_label *subj)
70479+{
70480+ char *path = NULL;
70481+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70482+}
70483+
70484+static __inline__ struct acl_object_label *
70485+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70486+ const struct acl_subject_label *subj)
70487+{
70488+ char *path = NULL;
70489+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70490+}
70491+
70492+static __inline__ struct acl_object_label *
70493+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70494+ const struct acl_subject_label *subj, char *path)
70495+{
70496+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70497+}
70498+
70499+struct acl_subject_label *
70500+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70501+ const struct acl_role_label *role)
70502+{
70503+ struct dentry *dentry = (struct dentry *) l_dentry;
70504+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70505+ struct mount *real_mnt = real_mount(mnt);
70506+ struct acl_subject_label *retval;
70507+ struct dentry *parent;
70508+
70509+ read_seqlock_excl(&mount_lock);
70510+ write_seqlock(&rename_lock);
70511+
70512+ for (;;) {
70513+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70514+ break;
70515+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70516+ if (!mnt_has_parent(real_mnt))
70517+ break;
70518+
70519+ spin_lock(&dentry->d_lock);
70520+ read_lock(&gr_inode_lock);
70521+ retval =
70522+ lookup_acl_subj_label(dentry->d_inode->i_ino,
70523+ __get_dev(dentry), role);
70524+ read_unlock(&gr_inode_lock);
70525+ spin_unlock(&dentry->d_lock);
70526+ if (retval != NULL)
70527+ goto out;
70528+
70529+ dentry = real_mnt->mnt_mountpoint;
70530+ real_mnt = real_mnt->mnt_parent;
70531+ mnt = &real_mnt->mnt;
70532+ continue;
70533+ }
70534+
70535+ spin_lock(&dentry->d_lock);
70536+ read_lock(&gr_inode_lock);
70537+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
70538+ __get_dev(dentry), role);
70539+ read_unlock(&gr_inode_lock);
70540+ parent = dentry->d_parent;
70541+ spin_unlock(&dentry->d_lock);
70542+
70543+ if (retval != NULL)
70544+ goto out;
70545+
70546+ dentry = parent;
70547+ }
70548+
70549+ spin_lock(&dentry->d_lock);
70550+ read_lock(&gr_inode_lock);
70551+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
70552+ __get_dev(dentry), role);
70553+ read_unlock(&gr_inode_lock);
70554+ spin_unlock(&dentry->d_lock);
70555+
70556+ if (unlikely(retval == NULL)) {
70557+ /* gr_real_root is pinned, we don't need to hold a reference */
70558+ read_lock(&gr_inode_lock);
70559+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
70560+ __get_dev(gr_real_root.dentry), role);
70561+ read_unlock(&gr_inode_lock);
70562+ }
70563+out:
70564+ write_sequnlock(&rename_lock);
70565+ read_sequnlock_excl(&mount_lock);
70566+
70567+ BUG_ON(retval == NULL);
70568+
70569+ return retval;
70570+}
70571+
70572+void
70573+assign_special_role(const char *rolename)
70574+{
70575+ struct acl_object_label *obj;
70576+ struct acl_role_label *r;
70577+ struct acl_role_label *assigned = NULL;
70578+ struct task_struct *tsk;
70579+ struct file *filp;
70580+
70581+ FOR_EACH_ROLE_START(r)
70582+ if (!strcmp(rolename, r->rolename) &&
70583+ (r->roletype & GR_ROLE_SPECIAL)) {
70584+ assigned = r;
70585+ break;
70586+ }
70587+ FOR_EACH_ROLE_END(r)
70588+
70589+ if (!assigned)
70590+ return;
70591+
70592+ read_lock(&tasklist_lock);
70593+ read_lock(&grsec_exec_file_lock);
70594+
70595+ tsk = current->real_parent;
70596+ if (tsk == NULL)
70597+ goto out_unlock;
70598+
70599+ filp = tsk->exec_file;
70600+ if (filp == NULL)
70601+ goto out_unlock;
70602+
70603+ tsk->is_writable = 0;
70604+ tsk->inherited = 0;
70605+
70606+ tsk->acl_sp_role = 1;
70607+ tsk->acl_role_id = ++acl_sp_role_value;
70608+ tsk->role = assigned;
70609+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70610+
70611+ /* ignore additional mmap checks for processes that are writable
70612+ by the default ACL */
70613+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70614+ if (unlikely(obj->mode & GR_WRITE))
70615+ tsk->is_writable = 1;
70616+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70617+ if (unlikely(obj->mode & GR_WRITE))
70618+ tsk->is_writable = 1;
70619+
70620+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70621+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70622+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70623+#endif
70624+
70625+out_unlock:
70626+ read_unlock(&grsec_exec_file_lock);
70627+ read_unlock(&tasklist_lock);
70628+ return;
70629+}
70630+
70631+
70632+static void
70633+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70634+{
70635+ struct task_struct *task = current;
70636+ const struct cred *cred = current_cred();
70637+
70638+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70639+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70640+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70641+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70642+
70643+ return;
70644+}
70645+
70646+static void
70647+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70648+{
70649+ struct task_struct *task = current;
70650+ const struct cred *cred = current_cred();
70651+
70652+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70653+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70654+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70655+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70656+
70657+ return;
70658+}
70659+
70660+static void
70661+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70662+{
70663+ struct task_struct *task = current;
70664+ const struct cred *cred = current_cred();
70665+
70666+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70667+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70668+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70669+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70670+
70671+ return;
70672+}
70673+
70674+static void
70675+gr_set_proc_res(struct task_struct *task)
70676+{
70677+ struct acl_subject_label *proc;
70678+ unsigned short i;
70679+
70680+ proc = task->acl;
70681+
70682+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70683+ return;
70684+
70685+ for (i = 0; i < RLIM_NLIMITS; i++) {
70686+ if (!(proc->resmask & (1U << i)))
70687+ continue;
70688+
70689+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
70690+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
70691+
70692+ if (i == RLIMIT_CPU)
70693+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
70694+ }
70695+
70696+ return;
70697+}
70698+
70699+/* both of the below must be called with
70700+ rcu_read_lock();
70701+ read_lock(&tasklist_lock);
70702+ read_lock(&grsec_exec_file_lock);
70703+*/
70704+
70705+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
70706+{
70707+ char *tmpname;
70708+ struct acl_subject_label *tmpsubj;
70709+ struct file *filp;
70710+ struct name_entry *nmatch;
70711+
70712+ filp = task->exec_file;
70713+ if (filp == NULL)
70714+ return NULL;
70715+
70716+ /* the following is to apply the correct subject
70717+ on binaries running when the RBAC system
70718+ is enabled, when the binaries have been
70719+ replaced or deleted since their execution
70720+ -----
70721+ when the RBAC system starts, the inode/dev
70722+ from exec_file will be one the RBAC system
70723+ is unaware of. It only knows the inode/dev
70724+ of the present file on disk, or the absence
70725+ of it.
70726+ */
70727+
70728+ if (filename)
70729+ nmatch = __lookup_name_entry(state, filename);
70730+ else {
70731+ preempt_disable();
70732+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
70733+
70734+ nmatch = __lookup_name_entry(state, tmpname);
70735+ preempt_enable();
70736+ }
70737+ tmpsubj = NULL;
70738+ if (nmatch) {
70739+ if (nmatch->deleted)
70740+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
70741+ else
70742+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
70743+ }
70744+ /* this also works for the reload case -- if we don't match a potentially inherited subject
70745+ then we fall back to a normal lookup based on the binary's ino/dev
70746+ */
70747+ if (tmpsubj == NULL)
70748+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
70749+
70750+ return tmpsubj;
70751+}
70752+
70753+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
70754+{
70755+ return __gr_get_subject_for_task(&running_polstate, task, filename);
70756+}
70757+
70758+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
70759+{
70760+ struct acl_object_label *obj;
70761+ struct file *filp;
70762+
70763+ filp = task->exec_file;
70764+
70765+ task->acl = subj;
70766+ task->is_writable = 0;
70767+ /* ignore additional mmap checks for processes that are writable
70768+ by the default ACL */
70769+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
70770+ if (unlikely(obj->mode & GR_WRITE))
70771+ task->is_writable = 1;
70772+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70773+ if (unlikely(obj->mode & GR_WRITE))
70774+ task->is_writable = 1;
70775+
70776+ gr_set_proc_res(task);
70777+
70778+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70779+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70780+#endif
70781+}
70782+
70783+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
70784+{
70785+ __gr_apply_subject_to_task(&running_polstate, task, subj);
70786+}
70787+
70788+__u32
70789+gr_search_file(const struct dentry * dentry, const __u32 mode,
70790+ const struct vfsmount * mnt)
70791+{
70792+ __u32 retval = mode;
70793+ struct acl_subject_label *curracl;
70794+ struct acl_object_label *currobj;
70795+
70796+ if (unlikely(!(gr_status & GR_READY)))
70797+ return (mode & ~GR_AUDITS);
70798+
70799+ curracl = current->acl;
70800+
70801+ currobj = chk_obj_label(dentry, mnt, curracl);
70802+ retval = currobj->mode & mode;
70803+
70804+ /* if we're opening a specified transfer file for writing
70805+ (e.g. /dev/initctl), then transfer our role to init
70806+ */
70807+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
70808+ current->role->roletype & GR_ROLE_PERSIST)) {
70809+ struct task_struct *task = init_pid_ns.child_reaper;
70810+
70811+ if (task->role != current->role) {
70812+ struct acl_subject_label *subj;
70813+
70814+ task->acl_sp_role = 0;
70815+ task->acl_role_id = current->acl_role_id;
70816+ task->role = current->role;
70817+ rcu_read_lock();
70818+ read_lock(&grsec_exec_file_lock);
70819+ subj = gr_get_subject_for_task(task, NULL);
70820+ gr_apply_subject_to_task(task, subj);
70821+ read_unlock(&grsec_exec_file_lock);
70822+ rcu_read_unlock();
70823+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
70824+ }
70825+ }
70826+
70827+ if (unlikely
70828+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
70829+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
70830+ __u32 new_mode = mode;
70831+
70832+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70833+
70834+ retval = new_mode;
70835+
70836+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
70837+ new_mode |= GR_INHERIT;
70838+
70839+ if (!(mode & GR_NOLEARN))
70840+ gr_log_learn(dentry, mnt, new_mode);
70841+ }
70842+
70843+ return retval;
70844+}
70845+
70846+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
70847+ const struct dentry *parent,
70848+ const struct vfsmount *mnt)
70849+{
70850+ struct name_entry *match;
70851+ struct acl_object_label *matchpo;
70852+ struct acl_subject_label *curracl;
70853+ char *path;
70854+
70855+ if (unlikely(!(gr_status & GR_READY)))
70856+ return NULL;
70857+
70858+ preempt_disable();
70859+ path = gr_to_filename_rbac(new_dentry, mnt);
70860+ match = lookup_name_entry_create(path);
70861+
70862+ curracl = current->acl;
70863+
70864+ if (match) {
70865+ read_lock(&gr_inode_lock);
70866+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
70867+ read_unlock(&gr_inode_lock);
70868+
70869+ if (matchpo) {
70870+ preempt_enable();
70871+ return matchpo;
70872+ }
70873+ }
70874+
70875+ // lookup parent
70876+
70877+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
70878+
70879+ preempt_enable();
70880+ return matchpo;
70881+}
70882+
70883+__u32
70884+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
70885+ const struct vfsmount * mnt, const __u32 mode)
70886+{
70887+ struct acl_object_label *matchpo;
70888+ __u32 retval;
70889+
70890+ if (unlikely(!(gr_status & GR_READY)))
70891+ return (mode & ~GR_AUDITS);
70892+
70893+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
70894+
70895+ retval = matchpo->mode & mode;
70896+
70897+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
70898+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70899+ __u32 new_mode = mode;
70900+
70901+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70902+
70903+ gr_log_learn(new_dentry, mnt, new_mode);
70904+ return new_mode;
70905+ }
70906+
70907+ return retval;
70908+}
70909+
70910+__u32
70911+gr_check_link(const struct dentry * new_dentry,
70912+ const struct dentry * parent_dentry,
70913+ const struct vfsmount * parent_mnt,
70914+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
70915+{
70916+ struct acl_object_label *obj;
70917+ __u32 oldmode, newmode;
70918+ __u32 needmode;
70919+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
70920+ GR_DELETE | GR_INHERIT;
70921+
70922+ if (unlikely(!(gr_status & GR_READY)))
70923+ return (GR_CREATE | GR_LINK);
70924+
70925+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
70926+ oldmode = obj->mode;
70927+
70928+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
70929+ newmode = obj->mode;
70930+
70931+ needmode = newmode & checkmodes;
70932+
70933+ // old name for hardlink must have at least the permissions of the new name
70934+ if ((oldmode & needmode) != needmode)
70935+ goto bad;
70936+
70937+ // if old name had restrictions/auditing, make sure the new name does as well
70938+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
70939+
70940+ // don't allow hardlinking of suid/sgid/fcapped files without permission
70941+ if (is_privileged_binary(old_dentry))
70942+ needmode |= GR_SETID;
70943+
70944+ if ((newmode & needmode) != needmode)
70945+ goto bad;
70946+
70947+ // enforce minimum permissions
70948+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
70949+ return newmode;
70950+bad:
70951+ needmode = oldmode;
70952+ if (is_privileged_binary(old_dentry))
70953+ needmode |= GR_SETID;
70954+
70955+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
70956+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
70957+ return (GR_CREATE | GR_LINK);
70958+ } else if (newmode & GR_SUPPRESS)
70959+ return GR_SUPPRESS;
70960+ else
70961+ return 0;
70962+}
70963+
70964+int
70965+gr_check_hidden_task(const struct task_struct *task)
70966+{
70967+ if (unlikely(!(gr_status & GR_READY)))
70968+ return 0;
70969+
70970+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
70971+ return 1;
70972+
70973+ return 0;
70974+}
70975+
70976+int
70977+gr_check_protected_task(const struct task_struct *task)
70978+{
70979+ if (unlikely(!(gr_status & GR_READY) || !task))
70980+ return 0;
70981+
70982+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70983+ task->acl != current->acl)
70984+ return 1;
70985+
70986+ return 0;
70987+}
70988+
70989+int
70990+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70991+{
70992+ struct task_struct *p;
70993+ int ret = 0;
70994+
70995+ if (unlikely(!(gr_status & GR_READY) || !pid))
70996+ return ret;
70997+
70998+ read_lock(&tasklist_lock);
70999+ do_each_pid_task(pid, type, p) {
71000+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71001+ p->acl != current->acl) {
71002+ ret = 1;
71003+ goto out;
71004+ }
71005+ } while_each_pid_task(pid, type, p);
71006+out:
71007+ read_unlock(&tasklist_lock);
71008+
71009+ return ret;
71010+}
71011+
71012+void
71013+gr_copy_label(struct task_struct *tsk)
71014+{
71015+ struct task_struct *p = current;
71016+
71017+ tsk->inherited = p->inherited;
71018+ tsk->acl_sp_role = 0;
71019+ tsk->acl_role_id = p->acl_role_id;
71020+ tsk->acl = p->acl;
71021+ tsk->role = p->role;
71022+ tsk->signal->used_accept = 0;
71023+ tsk->signal->curr_ip = p->signal->curr_ip;
71024+ tsk->signal->saved_ip = p->signal->saved_ip;
71025+ if (p->exec_file)
71026+ get_file(p->exec_file);
71027+ tsk->exec_file = p->exec_file;
71028+ tsk->is_writable = p->is_writable;
71029+ if (unlikely(p->signal->used_accept)) {
71030+ p->signal->curr_ip = 0;
71031+ p->signal->saved_ip = 0;
71032+ }
71033+
71034+ return;
71035+}
71036+
71037+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
71038+
71039+int
71040+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
71041+{
71042+ unsigned int i;
71043+ __u16 num;
71044+ uid_t *uidlist;
71045+ uid_t curuid;
71046+ int realok = 0;
71047+ int effectiveok = 0;
71048+ int fsok = 0;
71049+ uid_t globalreal, globaleffective, globalfs;
71050+
71051+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
71052+ struct user_struct *user;
71053+
71054+ if (!uid_valid(real))
71055+ goto skipit;
71056+
71057+ /* find user based on global namespace */
71058+
71059+ globalreal = GR_GLOBAL_UID(real);
71060+
71061+ user = find_user(make_kuid(&init_user_ns, globalreal));
71062+ if (user == NULL)
71063+ goto skipit;
71064+
71065+ if (gr_process_kernel_setuid_ban(user)) {
71066+ /* for find_user */
71067+ free_uid(user);
71068+ return 1;
71069+ }
71070+
71071+ /* for find_user */
71072+ free_uid(user);
71073+
71074+skipit:
71075+#endif
71076+
71077+ if (unlikely(!(gr_status & GR_READY)))
71078+ return 0;
71079+
71080+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71081+ gr_log_learn_uid_change(real, effective, fs);
71082+
71083+ num = current->acl->user_trans_num;
71084+ uidlist = current->acl->user_transitions;
71085+
71086+ if (uidlist == NULL)
71087+ return 0;
71088+
71089+ if (!uid_valid(real)) {
71090+ realok = 1;
71091+ globalreal = (uid_t)-1;
71092+ } else {
71093+ globalreal = GR_GLOBAL_UID(real);
71094+ }
71095+ if (!uid_valid(effective)) {
71096+ effectiveok = 1;
71097+ globaleffective = (uid_t)-1;
71098+ } else {
71099+ globaleffective = GR_GLOBAL_UID(effective);
71100+ }
71101+ if (!uid_valid(fs)) {
71102+ fsok = 1;
71103+ globalfs = (uid_t)-1;
71104+ } else {
71105+ globalfs = GR_GLOBAL_UID(fs);
71106+ }
71107+
71108+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
71109+ for (i = 0; i < num; i++) {
71110+ curuid = uidlist[i];
71111+ if (globalreal == curuid)
71112+ realok = 1;
71113+ if (globaleffective == curuid)
71114+ effectiveok = 1;
71115+ if (globalfs == curuid)
71116+ fsok = 1;
71117+ }
71118+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
71119+ for (i = 0; i < num; i++) {
71120+ curuid = uidlist[i];
71121+ if (globalreal == curuid)
71122+ break;
71123+ if (globaleffective == curuid)
71124+ break;
71125+ if (globalfs == curuid)
71126+ break;
71127+ }
71128+ /* not in deny list */
71129+ if (i == num) {
71130+ realok = 1;
71131+ effectiveok = 1;
71132+ fsok = 1;
71133+ }
71134+ }
71135+
71136+ if (realok && effectiveok && fsok)
71137+ return 0;
71138+ else {
71139+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71140+ return 1;
71141+ }
71142+}
71143+
71144+int
71145+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
71146+{
71147+ unsigned int i;
71148+ __u16 num;
71149+ gid_t *gidlist;
71150+ gid_t curgid;
71151+ int realok = 0;
71152+ int effectiveok = 0;
71153+ int fsok = 0;
71154+ gid_t globalreal, globaleffective, globalfs;
71155+
71156+ if (unlikely(!(gr_status & GR_READY)))
71157+ return 0;
71158+
71159+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71160+ gr_log_learn_gid_change(real, effective, fs);
71161+
71162+ num = current->acl->group_trans_num;
71163+ gidlist = current->acl->group_transitions;
71164+
71165+ if (gidlist == NULL)
71166+ return 0;
71167+
71168+ if (!gid_valid(real)) {
71169+ realok = 1;
71170+ globalreal = (gid_t)-1;
71171+ } else {
71172+ globalreal = GR_GLOBAL_GID(real);
71173+ }
71174+ if (!gid_valid(effective)) {
71175+ effectiveok = 1;
71176+ globaleffective = (gid_t)-1;
71177+ } else {
71178+ globaleffective = GR_GLOBAL_GID(effective);
71179+ }
71180+ if (!gid_valid(fs)) {
71181+ fsok = 1;
71182+ globalfs = (gid_t)-1;
71183+ } else {
71184+ globalfs = GR_GLOBAL_GID(fs);
71185+ }
71186+
71187+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
71188+ for (i = 0; i < num; i++) {
71189+ curgid = gidlist[i];
71190+ if (globalreal == curgid)
71191+ realok = 1;
71192+ if (globaleffective == curgid)
71193+ effectiveok = 1;
71194+ if (globalfs == curgid)
71195+ fsok = 1;
71196+ }
71197+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
71198+ for (i = 0; i < num; i++) {
71199+ curgid = gidlist[i];
71200+ if (globalreal == curgid)
71201+ break;
71202+ if (globaleffective == curgid)
71203+ break;
71204+ if (globalfs == curgid)
71205+ break;
71206+ }
71207+ /* not in deny list */
71208+ if (i == num) {
71209+ realok = 1;
71210+ effectiveok = 1;
71211+ fsok = 1;
71212+ }
71213+ }
71214+
71215+ if (realok && effectiveok && fsok)
71216+ return 0;
71217+ else {
71218+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71219+ return 1;
71220+ }
71221+}
71222+
71223+extern int gr_acl_is_capable(const int cap);
71224+
71225+void
71226+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
71227+{
71228+ struct acl_role_label *role = task->role;
71229+ struct acl_subject_label *subj = NULL;
71230+ struct acl_object_label *obj;
71231+ struct file *filp;
71232+ uid_t uid;
71233+ gid_t gid;
71234+
71235+ if (unlikely(!(gr_status & GR_READY)))
71236+ return;
71237+
71238+ uid = GR_GLOBAL_UID(kuid);
71239+ gid = GR_GLOBAL_GID(kgid);
71240+
71241+ filp = task->exec_file;
71242+
71243+ /* kernel process, we'll give them the kernel role */
71244+ if (unlikely(!filp)) {
71245+ task->role = running_polstate.kernel_role;
71246+ task->acl = running_polstate.kernel_role->root_label;
71247+ return;
71248+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
71249+ /* save the current ip at time of role lookup so that the proper
71250+ IP will be learned for role_allowed_ip */
71251+ task->signal->saved_ip = task->signal->curr_ip;
71252+ role = lookup_acl_role_label(task, uid, gid);
71253+ }
71254+
71255+ /* don't change the role if we're not a privileged process */
71256+ if (role && task->role != role &&
71257+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
71258+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
71259+ return;
71260+
71261+ /* perform subject lookup in possibly new role
71262+ we can use this result below in the case where role == task->role
71263+ */
71264+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
71265+
71266+ /* if we changed uid/gid, but result in the same role
71267+ and are using inheritance, don't lose the inherited subject
71268+ if current subject is other than what normal lookup
71269+ would result in, we arrived via inheritance, don't
71270+ lose subject
71271+ */
71272+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
71273+ (subj == task->acl)))
71274+ task->acl = subj;
71275+
71276+ /* leave task->inherited unaffected */
71277+
71278+ task->role = role;
71279+
71280+ task->is_writable = 0;
71281+
71282+ /* ignore additional mmap checks for processes that are writable
71283+ by the default ACL */
71284+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71285+ if (unlikely(obj->mode & GR_WRITE))
71286+ task->is_writable = 1;
71287+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71288+ if (unlikely(obj->mode & GR_WRITE))
71289+ task->is_writable = 1;
71290+
71291+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71292+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71293+#endif
71294+
71295+ gr_set_proc_res(task);
71296+
71297+ return;
71298+}
71299+
71300+int
71301+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
71302+ const int unsafe_flags)
71303+{
71304+ struct task_struct *task = current;
71305+ struct acl_subject_label *newacl;
71306+ struct acl_object_label *obj;
71307+ __u32 retmode;
71308+
71309+ if (unlikely(!(gr_status & GR_READY)))
71310+ return 0;
71311+
71312+ newacl = chk_subj_label(dentry, mnt, task->role);
71313+
71314+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
71315+ did an exec
71316+ */
71317+ rcu_read_lock();
71318+ read_lock(&tasklist_lock);
71319+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
71320+ (task->parent->acl->mode & GR_POVERRIDE))) {
71321+ read_unlock(&tasklist_lock);
71322+ rcu_read_unlock();
71323+ goto skip_check;
71324+ }
71325+ read_unlock(&tasklist_lock);
71326+ rcu_read_unlock();
71327+
71328+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
71329+ !(task->role->roletype & GR_ROLE_GOD) &&
71330+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
71331+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71332+ if (unsafe_flags & LSM_UNSAFE_SHARE)
71333+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
71334+ else
71335+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
71336+ return -EACCES;
71337+ }
71338+
71339+skip_check:
71340+
71341+ obj = chk_obj_label(dentry, mnt, task->acl);
71342+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71343+
71344+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71345+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71346+ if (obj->nested)
71347+ task->acl = obj->nested;
71348+ else
71349+ task->acl = newacl;
71350+ task->inherited = 0;
71351+ } else {
71352+ task->inherited = 1;
71353+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71354+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71355+ }
71356+
71357+ task->is_writable = 0;
71358+
71359+ /* ignore additional mmap checks for processes that are writable
71360+ by the default ACL */
71361+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71362+ if (unlikely(obj->mode & GR_WRITE))
71363+ task->is_writable = 1;
71364+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71365+ if (unlikely(obj->mode & GR_WRITE))
71366+ task->is_writable = 1;
71367+
71368+ gr_set_proc_res(task);
71369+
71370+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71371+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71372+#endif
71373+ return 0;
71374+}
71375+
71376+/* always called with valid inodev ptr */
71377+static void
71378+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
71379+{
71380+ struct acl_object_label *matchpo;
71381+ struct acl_subject_label *matchps;
71382+ struct acl_subject_label *subj;
71383+ struct acl_role_label *role;
71384+ unsigned int x;
71385+
71386+ FOR_EACH_ROLE_START(role)
71387+ FOR_EACH_SUBJECT_START(role, subj, x)
71388+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71389+ matchpo->mode |= GR_DELETED;
71390+ FOR_EACH_SUBJECT_END(subj,x)
71391+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71392+ /* nested subjects aren't in the role's subj_hash table */
71393+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71394+ matchpo->mode |= GR_DELETED;
71395+ FOR_EACH_NESTED_SUBJECT_END(subj)
71396+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71397+ matchps->mode |= GR_DELETED;
71398+ FOR_EACH_ROLE_END(role)
71399+
71400+ inodev->nentry->deleted = 1;
71401+
71402+ return;
71403+}
71404+
71405+void
71406+gr_handle_delete(const ino_t ino, const dev_t dev)
71407+{
71408+ struct inodev_entry *inodev;
71409+
71410+ if (unlikely(!(gr_status & GR_READY)))
71411+ return;
71412+
71413+ write_lock(&gr_inode_lock);
71414+ inodev = lookup_inodev_entry(ino, dev);
71415+ if (inodev != NULL)
71416+ do_handle_delete(inodev, ino, dev);
71417+ write_unlock(&gr_inode_lock);
71418+
71419+ return;
71420+}
71421+
71422+static void
71423+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
71424+ const ino_t newinode, const dev_t newdevice,
71425+ struct acl_subject_label *subj)
71426+{
71427+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71428+ struct acl_object_label *match;
71429+
71430+ match = subj->obj_hash[index];
71431+
71432+ while (match && (match->inode != oldinode ||
71433+ match->device != olddevice ||
71434+ !(match->mode & GR_DELETED)))
71435+ match = match->next;
71436+
71437+ if (match && (match->inode == oldinode)
71438+ && (match->device == olddevice)
71439+ && (match->mode & GR_DELETED)) {
71440+ if (match->prev == NULL) {
71441+ subj->obj_hash[index] = match->next;
71442+ if (match->next != NULL)
71443+ match->next->prev = NULL;
71444+ } else {
71445+ match->prev->next = match->next;
71446+ if (match->next != NULL)
71447+ match->next->prev = match->prev;
71448+ }
71449+ match->prev = NULL;
71450+ match->next = NULL;
71451+ match->inode = newinode;
71452+ match->device = newdevice;
71453+ match->mode &= ~GR_DELETED;
71454+
71455+ insert_acl_obj_label(match, subj);
71456+ }
71457+
71458+ return;
71459+}
71460+
71461+static void
71462+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
71463+ const ino_t newinode, const dev_t newdevice,
71464+ struct acl_role_label *role)
71465+{
71466+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71467+ struct acl_subject_label *match;
71468+
71469+ match = role->subj_hash[index];
71470+
71471+ while (match && (match->inode != oldinode ||
71472+ match->device != olddevice ||
71473+ !(match->mode & GR_DELETED)))
71474+ match = match->next;
71475+
71476+ if (match && (match->inode == oldinode)
71477+ && (match->device == olddevice)
71478+ && (match->mode & GR_DELETED)) {
71479+ if (match->prev == NULL) {
71480+ role->subj_hash[index] = match->next;
71481+ if (match->next != NULL)
71482+ match->next->prev = NULL;
71483+ } else {
71484+ match->prev->next = match->next;
71485+ if (match->next != NULL)
71486+ match->next->prev = match->prev;
71487+ }
71488+ match->prev = NULL;
71489+ match->next = NULL;
71490+ match->inode = newinode;
71491+ match->device = newdevice;
71492+ match->mode &= ~GR_DELETED;
71493+
71494+ insert_acl_subj_label(match, role);
71495+ }
71496+
71497+ return;
71498+}
71499+
71500+static void
71501+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
71502+ const ino_t newinode, const dev_t newdevice)
71503+{
71504+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71505+ struct inodev_entry *match;
71506+
71507+ match = running_polstate.inodev_set.i_hash[index];
71508+
71509+ while (match && (match->nentry->inode != oldinode ||
71510+ match->nentry->device != olddevice || !match->nentry->deleted))
71511+ match = match->next;
71512+
71513+ if (match && (match->nentry->inode == oldinode)
71514+ && (match->nentry->device == olddevice) &&
71515+ match->nentry->deleted) {
71516+ if (match->prev == NULL) {
71517+ running_polstate.inodev_set.i_hash[index] = match->next;
71518+ if (match->next != NULL)
71519+ match->next->prev = NULL;
71520+ } else {
71521+ match->prev->next = match->next;
71522+ if (match->next != NULL)
71523+ match->next->prev = match->prev;
71524+ }
71525+ match->prev = NULL;
71526+ match->next = NULL;
71527+ match->nentry->inode = newinode;
71528+ match->nentry->device = newdevice;
71529+ match->nentry->deleted = 0;
71530+
71531+ insert_inodev_entry(match);
71532+ }
71533+
71534+ return;
71535+}
71536+
71537+static void
71538+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
71539+{
71540+ struct acl_subject_label *subj;
71541+ struct acl_role_label *role;
71542+ unsigned int x;
71543+
71544+ FOR_EACH_ROLE_START(role)
71545+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71546+
71547+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71548+ if ((subj->inode == ino) && (subj->device == dev)) {
71549+ subj->inode = ino;
71550+ subj->device = dev;
71551+ }
71552+ /* nested subjects aren't in the role's subj_hash table */
71553+ update_acl_obj_label(matchn->inode, matchn->device,
71554+ ino, dev, subj);
71555+ FOR_EACH_NESTED_SUBJECT_END(subj)
71556+ FOR_EACH_SUBJECT_START(role, subj, x)
71557+ update_acl_obj_label(matchn->inode, matchn->device,
71558+ ino, dev, subj);
71559+ FOR_EACH_SUBJECT_END(subj,x)
71560+ FOR_EACH_ROLE_END(role)
71561+
71562+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71563+
71564+ return;
71565+}
71566+
71567+static void
71568+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71569+ const struct vfsmount *mnt)
71570+{
71571+ ino_t ino = dentry->d_inode->i_ino;
71572+ dev_t dev = __get_dev(dentry);
71573+
71574+ __do_handle_create(matchn, ino, dev);
71575+
71576+ return;
71577+}
71578+
71579+void
71580+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71581+{
71582+ struct name_entry *matchn;
71583+
71584+ if (unlikely(!(gr_status & GR_READY)))
71585+ return;
71586+
71587+ preempt_disable();
71588+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71589+
71590+ if (unlikely((unsigned long)matchn)) {
71591+ write_lock(&gr_inode_lock);
71592+ do_handle_create(matchn, dentry, mnt);
71593+ write_unlock(&gr_inode_lock);
71594+ }
71595+ preempt_enable();
71596+
71597+ return;
71598+}
71599+
71600+void
71601+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71602+{
71603+ struct name_entry *matchn;
71604+
71605+ if (unlikely(!(gr_status & GR_READY)))
71606+ return;
71607+
71608+ preempt_disable();
71609+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71610+
71611+ if (unlikely((unsigned long)matchn)) {
71612+ write_lock(&gr_inode_lock);
71613+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71614+ write_unlock(&gr_inode_lock);
71615+ }
71616+ preempt_enable();
71617+
71618+ return;
71619+}
71620+
71621+void
71622+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71623+ struct dentry *old_dentry,
71624+ struct dentry *new_dentry,
71625+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71626+{
71627+ struct name_entry *matchn;
71628+ struct name_entry *matchn2 = NULL;
71629+ struct inodev_entry *inodev;
71630+ struct inode *inode = new_dentry->d_inode;
71631+ ino_t old_ino = old_dentry->d_inode->i_ino;
71632+ dev_t old_dev = __get_dev(old_dentry);
71633+ unsigned int exchange = flags & RENAME_EXCHANGE;
71634+
71635+ /* vfs_rename swaps the name and parent link for old_dentry and
71636+ new_dentry
71637+ at this point, old_dentry has the new name, parent link, and inode
71638+ for the renamed file
71639+ if a file is being replaced by a rename, new_dentry has the inode
71640+ and name for the replaced file
71641+ */
71642+
71643+ if (unlikely(!(gr_status & GR_READY)))
71644+ return;
71645+
71646+ preempt_disable();
71647+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71648+
71649+ /* exchange cases:
71650+ a filename exists for the source, but not dest
71651+ do a recreate on source
71652+ a filename exists for the dest, but not source
71653+ do a recreate on dest
71654+ a filename exists for both source and dest
71655+ delete source and dest, then create source and dest
71656+ a filename exists for neither source nor dest
71657+ no updates needed
71658+
71659+ the name entry lookups get us the old inode/dev associated with
71660+ each name, so do the deletes first (if possible) so that when
71661+ we do the create, we pick up on the right entries
71662+ */
71663+
71664+ if (exchange)
71665+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71666+
71667+ /* we wouldn't have to check d_inode if it weren't for
71668+ NFS silly-renaming
71669+ */
71670+
71671+ write_lock(&gr_inode_lock);
71672+ if (unlikely((replace || exchange) && inode)) {
71673+ ino_t new_ino = inode->i_ino;
71674+ dev_t new_dev = __get_dev(new_dentry);
71675+
71676+ inodev = lookup_inodev_entry(new_ino, new_dev);
71677+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71678+ do_handle_delete(inodev, new_ino, new_dev);
71679+ }
71680+
71681+ inodev = lookup_inodev_entry(old_ino, old_dev);
71682+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
71683+ do_handle_delete(inodev, old_ino, old_dev);
71684+
71685+ if (unlikely(matchn != NULL))
71686+ do_handle_create(matchn, old_dentry, mnt);
71687+
71688+ if (unlikely(matchn2 != NULL))
71689+ do_handle_create(matchn2, new_dentry, mnt);
71690+
71691+ write_unlock(&gr_inode_lock);
71692+ preempt_enable();
71693+
71694+ return;
71695+}
71696+
71697+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
71698+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
71699+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
71700+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
71701+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
71702+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
71703+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
71704+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
71705+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
71706+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
71707+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
71708+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
71709+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
71710+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
71711+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
71712+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
71713+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
71714+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
71715+};
71716+
71717+void
71718+gr_learn_resource(const struct task_struct *task,
71719+ const int res, const unsigned long wanted, const int gt)
71720+{
71721+ struct acl_subject_label *acl;
71722+ const struct cred *cred;
71723+
71724+ if (unlikely((gr_status & GR_READY) &&
71725+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
71726+ goto skip_reslog;
71727+
71728+ gr_log_resource(task, res, wanted, gt);
71729+skip_reslog:
71730+
71731+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
71732+ return;
71733+
71734+ acl = task->acl;
71735+
71736+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
71737+ !(acl->resmask & (1U << (unsigned short) res))))
71738+ return;
71739+
71740+ if (wanted >= acl->res[res].rlim_cur) {
71741+ unsigned long res_add;
71742+
71743+ res_add = wanted + res_learn_bumps[res];
71744+
71745+ acl->res[res].rlim_cur = res_add;
71746+
71747+ if (wanted > acl->res[res].rlim_max)
71748+ acl->res[res].rlim_max = res_add;
71749+
71750+ /* only log the subject filename, since resource logging is supported for
71751+ single-subject learning only */
71752+ rcu_read_lock();
71753+ cred = __task_cred(task);
71754+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
71755+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
71756+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
71757+ "", (unsigned long) res, &task->signal->saved_ip);
71758+ rcu_read_unlock();
71759+ }
71760+
71761+ return;
71762+}
71763+EXPORT_SYMBOL_GPL(gr_learn_resource);
71764+#endif
71765+
71766+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
71767+void
71768+pax_set_initial_flags(struct linux_binprm *bprm)
71769+{
71770+ struct task_struct *task = current;
71771+ struct acl_subject_label *proc;
71772+ unsigned long flags;
71773+
71774+ if (unlikely(!(gr_status & GR_READY)))
71775+ return;
71776+
71777+ flags = pax_get_flags(task);
71778+
71779+ proc = task->acl;
71780+
71781+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
71782+ flags &= ~MF_PAX_PAGEEXEC;
71783+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
71784+ flags &= ~MF_PAX_SEGMEXEC;
71785+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
71786+ flags &= ~MF_PAX_RANDMMAP;
71787+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
71788+ flags &= ~MF_PAX_EMUTRAMP;
71789+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
71790+ flags &= ~MF_PAX_MPROTECT;
71791+
71792+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
71793+ flags |= MF_PAX_PAGEEXEC;
71794+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
71795+ flags |= MF_PAX_SEGMEXEC;
71796+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
71797+ flags |= MF_PAX_RANDMMAP;
71798+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
71799+ flags |= MF_PAX_EMUTRAMP;
71800+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
71801+ flags |= MF_PAX_MPROTECT;
71802+
71803+ pax_set_flags(task, flags);
71804+
71805+ return;
71806+}
71807+#endif
71808+
71809+int
71810+gr_handle_proc_ptrace(struct task_struct *task)
71811+{
71812+ struct file *filp;
71813+ struct task_struct *tmp = task;
71814+ struct task_struct *curtemp = current;
71815+ __u32 retmode;
71816+
71817+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71818+ if (unlikely(!(gr_status & GR_READY)))
71819+ return 0;
71820+#endif
71821+
71822+ read_lock(&tasklist_lock);
71823+ read_lock(&grsec_exec_file_lock);
71824+ filp = task->exec_file;
71825+
71826+ while (task_pid_nr(tmp) > 0) {
71827+ if (tmp == curtemp)
71828+ break;
71829+ tmp = tmp->real_parent;
71830+ }
71831+
71832+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71833+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
71834+ read_unlock(&grsec_exec_file_lock);
71835+ read_unlock(&tasklist_lock);
71836+ return 1;
71837+ }
71838+
71839+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71840+ if (!(gr_status & GR_READY)) {
71841+ read_unlock(&grsec_exec_file_lock);
71842+ read_unlock(&tasklist_lock);
71843+ return 0;
71844+ }
71845+#endif
71846+
71847+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
71848+ read_unlock(&grsec_exec_file_lock);
71849+ read_unlock(&tasklist_lock);
71850+
71851+ if (retmode & GR_NOPTRACE)
71852+ return 1;
71853+
71854+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
71855+ && (current->acl != task->acl || (current->acl != current->role->root_label
71856+ && task_pid_nr(current) != task_pid_nr(task))))
71857+ return 1;
71858+
71859+ return 0;
71860+}
71861+
71862+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
71863+{
71864+ if (unlikely(!(gr_status & GR_READY)))
71865+ return;
71866+
71867+ if (!(current->role->roletype & GR_ROLE_GOD))
71868+ return;
71869+
71870+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
71871+ p->role->rolename, gr_task_roletype_to_char(p),
71872+ p->acl->filename);
71873+}
71874+
71875+int
71876+gr_handle_ptrace(struct task_struct *task, const long request)
71877+{
71878+ struct task_struct *tmp = task;
71879+ struct task_struct *curtemp = current;
71880+ __u32 retmode;
71881+
71882+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71883+ if (unlikely(!(gr_status & GR_READY)))
71884+ return 0;
71885+#endif
71886+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71887+ read_lock(&tasklist_lock);
71888+ while (task_pid_nr(tmp) > 0) {
71889+ if (tmp == curtemp)
71890+ break;
71891+ tmp = tmp->real_parent;
71892+ }
71893+
71894+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71895+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
71896+ read_unlock(&tasklist_lock);
71897+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71898+ return 1;
71899+ }
71900+ read_unlock(&tasklist_lock);
71901+ }
71902+
71903+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71904+ if (!(gr_status & GR_READY))
71905+ return 0;
71906+#endif
71907+
71908+ read_lock(&grsec_exec_file_lock);
71909+ if (unlikely(!task->exec_file)) {
71910+ read_unlock(&grsec_exec_file_lock);
71911+ return 0;
71912+ }
71913+
71914+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
71915+ read_unlock(&grsec_exec_file_lock);
71916+
71917+ if (retmode & GR_NOPTRACE) {
71918+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71919+ return 1;
71920+ }
71921+
71922+ if (retmode & GR_PTRACERD) {
71923+ switch (request) {
71924+ case PTRACE_SEIZE:
71925+ case PTRACE_POKETEXT:
71926+ case PTRACE_POKEDATA:
71927+ case PTRACE_POKEUSR:
71928+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
71929+ case PTRACE_SETREGS:
71930+ case PTRACE_SETFPREGS:
71931+#endif
71932+#ifdef CONFIG_X86
71933+ case PTRACE_SETFPXREGS:
71934+#endif
71935+#ifdef CONFIG_ALTIVEC
71936+ case PTRACE_SETVRREGS:
71937+#endif
71938+ return 1;
71939+ default:
71940+ return 0;
71941+ }
71942+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
71943+ !(current->role->roletype & GR_ROLE_GOD) &&
71944+ (current->acl != task->acl)) {
71945+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71946+ return 1;
71947+ }
71948+
71949+ return 0;
71950+}
71951+
71952+static int is_writable_mmap(const struct file *filp)
71953+{
71954+ struct task_struct *task = current;
71955+ struct acl_object_label *obj, *obj2;
71956+
71957+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71958+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
71959+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71960+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
71961+ task->role->root_label);
71962+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
71963+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
71964+ return 1;
71965+ }
71966+ }
71967+ return 0;
71968+}
71969+
71970+int
71971+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
71972+{
71973+ __u32 mode;
71974+
71975+ if (unlikely(!file || !(prot & PROT_EXEC)))
71976+ return 1;
71977+
71978+ if (is_writable_mmap(file))
71979+ return 0;
71980+
71981+ mode =
71982+ gr_search_file(file->f_path.dentry,
71983+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71984+ file->f_path.mnt);
71985+
71986+ if (!gr_tpe_allow(file))
71987+ return 0;
71988+
71989+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71990+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71991+ return 0;
71992+ } else if (unlikely(!(mode & GR_EXEC))) {
71993+ return 0;
71994+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
71995+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71996+ return 1;
71997+ }
71998+
71999+ return 1;
72000+}
72001+
72002+int
72003+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
72004+{
72005+ __u32 mode;
72006+
72007+ if (unlikely(!file || !(prot & PROT_EXEC)))
72008+ return 1;
72009+
72010+ if (is_writable_mmap(file))
72011+ return 0;
72012+
72013+ mode =
72014+ gr_search_file(file->f_path.dentry,
72015+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72016+ file->f_path.mnt);
72017+
72018+ if (!gr_tpe_allow(file))
72019+ return 0;
72020+
72021+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72022+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72023+ return 0;
72024+ } else if (unlikely(!(mode & GR_EXEC))) {
72025+ return 0;
72026+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72027+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72028+ return 1;
72029+ }
72030+
72031+ return 1;
72032+}
72033+
72034+void
72035+gr_acl_handle_psacct(struct task_struct *task, const long code)
72036+{
72037+ unsigned long runtime, cputime;
72038+ cputime_t utime, stime;
72039+ unsigned int wday, cday;
72040+ __u8 whr, chr;
72041+ __u8 wmin, cmin;
72042+ __u8 wsec, csec;
72043+ struct timespec curtime, starttime;
72044+
72045+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
72046+ !(task->acl->mode & GR_PROCACCT)))
72047+ return;
72048+
72049+ curtime = ns_to_timespec(ktime_get_ns());
72050+ starttime = ns_to_timespec(task->start_time);
72051+ runtime = curtime.tv_sec - starttime.tv_sec;
72052+ wday = runtime / (60 * 60 * 24);
72053+ runtime -= wday * (60 * 60 * 24);
72054+ whr = runtime / (60 * 60);
72055+ runtime -= whr * (60 * 60);
72056+ wmin = runtime / 60;
72057+ runtime -= wmin * 60;
72058+ wsec = runtime;
72059+
72060+ task_cputime(task, &utime, &stime);
72061+ cputime = cputime_to_secs(utime + stime);
72062+ cday = cputime / (60 * 60 * 24);
72063+ cputime -= cday * (60 * 60 * 24);
72064+ chr = cputime / (60 * 60);
72065+ cputime -= chr * (60 * 60);
72066+ cmin = cputime / 60;
72067+ cputime -= cmin * 60;
72068+ csec = cputime;
72069+
72070+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
72071+
72072+ return;
72073+}
72074+
72075+#ifdef CONFIG_TASKSTATS
72076+int gr_is_taskstats_denied(int pid)
72077+{
72078+ struct task_struct *task;
72079+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72080+ const struct cred *cred;
72081+#endif
72082+ int ret = 0;
72083+
72084+ /* restrict taskstats viewing to un-chrooted root users
72085+ who have the 'view' subject flag if the RBAC system is enabled
72086+ */
72087+
72088+ rcu_read_lock();
72089+ read_lock(&tasklist_lock);
72090+ task = find_task_by_vpid(pid);
72091+ if (task) {
72092+#ifdef CONFIG_GRKERNSEC_CHROOT
72093+ if (proc_is_chrooted(task))
72094+ ret = -EACCES;
72095+#endif
72096+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72097+ cred = __task_cred(task);
72098+#ifdef CONFIG_GRKERNSEC_PROC_USER
72099+ if (gr_is_global_nonroot(cred->uid))
72100+ ret = -EACCES;
72101+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72102+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
72103+ ret = -EACCES;
72104+#endif
72105+#endif
72106+ if (gr_status & GR_READY) {
72107+ if (!(task->acl->mode & GR_VIEW))
72108+ ret = -EACCES;
72109+ }
72110+ } else
72111+ ret = -ENOENT;
72112+
72113+ read_unlock(&tasklist_lock);
72114+ rcu_read_unlock();
72115+
72116+ return ret;
72117+}
72118+#endif
72119+
72120+/* AUXV entries are filled via a descendant of search_binary_handler
72121+ after we've already applied the subject for the target
72122+*/
72123+int gr_acl_enable_at_secure(void)
72124+{
72125+ if (unlikely(!(gr_status & GR_READY)))
72126+ return 0;
72127+
72128+ if (current->acl->mode & GR_ATSECURE)
72129+ return 1;
72130+
72131+ return 0;
72132+}
72133+
72134+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
72135+{
72136+ struct task_struct *task = current;
72137+ struct dentry *dentry = file->f_path.dentry;
72138+ struct vfsmount *mnt = file->f_path.mnt;
72139+ struct acl_object_label *obj, *tmp;
72140+ struct acl_subject_label *subj;
72141+ unsigned int bufsize;
72142+ int is_not_root;
72143+ char *path;
72144+ dev_t dev = __get_dev(dentry);
72145+
72146+ if (unlikely(!(gr_status & GR_READY)))
72147+ return 1;
72148+
72149+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72150+ return 1;
72151+
72152+ /* ignore Eric Biederman */
72153+ if (IS_PRIVATE(dentry->d_inode))
72154+ return 1;
72155+
72156+ subj = task->acl;
72157+ read_lock(&gr_inode_lock);
72158+ do {
72159+ obj = lookup_acl_obj_label(ino, dev, subj);
72160+ if (obj != NULL) {
72161+ read_unlock(&gr_inode_lock);
72162+ return (obj->mode & GR_FIND) ? 1 : 0;
72163+ }
72164+ } while ((subj = subj->parent_subject));
72165+ read_unlock(&gr_inode_lock);
72166+
72167+ /* this is purely an optimization since we're looking for an object
72168+ for the directory we're doing a readdir on
72169+ if it's possible for any globbed object to match the entry we're
72170+ filling into the directory, then the object we find here will be
72171+ an anchor point with attached globbed objects
72172+ */
72173+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
72174+ if (obj->globbed == NULL)
72175+ return (obj->mode & GR_FIND) ? 1 : 0;
72176+
72177+ is_not_root = ((obj->filename[0] == '/') &&
72178+ (obj->filename[1] == '\0')) ? 0 : 1;
72179+ bufsize = PAGE_SIZE - namelen - is_not_root;
72180+
72181+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
72182+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
72183+ return 1;
72184+
72185+ preempt_disable();
72186+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
72187+ bufsize);
72188+
72189+ bufsize = strlen(path);
72190+
72191+ /* if base is "/", don't append an additional slash */
72192+ if (is_not_root)
72193+ *(path + bufsize) = '/';
72194+ memcpy(path + bufsize + is_not_root, name, namelen);
72195+ *(path + bufsize + namelen + is_not_root) = '\0';
72196+
72197+ tmp = obj->globbed;
72198+ while (tmp) {
72199+ if (!glob_match(tmp->filename, path)) {
72200+ preempt_enable();
72201+ return (tmp->mode & GR_FIND) ? 1 : 0;
72202+ }
72203+ tmp = tmp->next;
72204+ }
72205+ preempt_enable();
72206+ return (obj->mode & GR_FIND) ? 1 : 0;
72207+}
72208+
72209+void gr_put_exec_file(struct task_struct *task)
72210+{
72211+ struct file *filp;
72212+
72213+ write_lock(&grsec_exec_file_lock);
72214+ filp = task->exec_file;
72215+ task->exec_file = NULL;
72216+ write_unlock(&grsec_exec_file_lock);
72217+
72218+ if (filp)
72219+ fput(filp);
72220+
72221+ return;
72222+}
72223+
72224+
72225+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
72226+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
72227+#endif
72228+#ifdef CONFIG_SECURITY
72229+EXPORT_SYMBOL_GPL(gr_check_user_change);
72230+EXPORT_SYMBOL_GPL(gr_check_group_change);
72231+#endif
72232+
72233diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
72234new file mode 100644
72235index 0000000..18ffbbd
72236--- /dev/null
72237+++ b/grsecurity/gracl_alloc.c
72238@@ -0,0 +1,105 @@
72239+#include <linux/kernel.h>
72240+#include <linux/mm.h>
72241+#include <linux/slab.h>
72242+#include <linux/vmalloc.h>
72243+#include <linux/gracl.h>
72244+#include <linux/grsecurity.h>
72245+
72246+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
72247+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
72248+
72249+static __inline__ int
72250+alloc_pop(void)
72251+{
72252+ if (current_alloc_state->alloc_stack_next == 1)
72253+ return 0;
72254+
72255+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
72256+
72257+ current_alloc_state->alloc_stack_next--;
72258+
72259+ return 1;
72260+}
72261+
72262+static __inline__ int
72263+alloc_push(void *buf)
72264+{
72265+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
72266+ return 1;
72267+
72268+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
72269+
72270+ current_alloc_state->alloc_stack_next++;
72271+
72272+ return 0;
72273+}
72274+
72275+void *
72276+acl_alloc(unsigned long len)
72277+{
72278+ void *ret = NULL;
72279+
72280+ if (!len || len > PAGE_SIZE)
72281+ goto out;
72282+
72283+ ret = kmalloc(len, GFP_KERNEL);
72284+
72285+ if (ret) {
72286+ if (alloc_push(ret)) {
72287+ kfree(ret);
72288+ ret = NULL;
72289+ }
72290+ }
72291+
72292+out:
72293+ return ret;
72294+}
72295+
72296+void *
72297+acl_alloc_num(unsigned long num, unsigned long len)
72298+{
72299+ if (!len || (num > (PAGE_SIZE / len)))
72300+ return NULL;
72301+
72302+ return acl_alloc(num * len);
72303+}
72304+
72305+void
72306+acl_free_all(void)
72307+{
72308+ if (!current_alloc_state->alloc_stack)
72309+ return;
72310+
72311+ while (alloc_pop()) ;
72312+
72313+ if (current_alloc_state->alloc_stack) {
72314+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
72315+ kfree(current_alloc_state->alloc_stack);
72316+ else
72317+ vfree(current_alloc_state->alloc_stack);
72318+ }
72319+
72320+ current_alloc_state->alloc_stack = NULL;
72321+ current_alloc_state->alloc_stack_size = 1;
72322+ current_alloc_state->alloc_stack_next = 1;
72323+
72324+ return;
72325+}
72326+
72327+int
72328+acl_alloc_stack_init(unsigned long size)
72329+{
72330+ if ((size * sizeof (void *)) <= PAGE_SIZE)
72331+ current_alloc_state->alloc_stack =
72332+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
72333+ else
72334+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
72335+
72336+ current_alloc_state->alloc_stack_size = size;
72337+ current_alloc_state->alloc_stack_next = 1;
72338+
72339+ if (!current_alloc_state->alloc_stack)
72340+ return 0;
72341+ else
72342+ return 1;
72343+}
72344diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72345new file mode 100644
72346index 0000000..1a94c11
72347--- /dev/null
72348+++ b/grsecurity/gracl_cap.c
72349@@ -0,0 +1,127 @@
72350+#include <linux/kernel.h>
72351+#include <linux/module.h>
72352+#include <linux/sched.h>
72353+#include <linux/gracl.h>
72354+#include <linux/grsecurity.h>
72355+#include <linux/grinternal.h>
72356+
72357+extern const char *captab_log[];
72358+extern int captab_log_entries;
72359+
72360+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72361+{
72362+ struct acl_subject_label *curracl;
72363+
72364+ if (!gr_acl_is_enabled())
72365+ return 1;
72366+
72367+ curracl = task->acl;
72368+
72369+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72370+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72371+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72372+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72373+ gr_to_filename(task->exec_file->f_path.dentry,
72374+ task->exec_file->f_path.mnt) : curracl->filename,
72375+ curracl->filename, 0UL,
72376+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72377+ return 1;
72378+ }
72379+
72380+ return 0;
72381+}
72382+
72383+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72384+{
72385+ struct acl_subject_label *curracl;
72386+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72387+ kernel_cap_t cap_audit = __cap_empty_set;
72388+
72389+ if (!gr_acl_is_enabled())
72390+ return 1;
72391+
72392+ curracl = task->acl;
72393+
72394+ cap_drop = curracl->cap_lower;
72395+ cap_mask = curracl->cap_mask;
72396+ cap_audit = curracl->cap_invert_audit;
72397+
72398+ while ((curracl = curracl->parent_subject)) {
72399+ /* if the cap isn't specified in the current computed mask but is specified in the
72400+ current level subject, and is lowered in the current level subject, then add
72401+ it to the set of dropped capabilities
72402+ otherwise, add the current level subject's mask to the current computed mask
72403+ */
72404+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72405+ cap_raise(cap_mask, cap);
72406+ if (cap_raised(curracl->cap_lower, cap))
72407+ cap_raise(cap_drop, cap);
72408+ if (cap_raised(curracl->cap_invert_audit, cap))
72409+ cap_raise(cap_audit, cap);
72410+ }
72411+ }
72412+
72413+ if (!cap_raised(cap_drop, cap)) {
72414+ if (cap_raised(cap_audit, cap))
72415+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72416+ return 1;
72417+ }
72418+
72419+ /* only learn the capability use if the process has the capability in the
72420+ general case, the two uses in sys.c of gr_learn_cap are an exception
72421+ to this rule to ensure any role transition involves what the full-learned
72422+ policy believes in a privileged process
72423+ */
72424+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72425+ return 1;
72426+
72427+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72428+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72429+
72430+ return 0;
72431+}
72432+
72433+int
72434+gr_acl_is_capable(const int cap)
72435+{
72436+ return gr_task_acl_is_capable(current, current_cred(), cap);
72437+}
72438+
72439+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72440+{
72441+ struct acl_subject_label *curracl;
72442+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72443+
72444+ if (!gr_acl_is_enabled())
72445+ return 1;
72446+
72447+ curracl = task->acl;
72448+
72449+ cap_drop = curracl->cap_lower;
72450+ cap_mask = curracl->cap_mask;
72451+
72452+ while ((curracl = curracl->parent_subject)) {
72453+ /* if the cap isn't specified in the current computed mask but is specified in the
72454+ current level subject, and is lowered in the current level subject, then add
72455+ it to the set of dropped capabilities
72456+ otherwise, add the current level subject's mask to the current computed mask
72457+ */
72458+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72459+ cap_raise(cap_mask, cap);
72460+ if (cap_raised(curracl->cap_lower, cap))
72461+ cap_raise(cap_drop, cap);
72462+ }
72463+ }
72464+
72465+ if (!cap_raised(cap_drop, cap))
72466+ return 1;
72467+
72468+ return 0;
72469+}
72470+
72471+int
72472+gr_acl_is_capable_nolog(const int cap)
72473+{
72474+ return gr_task_acl_is_capable_nolog(current, cap);
72475+}
72476+
72477diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72478new file mode 100644
72479index 0000000..ca25605
72480--- /dev/null
72481+++ b/grsecurity/gracl_compat.c
72482@@ -0,0 +1,270 @@
72483+#include <linux/kernel.h>
72484+#include <linux/gracl.h>
72485+#include <linux/compat.h>
72486+#include <linux/gracl_compat.h>
72487+
72488+#include <asm/uaccess.h>
72489+
72490+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72491+{
72492+ struct gr_arg_wrapper_compat uwrapcompat;
72493+
72494+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72495+ return -EFAULT;
72496+
72497+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
72498+ (uwrapcompat.version != 0x2901)) ||
72499+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72500+ return -EINVAL;
72501+
72502+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72503+ uwrap->version = uwrapcompat.version;
72504+ uwrap->size = sizeof(struct gr_arg);
72505+
72506+ return 0;
72507+}
72508+
72509+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72510+{
72511+ struct gr_arg_compat argcompat;
72512+
72513+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72514+ return -EFAULT;
72515+
72516+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72517+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72518+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72519+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72520+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72521+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72522+
72523+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72524+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72525+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72526+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72527+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72528+ arg->segv_device = argcompat.segv_device;
72529+ arg->segv_inode = argcompat.segv_inode;
72530+ arg->segv_uid = argcompat.segv_uid;
72531+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72532+ arg->mode = argcompat.mode;
72533+
72534+ return 0;
72535+}
72536+
72537+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72538+{
72539+ struct acl_object_label_compat objcompat;
72540+
72541+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72542+ return -EFAULT;
72543+
72544+ obj->filename = compat_ptr(objcompat.filename);
72545+ obj->inode = objcompat.inode;
72546+ obj->device = objcompat.device;
72547+ obj->mode = objcompat.mode;
72548+
72549+ obj->nested = compat_ptr(objcompat.nested);
72550+ obj->globbed = compat_ptr(objcompat.globbed);
72551+
72552+ obj->prev = compat_ptr(objcompat.prev);
72553+ obj->next = compat_ptr(objcompat.next);
72554+
72555+ return 0;
72556+}
72557+
72558+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72559+{
72560+ unsigned int i;
72561+ struct acl_subject_label_compat subjcompat;
72562+
72563+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72564+ return -EFAULT;
72565+
72566+ subj->filename = compat_ptr(subjcompat.filename);
72567+ subj->inode = subjcompat.inode;
72568+ subj->device = subjcompat.device;
72569+ subj->mode = subjcompat.mode;
72570+ subj->cap_mask = subjcompat.cap_mask;
72571+ subj->cap_lower = subjcompat.cap_lower;
72572+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72573+
72574+ for (i = 0; i < GR_NLIMITS; i++) {
72575+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72576+ subj->res[i].rlim_cur = RLIM_INFINITY;
72577+ else
72578+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72579+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72580+ subj->res[i].rlim_max = RLIM_INFINITY;
72581+ else
72582+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72583+ }
72584+ subj->resmask = subjcompat.resmask;
72585+
72586+ subj->user_trans_type = subjcompat.user_trans_type;
72587+ subj->group_trans_type = subjcompat.group_trans_type;
72588+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72589+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72590+ subj->user_trans_num = subjcompat.user_trans_num;
72591+ subj->group_trans_num = subjcompat.group_trans_num;
72592+
72593+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72594+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72595+ subj->ip_type = subjcompat.ip_type;
72596+ subj->ips = compat_ptr(subjcompat.ips);
72597+ subj->ip_num = subjcompat.ip_num;
72598+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72599+
72600+ subj->crashes = subjcompat.crashes;
72601+ subj->expires = subjcompat.expires;
72602+
72603+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72604+ subj->hash = compat_ptr(subjcompat.hash);
72605+ subj->prev = compat_ptr(subjcompat.prev);
72606+ subj->next = compat_ptr(subjcompat.next);
72607+
72608+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72609+ subj->obj_hash_size = subjcompat.obj_hash_size;
72610+ subj->pax_flags = subjcompat.pax_flags;
72611+
72612+ return 0;
72613+}
72614+
72615+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72616+{
72617+ struct acl_role_label_compat rolecompat;
72618+
72619+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72620+ return -EFAULT;
72621+
72622+ role->rolename = compat_ptr(rolecompat.rolename);
72623+ role->uidgid = rolecompat.uidgid;
72624+ role->roletype = rolecompat.roletype;
72625+
72626+ role->auth_attempts = rolecompat.auth_attempts;
72627+ role->expires = rolecompat.expires;
72628+
72629+ role->root_label = compat_ptr(rolecompat.root_label);
72630+ role->hash = compat_ptr(rolecompat.hash);
72631+
72632+ role->prev = compat_ptr(rolecompat.prev);
72633+ role->next = compat_ptr(rolecompat.next);
72634+
72635+ role->transitions = compat_ptr(rolecompat.transitions);
72636+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72637+ role->domain_children = compat_ptr(rolecompat.domain_children);
72638+ role->domain_child_num = rolecompat.domain_child_num;
72639+
72640+ role->umask = rolecompat.umask;
72641+
72642+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72643+ role->subj_hash_size = rolecompat.subj_hash_size;
72644+
72645+ return 0;
72646+}
72647+
72648+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72649+{
72650+ struct role_allowed_ip_compat roleip_compat;
72651+
72652+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72653+ return -EFAULT;
72654+
72655+ roleip->addr = roleip_compat.addr;
72656+ roleip->netmask = roleip_compat.netmask;
72657+
72658+ roleip->prev = compat_ptr(roleip_compat.prev);
72659+ roleip->next = compat_ptr(roleip_compat.next);
72660+
72661+ return 0;
72662+}
72663+
72664+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72665+{
72666+ struct role_transition_compat trans_compat;
72667+
72668+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72669+ return -EFAULT;
72670+
72671+ trans->rolename = compat_ptr(trans_compat.rolename);
72672+
72673+ trans->prev = compat_ptr(trans_compat.prev);
72674+ trans->next = compat_ptr(trans_compat.next);
72675+
72676+ return 0;
72677+
72678+}
72679+
72680+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72681+{
72682+ struct gr_hash_struct_compat hash_compat;
72683+
72684+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
72685+ return -EFAULT;
72686+
72687+ hash->table = compat_ptr(hash_compat.table);
72688+ hash->nametable = compat_ptr(hash_compat.nametable);
72689+ hash->first = compat_ptr(hash_compat.first);
72690+
72691+ hash->table_size = hash_compat.table_size;
72692+ hash->used_size = hash_compat.used_size;
72693+
72694+ hash->type = hash_compat.type;
72695+
72696+ return 0;
72697+}
72698+
72699+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
72700+{
72701+ compat_uptr_t ptrcompat;
72702+
72703+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
72704+ return -EFAULT;
72705+
72706+ *(void **)ptr = compat_ptr(ptrcompat);
72707+
72708+ return 0;
72709+}
72710+
72711+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
72712+{
72713+ struct acl_ip_label_compat ip_compat;
72714+
72715+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
72716+ return -EFAULT;
72717+
72718+ ip->iface = compat_ptr(ip_compat.iface);
72719+ ip->addr = ip_compat.addr;
72720+ ip->netmask = ip_compat.netmask;
72721+ ip->low = ip_compat.low;
72722+ ip->high = ip_compat.high;
72723+ ip->mode = ip_compat.mode;
72724+ ip->type = ip_compat.type;
72725+
72726+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
72727+
72728+ ip->prev = compat_ptr(ip_compat.prev);
72729+ ip->next = compat_ptr(ip_compat.next);
72730+
72731+ return 0;
72732+}
72733+
72734+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
72735+{
72736+ struct sprole_pw_compat pw_compat;
72737+
72738+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
72739+ return -EFAULT;
72740+
72741+ pw->rolename = compat_ptr(pw_compat.rolename);
72742+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
72743+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
72744+
72745+ return 0;
72746+}
72747+
72748+size_t get_gr_arg_wrapper_size_compat(void)
72749+{
72750+ return sizeof(struct gr_arg_wrapper_compat);
72751+}
72752+
72753diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
72754new file mode 100644
72755index 0000000..4008fdc
72756--- /dev/null
72757+++ b/grsecurity/gracl_fs.c
72758@@ -0,0 +1,445 @@
72759+#include <linux/kernel.h>
72760+#include <linux/sched.h>
72761+#include <linux/types.h>
72762+#include <linux/fs.h>
72763+#include <linux/file.h>
72764+#include <linux/stat.h>
72765+#include <linux/grsecurity.h>
72766+#include <linux/grinternal.h>
72767+#include <linux/gracl.h>
72768+
72769+umode_t
72770+gr_acl_umask(void)
72771+{
72772+ if (unlikely(!gr_acl_is_enabled()))
72773+ return 0;
72774+
72775+ return current->role->umask;
72776+}
72777+
72778+__u32
72779+gr_acl_handle_hidden_file(const struct dentry * dentry,
72780+ const struct vfsmount * mnt)
72781+{
72782+ __u32 mode;
72783+
72784+ if (unlikely(d_is_negative(dentry)))
72785+ return GR_FIND;
72786+
72787+ mode =
72788+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
72789+
72790+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
72791+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72792+ return mode;
72793+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
72794+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72795+ return 0;
72796+ } else if (unlikely(!(mode & GR_FIND)))
72797+ return 0;
72798+
72799+ return GR_FIND;
72800+}
72801+
72802+__u32
72803+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72804+ int acc_mode)
72805+{
72806+ __u32 reqmode = GR_FIND;
72807+ __u32 mode;
72808+
72809+ if (unlikely(d_is_negative(dentry)))
72810+ return reqmode;
72811+
72812+ if (acc_mode & MAY_APPEND)
72813+ reqmode |= GR_APPEND;
72814+ else if (acc_mode & MAY_WRITE)
72815+ reqmode |= GR_WRITE;
72816+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
72817+ reqmode |= GR_READ;
72818+
72819+ mode =
72820+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72821+ mnt);
72822+
72823+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72824+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72825+ reqmode & GR_READ ? " reading" : "",
72826+ reqmode & GR_WRITE ? " writing" : reqmode &
72827+ GR_APPEND ? " appending" : "");
72828+ return reqmode;
72829+ } else
72830+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72831+ {
72832+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72833+ reqmode & GR_READ ? " reading" : "",
72834+ reqmode & GR_WRITE ? " writing" : reqmode &
72835+ GR_APPEND ? " appending" : "");
72836+ return 0;
72837+ } else if (unlikely((mode & reqmode) != reqmode))
72838+ return 0;
72839+
72840+ return reqmode;
72841+}
72842+
72843+__u32
72844+gr_acl_handle_creat(const struct dentry * dentry,
72845+ const struct dentry * p_dentry,
72846+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72847+ const int imode)
72848+{
72849+ __u32 reqmode = GR_WRITE | GR_CREATE;
72850+ __u32 mode;
72851+
72852+ if (acc_mode & MAY_APPEND)
72853+ reqmode |= GR_APPEND;
72854+ // if a directory was required or the directory already exists, then
72855+ // don't count this open as a read
72856+ if ((acc_mode & MAY_READ) &&
72857+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
72858+ reqmode |= GR_READ;
72859+ if ((open_flags & O_CREAT) &&
72860+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72861+ reqmode |= GR_SETID;
72862+
72863+ mode =
72864+ gr_check_create(dentry, p_dentry, p_mnt,
72865+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72866+
72867+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72868+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72869+ reqmode & GR_READ ? " reading" : "",
72870+ reqmode & GR_WRITE ? " writing" : reqmode &
72871+ GR_APPEND ? " appending" : "");
72872+ return reqmode;
72873+ } else
72874+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72875+ {
72876+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72877+ reqmode & GR_READ ? " reading" : "",
72878+ reqmode & GR_WRITE ? " writing" : reqmode &
72879+ GR_APPEND ? " appending" : "");
72880+ return 0;
72881+ } else if (unlikely((mode & reqmode) != reqmode))
72882+ return 0;
72883+
72884+ return reqmode;
72885+}
72886+
72887+__u32
72888+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
72889+ const int fmode)
72890+{
72891+ __u32 mode, reqmode = GR_FIND;
72892+
72893+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
72894+ reqmode |= GR_EXEC;
72895+ if (fmode & S_IWOTH)
72896+ reqmode |= GR_WRITE;
72897+ if (fmode & S_IROTH)
72898+ reqmode |= GR_READ;
72899+
72900+ mode =
72901+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72902+ mnt);
72903+
72904+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72905+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72906+ reqmode & GR_READ ? " reading" : "",
72907+ reqmode & GR_WRITE ? " writing" : "",
72908+ reqmode & GR_EXEC ? " executing" : "");
72909+ return reqmode;
72910+ } else
72911+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72912+ {
72913+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72914+ reqmode & GR_READ ? " reading" : "",
72915+ reqmode & GR_WRITE ? " writing" : "",
72916+ reqmode & GR_EXEC ? " executing" : "");
72917+ return 0;
72918+ } else if (unlikely((mode & reqmode) != reqmode))
72919+ return 0;
72920+
72921+ return reqmode;
72922+}
72923+
72924+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
72925+{
72926+ __u32 mode;
72927+
72928+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
72929+
72930+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72931+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
72932+ return mode;
72933+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72934+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
72935+ return 0;
72936+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72937+ return 0;
72938+
72939+ return (reqmode);
72940+}
72941+
72942+__u32
72943+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72944+{
72945+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
72946+}
72947+
72948+__u32
72949+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
72950+{
72951+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
72952+}
72953+
72954+__u32
72955+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
72956+{
72957+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
72958+}
72959+
72960+__u32
72961+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
72962+{
72963+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
72964+}
72965+
72966+__u32
72967+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
72968+ umode_t *modeptr)
72969+{
72970+ umode_t mode;
72971+
72972+ *modeptr &= ~gr_acl_umask();
72973+ mode = *modeptr;
72974+
72975+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
72976+ return 1;
72977+
72978+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
72979+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
72980+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
72981+ GR_CHMOD_ACL_MSG);
72982+ } else {
72983+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
72984+ }
72985+}
72986+
72987+__u32
72988+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
72989+{
72990+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
72991+}
72992+
72993+__u32
72994+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
72995+{
72996+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
72997+}
72998+
72999+__u32
73000+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
73001+{
73002+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
73003+}
73004+
73005+__u32
73006+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
73007+{
73008+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
73009+}
73010+
73011+__u32
73012+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
73013+{
73014+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
73015+ GR_UNIXCONNECT_ACL_MSG);
73016+}
73017+
73018+/* hardlinks require at minimum create and link permission,
73019+ any additional privilege required is based on the
73020+ privilege of the file being linked to
73021+*/
73022+__u32
73023+gr_acl_handle_link(const struct dentry * new_dentry,
73024+ const struct dentry * parent_dentry,
73025+ const struct vfsmount * parent_mnt,
73026+ const struct dentry * old_dentry,
73027+ const struct vfsmount * old_mnt, const struct filename *to)
73028+{
73029+ __u32 mode;
73030+ __u32 needmode = GR_CREATE | GR_LINK;
73031+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
73032+
73033+ mode =
73034+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
73035+ old_mnt);
73036+
73037+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
73038+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73039+ return mode;
73040+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73041+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73042+ return 0;
73043+ } else if (unlikely((mode & needmode) != needmode))
73044+ return 0;
73045+
73046+ return 1;
73047+}
73048+
73049+__u32
73050+gr_acl_handle_symlink(const struct dentry * new_dentry,
73051+ const struct dentry * parent_dentry,
73052+ const struct vfsmount * parent_mnt, const struct filename *from)
73053+{
73054+ __u32 needmode = GR_WRITE | GR_CREATE;
73055+ __u32 mode;
73056+
73057+ mode =
73058+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
73059+ GR_CREATE | GR_AUDIT_CREATE |
73060+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
73061+
73062+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
73063+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73064+ return mode;
73065+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73066+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73067+ return 0;
73068+ } else if (unlikely((mode & needmode) != needmode))
73069+ return 0;
73070+
73071+ return (GR_WRITE | GR_CREATE);
73072+}
73073+
73074+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
73075+{
73076+ __u32 mode;
73077+
73078+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73079+
73080+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73081+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
73082+ return mode;
73083+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73084+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
73085+ return 0;
73086+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73087+ return 0;
73088+
73089+ return (reqmode);
73090+}
73091+
73092+__u32
73093+gr_acl_handle_mknod(const struct dentry * new_dentry,
73094+ const struct dentry * parent_dentry,
73095+ const struct vfsmount * parent_mnt,
73096+ const int mode)
73097+{
73098+ __u32 reqmode = GR_WRITE | GR_CREATE;
73099+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73100+ reqmode |= GR_SETID;
73101+
73102+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73103+ reqmode, GR_MKNOD_ACL_MSG);
73104+}
73105+
73106+__u32
73107+gr_acl_handle_mkdir(const struct dentry *new_dentry,
73108+ const struct dentry *parent_dentry,
73109+ const struct vfsmount *parent_mnt)
73110+{
73111+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73112+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
73113+}
73114+
73115+#define RENAME_CHECK_SUCCESS(old, new) \
73116+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
73117+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
73118+
73119+int
73120+gr_acl_handle_rename(struct dentry *new_dentry,
73121+ struct dentry *parent_dentry,
73122+ const struct vfsmount *parent_mnt,
73123+ struct dentry *old_dentry,
73124+ struct inode *old_parent_inode,
73125+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
73126+{
73127+ __u32 comp1, comp2;
73128+ int error = 0;
73129+
73130+ if (unlikely(!gr_acl_is_enabled()))
73131+ return 0;
73132+
73133+ if (flags & RENAME_EXCHANGE) {
73134+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73135+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73136+ GR_SUPPRESS, parent_mnt);
73137+ comp2 =
73138+ gr_search_file(old_dentry,
73139+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73140+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73141+ } else if (d_is_negative(new_dentry)) {
73142+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
73143+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
73144+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
73145+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
73146+ GR_DELETE | GR_AUDIT_DELETE |
73147+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73148+ GR_SUPPRESS, old_mnt);
73149+ } else {
73150+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73151+ GR_CREATE | GR_DELETE |
73152+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
73153+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73154+ GR_SUPPRESS, parent_mnt);
73155+ comp2 =
73156+ gr_search_file(old_dentry,
73157+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73158+ GR_DELETE | GR_AUDIT_DELETE |
73159+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73160+ }
73161+
73162+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
73163+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
73164+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73165+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
73166+ && !(comp2 & GR_SUPPRESS)) {
73167+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73168+ error = -EACCES;
73169+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
73170+ error = -EACCES;
73171+
73172+ return error;
73173+}
73174+
73175+void
73176+gr_acl_handle_exit(void)
73177+{
73178+ u16 id;
73179+ char *rolename;
73180+
73181+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
73182+ !(current->role->roletype & GR_ROLE_PERSIST))) {
73183+ id = current->acl_role_id;
73184+ rolename = current->role->rolename;
73185+ gr_set_acls(1);
73186+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
73187+ }
73188+
73189+ gr_put_exec_file(current);
73190+ return;
73191+}
73192+
73193+int
73194+gr_acl_handle_procpidmem(const struct task_struct *task)
73195+{
73196+ if (unlikely(!gr_acl_is_enabled()))
73197+ return 0;
73198+
73199+ if (task != current && task->acl->mode & GR_PROTPROCFD)
73200+ return -EACCES;
73201+
73202+ return 0;
73203+}
73204diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
73205new file mode 100644
73206index 0000000..f056b81
73207--- /dev/null
73208+++ b/grsecurity/gracl_ip.c
73209@@ -0,0 +1,386 @@
73210+#include <linux/kernel.h>
73211+#include <asm/uaccess.h>
73212+#include <asm/errno.h>
73213+#include <net/sock.h>
73214+#include <linux/file.h>
73215+#include <linux/fs.h>
73216+#include <linux/net.h>
73217+#include <linux/in.h>
73218+#include <linux/skbuff.h>
73219+#include <linux/ip.h>
73220+#include <linux/udp.h>
73221+#include <linux/types.h>
73222+#include <linux/sched.h>
73223+#include <linux/netdevice.h>
73224+#include <linux/inetdevice.h>
73225+#include <linux/gracl.h>
73226+#include <linux/grsecurity.h>
73227+#include <linux/grinternal.h>
73228+
73229+#define GR_BIND 0x01
73230+#define GR_CONNECT 0x02
73231+#define GR_INVERT 0x04
73232+#define GR_BINDOVERRIDE 0x08
73233+#define GR_CONNECTOVERRIDE 0x10
73234+#define GR_SOCK_FAMILY 0x20
73235+
73236+static const char * gr_protocols[IPPROTO_MAX] = {
73237+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
73238+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
73239+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
73240+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
73241+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
73242+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
73243+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
73244+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
73245+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
73246+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
73247+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
73248+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
73249+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
73250+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
73251+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
73252+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
73253+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
73254+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
73255+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
73256+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
73257+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
73258+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
73259+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
73260+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
73261+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
73262+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
73263+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
73264+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
73265+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
73266+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
73267+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
73268+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
73269+ };
73270+
73271+static const char * gr_socktypes[SOCK_MAX] = {
73272+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
73273+ "unknown:7", "unknown:8", "unknown:9", "packet"
73274+ };
73275+
73276+static const char * gr_sockfamilies[AF_MAX+1] = {
73277+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
73278+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
73279+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
73280+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
73281+ };
73282+
73283+const char *
73284+gr_proto_to_name(unsigned char proto)
73285+{
73286+ return gr_protocols[proto];
73287+}
73288+
73289+const char *
73290+gr_socktype_to_name(unsigned char type)
73291+{
73292+ return gr_socktypes[type];
73293+}
73294+
73295+const char *
73296+gr_sockfamily_to_name(unsigned char family)
73297+{
73298+ return gr_sockfamilies[family];
73299+}
73300+
73301+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
73302+
73303+int
73304+gr_search_socket(const int domain, const int type, const int protocol)
73305+{
73306+ struct acl_subject_label *curr;
73307+ const struct cred *cred = current_cred();
73308+
73309+ if (unlikely(!gr_acl_is_enabled()))
73310+ goto exit;
73311+
73312+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
73313+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
73314+ goto exit; // let the kernel handle it
73315+
73316+ curr = current->acl;
73317+
73318+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
73319+ /* the family is allowed, if this is PF_INET allow it only if
73320+ the extra sock type/protocol checks pass */
73321+ if (domain == PF_INET)
73322+ goto inet_check;
73323+ goto exit;
73324+ } else {
73325+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73326+ __u32 fakeip = 0;
73327+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73328+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73329+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73330+ gr_to_filename(current->exec_file->f_path.dentry,
73331+ current->exec_file->f_path.mnt) :
73332+ curr->filename, curr->filename,
73333+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
73334+ &current->signal->saved_ip);
73335+ goto exit;
73336+ }
73337+ goto exit_fail;
73338+ }
73339+
73340+inet_check:
73341+ /* the rest of this checking is for IPv4 only */
73342+ if (!curr->ips)
73343+ goto exit;
73344+
73345+ if ((curr->ip_type & (1U << type)) &&
73346+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73347+ goto exit;
73348+
73349+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73350+ /* we don't place acls on raw sockets , and sometimes
73351+ dgram/ip sockets are opened for ioctl and not
73352+ bind/connect, so we'll fake a bind learn log */
73353+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73354+ __u32 fakeip = 0;
73355+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73356+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73357+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73358+ gr_to_filename(current->exec_file->f_path.dentry,
73359+ current->exec_file->f_path.mnt) :
73360+ curr->filename, curr->filename,
73361+ &fakeip, 0, type,
73362+ protocol, GR_CONNECT, &current->signal->saved_ip);
73363+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73364+ __u32 fakeip = 0;
73365+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73366+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73367+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73368+ gr_to_filename(current->exec_file->f_path.dentry,
73369+ current->exec_file->f_path.mnt) :
73370+ curr->filename, curr->filename,
73371+ &fakeip, 0, type,
73372+ protocol, GR_BIND, &current->signal->saved_ip);
73373+ }
73374+ /* we'll log when they use connect or bind */
73375+ goto exit;
73376+ }
73377+
73378+exit_fail:
73379+ if (domain == PF_INET)
73380+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73381+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73382+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73383+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73384+ gr_socktype_to_name(type), protocol);
73385+
73386+ return 0;
73387+exit:
73388+ return 1;
73389+}
73390+
73391+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73392+{
73393+ if ((ip->mode & mode) &&
73394+ (ip_port >= ip->low) &&
73395+ (ip_port <= ip->high) &&
73396+ ((ntohl(ip_addr) & our_netmask) ==
73397+ (ntohl(our_addr) & our_netmask))
73398+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73399+ && (ip->type & (1U << type))) {
73400+ if (ip->mode & GR_INVERT)
73401+ return 2; // specifically denied
73402+ else
73403+ return 1; // allowed
73404+ }
73405+
73406+ return 0; // not specifically allowed, may continue parsing
73407+}
73408+
73409+static int
73410+gr_search_connectbind(const int full_mode, struct sock *sk,
73411+ struct sockaddr_in *addr, const int type)
73412+{
73413+ char iface[IFNAMSIZ] = {0};
73414+ struct acl_subject_label *curr;
73415+ struct acl_ip_label *ip;
73416+ struct inet_sock *isk;
73417+ struct net_device *dev;
73418+ struct in_device *idev;
73419+ unsigned long i;
73420+ int ret;
73421+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73422+ __u32 ip_addr = 0;
73423+ __u32 our_addr;
73424+ __u32 our_netmask;
73425+ char *p;
73426+ __u16 ip_port = 0;
73427+ const struct cred *cred = current_cred();
73428+
73429+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73430+ return 0;
73431+
73432+ curr = current->acl;
73433+ isk = inet_sk(sk);
73434+
73435+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
73436+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73437+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73438+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73439+ struct sockaddr_in saddr;
73440+ int err;
73441+
73442+ saddr.sin_family = AF_INET;
73443+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73444+ saddr.sin_port = isk->inet_sport;
73445+
73446+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73447+ if (err)
73448+ return err;
73449+
73450+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73451+ if (err)
73452+ return err;
73453+ }
73454+
73455+ if (!curr->ips)
73456+ return 0;
73457+
73458+ ip_addr = addr->sin_addr.s_addr;
73459+ ip_port = ntohs(addr->sin_port);
73460+
73461+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73462+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73463+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73464+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73465+ gr_to_filename(current->exec_file->f_path.dentry,
73466+ current->exec_file->f_path.mnt) :
73467+ curr->filename, curr->filename,
73468+ &ip_addr, ip_port, type,
73469+ sk->sk_protocol, mode, &current->signal->saved_ip);
73470+ return 0;
73471+ }
73472+
73473+ for (i = 0; i < curr->ip_num; i++) {
73474+ ip = *(curr->ips + i);
73475+ if (ip->iface != NULL) {
73476+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73477+ p = strchr(iface, ':');
73478+ if (p != NULL)
73479+ *p = '\0';
73480+ dev = dev_get_by_name(sock_net(sk), iface);
73481+ if (dev == NULL)
73482+ continue;
73483+ idev = in_dev_get(dev);
73484+ if (idev == NULL) {
73485+ dev_put(dev);
73486+ continue;
73487+ }
73488+ rcu_read_lock();
73489+ for_ifa(idev) {
73490+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73491+ our_addr = ifa->ifa_address;
73492+ our_netmask = 0xffffffff;
73493+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73494+ if (ret == 1) {
73495+ rcu_read_unlock();
73496+ in_dev_put(idev);
73497+ dev_put(dev);
73498+ return 0;
73499+ } else if (ret == 2) {
73500+ rcu_read_unlock();
73501+ in_dev_put(idev);
73502+ dev_put(dev);
73503+ goto denied;
73504+ }
73505+ }
73506+ } endfor_ifa(idev);
73507+ rcu_read_unlock();
73508+ in_dev_put(idev);
73509+ dev_put(dev);
73510+ } else {
73511+ our_addr = ip->addr;
73512+ our_netmask = ip->netmask;
73513+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73514+ if (ret == 1)
73515+ return 0;
73516+ else if (ret == 2)
73517+ goto denied;
73518+ }
73519+ }
73520+
73521+denied:
73522+ if (mode == GR_BIND)
73523+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73524+ else if (mode == GR_CONNECT)
73525+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73526+
73527+ return -EACCES;
73528+}
73529+
73530+int
73531+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73532+{
73533+ /* always allow disconnection of dgram sockets with connect */
73534+ if (addr->sin_family == AF_UNSPEC)
73535+ return 0;
73536+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73537+}
73538+
73539+int
73540+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73541+{
73542+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73543+}
73544+
73545+int gr_search_listen(struct socket *sock)
73546+{
73547+ struct sock *sk = sock->sk;
73548+ struct sockaddr_in addr;
73549+
73550+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73551+ addr.sin_port = inet_sk(sk)->inet_sport;
73552+
73553+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73554+}
73555+
73556+int gr_search_accept(struct socket *sock)
73557+{
73558+ struct sock *sk = sock->sk;
73559+ struct sockaddr_in addr;
73560+
73561+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73562+ addr.sin_port = inet_sk(sk)->inet_sport;
73563+
73564+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73565+}
73566+
73567+int
73568+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73569+{
73570+ if (addr)
73571+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73572+ else {
73573+ struct sockaddr_in sin;
73574+ const struct inet_sock *inet = inet_sk(sk);
73575+
73576+ sin.sin_addr.s_addr = inet->inet_daddr;
73577+ sin.sin_port = inet->inet_dport;
73578+
73579+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73580+ }
73581+}
73582+
73583+int
73584+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73585+{
73586+ struct sockaddr_in sin;
73587+
73588+ if (unlikely(skb->len < sizeof (struct udphdr)))
73589+ return 0; // skip this packet
73590+
73591+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73592+ sin.sin_port = udp_hdr(skb)->source;
73593+
73594+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73595+}
73596diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73597new file mode 100644
73598index 0000000..25f54ef
73599--- /dev/null
73600+++ b/grsecurity/gracl_learn.c
73601@@ -0,0 +1,207 @@
73602+#include <linux/kernel.h>
73603+#include <linux/mm.h>
73604+#include <linux/sched.h>
73605+#include <linux/poll.h>
73606+#include <linux/string.h>
73607+#include <linux/file.h>
73608+#include <linux/types.h>
73609+#include <linux/vmalloc.h>
73610+#include <linux/grinternal.h>
73611+
73612+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73613+ size_t count, loff_t *ppos);
73614+extern int gr_acl_is_enabled(void);
73615+
73616+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73617+static int gr_learn_attached;
73618+
73619+/* use a 512k buffer */
73620+#define LEARN_BUFFER_SIZE (512 * 1024)
73621+
73622+static DEFINE_SPINLOCK(gr_learn_lock);
73623+static DEFINE_MUTEX(gr_learn_user_mutex);
73624+
73625+/* we need to maintain two buffers, so that the kernel context of grlearn
73626+ uses a semaphore around the userspace copying, and the other kernel contexts
73627+ use a spinlock when copying into the buffer, since they cannot sleep
73628+*/
73629+static char *learn_buffer;
73630+static char *learn_buffer_user;
73631+static int learn_buffer_len;
73632+static int learn_buffer_user_len;
73633+
73634+static ssize_t
73635+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73636+{
73637+ DECLARE_WAITQUEUE(wait, current);
73638+ ssize_t retval = 0;
73639+
73640+ add_wait_queue(&learn_wait, &wait);
73641+ set_current_state(TASK_INTERRUPTIBLE);
73642+ do {
73643+ mutex_lock(&gr_learn_user_mutex);
73644+ spin_lock(&gr_learn_lock);
73645+ if (learn_buffer_len)
73646+ break;
73647+ spin_unlock(&gr_learn_lock);
73648+ mutex_unlock(&gr_learn_user_mutex);
73649+ if (file->f_flags & O_NONBLOCK) {
73650+ retval = -EAGAIN;
73651+ goto out;
73652+ }
73653+ if (signal_pending(current)) {
73654+ retval = -ERESTARTSYS;
73655+ goto out;
73656+ }
73657+
73658+ schedule();
73659+ } while (1);
73660+
73661+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73662+ learn_buffer_user_len = learn_buffer_len;
73663+ retval = learn_buffer_len;
73664+ learn_buffer_len = 0;
73665+
73666+ spin_unlock(&gr_learn_lock);
73667+
73668+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73669+ retval = -EFAULT;
73670+
73671+ mutex_unlock(&gr_learn_user_mutex);
73672+out:
73673+ set_current_state(TASK_RUNNING);
73674+ remove_wait_queue(&learn_wait, &wait);
73675+ return retval;
73676+}
73677+
73678+static unsigned int
73679+poll_learn(struct file * file, poll_table * wait)
73680+{
73681+ poll_wait(file, &learn_wait, wait);
73682+
73683+ if (learn_buffer_len)
73684+ return (POLLIN | POLLRDNORM);
73685+
73686+ return 0;
73687+}
73688+
73689+void
73690+gr_clear_learn_entries(void)
73691+{
73692+ char *tmp;
73693+
73694+ mutex_lock(&gr_learn_user_mutex);
73695+ spin_lock(&gr_learn_lock);
73696+ tmp = learn_buffer;
73697+ learn_buffer = NULL;
73698+ spin_unlock(&gr_learn_lock);
73699+ if (tmp)
73700+ vfree(tmp);
73701+ if (learn_buffer_user != NULL) {
73702+ vfree(learn_buffer_user);
73703+ learn_buffer_user = NULL;
73704+ }
73705+ learn_buffer_len = 0;
73706+ mutex_unlock(&gr_learn_user_mutex);
73707+
73708+ return;
73709+}
73710+
73711+void
73712+gr_add_learn_entry(const char *fmt, ...)
73713+{
73714+ va_list args;
73715+ unsigned int len;
73716+
73717+ if (!gr_learn_attached)
73718+ return;
73719+
73720+ spin_lock(&gr_learn_lock);
73721+
73722+ /* leave a gap at the end so we know when it's "full" but don't have to
73723+ compute the exact length of the string we're trying to append
73724+ */
73725+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
73726+ spin_unlock(&gr_learn_lock);
73727+ wake_up_interruptible(&learn_wait);
73728+ return;
73729+ }
73730+ if (learn_buffer == NULL) {
73731+ spin_unlock(&gr_learn_lock);
73732+ return;
73733+ }
73734+
73735+ va_start(args, fmt);
73736+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
73737+ va_end(args);
73738+
73739+ learn_buffer_len += len + 1;
73740+
73741+ spin_unlock(&gr_learn_lock);
73742+ wake_up_interruptible(&learn_wait);
73743+
73744+ return;
73745+}
73746+
73747+static int
73748+open_learn(struct inode *inode, struct file *file)
73749+{
73750+ if (file->f_mode & FMODE_READ && gr_learn_attached)
73751+ return -EBUSY;
73752+ if (file->f_mode & FMODE_READ) {
73753+ int retval = 0;
73754+ mutex_lock(&gr_learn_user_mutex);
73755+ if (learn_buffer == NULL)
73756+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
73757+ if (learn_buffer_user == NULL)
73758+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
73759+ if (learn_buffer == NULL) {
73760+ retval = -ENOMEM;
73761+ goto out_error;
73762+ }
73763+ if (learn_buffer_user == NULL) {
73764+ retval = -ENOMEM;
73765+ goto out_error;
73766+ }
73767+ learn_buffer_len = 0;
73768+ learn_buffer_user_len = 0;
73769+ gr_learn_attached = 1;
73770+out_error:
73771+ mutex_unlock(&gr_learn_user_mutex);
73772+ return retval;
73773+ }
73774+ return 0;
73775+}
73776+
73777+static int
73778+close_learn(struct inode *inode, struct file *file)
73779+{
73780+ if (file->f_mode & FMODE_READ) {
73781+ char *tmp = NULL;
73782+ mutex_lock(&gr_learn_user_mutex);
73783+ spin_lock(&gr_learn_lock);
73784+ tmp = learn_buffer;
73785+ learn_buffer = NULL;
73786+ spin_unlock(&gr_learn_lock);
73787+ if (tmp)
73788+ vfree(tmp);
73789+ if (learn_buffer_user != NULL) {
73790+ vfree(learn_buffer_user);
73791+ learn_buffer_user = NULL;
73792+ }
73793+ learn_buffer_len = 0;
73794+ learn_buffer_user_len = 0;
73795+ gr_learn_attached = 0;
73796+ mutex_unlock(&gr_learn_user_mutex);
73797+ }
73798+
73799+ return 0;
73800+}
73801+
73802+const struct file_operations grsec_fops = {
73803+ .read = read_learn,
73804+ .write = write_grsec_handler,
73805+ .open = open_learn,
73806+ .release = close_learn,
73807+ .poll = poll_learn,
73808+};
73809diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
73810new file mode 100644
73811index 0000000..3f8ade0
73812--- /dev/null
73813+++ b/grsecurity/gracl_policy.c
73814@@ -0,0 +1,1782 @@
73815+#include <linux/kernel.h>
73816+#include <linux/module.h>
73817+#include <linux/sched.h>
73818+#include <linux/mm.h>
73819+#include <linux/file.h>
73820+#include <linux/fs.h>
73821+#include <linux/namei.h>
73822+#include <linux/mount.h>
73823+#include <linux/tty.h>
73824+#include <linux/proc_fs.h>
73825+#include <linux/lglock.h>
73826+#include <linux/slab.h>
73827+#include <linux/vmalloc.h>
73828+#include <linux/types.h>
73829+#include <linux/sysctl.h>
73830+#include <linux/netdevice.h>
73831+#include <linux/ptrace.h>
73832+#include <linux/gracl.h>
73833+#include <linux/gralloc.h>
73834+#include <linux/security.h>
73835+#include <linux/grinternal.h>
73836+#include <linux/pid_namespace.h>
73837+#include <linux/stop_machine.h>
73838+#include <linux/fdtable.h>
73839+#include <linux/percpu.h>
73840+#include <linux/lglock.h>
73841+#include <linux/hugetlb.h>
73842+#include <linux/posix-timers.h>
73843+#include "../fs/mount.h"
73844+
73845+#include <asm/uaccess.h>
73846+#include <asm/errno.h>
73847+#include <asm/mman.h>
73848+
73849+extern struct gr_policy_state *polstate;
73850+
73851+#define FOR_EACH_ROLE_START(role) \
73852+ role = polstate->role_list; \
73853+ while (role) {
73854+
73855+#define FOR_EACH_ROLE_END(role) \
73856+ role = role->prev; \
73857+ }
73858+
73859+struct path gr_real_root;
73860+
73861+extern struct gr_alloc_state *current_alloc_state;
73862+
73863+u16 acl_sp_role_value;
73864+
73865+static DEFINE_MUTEX(gr_dev_mutex);
73866+
73867+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
73868+extern void gr_clear_learn_entries(void);
73869+
73870+struct gr_arg *gr_usermode __read_only;
73871+unsigned char *gr_system_salt __read_only;
73872+unsigned char *gr_system_sum __read_only;
73873+
73874+static unsigned int gr_auth_attempts = 0;
73875+static unsigned long gr_auth_expires = 0UL;
73876+
73877+struct acl_object_label *fakefs_obj_rw;
73878+struct acl_object_label *fakefs_obj_rwx;
73879+
73880+extern int gr_init_uidset(void);
73881+extern void gr_free_uidset(void);
73882+extern void gr_remove_uid(uid_t uid);
73883+extern int gr_find_uid(uid_t uid);
73884+
73885+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
73886+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
73887+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
73888+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
73889+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
73890+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
73891+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
73892+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
73893+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
73894+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
73895+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
73896+extern void assign_special_role(const char *rolename);
73897+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
73898+extern int gr_rbac_disable(void *unused);
73899+extern void gr_enable_rbac_system(void);
73900+
73901+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
73902+{
73903+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
73904+ return -EFAULT;
73905+
73906+ return 0;
73907+}
73908+
73909+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73910+{
73911+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
73912+ return -EFAULT;
73913+
73914+ return 0;
73915+}
73916+
73917+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73918+{
73919+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
73920+ return -EFAULT;
73921+
73922+ return 0;
73923+}
73924+
73925+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
73926+{
73927+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
73928+ return -EFAULT;
73929+
73930+ return 0;
73931+}
73932+
73933+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73934+{
73935+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
73936+ return -EFAULT;
73937+
73938+ return 0;
73939+}
73940+
73941+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73942+{
73943+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
73944+ return -EFAULT;
73945+
73946+ return 0;
73947+}
73948+
73949+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73950+{
73951+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
73952+ return -EFAULT;
73953+
73954+ return 0;
73955+}
73956+
73957+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
73958+{
73959+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
73960+ return -EFAULT;
73961+
73962+ return 0;
73963+}
73964+
73965+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
73966+{
73967+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
73968+ return -EFAULT;
73969+
73970+ return 0;
73971+}
73972+
73973+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
73974+{
73975+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
73976+ return -EFAULT;
73977+
73978+ if (((uwrap->version != GRSECURITY_VERSION) &&
73979+ (uwrap->version != 0x2901)) ||
73980+ (uwrap->size != sizeof(struct gr_arg)))
73981+ return -EINVAL;
73982+
73983+ return 0;
73984+}
73985+
73986+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
73987+{
73988+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
73989+ return -EFAULT;
73990+
73991+ return 0;
73992+}
73993+
73994+static size_t get_gr_arg_wrapper_size_normal(void)
73995+{
73996+ return sizeof(struct gr_arg_wrapper);
73997+}
73998+
73999+#ifdef CONFIG_COMPAT
74000+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
74001+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
74002+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
74003+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
74004+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
74005+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
74006+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
74007+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
74008+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
74009+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
74010+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
74011+extern size_t get_gr_arg_wrapper_size_compat(void);
74012+
74013+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
74014+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
74015+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
74016+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
74017+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
74018+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
74019+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
74020+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
74021+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
74022+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
74023+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
74024+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
74025+
74026+#else
74027+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
74028+#define copy_gr_arg copy_gr_arg_normal
74029+#define copy_gr_hash_struct copy_gr_hash_struct_normal
74030+#define copy_acl_object_label copy_acl_object_label_normal
74031+#define copy_acl_subject_label copy_acl_subject_label_normal
74032+#define copy_acl_role_label copy_acl_role_label_normal
74033+#define copy_acl_ip_label copy_acl_ip_label_normal
74034+#define copy_pointer_from_array copy_pointer_from_array_normal
74035+#define copy_sprole_pw copy_sprole_pw_normal
74036+#define copy_role_transition copy_role_transition_normal
74037+#define copy_role_allowed_ip copy_role_allowed_ip_normal
74038+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
74039+#endif
74040+
74041+static struct acl_subject_label *
74042+lookup_subject_map(const struct acl_subject_label *userp)
74043+{
74044+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
74045+ struct subject_map *match;
74046+
74047+ match = polstate->subj_map_set.s_hash[index];
74048+
74049+ while (match && match->user != userp)
74050+ match = match->next;
74051+
74052+ if (match != NULL)
74053+ return match->kernel;
74054+ else
74055+ return NULL;
74056+}
74057+
74058+static void
74059+insert_subj_map_entry(struct subject_map *subjmap)
74060+{
74061+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
74062+ struct subject_map **curr;
74063+
74064+ subjmap->prev = NULL;
74065+
74066+ curr = &polstate->subj_map_set.s_hash[index];
74067+ if (*curr != NULL)
74068+ (*curr)->prev = subjmap;
74069+
74070+ subjmap->next = *curr;
74071+ *curr = subjmap;
74072+
74073+ return;
74074+}
74075+
74076+static void
74077+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
74078+{
74079+ unsigned int index =
74080+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
74081+ struct acl_role_label **curr;
74082+ struct acl_role_label *tmp, *tmp2;
74083+
74084+ curr = &polstate->acl_role_set.r_hash[index];
74085+
74086+ /* simple case, slot is empty, just set it to our role */
74087+ if (*curr == NULL) {
74088+ *curr = role;
74089+ } else {
74090+ /* example:
74091+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
74092+ 2 -> 3
74093+ */
74094+ /* first check to see if we can already be reached via this slot */
74095+ tmp = *curr;
74096+ while (tmp && tmp != role)
74097+ tmp = tmp->next;
74098+ if (tmp == role) {
74099+ /* we don't need to add ourselves to this slot's chain */
74100+ return;
74101+ }
74102+ /* we need to add ourselves to this chain, two cases */
74103+ if (role->next == NULL) {
74104+ /* simple case, append the current chain to our role */
74105+ role->next = *curr;
74106+ *curr = role;
74107+ } else {
74108+ /* 1 -> 2 -> 3 -> 4
74109+ 2 -> 3 -> 4
74110+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
74111+ */
74112+ /* trickier case: walk our role's chain until we find
74113+ the role for the start of the current slot's chain */
74114+ tmp = role;
74115+ tmp2 = *curr;
74116+ while (tmp->next && tmp->next != tmp2)
74117+ tmp = tmp->next;
74118+ if (tmp->next == tmp2) {
74119+ /* from example above, we found 3, so just
74120+ replace this slot's chain with ours */
74121+ *curr = role;
74122+ } else {
74123+ /* we didn't find a subset of our role's chain
74124+ in the current slot's chain, so append their
74125+ chain to ours, and set us as the first role in
74126+ the slot's chain
74127+
74128+ we could fold this case with the case above,
74129+ but making it explicit for clarity
74130+ */
74131+ tmp->next = tmp2;
74132+ *curr = role;
74133+ }
74134+ }
74135+ }
74136+
74137+ return;
74138+}
74139+
74140+static void
74141+insert_acl_role_label(struct acl_role_label *role)
74142+{
74143+ int i;
74144+
74145+ if (polstate->role_list == NULL) {
74146+ polstate->role_list = role;
74147+ role->prev = NULL;
74148+ } else {
74149+ role->prev = polstate->role_list;
74150+ polstate->role_list = role;
74151+ }
74152+
74153+ /* used for hash chains */
74154+ role->next = NULL;
74155+
74156+ if (role->roletype & GR_ROLE_DOMAIN) {
74157+ for (i = 0; i < role->domain_child_num; i++)
74158+ __insert_acl_role_label(role, role->domain_children[i]);
74159+ } else
74160+ __insert_acl_role_label(role, role->uidgid);
74161+}
74162+
74163+static int
74164+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
74165+{
74166+ struct name_entry **curr, *nentry;
74167+ struct inodev_entry *ientry;
74168+ unsigned int len = strlen(name);
74169+ unsigned int key = full_name_hash(name, len);
74170+ unsigned int index = key % polstate->name_set.n_size;
74171+
74172+ curr = &polstate->name_set.n_hash[index];
74173+
74174+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
74175+ curr = &((*curr)->next);
74176+
74177+ if (*curr != NULL)
74178+ return 1;
74179+
74180+ nentry = acl_alloc(sizeof (struct name_entry));
74181+ if (nentry == NULL)
74182+ return 0;
74183+ ientry = acl_alloc(sizeof (struct inodev_entry));
74184+ if (ientry == NULL)
74185+ return 0;
74186+ ientry->nentry = nentry;
74187+
74188+ nentry->key = key;
74189+ nentry->name = name;
74190+ nentry->inode = inode;
74191+ nentry->device = device;
74192+ nentry->len = len;
74193+ nentry->deleted = deleted;
74194+
74195+ nentry->prev = NULL;
74196+ curr = &polstate->name_set.n_hash[index];
74197+ if (*curr != NULL)
74198+ (*curr)->prev = nentry;
74199+ nentry->next = *curr;
74200+ *curr = nentry;
74201+
74202+ /* insert us into the table searchable by inode/dev */
74203+ __insert_inodev_entry(polstate, ientry);
74204+
74205+ return 1;
74206+}
74207+
74208+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
74209+
74210+static void *
74211+create_table(__u32 * len, int elementsize)
74212+{
74213+ unsigned int table_sizes[] = {
74214+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
74215+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
74216+ 4194301, 8388593, 16777213, 33554393, 67108859
74217+ };
74218+ void *newtable = NULL;
74219+ unsigned int pwr = 0;
74220+
74221+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
74222+ table_sizes[pwr] <= *len)
74223+ pwr++;
74224+
74225+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
74226+ return newtable;
74227+
74228+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
74229+ newtable =
74230+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
74231+ else
74232+ newtable = vmalloc(table_sizes[pwr] * elementsize);
74233+
74234+ *len = table_sizes[pwr];
74235+
74236+ return newtable;
74237+}
74238+
74239+static int
74240+init_variables(const struct gr_arg *arg, bool reload)
74241+{
74242+ struct task_struct *reaper = init_pid_ns.child_reaper;
74243+ unsigned int stacksize;
74244+
74245+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
74246+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
74247+ polstate->name_set.n_size = arg->role_db.num_objects;
74248+ polstate->inodev_set.i_size = arg->role_db.num_objects;
74249+
74250+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
74251+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
74252+ return 1;
74253+
74254+ if (!reload) {
74255+ if (!gr_init_uidset())
74256+ return 1;
74257+ }
74258+
74259+ /* set up the stack that holds allocation info */
74260+
74261+ stacksize = arg->role_db.num_pointers + 5;
74262+
74263+ if (!acl_alloc_stack_init(stacksize))
74264+ return 1;
74265+
74266+ if (!reload) {
74267+ /* grab reference for the real root dentry and vfsmount */
74268+ get_fs_root(reaper->fs, &gr_real_root);
74269+
74270+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74271+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
74272+#endif
74273+
74274+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74275+ if (fakefs_obj_rw == NULL)
74276+ return 1;
74277+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
74278+
74279+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74280+ if (fakefs_obj_rwx == NULL)
74281+ return 1;
74282+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
74283+ }
74284+
74285+ polstate->subj_map_set.s_hash =
74286+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
74287+ polstate->acl_role_set.r_hash =
74288+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
74289+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
74290+ polstate->inodev_set.i_hash =
74291+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
74292+
74293+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
74294+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
74295+ return 1;
74296+
74297+ memset(polstate->subj_map_set.s_hash, 0,
74298+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
74299+ memset(polstate->acl_role_set.r_hash, 0,
74300+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
74301+ memset(polstate->name_set.n_hash, 0,
74302+ sizeof (struct name_entry *) * polstate->name_set.n_size);
74303+ memset(polstate->inodev_set.i_hash, 0,
74304+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
74305+
74306+ return 0;
74307+}
74308+
74309+/* free information not needed after startup
74310+ currently contains user->kernel pointer mappings for subjects
74311+*/
74312+
74313+static void
74314+free_init_variables(void)
74315+{
74316+ __u32 i;
74317+
74318+ if (polstate->subj_map_set.s_hash) {
74319+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
74320+ if (polstate->subj_map_set.s_hash[i]) {
74321+ kfree(polstate->subj_map_set.s_hash[i]);
74322+ polstate->subj_map_set.s_hash[i] = NULL;
74323+ }
74324+ }
74325+
74326+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
74327+ PAGE_SIZE)
74328+ kfree(polstate->subj_map_set.s_hash);
74329+ else
74330+ vfree(polstate->subj_map_set.s_hash);
74331+ }
74332+
74333+ return;
74334+}
74335+
74336+static void
74337+free_variables(bool reload)
74338+{
74339+ struct acl_subject_label *s;
74340+ struct acl_role_label *r;
74341+ struct task_struct *task, *task2;
74342+ unsigned int x;
74343+
74344+ if (!reload) {
74345+ gr_clear_learn_entries();
74346+
74347+ read_lock(&tasklist_lock);
74348+ do_each_thread(task2, task) {
74349+ task->acl_sp_role = 0;
74350+ task->acl_role_id = 0;
74351+ task->inherited = 0;
74352+ task->acl = NULL;
74353+ task->role = NULL;
74354+ } while_each_thread(task2, task);
74355+ read_unlock(&tasklist_lock);
74356+
74357+ kfree(fakefs_obj_rw);
74358+ fakefs_obj_rw = NULL;
74359+ kfree(fakefs_obj_rwx);
74360+ fakefs_obj_rwx = NULL;
74361+
74362+ /* release the reference to the real root dentry and vfsmount */
74363+ path_put(&gr_real_root);
74364+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74365+ }
74366+
74367+ /* free all object hash tables */
74368+
74369+ FOR_EACH_ROLE_START(r)
74370+ if (r->subj_hash == NULL)
74371+ goto next_role;
74372+ FOR_EACH_SUBJECT_START(r, s, x)
74373+ if (s->obj_hash == NULL)
74374+ break;
74375+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74376+ kfree(s->obj_hash);
74377+ else
74378+ vfree(s->obj_hash);
74379+ FOR_EACH_SUBJECT_END(s, x)
74380+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74381+ if (s->obj_hash == NULL)
74382+ break;
74383+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74384+ kfree(s->obj_hash);
74385+ else
74386+ vfree(s->obj_hash);
74387+ FOR_EACH_NESTED_SUBJECT_END(s)
74388+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74389+ kfree(r->subj_hash);
74390+ else
74391+ vfree(r->subj_hash);
74392+ r->subj_hash = NULL;
74393+next_role:
74394+ FOR_EACH_ROLE_END(r)
74395+
74396+ acl_free_all();
74397+
74398+ if (polstate->acl_role_set.r_hash) {
74399+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74400+ PAGE_SIZE)
74401+ kfree(polstate->acl_role_set.r_hash);
74402+ else
74403+ vfree(polstate->acl_role_set.r_hash);
74404+ }
74405+ if (polstate->name_set.n_hash) {
74406+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74407+ PAGE_SIZE)
74408+ kfree(polstate->name_set.n_hash);
74409+ else
74410+ vfree(polstate->name_set.n_hash);
74411+ }
74412+
74413+ if (polstate->inodev_set.i_hash) {
74414+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74415+ PAGE_SIZE)
74416+ kfree(polstate->inodev_set.i_hash);
74417+ else
74418+ vfree(polstate->inodev_set.i_hash);
74419+ }
74420+
74421+ if (!reload)
74422+ gr_free_uidset();
74423+
74424+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74425+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74426+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74427+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74428+
74429+ polstate->default_role = NULL;
74430+ polstate->kernel_role = NULL;
74431+ polstate->role_list = NULL;
74432+
74433+ return;
74434+}
74435+
74436+static struct acl_subject_label *
74437+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74438+
74439+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74440+{
74441+ unsigned int len = strnlen_user(*name, maxlen);
74442+ char *tmp;
74443+
74444+ if (!len || len >= maxlen)
74445+ return -EINVAL;
74446+
74447+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74448+ return -ENOMEM;
74449+
74450+ if (copy_from_user(tmp, *name, len))
74451+ return -EFAULT;
74452+
74453+ tmp[len-1] = '\0';
74454+ *name = tmp;
74455+
74456+ return 0;
74457+}
74458+
74459+static int
74460+copy_user_glob(struct acl_object_label *obj)
74461+{
74462+ struct acl_object_label *g_tmp, **guser;
74463+ int error;
74464+
74465+ if (obj->globbed == NULL)
74466+ return 0;
74467+
74468+ guser = &obj->globbed;
74469+ while (*guser) {
74470+ g_tmp = (struct acl_object_label *)
74471+ acl_alloc(sizeof (struct acl_object_label));
74472+ if (g_tmp == NULL)
74473+ return -ENOMEM;
74474+
74475+ if (copy_acl_object_label(g_tmp, *guser))
74476+ return -EFAULT;
74477+
74478+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74479+ if (error)
74480+ return error;
74481+
74482+ *guser = g_tmp;
74483+ guser = &(g_tmp->next);
74484+ }
74485+
74486+ return 0;
74487+}
74488+
74489+static int
74490+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74491+ struct acl_role_label *role)
74492+{
74493+ struct acl_object_label *o_tmp;
74494+ int ret;
74495+
74496+ while (userp) {
74497+ if ((o_tmp = (struct acl_object_label *)
74498+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74499+ return -ENOMEM;
74500+
74501+ if (copy_acl_object_label(o_tmp, userp))
74502+ return -EFAULT;
74503+
74504+ userp = o_tmp->prev;
74505+
74506+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74507+ if (ret)
74508+ return ret;
74509+
74510+ insert_acl_obj_label(o_tmp, subj);
74511+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74512+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74513+ return -ENOMEM;
74514+
74515+ ret = copy_user_glob(o_tmp);
74516+ if (ret)
74517+ return ret;
74518+
74519+ if (o_tmp->nested) {
74520+ int already_copied;
74521+
74522+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74523+ if (IS_ERR(o_tmp->nested))
74524+ return PTR_ERR(o_tmp->nested);
74525+
74526+ /* insert into nested subject list if we haven't copied this one yet
74527+ to prevent duplicate entries */
74528+ if (!already_copied) {
74529+ o_tmp->nested->next = role->hash->first;
74530+ role->hash->first = o_tmp->nested;
74531+ }
74532+ }
74533+ }
74534+
74535+ return 0;
74536+}
74537+
74538+static __u32
74539+count_user_subjs(struct acl_subject_label *userp)
74540+{
74541+ struct acl_subject_label s_tmp;
74542+ __u32 num = 0;
74543+
74544+ while (userp) {
74545+ if (copy_acl_subject_label(&s_tmp, userp))
74546+ break;
74547+
74548+ userp = s_tmp.prev;
74549+ }
74550+
74551+ return num;
74552+}
74553+
74554+static int
74555+copy_user_allowedips(struct acl_role_label *rolep)
74556+{
74557+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74558+
74559+ ruserip = rolep->allowed_ips;
74560+
74561+ while (ruserip) {
74562+ rlast = rtmp;
74563+
74564+ if ((rtmp = (struct role_allowed_ip *)
74565+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74566+ return -ENOMEM;
74567+
74568+ if (copy_role_allowed_ip(rtmp, ruserip))
74569+ return -EFAULT;
74570+
74571+ ruserip = rtmp->prev;
74572+
74573+ if (!rlast) {
74574+ rtmp->prev = NULL;
74575+ rolep->allowed_ips = rtmp;
74576+ } else {
74577+ rlast->next = rtmp;
74578+ rtmp->prev = rlast;
74579+ }
74580+
74581+ if (!ruserip)
74582+ rtmp->next = NULL;
74583+ }
74584+
74585+ return 0;
74586+}
74587+
74588+static int
74589+copy_user_transitions(struct acl_role_label *rolep)
74590+{
74591+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74592+ int error;
74593+
74594+ rusertp = rolep->transitions;
74595+
74596+ while (rusertp) {
74597+ rlast = rtmp;
74598+
74599+ if ((rtmp = (struct role_transition *)
74600+ acl_alloc(sizeof (struct role_transition))) == NULL)
74601+ return -ENOMEM;
74602+
74603+ if (copy_role_transition(rtmp, rusertp))
74604+ return -EFAULT;
74605+
74606+ rusertp = rtmp->prev;
74607+
74608+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74609+ if (error)
74610+ return error;
74611+
74612+ if (!rlast) {
74613+ rtmp->prev = NULL;
74614+ rolep->transitions = rtmp;
74615+ } else {
74616+ rlast->next = rtmp;
74617+ rtmp->prev = rlast;
74618+ }
74619+
74620+ if (!rusertp)
74621+ rtmp->next = NULL;
74622+ }
74623+
74624+ return 0;
74625+}
74626+
74627+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74628+{
74629+ struct acl_object_label o_tmp;
74630+ __u32 num = 0;
74631+
74632+ while (userp) {
74633+ if (copy_acl_object_label(&o_tmp, userp))
74634+ break;
74635+
74636+ userp = o_tmp.prev;
74637+ num++;
74638+ }
74639+
74640+ return num;
74641+}
74642+
74643+static struct acl_subject_label *
74644+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74645+{
74646+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74647+ __u32 num_objs;
74648+ struct acl_ip_label **i_tmp, *i_utmp2;
74649+ struct gr_hash_struct ghash;
74650+ struct subject_map *subjmap;
74651+ unsigned int i_num;
74652+ int err;
74653+
74654+ if (already_copied != NULL)
74655+ *already_copied = 0;
74656+
74657+ s_tmp = lookup_subject_map(userp);
74658+
74659+ /* we've already copied this subject into the kernel, just return
74660+ the reference to it, and don't copy it over again
74661+ */
74662+ if (s_tmp) {
74663+ if (already_copied != NULL)
74664+ *already_copied = 1;
74665+ return(s_tmp);
74666+ }
74667+
74668+ if ((s_tmp = (struct acl_subject_label *)
74669+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74670+ return ERR_PTR(-ENOMEM);
74671+
74672+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74673+ if (subjmap == NULL)
74674+ return ERR_PTR(-ENOMEM);
74675+
74676+ subjmap->user = userp;
74677+ subjmap->kernel = s_tmp;
74678+ insert_subj_map_entry(subjmap);
74679+
74680+ if (copy_acl_subject_label(s_tmp, userp))
74681+ return ERR_PTR(-EFAULT);
74682+
74683+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
74684+ if (err)
74685+ return ERR_PTR(err);
74686+
74687+ if (!strcmp(s_tmp->filename, "/"))
74688+ role->root_label = s_tmp;
74689+
74690+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
74691+ return ERR_PTR(-EFAULT);
74692+
74693+ /* copy user and group transition tables */
74694+
74695+ if (s_tmp->user_trans_num) {
74696+ uid_t *uidlist;
74697+
74698+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
74699+ if (uidlist == NULL)
74700+ return ERR_PTR(-ENOMEM);
74701+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
74702+ return ERR_PTR(-EFAULT);
74703+
74704+ s_tmp->user_transitions = uidlist;
74705+ }
74706+
74707+ if (s_tmp->group_trans_num) {
74708+ gid_t *gidlist;
74709+
74710+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
74711+ if (gidlist == NULL)
74712+ return ERR_PTR(-ENOMEM);
74713+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
74714+ return ERR_PTR(-EFAULT);
74715+
74716+ s_tmp->group_transitions = gidlist;
74717+ }
74718+
74719+ /* set up object hash table */
74720+ num_objs = count_user_objs(ghash.first);
74721+
74722+ s_tmp->obj_hash_size = num_objs;
74723+ s_tmp->obj_hash =
74724+ (struct acl_object_label **)
74725+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
74726+
74727+ if (!s_tmp->obj_hash)
74728+ return ERR_PTR(-ENOMEM);
74729+
74730+ memset(s_tmp->obj_hash, 0,
74731+ s_tmp->obj_hash_size *
74732+ sizeof (struct acl_object_label *));
74733+
74734+ /* add in objects */
74735+ err = copy_user_objs(ghash.first, s_tmp, role);
74736+
74737+ if (err)
74738+ return ERR_PTR(err);
74739+
74740+ /* set pointer for parent subject */
74741+ if (s_tmp->parent_subject) {
74742+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
74743+
74744+ if (IS_ERR(s_tmp2))
74745+ return s_tmp2;
74746+
74747+ s_tmp->parent_subject = s_tmp2;
74748+ }
74749+
74750+ /* add in ip acls */
74751+
74752+ if (!s_tmp->ip_num) {
74753+ s_tmp->ips = NULL;
74754+ goto insert;
74755+ }
74756+
74757+ i_tmp =
74758+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
74759+ sizeof (struct acl_ip_label *));
74760+
74761+ if (!i_tmp)
74762+ return ERR_PTR(-ENOMEM);
74763+
74764+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
74765+ *(i_tmp + i_num) =
74766+ (struct acl_ip_label *)
74767+ acl_alloc(sizeof (struct acl_ip_label));
74768+ if (!*(i_tmp + i_num))
74769+ return ERR_PTR(-ENOMEM);
74770+
74771+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
74772+ return ERR_PTR(-EFAULT);
74773+
74774+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
74775+ return ERR_PTR(-EFAULT);
74776+
74777+ if ((*(i_tmp + i_num))->iface == NULL)
74778+ continue;
74779+
74780+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
74781+ if (err)
74782+ return ERR_PTR(err);
74783+ }
74784+
74785+ s_tmp->ips = i_tmp;
74786+
74787+insert:
74788+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
74789+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
74790+ return ERR_PTR(-ENOMEM);
74791+
74792+ return s_tmp;
74793+}
74794+
74795+static int
74796+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
74797+{
74798+ struct acl_subject_label s_pre;
74799+ struct acl_subject_label * ret;
74800+ int err;
74801+
74802+ while (userp) {
74803+ if (copy_acl_subject_label(&s_pre, userp))
74804+ return -EFAULT;
74805+
74806+ ret = do_copy_user_subj(userp, role, NULL);
74807+
74808+ err = PTR_ERR(ret);
74809+ if (IS_ERR(ret))
74810+ return err;
74811+
74812+ insert_acl_subj_label(ret, role);
74813+
74814+ userp = s_pre.prev;
74815+ }
74816+
74817+ return 0;
74818+}
74819+
74820+static int
74821+copy_user_acl(struct gr_arg *arg)
74822+{
74823+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
74824+ struct acl_subject_label *subj_list;
74825+ struct sprole_pw *sptmp;
74826+ struct gr_hash_struct *ghash;
74827+ uid_t *domainlist;
74828+ unsigned int r_num;
74829+ int err = 0;
74830+ __u16 i;
74831+ __u32 num_subjs;
74832+
74833+ /* we need a default and kernel role */
74834+ if (arg->role_db.num_roles < 2)
74835+ return -EINVAL;
74836+
74837+ /* copy special role authentication info from userspace */
74838+
74839+ polstate->num_sprole_pws = arg->num_sprole_pws;
74840+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
74841+
74842+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
74843+ return -ENOMEM;
74844+
74845+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74846+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
74847+ if (!sptmp)
74848+ return -ENOMEM;
74849+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
74850+ return -EFAULT;
74851+
74852+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
74853+ if (err)
74854+ return err;
74855+
74856+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74857+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
74858+#endif
74859+
74860+ polstate->acl_special_roles[i] = sptmp;
74861+ }
74862+
74863+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
74864+
74865+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
74866+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
74867+
74868+ if (!r_tmp)
74869+ return -ENOMEM;
74870+
74871+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
74872+ return -EFAULT;
74873+
74874+ if (copy_acl_role_label(r_tmp, r_utmp2))
74875+ return -EFAULT;
74876+
74877+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
74878+ if (err)
74879+ return err;
74880+
74881+ if (!strcmp(r_tmp->rolename, "default")
74882+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
74883+ polstate->default_role = r_tmp;
74884+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
74885+ polstate->kernel_role = r_tmp;
74886+ }
74887+
74888+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
74889+ return -ENOMEM;
74890+
74891+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
74892+ return -EFAULT;
74893+
74894+ r_tmp->hash = ghash;
74895+
74896+ num_subjs = count_user_subjs(r_tmp->hash->first);
74897+
74898+ r_tmp->subj_hash_size = num_subjs;
74899+ r_tmp->subj_hash =
74900+ (struct acl_subject_label **)
74901+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
74902+
74903+ if (!r_tmp->subj_hash)
74904+ return -ENOMEM;
74905+
74906+ err = copy_user_allowedips(r_tmp);
74907+ if (err)
74908+ return err;
74909+
74910+ /* copy domain info */
74911+ if (r_tmp->domain_children != NULL) {
74912+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
74913+ if (domainlist == NULL)
74914+ return -ENOMEM;
74915+
74916+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
74917+ return -EFAULT;
74918+
74919+ r_tmp->domain_children = domainlist;
74920+ }
74921+
74922+ err = copy_user_transitions(r_tmp);
74923+ if (err)
74924+ return err;
74925+
74926+ memset(r_tmp->subj_hash, 0,
74927+ r_tmp->subj_hash_size *
74928+ sizeof (struct acl_subject_label *));
74929+
74930+ /* acquire the list of subjects, then NULL out
74931+ the list prior to parsing the subjects for this role,
74932+ as during this parsing the list is replaced with a list
74933+ of *nested* subjects for the role
74934+ */
74935+ subj_list = r_tmp->hash->first;
74936+
74937+ /* set nested subject list to null */
74938+ r_tmp->hash->first = NULL;
74939+
74940+ err = copy_user_subjs(subj_list, r_tmp);
74941+
74942+ if (err)
74943+ return err;
74944+
74945+ insert_acl_role_label(r_tmp);
74946+ }
74947+
74948+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
74949+ return -EINVAL;
74950+
74951+ return err;
74952+}
74953+
74954+static int gracl_reload_apply_policies(void *reload)
74955+{
74956+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
74957+ struct task_struct *task, *task2;
74958+ struct acl_role_label *role, *rtmp;
74959+ struct acl_subject_label *subj;
74960+ const struct cred *cred;
74961+ int role_applied;
74962+ int ret = 0;
74963+
74964+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
74965+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
74966+
74967+ /* first make sure we'll be able to apply the new policy cleanly */
74968+ do_each_thread(task2, task) {
74969+ if (task->exec_file == NULL)
74970+ continue;
74971+ role_applied = 0;
74972+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74973+ /* preserve special roles */
74974+ FOR_EACH_ROLE_START(role)
74975+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74976+ rtmp = task->role;
74977+ task->role = role;
74978+ role_applied = 1;
74979+ break;
74980+ }
74981+ FOR_EACH_ROLE_END(role)
74982+ }
74983+ if (!role_applied) {
74984+ cred = __task_cred(task);
74985+ rtmp = task->role;
74986+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74987+ }
74988+ /* this handles non-nested inherited subjects, nested subjects will still
74989+ be dropped currently */
74990+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
74991+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
74992+ /* change the role back so that we've made no modifications to the policy */
74993+ task->role = rtmp;
74994+
74995+ if (subj == NULL || task->tmpacl == NULL) {
74996+ ret = -EINVAL;
74997+ goto out;
74998+ }
74999+ } while_each_thread(task2, task);
75000+
75001+ /* now actually apply the policy */
75002+
75003+ do_each_thread(task2, task) {
75004+ if (task->exec_file) {
75005+ role_applied = 0;
75006+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75007+ /* preserve special roles */
75008+ FOR_EACH_ROLE_START(role)
75009+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75010+ task->role = role;
75011+ role_applied = 1;
75012+ break;
75013+ }
75014+ FOR_EACH_ROLE_END(role)
75015+ }
75016+ if (!role_applied) {
75017+ cred = __task_cred(task);
75018+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75019+ }
75020+ /* this handles non-nested inherited subjects, nested subjects will still
75021+ be dropped currently */
75022+ if (!reload_state->oldmode && task->inherited)
75023+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
75024+ else {
75025+ /* looked up and tagged to the task previously */
75026+ subj = task->tmpacl;
75027+ }
75028+ /* subj will be non-null */
75029+ __gr_apply_subject_to_task(polstate, task, subj);
75030+ if (reload_state->oldmode) {
75031+ task->acl_role_id = 0;
75032+ task->acl_sp_role = 0;
75033+ task->inherited = 0;
75034+ }
75035+ } else {
75036+ // it's a kernel process
75037+ task->role = polstate->kernel_role;
75038+ task->acl = polstate->kernel_role->root_label;
75039+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75040+ task->acl->mode &= ~GR_PROCFIND;
75041+#endif
75042+ }
75043+ } while_each_thread(task2, task);
75044+
75045+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
75046+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
75047+
75048+out:
75049+
75050+ return ret;
75051+}
75052+
75053+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
75054+{
75055+ struct gr_reload_state new_reload_state = { };
75056+ int err;
75057+
75058+ new_reload_state.oldpolicy_ptr = polstate;
75059+ new_reload_state.oldalloc_ptr = current_alloc_state;
75060+ new_reload_state.oldmode = oldmode;
75061+
75062+ current_alloc_state = &new_reload_state.newalloc;
75063+ polstate = &new_reload_state.newpolicy;
75064+
75065+ /* everything relevant is now saved off, copy in the new policy */
75066+ if (init_variables(args, true)) {
75067+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75068+ err = -ENOMEM;
75069+ goto error;
75070+ }
75071+
75072+ err = copy_user_acl(args);
75073+ free_init_variables();
75074+ if (err)
75075+ goto error;
75076+ /* the new policy is copied in, with the old policy available via saved_state
75077+ first go through applying roles, making sure to preserve special roles
75078+ then apply new subjects, making sure to preserve inherited and nested subjects,
75079+ though currently only inherited subjects will be preserved
75080+ */
75081+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
75082+ if (err)
75083+ goto error;
75084+
75085+ /* we've now applied the new policy, so restore the old policy state to free it */
75086+ polstate = &new_reload_state.oldpolicy;
75087+ current_alloc_state = &new_reload_state.oldalloc;
75088+ free_variables(true);
75089+
75090+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
75091+ to running_polstate/current_alloc_state inside stop_machine
75092+ */
75093+ err = 0;
75094+ goto out;
75095+error:
75096+ /* on error of loading the new policy, we'll just keep the previous
75097+ policy set around
75098+ */
75099+ free_variables(true);
75100+
75101+ /* doesn't affect runtime, but maintains consistent state */
75102+out:
75103+ polstate = new_reload_state.oldpolicy_ptr;
75104+ current_alloc_state = new_reload_state.oldalloc_ptr;
75105+
75106+ return err;
75107+}
75108+
75109+static int
75110+gracl_init(struct gr_arg *args)
75111+{
75112+ int error = 0;
75113+
75114+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
75115+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
75116+
75117+ if (init_variables(args, false)) {
75118+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75119+ error = -ENOMEM;
75120+ goto out;
75121+ }
75122+
75123+ error = copy_user_acl(args);
75124+ free_init_variables();
75125+ if (error)
75126+ goto out;
75127+
75128+ error = gr_set_acls(0);
75129+ if (error)
75130+ goto out;
75131+
75132+ gr_enable_rbac_system();
75133+
75134+ return 0;
75135+
75136+out:
75137+ free_variables(false);
75138+ return error;
75139+}
75140+
75141+static int
75142+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
75143+ unsigned char **sum)
75144+{
75145+ struct acl_role_label *r;
75146+ struct role_allowed_ip *ipp;
75147+ struct role_transition *trans;
75148+ unsigned int i;
75149+ int found = 0;
75150+ u32 curr_ip = current->signal->curr_ip;
75151+
75152+ current->signal->saved_ip = curr_ip;
75153+
75154+ /* check transition table */
75155+
75156+ for (trans = current->role->transitions; trans; trans = trans->next) {
75157+ if (!strcmp(rolename, trans->rolename)) {
75158+ found = 1;
75159+ break;
75160+ }
75161+ }
75162+
75163+ if (!found)
75164+ return 0;
75165+
75166+ /* handle special roles that do not require authentication
75167+ and check ip */
75168+
75169+ FOR_EACH_ROLE_START(r)
75170+ if (!strcmp(rolename, r->rolename) &&
75171+ (r->roletype & GR_ROLE_SPECIAL)) {
75172+ found = 0;
75173+ if (r->allowed_ips != NULL) {
75174+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
75175+ if ((ntohl(curr_ip) & ipp->netmask) ==
75176+ (ntohl(ipp->addr) & ipp->netmask))
75177+ found = 1;
75178+ }
75179+ } else
75180+ found = 2;
75181+ if (!found)
75182+ return 0;
75183+
75184+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
75185+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
75186+ *salt = NULL;
75187+ *sum = NULL;
75188+ return 1;
75189+ }
75190+ }
75191+ FOR_EACH_ROLE_END(r)
75192+
75193+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75194+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
75195+ *salt = polstate->acl_special_roles[i]->salt;
75196+ *sum = polstate->acl_special_roles[i]->sum;
75197+ return 1;
75198+ }
75199+ }
75200+
75201+ return 0;
75202+}
75203+
75204+int gr_check_secure_terminal(struct task_struct *task)
75205+{
75206+ struct task_struct *p, *p2, *p3;
75207+ struct files_struct *files;
75208+ struct fdtable *fdt;
75209+ struct file *our_file = NULL, *file;
75210+ int i;
75211+
75212+ if (task->signal->tty == NULL)
75213+ return 1;
75214+
75215+ files = get_files_struct(task);
75216+ if (files != NULL) {
75217+ rcu_read_lock();
75218+ fdt = files_fdtable(files);
75219+ for (i=0; i < fdt->max_fds; i++) {
75220+ file = fcheck_files(files, i);
75221+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
75222+ get_file(file);
75223+ our_file = file;
75224+ }
75225+ }
75226+ rcu_read_unlock();
75227+ put_files_struct(files);
75228+ }
75229+
75230+ if (our_file == NULL)
75231+ return 1;
75232+
75233+ read_lock(&tasklist_lock);
75234+ do_each_thread(p2, p) {
75235+ files = get_files_struct(p);
75236+ if (files == NULL ||
75237+ (p->signal && p->signal->tty == task->signal->tty)) {
75238+ if (files != NULL)
75239+ put_files_struct(files);
75240+ continue;
75241+ }
75242+ rcu_read_lock();
75243+ fdt = files_fdtable(files);
75244+ for (i=0; i < fdt->max_fds; i++) {
75245+ file = fcheck_files(files, i);
75246+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
75247+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
75248+ p3 = task;
75249+ while (task_pid_nr(p3) > 0) {
75250+ if (p3 == p)
75251+ break;
75252+ p3 = p3->real_parent;
75253+ }
75254+ if (p3 == p)
75255+ break;
75256+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
75257+ gr_handle_alertkill(p);
75258+ rcu_read_unlock();
75259+ put_files_struct(files);
75260+ read_unlock(&tasklist_lock);
75261+ fput(our_file);
75262+ return 0;
75263+ }
75264+ }
75265+ rcu_read_unlock();
75266+ put_files_struct(files);
75267+ } while_each_thread(p2, p);
75268+ read_unlock(&tasklist_lock);
75269+
75270+ fput(our_file);
75271+ return 1;
75272+}
75273+
75274+ssize_t
75275+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
75276+{
75277+ struct gr_arg_wrapper uwrap;
75278+ unsigned char *sprole_salt = NULL;
75279+ unsigned char *sprole_sum = NULL;
75280+ int error = 0;
75281+ int error2 = 0;
75282+ size_t req_count = 0;
75283+ unsigned char oldmode = 0;
75284+
75285+ mutex_lock(&gr_dev_mutex);
75286+
75287+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
75288+ error = -EPERM;
75289+ goto out;
75290+ }
75291+
75292+#ifdef CONFIG_COMPAT
75293+ pax_open_kernel();
75294+ if (is_compat_task()) {
75295+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
75296+ copy_gr_arg = &copy_gr_arg_compat;
75297+ copy_acl_object_label = &copy_acl_object_label_compat;
75298+ copy_acl_subject_label = &copy_acl_subject_label_compat;
75299+ copy_acl_role_label = &copy_acl_role_label_compat;
75300+ copy_acl_ip_label = &copy_acl_ip_label_compat;
75301+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
75302+ copy_role_transition = &copy_role_transition_compat;
75303+ copy_sprole_pw = &copy_sprole_pw_compat;
75304+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
75305+ copy_pointer_from_array = &copy_pointer_from_array_compat;
75306+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
75307+ } else {
75308+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
75309+ copy_gr_arg = &copy_gr_arg_normal;
75310+ copy_acl_object_label = &copy_acl_object_label_normal;
75311+ copy_acl_subject_label = &copy_acl_subject_label_normal;
75312+ copy_acl_role_label = &copy_acl_role_label_normal;
75313+ copy_acl_ip_label = &copy_acl_ip_label_normal;
75314+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
75315+ copy_role_transition = &copy_role_transition_normal;
75316+ copy_sprole_pw = &copy_sprole_pw_normal;
75317+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
75318+ copy_pointer_from_array = &copy_pointer_from_array_normal;
75319+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
75320+ }
75321+ pax_close_kernel();
75322+#endif
75323+
75324+ req_count = get_gr_arg_wrapper_size();
75325+
75326+ if (count != req_count) {
75327+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
75328+ error = -EINVAL;
75329+ goto out;
75330+ }
75331+
75332+
75333+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
75334+ gr_auth_expires = 0;
75335+ gr_auth_attempts = 0;
75336+ }
75337+
75338+ error = copy_gr_arg_wrapper(buf, &uwrap);
75339+ if (error)
75340+ goto out;
75341+
75342+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75343+ if (error)
75344+ goto out;
75345+
75346+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75347+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75348+ time_after(gr_auth_expires, get_seconds())) {
75349+ error = -EBUSY;
75350+ goto out;
75351+ }
75352+
75353+ /* if non-root trying to do anything other than use a special role,
75354+ do not attempt authentication, do not count towards authentication
75355+ locking
75356+ */
75357+
75358+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75359+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75360+ gr_is_global_nonroot(current_uid())) {
75361+ error = -EPERM;
75362+ goto out;
75363+ }
75364+
75365+ /* ensure pw and special role name are null terminated */
75366+
75367+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75368+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75369+
75370+ /* Okay.
75371+ * We have our enough of the argument structure..(we have yet
75372+ * to copy_from_user the tables themselves) . Copy the tables
75373+ * only if we need them, i.e. for loading operations. */
75374+
75375+ switch (gr_usermode->mode) {
75376+ case GR_STATUS:
75377+ if (gr_acl_is_enabled()) {
75378+ error = 1;
75379+ if (!gr_check_secure_terminal(current))
75380+ error = 3;
75381+ } else
75382+ error = 2;
75383+ goto out;
75384+ case GR_SHUTDOWN:
75385+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75386+ stop_machine(gr_rbac_disable, NULL, NULL);
75387+ free_variables(false);
75388+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75389+ memset(gr_system_salt, 0, GR_SALT_LEN);
75390+ memset(gr_system_sum, 0, GR_SHA_LEN);
75391+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75392+ } else if (gr_acl_is_enabled()) {
75393+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75394+ error = -EPERM;
75395+ } else {
75396+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75397+ error = -EAGAIN;
75398+ }
75399+ break;
75400+ case GR_ENABLE:
75401+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75402+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75403+ else {
75404+ if (gr_acl_is_enabled())
75405+ error = -EAGAIN;
75406+ else
75407+ error = error2;
75408+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75409+ }
75410+ break;
75411+ case GR_OLDRELOAD:
75412+ oldmode = 1;
75413+ case GR_RELOAD:
75414+ if (!gr_acl_is_enabled()) {
75415+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75416+ error = -EAGAIN;
75417+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75418+ error2 = gracl_reload(gr_usermode, oldmode);
75419+ if (!error2)
75420+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75421+ else {
75422+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75423+ error = error2;
75424+ }
75425+ } else {
75426+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75427+ error = -EPERM;
75428+ }
75429+ break;
75430+ case GR_SEGVMOD:
75431+ if (unlikely(!gr_acl_is_enabled())) {
75432+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75433+ error = -EAGAIN;
75434+ break;
75435+ }
75436+
75437+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75438+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75439+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75440+ struct acl_subject_label *segvacl;
75441+ segvacl =
75442+ lookup_acl_subj_label(gr_usermode->segv_inode,
75443+ gr_usermode->segv_device,
75444+ current->role);
75445+ if (segvacl) {
75446+ segvacl->crashes = 0;
75447+ segvacl->expires = 0;
75448+ }
75449+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
75450+ gr_remove_uid(gr_usermode->segv_uid);
75451+ }
75452+ } else {
75453+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75454+ error = -EPERM;
75455+ }
75456+ break;
75457+ case GR_SPROLE:
75458+ case GR_SPROLEPAM:
75459+ if (unlikely(!gr_acl_is_enabled())) {
75460+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75461+ error = -EAGAIN;
75462+ break;
75463+ }
75464+
75465+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75466+ current->role->expires = 0;
75467+ current->role->auth_attempts = 0;
75468+ }
75469+
75470+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75471+ time_after(current->role->expires, get_seconds())) {
75472+ error = -EBUSY;
75473+ goto out;
75474+ }
75475+
75476+ if (lookup_special_role_auth
75477+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75478+ && ((!sprole_salt && !sprole_sum)
75479+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75480+ char *p = "";
75481+ assign_special_role(gr_usermode->sp_role);
75482+ read_lock(&tasklist_lock);
75483+ if (current->real_parent)
75484+ p = current->real_parent->role->rolename;
75485+ read_unlock(&tasklist_lock);
75486+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75487+ p, acl_sp_role_value);
75488+ } else {
75489+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75490+ error = -EPERM;
75491+ if(!(current->role->auth_attempts++))
75492+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75493+
75494+ goto out;
75495+ }
75496+ break;
75497+ case GR_UNSPROLE:
75498+ if (unlikely(!gr_acl_is_enabled())) {
75499+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75500+ error = -EAGAIN;
75501+ break;
75502+ }
75503+
75504+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75505+ char *p = "";
75506+ int i = 0;
75507+
75508+ read_lock(&tasklist_lock);
75509+ if (current->real_parent) {
75510+ p = current->real_parent->role->rolename;
75511+ i = current->real_parent->acl_role_id;
75512+ }
75513+ read_unlock(&tasklist_lock);
75514+
75515+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75516+ gr_set_acls(1);
75517+ } else {
75518+ error = -EPERM;
75519+ goto out;
75520+ }
75521+ break;
75522+ default:
75523+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75524+ error = -EINVAL;
75525+ break;
75526+ }
75527+
75528+ if (error != -EPERM)
75529+ goto out;
75530+
75531+ if(!(gr_auth_attempts++))
75532+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75533+
75534+ out:
75535+ mutex_unlock(&gr_dev_mutex);
75536+
75537+ if (!error)
75538+ error = req_count;
75539+
75540+ return error;
75541+}
75542+
75543+int
75544+gr_set_acls(const int type)
75545+{
75546+ struct task_struct *task, *task2;
75547+ struct acl_role_label *role = current->role;
75548+ struct acl_subject_label *subj;
75549+ __u16 acl_role_id = current->acl_role_id;
75550+ const struct cred *cred;
75551+ int ret;
75552+
75553+ rcu_read_lock();
75554+ read_lock(&tasklist_lock);
75555+ read_lock(&grsec_exec_file_lock);
75556+ do_each_thread(task2, task) {
75557+ /* check to see if we're called from the exit handler,
75558+ if so, only replace ACLs that have inherited the admin
75559+ ACL */
75560+
75561+ if (type && (task->role != role ||
75562+ task->acl_role_id != acl_role_id))
75563+ continue;
75564+
75565+ task->acl_role_id = 0;
75566+ task->acl_sp_role = 0;
75567+ task->inherited = 0;
75568+
75569+ if (task->exec_file) {
75570+ cred = __task_cred(task);
75571+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75572+ subj = __gr_get_subject_for_task(polstate, task, NULL);
75573+ if (subj == NULL) {
75574+ ret = -EINVAL;
75575+ read_unlock(&grsec_exec_file_lock);
75576+ read_unlock(&tasklist_lock);
75577+ rcu_read_unlock();
75578+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75579+ return ret;
75580+ }
75581+ __gr_apply_subject_to_task(polstate, task, subj);
75582+ } else {
75583+ // it's a kernel process
75584+ task->role = polstate->kernel_role;
75585+ task->acl = polstate->kernel_role->root_label;
75586+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75587+ task->acl->mode &= ~GR_PROCFIND;
75588+#endif
75589+ }
75590+ } while_each_thread(task2, task);
75591+ read_unlock(&grsec_exec_file_lock);
75592+ read_unlock(&tasklist_lock);
75593+ rcu_read_unlock();
75594+
75595+ return 0;
75596+}
75597diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75598new file mode 100644
75599index 0000000..39645c9
75600--- /dev/null
75601+++ b/grsecurity/gracl_res.c
75602@@ -0,0 +1,68 @@
75603+#include <linux/kernel.h>
75604+#include <linux/sched.h>
75605+#include <linux/gracl.h>
75606+#include <linux/grinternal.h>
75607+
75608+static const char *restab_log[] = {
75609+ [RLIMIT_CPU] = "RLIMIT_CPU",
75610+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75611+ [RLIMIT_DATA] = "RLIMIT_DATA",
75612+ [RLIMIT_STACK] = "RLIMIT_STACK",
75613+ [RLIMIT_CORE] = "RLIMIT_CORE",
75614+ [RLIMIT_RSS] = "RLIMIT_RSS",
75615+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75616+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75617+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75618+ [RLIMIT_AS] = "RLIMIT_AS",
75619+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75620+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75621+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75622+ [RLIMIT_NICE] = "RLIMIT_NICE",
75623+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75624+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75625+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75626+};
75627+
75628+void
75629+gr_log_resource(const struct task_struct *task,
75630+ const int res, const unsigned long wanted, const int gt)
75631+{
75632+ const struct cred *cred;
75633+ unsigned long rlim;
75634+
75635+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75636+ return;
75637+
75638+ // not yet supported resource
75639+ if (unlikely(!restab_log[res]))
75640+ return;
75641+
75642+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75643+ rlim = task_rlimit_max(task, res);
75644+ else
75645+ rlim = task_rlimit(task, res);
75646+
75647+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75648+ return;
75649+
75650+ rcu_read_lock();
75651+ cred = __task_cred(task);
75652+
75653+ if (res == RLIMIT_NPROC &&
75654+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75655+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75656+ goto out_rcu_unlock;
75657+ else if (res == RLIMIT_MEMLOCK &&
75658+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75659+ goto out_rcu_unlock;
75660+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75661+ goto out_rcu_unlock;
75662+ rcu_read_unlock();
75663+
75664+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75665+
75666+ return;
75667+out_rcu_unlock:
75668+ rcu_read_unlock();
75669+ return;
75670+}
75671diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75672new file mode 100644
75673index 0000000..2040e61
75674--- /dev/null
75675+++ b/grsecurity/gracl_segv.c
75676@@ -0,0 +1,313 @@
75677+#include <linux/kernel.h>
75678+#include <linux/mm.h>
75679+#include <asm/uaccess.h>
75680+#include <asm/errno.h>
75681+#include <asm/mman.h>
75682+#include <net/sock.h>
75683+#include <linux/file.h>
75684+#include <linux/fs.h>
75685+#include <linux/net.h>
75686+#include <linux/in.h>
75687+#include <linux/slab.h>
75688+#include <linux/types.h>
75689+#include <linux/sched.h>
75690+#include <linux/timer.h>
75691+#include <linux/gracl.h>
75692+#include <linux/grsecurity.h>
75693+#include <linux/grinternal.h>
75694+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75695+#include <linux/magic.h>
75696+#include <linux/pagemap.h>
75697+#include "../fs/btrfs/async-thread.h"
75698+#include "../fs/btrfs/ctree.h"
75699+#include "../fs/btrfs/btrfs_inode.h"
75700+#endif
75701+
75702+static struct crash_uid *uid_set;
75703+static unsigned short uid_used;
75704+static DEFINE_SPINLOCK(gr_uid_lock);
75705+extern rwlock_t gr_inode_lock;
75706+extern struct acl_subject_label *
75707+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
75708+ struct acl_role_label *role);
75709+
75710+static inline dev_t __get_dev(const struct dentry *dentry)
75711+{
75712+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75713+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75714+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
75715+ else
75716+#endif
75717+ return dentry->d_sb->s_dev;
75718+}
75719+
75720+int
75721+gr_init_uidset(void)
75722+{
75723+ uid_set =
75724+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
75725+ uid_used = 0;
75726+
75727+ return uid_set ? 1 : 0;
75728+}
75729+
75730+void
75731+gr_free_uidset(void)
75732+{
75733+ if (uid_set) {
75734+ struct crash_uid *tmpset;
75735+ spin_lock(&gr_uid_lock);
75736+ tmpset = uid_set;
75737+ uid_set = NULL;
75738+ uid_used = 0;
75739+ spin_unlock(&gr_uid_lock);
75740+ if (tmpset)
75741+ kfree(tmpset);
75742+ }
75743+
75744+ return;
75745+}
75746+
75747+int
75748+gr_find_uid(const uid_t uid)
75749+{
75750+ struct crash_uid *tmp = uid_set;
75751+ uid_t buid;
75752+ int low = 0, high = uid_used - 1, mid;
75753+
75754+ while (high >= low) {
75755+ mid = (low + high) >> 1;
75756+ buid = tmp[mid].uid;
75757+ if (buid == uid)
75758+ return mid;
75759+ if (buid > uid)
75760+ high = mid - 1;
75761+ if (buid < uid)
75762+ low = mid + 1;
75763+ }
75764+
75765+ return -1;
75766+}
75767+
75768+static __inline__ void
75769+gr_insertsort(void)
75770+{
75771+ unsigned short i, j;
75772+ struct crash_uid index;
75773+
75774+ for (i = 1; i < uid_used; i++) {
75775+ index = uid_set[i];
75776+ j = i;
75777+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
75778+ uid_set[j] = uid_set[j - 1];
75779+ j--;
75780+ }
75781+ uid_set[j] = index;
75782+ }
75783+
75784+ return;
75785+}
75786+
75787+static __inline__ void
75788+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
75789+{
75790+ int loc;
75791+ uid_t uid = GR_GLOBAL_UID(kuid);
75792+
75793+ if (uid_used == GR_UIDTABLE_MAX)
75794+ return;
75795+
75796+ loc = gr_find_uid(uid);
75797+
75798+ if (loc >= 0) {
75799+ uid_set[loc].expires = expires;
75800+ return;
75801+ }
75802+
75803+ uid_set[uid_used].uid = uid;
75804+ uid_set[uid_used].expires = expires;
75805+ uid_used++;
75806+
75807+ gr_insertsort();
75808+
75809+ return;
75810+}
75811+
75812+void
75813+gr_remove_uid(const unsigned short loc)
75814+{
75815+ unsigned short i;
75816+
75817+ for (i = loc + 1; i < uid_used; i++)
75818+ uid_set[i - 1] = uid_set[i];
75819+
75820+ uid_used--;
75821+
75822+ return;
75823+}
75824+
75825+int
75826+gr_check_crash_uid(const kuid_t kuid)
75827+{
75828+ int loc;
75829+ int ret = 0;
75830+ uid_t uid;
75831+
75832+ if (unlikely(!gr_acl_is_enabled()))
75833+ return 0;
75834+
75835+ uid = GR_GLOBAL_UID(kuid);
75836+
75837+ spin_lock(&gr_uid_lock);
75838+ loc = gr_find_uid(uid);
75839+
75840+ if (loc < 0)
75841+ goto out_unlock;
75842+
75843+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
75844+ gr_remove_uid(loc);
75845+ else
75846+ ret = 1;
75847+
75848+out_unlock:
75849+ spin_unlock(&gr_uid_lock);
75850+ return ret;
75851+}
75852+
75853+static __inline__ int
75854+proc_is_setxid(const struct cred *cred)
75855+{
75856+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
75857+ !uid_eq(cred->uid, cred->fsuid))
75858+ return 1;
75859+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
75860+ !gid_eq(cred->gid, cred->fsgid))
75861+ return 1;
75862+
75863+ return 0;
75864+}
75865+
75866+extern int gr_fake_force_sig(int sig, struct task_struct *t);
75867+
75868+void
75869+gr_handle_crash(struct task_struct *task, const int sig)
75870+{
75871+ struct acl_subject_label *curr;
75872+ struct task_struct *tsk, *tsk2;
75873+ const struct cred *cred;
75874+ const struct cred *cred2;
75875+
75876+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
75877+ return;
75878+
75879+ if (unlikely(!gr_acl_is_enabled()))
75880+ return;
75881+
75882+ curr = task->acl;
75883+
75884+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
75885+ return;
75886+
75887+ if (time_before_eq(curr->expires, get_seconds())) {
75888+ curr->expires = 0;
75889+ curr->crashes = 0;
75890+ }
75891+
75892+ curr->crashes++;
75893+
75894+ if (!curr->expires)
75895+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
75896+
75897+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75898+ time_after(curr->expires, get_seconds())) {
75899+ rcu_read_lock();
75900+ cred = __task_cred(task);
75901+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
75902+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75903+ spin_lock(&gr_uid_lock);
75904+ gr_insert_uid(cred->uid, curr->expires);
75905+ spin_unlock(&gr_uid_lock);
75906+ curr->expires = 0;
75907+ curr->crashes = 0;
75908+ read_lock(&tasklist_lock);
75909+ do_each_thread(tsk2, tsk) {
75910+ cred2 = __task_cred(tsk);
75911+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
75912+ gr_fake_force_sig(SIGKILL, tsk);
75913+ } while_each_thread(tsk2, tsk);
75914+ read_unlock(&tasklist_lock);
75915+ } else {
75916+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75917+ read_lock(&tasklist_lock);
75918+ read_lock(&grsec_exec_file_lock);
75919+ do_each_thread(tsk2, tsk) {
75920+ if (likely(tsk != task)) {
75921+ // if this thread has the same subject as the one that triggered
75922+ // RES_CRASH and it's the same binary, kill it
75923+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
75924+ gr_fake_force_sig(SIGKILL, tsk);
75925+ }
75926+ } while_each_thread(tsk2, tsk);
75927+ read_unlock(&grsec_exec_file_lock);
75928+ read_unlock(&tasklist_lock);
75929+ }
75930+ rcu_read_unlock();
75931+ }
75932+
75933+ return;
75934+}
75935+
75936+int
75937+gr_check_crash_exec(const struct file *filp)
75938+{
75939+ struct acl_subject_label *curr;
75940+
75941+ if (unlikely(!gr_acl_is_enabled()))
75942+ return 0;
75943+
75944+ read_lock(&gr_inode_lock);
75945+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
75946+ __get_dev(filp->f_path.dentry),
75947+ current->role);
75948+ read_unlock(&gr_inode_lock);
75949+
75950+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
75951+ (!curr->crashes && !curr->expires))
75952+ return 0;
75953+
75954+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75955+ time_after(curr->expires, get_seconds()))
75956+ return 1;
75957+ else if (time_before_eq(curr->expires, get_seconds())) {
75958+ curr->crashes = 0;
75959+ curr->expires = 0;
75960+ }
75961+
75962+ return 0;
75963+}
75964+
75965+void
75966+gr_handle_alertkill(struct task_struct *task)
75967+{
75968+ struct acl_subject_label *curracl;
75969+ __u32 curr_ip;
75970+ struct task_struct *p, *p2;
75971+
75972+ if (unlikely(!gr_acl_is_enabled()))
75973+ return;
75974+
75975+ curracl = task->acl;
75976+ curr_ip = task->signal->curr_ip;
75977+
75978+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
75979+ read_lock(&tasklist_lock);
75980+ do_each_thread(p2, p) {
75981+ if (p->signal->curr_ip == curr_ip)
75982+ gr_fake_force_sig(SIGKILL, p);
75983+ } while_each_thread(p2, p);
75984+ read_unlock(&tasklist_lock);
75985+ } else if (curracl->mode & GR_KILLPROC)
75986+ gr_fake_force_sig(SIGKILL, task);
75987+
75988+ return;
75989+}
75990diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
75991new file mode 100644
75992index 0000000..6b0c9cc
75993--- /dev/null
75994+++ b/grsecurity/gracl_shm.c
75995@@ -0,0 +1,40 @@
75996+#include <linux/kernel.h>
75997+#include <linux/mm.h>
75998+#include <linux/sched.h>
75999+#include <linux/file.h>
76000+#include <linux/ipc.h>
76001+#include <linux/gracl.h>
76002+#include <linux/grsecurity.h>
76003+#include <linux/grinternal.h>
76004+
76005+int
76006+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76007+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76008+{
76009+ struct task_struct *task;
76010+
76011+ if (!gr_acl_is_enabled())
76012+ return 1;
76013+
76014+ rcu_read_lock();
76015+ read_lock(&tasklist_lock);
76016+
76017+ task = find_task_by_vpid(shm_cprid);
76018+
76019+ if (unlikely(!task))
76020+ task = find_task_by_vpid(shm_lapid);
76021+
76022+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
76023+ (task_pid_nr(task) == shm_lapid)) &&
76024+ (task->acl->mode & GR_PROTSHM) &&
76025+ (task->acl != current->acl))) {
76026+ read_unlock(&tasklist_lock);
76027+ rcu_read_unlock();
76028+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
76029+ return 0;
76030+ }
76031+ read_unlock(&tasklist_lock);
76032+ rcu_read_unlock();
76033+
76034+ return 1;
76035+}
76036diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
76037new file mode 100644
76038index 0000000..bc0be01
76039--- /dev/null
76040+++ b/grsecurity/grsec_chdir.c
76041@@ -0,0 +1,19 @@
76042+#include <linux/kernel.h>
76043+#include <linux/sched.h>
76044+#include <linux/fs.h>
76045+#include <linux/file.h>
76046+#include <linux/grsecurity.h>
76047+#include <linux/grinternal.h>
76048+
76049+void
76050+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
76051+{
76052+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
76053+ if ((grsec_enable_chdir && grsec_enable_group &&
76054+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
76055+ !grsec_enable_group)) {
76056+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
76057+ }
76058+#endif
76059+ return;
76060+}
76061diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
76062new file mode 100644
76063index 0000000..6d99cec
76064--- /dev/null
76065+++ b/grsecurity/grsec_chroot.c
76066@@ -0,0 +1,385 @@
76067+#include <linux/kernel.h>
76068+#include <linux/module.h>
76069+#include <linux/sched.h>
76070+#include <linux/file.h>
76071+#include <linux/fs.h>
76072+#include <linux/mount.h>
76073+#include <linux/types.h>
76074+#include "../fs/mount.h"
76075+#include <linux/grsecurity.h>
76076+#include <linux/grinternal.h>
76077+
76078+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76079+int gr_init_ran;
76080+#endif
76081+
76082+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
76083+{
76084+#ifdef CONFIG_GRKERNSEC
76085+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
76086+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
76087+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76088+ && gr_init_ran
76089+#endif
76090+ )
76091+ task->gr_is_chrooted = 1;
76092+ else {
76093+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76094+ if (task_pid_nr(task) == 1 && !gr_init_ran)
76095+ gr_init_ran = 1;
76096+#endif
76097+ task->gr_is_chrooted = 0;
76098+ }
76099+
76100+ task->gr_chroot_dentry = path->dentry;
76101+#endif
76102+ return;
76103+}
76104+
76105+void gr_clear_chroot_entries(struct task_struct *task)
76106+{
76107+#ifdef CONFIG_GRKERNSEC
76108+ task->gr_is_chrooted = 0;
76109+ task->gr_chroot_dentry = NULL;
76110+#endif
76111+ return;
76112+}
76113+
76114+int
76115+gr_handle_chroot_unix(const pid_t pid)
76116+{
76117+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
76118+ struct task_struct *p;
76119+
76120+ if (unlikely(!grsec_enable_chroot_unix))
76121+ return 1;
76122+
76123+ if (likely(!proc_is_chrooted(current)))
76124+ return 1;
76125+
76126+ rcu_read_lock();
76127+ read_lock(&tasklist_lock);
76128+ p = find_task_by_vpid_unrestricted(pid);
76129+ if (unlikely(p && !have_same_root(current, p))) {
76130+ read_unlock(&tasklist_lock);
76131+ rcu_read_unlock();
76132+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
76133+ return 0;
76134+ }
76135+ read_unlock(&tasklist_lock);
76136+ rcu_read_unlock();
76137+#endif
76138+ return 1;
76139+}
76140+
76141+int
76142+gr_handle_chroot_nice(void)
76143+{
76144+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76145+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
76146+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
76147+ return -EPERM;
76148+ }
76149+#endif
76150+ return 0;
76151+}
76152+
76153+int
76154+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
76155+{
76156+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76157+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
76158+ && proc_is_chrooted(current)) {
76159+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
76160+ return -EACCES;
76161+ }
76162+#endif
76163+ return 0;
76164+}
76165+
76166+int
76167+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
76168+{
76169+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76170+ struct task_struct *p;
76171+ int ret = 0;
76172+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
76173+ return ret;
76174+
76175+ read_lock(&tasklist_lock);
76176+ do_each_pid_task(pid, type, p) {
76177+ if (!have_same_root(current, p)) {
76178+ ret = 1;
76179+ goto out;
76180+ }
76181+ } while_each_pid_task(pid, type, p);
76182+out:
76183+ read_unlock(&tasklist_lock);
76184+ return ret;
76185+#endif
76186+ return 0;
76187+}
76188+
76189+int
76190+gr_pid_is_chrooted(struct task_struct *p)
76191+{
76192+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76193+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
76194+ return 0;
76195+
76196+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
76197+ !have_same_root(current, p)) {
76198+ return 1;
76199+ }
76200+#endif
76201+ return 0;
76202+}
76203+
76204+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
76205+
76206+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
76207+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
76208+{
76209+ struct path path, currentroot;
76210+ int ret = 0;
76211+
76212+ path.dentry = (struct dentry *)u_dentry;
76213+ path.mnt = (struct vfsmount *)u_mnt;
76214+ get_fs_root(current->fs, &currentroot);
76215+ if (path_is_under(&path, &currentroot))
76216+ ret = 1;
76217+ path_put(&currentroot);
76218+
76219+ return ret;
76220+}
76221+#endif
76222+
76223+int
76224+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76225+{
76226+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76227+ if (!grsec_enable_chroot_fchdir)
76228+ return 1;
76229+
76230+ if (!proc_is_chrooted(current))
76231+ return 1;
76232+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76233+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76234+ return 0;
76235+ }
76236+#endif
76237+ return 1;
76238+}
76239+
76240+int
76241+gr_chroot_fhandle(void)
76242+{
76243+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76244+ if (!grsec_enable_chroot_fchdir)
76245+ return 1;
76246+
76247+ if (!proc_is_chrooted(current))
76248+ return 1;
76249+ else {
76250+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76251+ return 0;
76252+ }
76253+#endif
76254+ return 1;
76255+}
76256+
76257+int
76258+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76259+ const u64 shm_createtime)
76260+{
76261+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76262+ struct task_struct *p;
76263+
76264+ if (unlikely(!grsec_enable_chroot_shmat))
76265+ return 1;
76266+
76267+ if (likely(!proc_is_chrooted(current)))
76268+ return 1;
76269+
76270+ rcu_read_lock();
76271+ read_lock(&tasklist_lock);
76272+
76273+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76274+ if (time_before_eq64(p->start_time, shm_createtime)) {
76275+ if (have_same_root(current, p)) {
76276+ goto allow;
76277+ } else {
76278+ read_unlock(&tasklist_lock);
76279+ rcu_read_unlock();
76280+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76281+ return 0;
76282+ }
76283+ }
76284+ /* creator exited, pid reuse, fall through to next check */
76285+ }
76286+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76287+ if (unlikely(!have_same_root(current, p))) {
76288+ read_unlock(&tasklist_lock);
76289+ rcu_read_unlock();
76290+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76291+ return 0;
76292+ }
76293+ }
76294+
76295+allow:
76296+ read_unlock(&tasklist_lock);
76297+ rcu_read_unlock();
76298+#endif
76299+ return 1;
76300+}
76301+
76302+void
76303+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76304+{
76305+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76306+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76307+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76308+#endif
76309+ return;
76310+}
76311+
76312+int
76313+gr_handle_chroot_mknod(const struct dentry *dentry,
76314+ const struct vfsmount *mnt, const int mode)
76315+{
76316+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76317+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76318+ proc_is_chrooted(current)) {
76319+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76320+ return -EPERM;
76321+ }
76322+#endif
76323+ return 0;
76324+}
76325+
76326+int
76327+gr_handle_chroot_mount(const struct dentry *dentry,
76328+ const struct vfsmount *mnt, const char *dev_name)
76329+{
76330+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76331+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76332+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76333+ return -EPERM;
76334+ }
76335+#endif
76336+ return 0;
76337+}
76338+
76339+int
76340+gr_handle_chroot_pivot(void)
76341+{
76342+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76343+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76344+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76345+ return -EPERM;
76346+ }
76347+#endif
76348+ return 0;
76349+}
76350+
76351+int
76352+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76353+{
76354+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76355+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76356+ !gr_is_outside_chroot(dentry, mnt)) {
76357+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76358+ return -EPERM;
76359+ }
76360+#endif
76361+ return 0;
76362+}
76363+
76364+extern const char *captab_log[];
76365+extern int captab_log_entries;
76366+
76367+int
76368+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76369+{
76370+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76371+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76372+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76373+ if (cap_raised(chroot_caps, cap)) {
76374+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76375+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76376+ }
76377+ return 0;
76378+ }
76379+ }
76380+#endif
76381+ return 1;
76382+}
76383+
76384+int
76385+gr_chroot_is_capable(const int cap)
76386+{
76387+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76388+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76389+#endif
76390+ return 1;
76391+}
76392+
76393+int
76394+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76395+{
76396+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76397+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76398+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76399+ if (cap_raised(chroot_caps, cap)) {
76400+ return 0;
76401+ }
76402+ }
76403+#endif
76404+ return 1;
76405+}
76406+
76407+int
76408+gr_chroot_is_capable_nolog(const int cap)
76409+{
76410+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76411+ return gr_task_chroot_is_capable_nolog(current, cap);
76412+#endif
76413+ return 1;
76414+}
76415+
76416+int
76417+gr_handle_chroot_sysctl(const int op)
76418+{
76419+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76420+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76421+ proc_is_chrooted(current))
76422+ return -EACCES;
76423+#endif
76424+ return 0;
76425+}
76426+
76427+void
76428+gr_handle_chroot_chdir(const struct path *path)
76429+{
76430+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76431+ if (grsec_enable_chroot_chdir)
76432+ set_fs_pwd(current->fs, path);
76433+#endif
76434+ return;
76435+}
76436+
76437+int
76438+gr_handle_chroot_chmod(const struct dentry *dentry,
76439+ const struct vfsmount *mnt, const int mode)
76440+{
76441+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76442+ /* allow chmod +s on directories, but not files */
76443+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76444+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76445+ proc_is_chrooted(current)) {
76446+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76447+ return -EPERM;
76448+ }
76449+#endif
76450+ return 0;
76451+}
76452diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76453new file mode 100644
76454index 0000000..0f9ac91
76455--- /dev/null
76456+++ b/grsecurity/grsec_disabled.c
76457@@ -0,0 +1,440 @@
76458+#include <linux/kernel.h>
76459+#include <linux/module.h>
76460+#include <linux/sched.h>
76461+#include <linux/file.h>
76462+#include <linux/fs.h>
76463+#include <linux/kdev_t.h>
76464+#include <linux/net.h>
76465+#include <linux/in.h>
76466+#include <linux/ip.h>
76467+#include <linux/skbuff.h>
76468+#include <linux/sysctl.h>
76469+
76470+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76471+void
76472+pax_set_initial_flags(struct linux_binprm *bprm)
76473+{
76474+ return;
76475+}
76476+#endif
76477+
76478+#ifdef CONFIG_SYSCTL
76479+__u32
76480+gr_handle_sysctl(const struct ctl_table * table, const int op)
76481+{
76482+ return 0;
76483+}
76484+#endif
76485+
76486+#ifdef CONFIG_TASKSTATS
76487+int gr_is_taskstats_denied(int pid)
76488+{
76489+ return 0;
76490+}
76491+#endif
76492+
76493+int
76494+gr_acl_is_enabled(void)
76495+{
76496+ return 0;
76497+}
76498+
76499+int
76500+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76501+{
76502+ return 0;
76503+}
76504+
76505+void
76506+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76507+{
76508+ return;
76509+}
76510+
76511+int
76512+gr_handle_rawio(const struct inode *inode)
76513+{
76514+ return 0;
76515+}
76516+
76517+void
76518+gr_acl_handle_psacct(struct task_struct *task, const long code)
76519+{
76520+ return;
76521+}
76522+
76523+int
76524+gr_handle_ptrace(struct task_struct *task, const long request)
76525+{
76526+ return 0;
76527+}
76528+
76529+int
76530+gr_handle_proc_ptrace(struct task_struct *task)
76531+{
76532+ return 0;
76533+}
76534+
76535+int
76536+gr_set_acls(const int type)
76537+{
76538+ return 0;
76539+}
76540+
76541+int
76542+gr_check_hidden_task(const struct task_struct *tsk)
76543+{
76544+ return 0;
76545+}
76546+
76547+int
76548+gr_check_protected_task(const struct task_struct *task)
76549+{
76550+ return 0;
76551+}
76552+
76553+int
76554+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76555+{
76556+ return 0;
76557+}
76558+
76559+void
76560+gr_copy_label(struct task_struct *tsk)
76561+{
76562+ return;
76563+}
76564+
76565+void
76566+gr_set_pax_flags(struct task_struct *task)
76567+{
76568+ return;
76569+}
76570+
76571+int
76572+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76573+ const int unsafe_share)
76574+{
76575+ return 0;
76576+}
76577+
76578+void
76579+gr_handle_delete(const ino_t ino, const dev_t dev)
76580+{
76581+ return;
76582+}
76583+
76584+void
76585+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76586+{
76587+ return;
76588+}
76589+
76590+void
76591+gr_handle_crash(struct task_struct *task, const int sig)
76592+{
76593+ return;
76594+}
76595+
76596+int
76597+gr_check_crash_exec(const struct file *filp)
76598+{
76599+ return 0;
76600+}
76601+
76602+int
76603+gr_check_crash_uid(const kuid_t uid)
76604+{
76605+ return 0;
76606+}
76607+
76608+void
76609+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76610+ struct dentry *old_dentry,
76611+ struct dentry *new_dentry,
76612+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
76613+{
76614+ return;
76615+}
76616+
76617+int
76618+gr_search_socket(const int family, const int type, const int protocol)
76619+{
76620+ return 1;
76621+}
76622+
76623+int
76624+gr_search_connectbind(const int mode, const struct socket *sock,
76625+ const struct sockaddr_in *addr)
76626+{
76627+ return 0;
76628+}
76629+
76630+void
76631+gr_handle_alertkill(struct task_struct *task)
76632+{
76633+ return;
76634+}
76635+
76636+__u32
76637+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
76638+{
76639+ return 1;
76640+}
76641+
76642+__u32
76643+gr_acl_handle_hidden_file(const struct dentry * dentry,
76644+ const struct vfsmount * mnt)
76645+{
76646+ return 1;
76647+}
76648+
76649+__u32
76650+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
76651+ int acc_mode)
76652+{
76653+ return 1;
76654+}
76655+
76656+__u32
76657+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
76658+{
76659+ return 1;
76660+}
76661+
76662+__u32
76663+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
76664+{
76665+ return 1;
76666+}
76667+
76668+int
76669+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
76670+ unsigned int *vm_flags)
76671+{
76672+ return 1;
76673+}
76674+
76675+__u32
76676+gr_acl_handle_truncate(const struct dentry * dentry,
76677+ const struct vfsmount * mnt)
76678+{
76679+ return 1;
76680+}
76681+
76682+__u32
76683+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
76684+{
76685+ return 1;
76686+}
76687+
76688+__u32
76689+gr_acl_handle_access(const struct dentry * dentry,
76690+ const struct vfsmount * mnt, const int fmode)
76691+{
76692+ return 1;
76693+}
76694+
76695+__u32
76696+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
76697+ umode_t *mode)
76698+{
76699+ return 1;
76700+}
76701+
76702+__u32
76703+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
76704+{
76705+ return 1;
76706+}
76707+
76708+__u32
76709+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
76710+{
76711+ return 1;
76712+}
76713+
76714+__u32
76715+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
76716+{
76717+ return 1;
76718+}
76719+
76720+void
76721+grsecurity_init(void)
76722+{
76723+ return;
76724+}
76725+
76726+umode_t gr_acl_umask(void)
76727+{
76728+ return 0;
76729+}
76730+
76731+__u32
76732+gr_acl_handle_mknod(const struct dentry * new_dentry,
76733+ const struct dentry * parent_dentry,
76734+ const struct vfsmount * parent_mnt,
76735+ const int mode)
76736+{
76737+ return 1;
76738+}
76739+
76740+__u32
76741+gr_acl_handle_mkdir(const struct dentry * new_dentry,
76742+ const struct dentry * parent_dentry,
76743+ const struct vfsmount * parent_mnt)
76744+{
76745+ return 1;
76746+}
76747+
76748+__u32
76749+gr_acl_handle_symlink(const struct dentry * new_dentry,
76750+ const struct dentry * parent_dentry,
76751+ const struct vfsmount * parent_mnt, const struct filename *from)
76752+{
76753+ return 1;
76754+}
76755+
76756+__u32
76757+gr_acl_handle_link(const struct dentry * new_dentry,
76758+ const struct dentry * parent_dentry,
76759+ const struct vfsmount * parent_mnt,
76760+ const struct dentry * old_dentry,
76761+ const struct vfsmount * old_mnt, const struct filename *to)
76762+{
76763+ return 1;
76764+}
76765+
76766+int
76767+gr_acl_handle_rename(const struct dentry *new_dentry,
76768+ const struct dentry *parent_dentry,
76769+ const struct vfsmount *parent_mnt,
76770+ const struct dentry *old_dentry,
76771+ const struct inode *old_parent_inode,
76772+ const struct vfsmount *old_mnt, const struct filename *newname,
76773+ unsigned int flags)
76774+{
76775+ return 0;
76776+}
76777+
76778+int
76779+gr_acl_handle_filldir(const struct file *file, const char *name,
76780+ const int namelen, const ino_t ino)
76781+{
76782+ return 1;
76783+}
76784+
76785+int
76786+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76787+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76788+{
76789+ return 1;
76790+}
76791+
76792+int
76793+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
76794+{
76795+ return 0;
76796+}
76797+
76798+int
76799+gr_search_accept(const struct socket *sock)
76800+{
76801+ return 0;
76802+}
76803+
76804+int
76805+gr_search_listen(const struct socket *sock)
76806+{
76807+ return 0;
76808+}
76809+
76810+int
76811+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
76812+{
76813+ return 0;
76814+}
76815+
76816+__u32
76817+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
76818+{
76819+ return 1;
76820+}
76821+
76822+__u32
76823+gr_acl_handle_creat(const struct dentry * dentry,
76824+ const struct dentry * p_dentry,
76825+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
76826+ const int imode)
76827+{
76828+ return 1;
76829+}
76830+
76831+void
76832+gr_acl_handle_exit(void)
76833+{
76834+ return;
76835+}
76836+
76837+int
76838+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
76839+{
76840+ return 1;
76841+}
76842+
76843+void
76844+gr_set_role_label(const kuid_t uid, const kgid_t gid)
76845+{
76846+ return;
76847+}
76848+
76849+int
76850+gr_acl_handle_procpidmem(const struct task_struct *task)
76851+{
76852+ return 0;
76853+}
76854+
76855+int
76856+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
76857+{
76858+ return 0;
76859+}
76860+
76861+int
76862+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
76863+{
76864+ return 0;
76865+}
76866+
76867+int
76868+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
76869+{
76870+ return 0;
76871+}
76872+
76873+int
76874+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
76875+{
76876+ return 0;
76877+}
76878+
76879+int gr_acl_enable_at_secure(void)
76880+{
76881+ return 0;
76882+}
76883+
76884+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
76885+{
76886+ return dentry->d_sb->s_dev;
76887+}
76888+
76889+void gr_put_exec_file(struct task_struct *task)
76890+{
76891+ return;
76892+}
76893+
76894+#ifdef CONFIG_SECURITY
76895+EXPORT_SYMBOL_GPL(gr_check_user_change);
76896+EXPORT_SYMBOL_GPL(gr_check_group_change);
76897+#endif
76898diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
76899new file mode 100644
76900index 0000000..14638ff
76901--- /dev/null
76902+++ b/grsecurity/grsec_exec.c
76903@@ -0,0 +1,188 @@
76904+#include <linux/kernel.h>
76905+#include <linux/sched.h>
76906+#include <linux/file.h>
76907+#include <linux/binfmts.h>
76908+#include <linux/fs.h>
76909+#include <linux/types.h>
76910+#include <linux/grdefs.h>
76911+#include <linux/grsecurity.h>
76912+#include <linux/grinternal.h>
76913+#include <linux/capability.h>
76914+#include <linux/module.h>
76915+#include <linux/compat.h>
76916+
76917+#include <asm/uaccess.h>
76918+
76919+#ifdef CONFIG_GRKERNSEC_EXECLOG
76920+static char gr_exec_arg_buf[132];
76921+static DEFINE_MUTEX(gr_exec_arg_mutex);
76922+#endif
76923+
76924+struct user_arg_ptr {
76925+#ifdef CONFIG_COMPAT
76926+ bool is_compat;
76927+#endif
76928+ union {
76929+ const char __user *const __user *native;
76930+#ifdef CONFIG_COMPAT
76931+ const compat_uptr_t __user *compat;
76932+#endif
76933+ } ptr;
76934+};
76935+
76936+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
76937+
76938+void
76939+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
76940+{
76941+#ifdef CONFIG_GRKERNSEC_EXECLOG
76942+ char *grarg = gr_exec_arg_buf;
76943+ unsigned int i, x, execlen = 0;
76944+ char c;
76945+
76946+ if (!((grsec_enable_execlog && grsec_enable_group &&
76947+ in_group_p(grsec_audit_gid))
76948+ || (grsec_enable_execlog && !grsec_enable_group)))
76949+ return;
76950+
76951+ mutex_lock(&gr_exec_arg_mutex);
76952+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
76953+
76954+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
76955+ const char __user *p;
76956+ unsigned int len;
76957+
76958+ p = get_user_arg_ptr(argv, i);
76959+ if (IS_ERR(p))
76960+ goto log;
76961+
76962+ len = strnlen_user(p, 128 - execlen);
76963+ if (len > 128 - execlen)
76964+ len = 128 - execlen;
76965+ else if (len > 0)
76966+ len--;
76967+ if (copy_from_user(grarg + execlen, p, len))
76968+ goto log;
76969+
76970+ /* rewrite unprintable characters */
76971+ for (x = 0; x < len; x++) {
76972+ c = *(grarg + execlen + x);
76973+ if (c < 32 || c > 126)
76974+ *(grarg + execlen + x) = ' ';
76975+ }
76976+
76977+ execlen += len;
76978+ *(grarg + execlen) = ' ';
76979+ *(grarg + execlen + 1) = '\0';
76980+ execlen++;
76981+ }
76982+
76983+ log:
76984+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
76985+ bprm->file->f_path.mnt, grarg);
76986+ mutex_unlock(&gr_exec_arg_mutex);
76987+#endif
76988+ return;
76989+}
76990+
76991+#ifdef CONFIG_GRKERNSEC
76992+extern int gr_acl_is_capable(const int cap);
76993+extern int gr_acl_is_capable_nolog(const int cap);
76994+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76995+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
76996+extern int gr_chroot_is_capable(const int cap);
76997+extern int gr_chroot_is_capable_nolog(const int cap);
76998+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76999+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
77000+#endif
77001+
77002+const char *captab_log[] = {
77003+ "CAP_CHOWN",
77004+ "CAP_DAC_OVERRIDE",
77005+ "CAP_DAC_READ_SEARCH",
77006+ "CAP_FOWNER",
77007+ "CAP_FSETID",
77008+ "CAP_KILL",
77009+ "CAP_SETGID",
77010+ "CAP_SETUID",
77011+ "CAP_SETPCAP",
77012+ "CAP_LINUX_IMMUTABLE",
77013+ "CAP_NET_BIND_SERVICE",
77014+ "CAP_NET_BROADCAST",
77015+ "CAP_NET_ADMIN",
77016+ "CAP_NET_RAW",
77017+ "CAP_IPC_LOCK",
77018+ "CAP_IPC_OWNER",
77019+ "CAP_SYS_MODULE",
77020+ "CAP_SYS_RAWIO",
77021+ "CAP_SYS_CHROOT",
77022+ "CAP_SYS_PTRACE",
77023+ "CAP_SYS_PACCT",
77024+ "CAP_SYS_ADMIN",
77025+ "CAP_SYS_BOOT",
77026+ "CAP_SYS_NICE",
77027+ "CAP_SYS_RESOURCE",
77028+ "CAP_SYS_TIME",
77029+ "CAP_SYS_TTY_CONFIG",
77030+ "CAP_MKNOD",
77031+ "CAP_LEASE",
77032+ "CAP_AUDIT_WRITE",
77033+ "CAP_AUDIT_CONTROL",
77034+ "CAP_SETFCAP",
77035+ "CAP_MAC_OVERRIDE",
77036+ "CAP_MAC_ADMIN",
77037+ "CAP_SYSLOG",
77038+ "CAP_WAKE_ALARM",
77039+ "CAP_BLOCK_SUSPEND"
77040+};
77041+
77042+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
77043+
77044+int gr_is_capable(const int cap)
77045+{
77046+#ifdef CONFIG_GRKERNSEC
77047+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
77048+ return 1;
77049+ return 0;
77050+#else
77051+ return 1;
77052+#endif
77053+}
77054+
77055+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77056+{
77057+#ifdef CONFIG_GRKERNSEC
77058+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
77059+ return 1;
77060+ return 0;
77061+#else
77062+ return 1;
77063+#endif
77064+}
77065+
77066+int gr_is_capable_nolog(const int cap)
77067+{
77068+#ifdef CONFIG_GRKERNSEC
77069+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
77070+ return 1;
77071+ return 0;
77072+#else
77073+ return 1;
77074+#endif
77075+}
77076+
77077+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
77078+{
77079+#ifdef CONFIG_GRKERNSEC
77080+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
77081+ return 1;
77082+ return 0;
77083+#else
77084+ return 1;
77085+#endif
77086+}
77087+
77088+EXPORT_SYMBOL_GPL(gr_is_capable);
77089+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
77090+EXPORT_SYMBOL_GPL(gr_task_is_capable);
77091+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
77092diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
77093new file mode 100644
77094index 0000000..06cc6ea
77095--- /dev/null
77096+++ b/grsecurity/grsec_fifo.c
77097@@ -0,0 +1,24 @@
77098+#include <linux/kernel.h>
77099+#include <linux/sched.h>
77100+#include <linux/fs.h>
77101+#include <linux/file.h>
77102+#include <linux/grinternal.h>
77103+
77104+int
77105+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
77106+ const struct dentry *dir, const int flag, const int acc_mode)
77107+{
77108+#ifdef CONFIG_GRKERNSEC_FIFO
77109+ const struct cred *cred = current_cred();
77110+
77111+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
77112+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
77113+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
77114+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
77115+ if (!inode_permission(dentry->d_inode, acc_mode))
77116+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
77117+ return -EACCES;
77118+ }
77119+#endif
77120+ return 0;
77121+}
77122diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
77123new file mode 100644
77124index 0000000..8ca18bf
77125--- /dev/null
77126+++ b/grsecurity/grsec_fork.c
77127@@ -0,0 +1,23 @@
77128+#include <linux/kernel.h>
77129+#include <linux/sched.h>
77130+#include <linux/grsecurity.h>
77131+#include <linux/grinternal.h>
77132+#include <linux/errno.h>
77133+
77134+void
77135+gr_log_forkfail(const int retval)
77136+{
77137+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77138+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
77139+ switch (retval) {
77140+ case -EAGAIN:
77141+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
77142+ break;
77143+ case -ENOMEM:
77144+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
77145+ break;
77146+ }
77147+ }
77148+#endif
77149+ return;
77150+}
77151diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
77152new file mode 100644
77153index 0000000..b7cb191
77154--- /dev/null
77155+++ b/grsecurity/grsec_init.c
77156@@ -0,0 +1,286 @@
77157+#include <linux/kernel.h>
77158+#include <linux/sched.h>
77159+#include <linux/mm.h>
77160+#include <linux/gracl.h>
77161+#include <linux/slab.h>
77162+#include <linux/vmalloc.h>
77163+#include <linux/percpu.h>
77164+#include <linux/module.h>
77165+
77166+int grsec_enable_ptrace_readexec;
77167+int grsec_enable_setxid;
77168+int grsec_enable_symlinkown;
77169+kgid_t grsec_symlinkown_gid;
77170+int grsec_enable_brute;
77171+int grsec_enable_link;
77172+int grsec_enable_dmesg;
77173+int grsec_enable_harden_ptrace;
77174+int grsec_enable_harden_ipc;
77175+int grsec_enable_fifo;
77176+int grsec_enable_execlog;
77177+int grsec_enable_signal;
77178+int grsec_enable_forkfail;
77179+int grsec_enable_audit_ptrace;
77180+int grsec_enable_time;
77181+int grsec_enable_group;
77182+kgid_t grsec_audit_gid;
77183+int grsec_enable_chdir;
77184+int grsec_enable_mount;
77185+int grsec_enable_rofs;
77186+int grsec_deny_new_usb;
77187+int grsec_enable_chroot_findtask;
77188+int grsec_enable_chroot_mount;
77189+int grsec_enable_chroot_shmat;
77190+int grsec_enable_chroot_fchdir;
77191+int grsec_enable_chroot_double;
77192+int grsec_enable_chroot_pivot;
77193+int grsec_enable_chroot_chdir;
77194+int grsec_enable_chroot_chmod;
77195+int grsec_enable_chroot_mknod;
77196+int grsec_enable_chroot_nice;
77197+int grsec_enable_chroot_execlog;
77198+int grsec_enable_chroot_caps;
77199+int grsec_enable_chroot_sysctl;
77200+int grsec_enable_chroot_unix;
77201+int grsec_enable_tpe;
77202+kgid_t grsec_tpe_gid;
77203+int grsec_enable_blackhole;
77204+#ifdef CONFIG_IPV6_MODULE
77205+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
77206+#endif
77207+int grsec_lastack_retries;
77208+int grsec_enable_tpe_all;
77209+int grsec_enable_tpe_invert;
77210+int grsec_enable_socket_all;
77211+kgid_t grsec_socket_all_gid;
77212+int grsec_enable_socket_client;
77213+kgid_t grsec_socket_client_gid;
77214+int grsec_enable_socket_server;
77215+kgid_t grsec_socket_server_gid;
77216+int grsec_resource_logging;
77217+int grsec_disable_privio;
77218+int grsec_enable_log_rwxmaps;
77219+int grsec_lock;
77220+
77221+DEFINE_SPINLOCK(grsec_alert_lock);
77222+unsigned long grsec_alert_wtime = 0;
77223+unsigned long grsec_alert_fyet = 0;
77224+
77225+DEFINE_SPINLOCK(grsec_audit_lock);
77226+
77227+DEFINE_RWLOCK(grsec_exec_file_lock);
77228+
77229+char *gr_shared_page[4];
77230+
77231+char *gr_alert_log_fmt;
77232+char *gr_audit_log_fmt;
77233+char *gr_alert_log_buf;
77234+char *gr_audit_log_buf;
77235+
77236+extern struct gr_arg *gr_usermode;
77237+extern unsigned char *gr_system_salt;
77238+extern unsigned char *gr_system_sum;
77239+
77240+void __init
77241+grsecurity_init(void)
77242+{
77243+ int j;
77244+ /* create the per-cpu shared pages */
77245+
77246+#ifdef CONFIG_X86
77247+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77248+#endif
77249+
77250+ for (j = 0; j < 4; j++) {
77251+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77252+ if (gr_shared_page[j] == NULL) {
77253+ panic("Unable to allocate grsecurity shared page");
77254+ return;
77255+ }
77256+ }
77257+
77258+ /* allocate log buffers */
77259+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77260+ if (!gr_alert_log_fmt) {
77261+ panic("Unable to allocate grsecurity alert log format buffer");
77262+ return;
77263+ }
77264+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77265+ if (!gr_audit_log_fmt) {
77266+ panic("Unable to allocate grsecurity audit log format buffer");
77267+ return;
77268+ }
77269+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77270+ if (!gr_alert_log_buf) {
77271+ panic("Unable to allocate grsecurity alert log buffer");
77272+ return;
77273+ }
77274+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77275+ if (!gr_audit_log_buf) {
77276+ panic("Unable to allocate grsecurity audit log buffer");
77277+ return;
77278+ }
77279+
77280+ /* allocate memory for authentication structure */
77281+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77282+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77283+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77284+
77285+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77286+ panic("Unable to allocate grsecurity authentication structure");
77287+ return;
77288+ }
77289+
77290+#ifdef CONFIG_GRKERNSEC_IO
77291+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77292+ grsec_disable_privio = 1;
77293+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77294+ grsec_disable_privio = 1;
77295+#else
77296+ grsec_disable_privio = 0;
77297+#endif
77298+#endif
77299+
77300+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77301+ /* for backward compatibility, tpe_invert always defaults to on if
77302+ enabled in the kernel
77303+ */
77304+ grsec_enable_tpe_invert = 1;
77305+#endif
77306+
77307+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77308+#ifndef CONFIG_GRKERNSEC_SYSCTL
77309+ grsec_lock = 1;
77310+#endif
77311+
77312+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77313+ grsec_enable_log_rwxmaps = 1;
77314+#endif
77315+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77316+ grsec_enable_group = 1;
77317+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77318+#endif
77319+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77320+ grsec_enable_ptrace_readexec = 1;
77321+#endif
77322+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77323+ grsec_enable_chdir = 1;
77324+#endif
77325+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77326+ grsec_enable_harden_ptrace = 1;
77327+#endif
77328+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77329+ grsec_enable_harden_ipc = 1;
77330+#endif
77331+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77332+ grsec_enable_mount = 1;
77333+#endif
77334+#ifdef CONFIG_GRKERNSEC_LINK
77335+ grsec_enable_link = 1;
77336+#endif
77337+#ifdef CONFIG_GRKERNSEC_BRUTE
77338+ grsec_enable_brute = 1;
77339+#endif
77340+#ifdef CONFIG_GRKERNSEC_DMESG
77341+ grsec_enable_dmesg = 1;
77342+#endif
77343+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77344+ grsec_enable_blackhole = 1;
77345+ grsec_lastack_retries = 4;
77346+#endif
77347+#ifdef CONFIG_GRKERNSEC_FIFO
77348+ grsec_enable_fifo = 1;
77349+#endif
77350+#ifdef CONFIG_GRKERNSEC_EXECLOG
77351+ grsec_enable_execlog = 1;
77352+#endif
77353+#ifdef CONFIG_GRKERNSEC_SETXID
77354+ grsec_enable_setxid = 1;
77355+#endif
77356+#ifdef CONFIG_GRKERNSEC_SIGNAL
77357+ grsec_enable_signal = 1;
77358+#endif
77359+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77360+ grsec_enable_forkfail = 1;
77361+#endif
77362+#ifdef CONFIG_GRKERNSEC_TIME
77363+ grsec_enable_time = 1;
77364+#endif
77365+#ifdef CONFIG_GRKERNSEC_RESLOG
77366+ grsec_resource_logging = 1;
77367+#endif
77368+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77369+ grsec_enable_chroot_findtask = 1;
77370+#endif
77371+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77372+ grsec_enable_chroot_unix = 1;
77373+#endif
77374+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77375+ grsec_enable_chroot_mount = 1;
77376+#endif
77377+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77378+ grsec_enable_chroot_fchdir = 1;
77379+#endif
77380+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77381+ grsec_enable_chroot_shmat = 1;
77382+#endif
77383+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77384+ grsec_enable_audit_ptrace = 1;
77385+#endif
77386+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77387+ grsec_enable_chroot_double = 1;
77388+#endif
77389+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77390+ grsec_enable_chroot_pivot = 1;
77391+#endif
77392+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77393+ grsec_enable_chroot_chdir = 1;
77394+#endif
77395+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77396+ grsec_enable_chroot_chmod = 1;
77397+#endif
77398+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77399+ grsec_enable_chroot_mknod = 1;
77400+#endif
77401+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77402+ grsec_enable_chroot_nice = 1;
77403+#endif
77404+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77405+ grsec_enable_chroot_execlog = 1;
77406+#endif
77407+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77408+ grsec_enable_chroot_caps = 1;
77409+#endif
77410+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77411+ grsec_enable_chroot_sysctl = 1;
77412+#endif
77413+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77414+ grsec_enable_symlinkown = 1;
77415+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77416+#endif
77417+#ifdef CONFIG_GRKERNSEC_TPE
77418+ grsec_enable_tpe = 1;
77419+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77420+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77421+ grsec_enable_tpe_all = 1;
77422+#endif
77423+#endif
77424+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77425+ grsec_enable_socket_all = 1;
77426+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77427+#endif
77428+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77429+ grsec_enable_socket_client = 1;
77430+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77431+#endif
77432+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77433+ grsec_enable_socket_server = 1;
77434+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77435+#endif
77436+#endif
77437+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77438+ grsec_deny_new_usb = 1;
77439+#endif
77440+
77441+ return;
77442+}
77443diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77444new file mode 100644
77445index 0000000..1773300
77446--- /dev/null
77447+++ b/grsecurity/grsec_ipc.c
77448@@ -0,0 +1,48 @@
77449+#include <linux/kernel.h>
77450+#include <linux/mm.h>
77451+#include <linux/sched.h>
77452+#include <linux/file.h>
77453+#include <linux/ipc.h>
77454+#include <linux/ipc_namespace.h>
77455+#include <linux/grsecurity.h>
77456+#include <linux/grinternal.h>
77457+
77458+int
77459+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77460+{
77461+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77462+ int write;
77463+ int orig_granted_mode;
77464+ kuid_t euid;
77465+ kgid_t egid;
77466+
77467+ if (!grsec_enable_harden_ipc)
77468+ return 1;
77469+
77470+ euid = current_euid();
77471+ egid = current_egid();
77472+
77473+ write = requested_mode & 00002;
77474+ orig_granted_mode = ipcp->mode;
77475+
77476+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77477+ orig_granted_mode >>= 6;
77478+ else {
77479+ /* if likely wrong permissions, lock to user */
77480+ if (orig_granted_mode & 0007)
77481+ orig_granted_mode = 0;
77482+ /* otherwise do a egid-only check */
77483+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77484+ orig_granted_mode >>= 3;
77485+ /* otherwise, no access */
77486+ else
77487+ orig_granted_mode = 0;
77488+ }
77489+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77490+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77491+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77492+ return 0;
77493+ }
77494+#endif
77495+ return 1;
77496+}
77497diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77498new file mode 100644
77499index 0000000..5e05e20
77500--- /dev/null
77501+++ b/grsecurity/grsec_link.c
77502@@ -0,0 +1,58 @@
77503+#include <linux/kernel.h>
77504+#include <linux/sched.h>
77505+#include <linux/fs.h>
77506+#include <linux/file.h>
77507+#include <linux/grinternal.h>
77508+
77509+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77510+{
77511+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77512+ const struct inode *link_inode = link->dentry->d_inode;
77513+
77514+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77515+ /* ignore root-owned links, e.g. /proc/self */
77516+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77517+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77518+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77519+ return 1;
77520+ }
77521+#endif
77522+ return 0;
77523+}
77524+
77525+int
77526+gr_handle_follow_link(const struct inode *parent,
77527+ const struct inode *inode,
77528+ const struct dentry *dentry, const struct vfsmount *mnt)
77529+{
77530+#ifdef CONFIG_GRKERNSEC_LINK
77531+ const struct cred *cred = current_cred();
77532+
77533+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77534+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77535+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77536+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77537+ return -EACCES;
77538+ }
77539+#endif
77540+ return 0;
77541+}
77542+
77543+int
77544+gr_handle_hardlink(const struct dentry *dentry,
77545+ const struct vfsmount *mnt,
77546+ struct inode *inode, const int mode, const struct filename *to)
77547+{
77548+#ifdef CONFIG_GRKERNSEC_LINK
77549+ const struct cred *cred = current_cred();
77550+
77551+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77552+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77553+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77554+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77555+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77556+ return -EPERM;
77557+ }
77558+#endif
77559+ return 0;
77560+}
77561diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77562new file mode 100644
77563index 0000000..dbe0a6b
77564--- /dev/null
77565+++ b/grsecurity/grsec_log.c
77566@@ -0,0 +1,341 @@
77567+#include <linux/kernel.h>
77568+#include <linux/sched.h>
77569+#include <linux/file.h>
77570+#include <linux/tty.h>
77571+#include <linux/fs.h>
77572+#include <linux/mm.h>
77573+#include <linux/grinternal.h>
77574+
77575+#ifdef CONFIG_TREE_PREEMPT_RCU
77576+#define DISABLE_PREEMPT() preempt_disable()
77577+#define ENABLE_PREEMPT() preempt_enable()
77578+#else
77579+#define DISABLE_PREEMPT()
77580+#define ENABLE_PREEMPT()
77581+#endif
77582+
77583+#define BEGIN_LOCKS(x) \
77584+ DISABLE_PREEMPT(); \
77585+ rcu_read_lock(); \
77586+ read_lock(&tasklist_lock); \
77587+ read_lock(&grsec_exec_file_lock); \
77588+ if (x != GR_DO_AUDIT) \
77589+ spin_lock(&grsec_alert_lock); \
77590+ else \
77591+ spin_lock(&grsec_audit_lock)
77592+
77593+#define END_LOCKS(x) \
77594+ if (x != GR_DO_AUDIT) \
77595+ spin_unlock(&grsec_alert_lock); \
77596+ else \
77597+ spin_unlock(&grsec_audit_lock); \
77598+ read_unlock(&grsec_exec_file_lock); \
77599+ read_unlock(&tasklist_lock); \
77600+ rcu_read_unlock(); \
77601+ ENABLE_PREEMPT(); \
77602+ if (x == GR_DONT_AUDIT) \
77603+ gr_handle_alertkill(current)
77604+
77605+enum {
77606+ FLOODING,
77607+ NO_FLOODING
77608+};
77609+
77610+extern char *gr_alert_log_fmt;
77611+extern char *gr_audit_log_fmt;
77612+extern char *gr_alert_log_buf;
77613+extern char *gr_audit_log_buf;
77614+
77615+static int gr_log_start(int audit)
77616+{
77617+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
77618+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
77619+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77620+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
77621+ unsigned long curr_secs = get_seconds();
77622+
77623+ if (audit == GR_DO_AUDIT)
77624+ goto set_fmt;
77625+
77626+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
77627+ grsec_alert_wtime = curr_secs;
77628+ grsec_alert_fyet = 0;
77629+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
77630+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
77631+ grsec_alert_fyet++;
77632+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
77633+ grsec_alert_wtime = curr_secs;
77634+ grsec_alert_fyet++;
77635+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
77636+ return FLOODING;
77637+ }
77638+ else return FLOODING;
77639+
77640+set_fmt:
77641+#endif
77642+ memset(buf, 0, PAGE_SIZE);
77643+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
77644+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
77645+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77646+ } else if (current->signal->curr_ip) {
77647+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
77648+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
77649+ } else if (gr_acl_is_enabled()) {
77650+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
77651+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77652+ } else {
77653+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
77654+ strcpy(buf, fmt);
77655+ }
77656+
77657+ return NO_FLOODING;
77658+}
77659+
77660+static void gr_log_middle(int audit, const char *msg, va_list ap)
77661+ __attribute__ ((format (printf, 2, 0)));
77662+
77663+static void gr_log_middle(int audit, const char *msg, va_list ap)
77664+{
77665+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77666+ unsigned int len = strlen(buf);
77667+
77668+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77669+
77670+ return;
77671+}
77672+
77673+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77674+ __attribute__ ((format (printf, 2, 3)));
77675+
77676+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77677+{
77678+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77679+ unsigned int len = strlen(buf);
77680+ va_list ap;
77681+
77682+ va_start(ap, msg);
77683+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77684+ va_end(ap);
77685+
77686+ return;
77687+}
77688+
77689+static void gr_log_end(int audit, int append_default)
77690+{
77691+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77692+ if (append_default) {
77693+ struct task_struct *task = current;
77694+ struct task_struct *parent = task->real_parent;
77695+ const struct cred *cred = __task_cred(task);
77696+ const struct cred *pcred = __task_cred(parent);
77697+ unsigned int len = strlen(buf);
77698+
77699+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77700+ }
77701+
77702+ printk("%s\n", buf);
77703+
77704+ return;
77705+}
77706+
77707+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
77708+{
77709+ int logtype;
77710+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
77711+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
77712+ void *voidptr = NULL;
77713+ int num1 = 0, num2 = 0;
77714+ unsigned long ulong1 = 0, ulong2 = 0;
77715+ struct dentry *dentry = NULL;
77716+ struct vfsmount *mnt = NULL;
77717+ struct file *file = NULL;
77718+ struct task_struct *task = NULL;
77719+ struct vm_area_struct *vma = NULL;
77720+ const struct cred *cred, *pcred;
77721+ va_list ap;
77722+
77723+ BEGIN_LOCKS(audit);
77724+ logtype = gr_log_start(audit);
77725+ if (logtype == FLOODING) {
77726+ END_LOCKS(audit);
77727+ return;
77728+ }
77729+ va_start(ap, argtypes);
77730+ switch (argtypes) {
77731+ case GR_TTYSNIFF:
77732+ task = va_arg(ap, struct task_struct *);
77733+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
77734+ break;
77735+ case GR_SYSCTL_HIDDEN:
77736+ str1 = va_arg(ap, char *);
77737+ gr_log_middle_varargs(audit, msg, result, str1);
77738+ break;
77739+ case GR_RBAC:
77740+ dentry = va_arg(ap, struct dentry *);
77741+ mnt = va_arg(ap, struct vfsmount *);
77742+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
77743+ break;
77744+ case GR_RBAC_STR:
77745+ dentry = va_arg(ap, struct dentry *);
77746+ mnt = va_arg(ap, struct vfsmount *);
77747+ str1 = va_arg(ap, char *);
77748+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
77749+ break;
77750+ case GR_STR_RBAC:
77751+ str1 = va_arg(ap, char *);
77752+ dentry = va_arg(ap, struct dentry *);
77753+ mnt = va_arg(ap, struct vfsmount *);
77754+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
77755+ break;
77756+ case GR_RBAC_MODE2:
77757+ dentry = va_arg(ap, struct dentry *);
77758+ mnt = va_arg(ap, struct vfsmount *);
77759+ str1 = va_arg(ap, char *);
77760+ str2 = va_arg(ap, char *);
77761+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
77762+ break;
77763+ case GR_RBAC_MODE3:
77764+ dentry = va_arg(ap, struct dentry *);
77765+ mnt = va_arg(ap, struct vfsmount *);
77766+ str1 = va_arg(ap, char *);
77767+ str2 = va_arg(ap, char *);
77768+ str3 = va_arg(ap, char *);
77769+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
77770+ break;
77771+ case GR_FILENAME:
77772+ dentry = va_arg(ap, struct dentry *);
77773+ mnt = va_arg(ap, struct vfsmount *);
77774+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
77775+ break;
77776+ case GR_STR_FILENAME:
77777+ str1 = va_arg(ap, char *);
77778+ dentry = va_arg(ap, struct dentry *);
77779+ mnt = va_arg(ap, struct vfsmount *);
77780+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
77781+ break;
77782+ case GR_FILENAME_STR:
77783+ dentry = va_arg(ap, struct dentry *);
77784+ mnt = va_arg(ap, struct vfsmount *);
77785+ str1 = va_arg(ap, char *);
77786+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
77787+ break;
77788+ case GR_FILENAME_TWO_INT:
77789+ dentry = va_arg(ap, struct dentry *);
77790+ mnt = va_arg(ap, struct vfsmount *);
77791+ num1 = va_arg(ap, int);
77792+ num2 = va_arg(ap, int);
77793+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
77794+ break;
77795+ case GR_FILENAME_TWO_INT_STR:
77796+ dentry = va_arg(ap, struct dentry *);
77797+ mnt = va_arg(ap, struct vfsmount *);
77798+ num1 = va_arg(ap, int);
77799+ num2 = va_arg(ap, int);
77800+ str1 = va_arg(ap, char *);
77801+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
77802+ break;
77803+ case GR_TEXTREL:
77804+ file = va_arg(ap, struct file *);
77805+ ulong1 = va_arg(ap, unsigned long);
77806+ ulong2 = va_arg(ap, unsigned long);
77807+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
77808+ break;
77809+ case GR_PTRACE:
77810+ task = va_arg(ap, struct task_struct *);
77811+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
77812+ break;
77813+ case GR_RESOURCE:
77814+ task = va_arg(ap, struct task_struct *);
77815+ cred = __task_cred(task);
77816+ pcred = __task_cred(task->real_parent);
77817+ ulong1 = va_arg(ap, unsigned long);
77818+ str1 = va_arg(ap, char *);
77819+ ulong2 = va_arg(ap, unsigned long);
77820+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77821+ break;
77822+ case GR_CAP:
77823+ task = va_arg(ap, struct task_struct *);
77824+ cred = __task_cred(task);
77825+ pcred = __task_cred(task->real_parent);
77826+ str1 = va_arg(ap, char *);
77827+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77828+ break;
77829+ case GR_SIG:
77830+ str1 = va_arg(ap, char *);
77831+ voidptr = va_arg(ap, void *);
77832+ gr_log_middle_varargs(audit, msg, str1, voidptr);
77833+ break;
77834+ case GR_SIG2:
77835+ task = va_arg(ap, struct task_struct *);
77836+ cred = __task_cred(task);
77837+ pcred = __task_cred(task->real_parent);
77838+ num1 = va_arg(ap, int);
77839+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77840+ break;
77841+ case GR_CRASH1:
77842+ task = va_arg(ap, struct task_struct *);
77843+ cred = __task_cred(task);
77844+ pcred = __task_cred(task->real_parent);
77845+ ulong1 = va_arg(ap, unsigned long);
77846+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
77847+ break;
77848+ case GR_CRASH2:
77849+ task = va_arg(ap, struct task_struct *);
77850+ cred = __task_cred(task);
77851+ pcred = __task_cred(task->real_parent);
77852+ ulong1 = va_arg(ap, unsigned long);
77853+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
77854+ break;
77855+ case GR_RWXMAP:
77856+ file = va_arg(ap, struct file *);
77857+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
77858+ break;
77859+ case GR_RWXMAPVMA:
77860+ vma = va_arg(ap, struct vm_area_struct *);
77861+ if (vma->vm_file)
77862+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
77863+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
77864+ str1 = "<stack>";
77865+ else if (vma->vm_start <= current->mm->brk &&
77866+ vma->vm_end >= current->mm->start_brk)
77867+ str1 = "<heap>";
77868+ else
77869+ str1 = "<anonymous mapping>";
77870+ gr_log_middle_varargs(audit, msg, str1);
77871+ break;
77872+ case GR_PSACCT:
77873+ {
77874+ unsigned int wday, cday;
77875+ __u8 whr, chr;
77876+ __u8 wmin, cmin;
77877+ __u8 wsec, csec;
77878+ char cur_tty[64] = { 0 };
77879+ char parent_tty[64] = { 0 };
77880+
77881+ task = va_arg(ap, struct task_struct *);
77882+ wday = va_arg(ap, unsigned int);
77883+ cday = va_arg(ap, unsigned int);
77884+ whr = va_arg(ap, int);
77885+ chr = va_arg(ap, int);
77886+ wmin = va_arg(ap, int);
77887+ cmin = va_arg(ap, int);
77888+ wsec = va_arg(ap, int);
77889+ csec = va_arg(ap, int);
77890+ ulong1 = va_arg(ap, unsigned long);
77891+ cred = __task_cred(task);
77892+ pcred = __task_cred(task->real_parent);
77893+
77894+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77895+ }
77896+ break;
77897+ default:
77898+ gr_log_middle(audit, msg, ap);
77899+ }
77900+ va_end(ap);
77901+ // these don't need DEFAULTSECARGS printed on the end
77902+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
77903+ gr_log_end(audit, 0);
77904+ else
77905+ gr_log_end(audit, 1);
77906+ END_LOCKS(audit);
77907+}
77908diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
77909new file mode 100644
77910index 0000000..0e39d8c
77911--- /dev/null
77912+++ b/grsecurity/grsec_mem.c
77913@@ -0,0 +1,48 @@
77914+#include <linux/kernel.h>
77915+#include <linux/sched.h>
77916+#include <linux/mm.h>
77917+#include <linux/mman.h>
77918+#include <linux/module.h>
77919+#include <linux/grinternal.h>
77920+
77921+void gr_handle_msr_write(void)
77922+{
77923+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
77924+ return;
77925+}
77926+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
77927+
77928+void
77929+gr_handle_ioperm(void)
77930+{
77931+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
77932+ return;
77933+}
77934+
77935+void
77936+gr_handle_iopl(void)
77937+{
77938+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
77939+ return;
77940+}
77941+
77942+void
77943+gr_handle_mem_readwrite(u64 from, u64 to)
77944+{
77945+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
77946+ return;
77947+}
77948+
77949+void
77950+gr_handle_vm86(void)
77951+{
77952+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
77953+ return;
77954+}
77955+
77956+void
77957+gr_log_badprocpid(const char *entry)
77958+{
77959+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
77960+ return;
77961+}
77962diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
77963new file mode 100644
77964index 0000000..cd9e124
77965--- /dev/null
77966+++ b/grsecurity/grsec_mount.c
77967@@ -0,0 +1,65 @@
77968+#include <linux/kernel.h>
77969+#include <linux/sched.h>
77970+#include <linux/mount.h>
77971+#include <linux/major.h>
77972+#include <linux/grsecurity.h>
77973+#include <linux/grinternal.h>
77974+
77975+void
77976+gr_log_remount(const char *devname, const int retval)
77977+{
77978+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77979+ if (grsec_enable_mount && (retval >= 0))
77980+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
77981+#endif
77982+ return;
77983+}
77984+
77985+void
77986+gr_log_unmount(const char *devname, const int retval)
77987+{
77988+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77989+ if (grsec_enable_mount && (retval >= 0))
77990+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
77991+#endif
77992+ return;
77993+}
77994+
77995+void
77996+gr_log_mount(const char *from, const char *to, const int retval)
77997+{
77998+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77999+ if (grsec_enable_mount && (retval >= 0))
78000+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
78001+#endif
78002+ return;
78003+}
78004+
78005+int
78006+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
78007+{
78008+#ifdef CONFIG_GRKERNSEC_ROFS
78009+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
78010+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
78011+ return -EPERM;
78012+ } else
78013+ return 0;
78014+#endif
78015+ return 0;
78016+}
78017+
78018+int
78019+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
78020+{
78021+#ifdef CONFIG_GRKERNSEC_ROFS
78022+ struct inode *inode = dentry->d_inode;
78023+
78024+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
78025+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
78026+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
78027+ return -EPERM;
78028+ } else
78029+ return 0;
78030+#endif
78031+ return 0;
78032+}
78033diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
78034new file mode 100644
78035index 0000000..6ee9d50
78036--- /dev/null
78037+++ b/grsecurity/grsec_pax.c
78038@@ -0,0 +1,45 @@
78039+#include <linux/kernel.h>
78040+#include <linux/sched.h>
78041+#include <linux/mm.h>
78042+#include <linux/file.h>
78043+#include <linux/grinternal.h>
78044+#include <linux/grsecurity.h>
78045+
78046+void
78047+gr_log_textrel(struct vm_area_struct * vma)
78048+{
78049+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78050+ if (grsec_enable_log_rwxmaps)
78051+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
78052+#endif
78053+ return;
78054+}
78055+
78056+void gr_log_ptgnustack(struct file *file)
78057+{
78058+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78059+ if (grsec_enable_log_rwxmaps)
78060+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
78061+#endif
78062+ return;
78063+}
78064+
78065+void
78066+gr_log_rwxmmap(struct file *file)
78067+{
78068+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78069+ if (grsec_enable_log_rwxmaps)
78070+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
78071+#endif
78072+ return;
78073+}
78074+
78075+void
78076+gr_log_rwxmprotect(struct vm_area_struct *vma)
78077+{
78078+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78079+ if (grsec_enable_log_rwxmaps)
78080+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
78081+#endif
78082+ return;
78083+}
78084diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
78085new file mode 100644
78086index 0000000..2005a3a
78087--- /dev/null
78088+++ b/grsecurity/grsec_proc.c
78089@@ -0,0 +1,20 @@
78090+#include <linux/kernel.h>
78091+#include <linux/sched.h>
78092+#include <linux/grsecurity.h>
78093+#include <linux/grinternal.h>
78094+
78095+int gr_proc_is_restricted(void)
78096+{
78097+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78098+ const struct cred *cred = current_cred();
78099+#endif
78100+
78101+#ifdef CONFIG_GRKERNSEC_PROC_USER
78102+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
78103+ return -EACCES;
78104+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78105+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
78106+ return -EACCES;
78107+#endif
78108+ return 0;
78109+}
78110diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
78111new file mode 100644
78112index 0000000..f7f29aa
78113--- /dev/null
78114+++ b/grsecurity/grsec_ptrace.c
78115@@ -0,0 +1,30 @@
78116+#include <linux/kernel.h>
78117+#include <linux/sched.h>
78118+#include <linux/grinternal.h>
78119+#include <linux/security.h>
78120+
78121+void
78122+gr_audit_ptrace(struct task_struct *task)
78123+{
78124+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78125+ if (grsec_enable_audit_ptrace)
78126+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
78127+#endif
78128+ return;
78129+}
78130+
78131+int
78132+gr_ptrace_readexec(struct file *file, int unsafe_flags)
78133+{
78134+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78135+ const struct dentry *dentry = file->f_path.dentry;
78136+ const struct vfsmount *mnt = file->f_path.mnt;
78137+
78138+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
78139+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
78140+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
78141+ return -EACCES;
78142+ }
78143+#endif
78144+ return 0;
78145+}
78146diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
78147new file mode 100644
78148index 0000000..3860c7e
78149--- /dev/null
78150+++ b/grsecurity/grsec_sig.c
78151@@ -0,0 +1,236 @@
78152+#include <linux/kernel.h>
78153+#include <linux/sched.h>
78154+#include <linux/fs.h>
78155+#include <linux/delay.h>
78156+#include <linux/grsecurity.h>
78157+#include <linux/grinternal.h>
78158+#include <linux/hardirq.h>
78159+
78160+char *signames[] = {
78161+ [SIGSEGV] = "Segmentation fault",
78162+ [SIGILL] = "Illegal instruction",
78163+ [SIGABRT] = "Abort",
78164+ [SIGBUS] = "Invalid alignment/Bus error"
78165+};
78166+
78167+void
78168+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
78169+{
78170+#ifdef CONFIG_GRKERNSEC_SIGNAL
78171+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
78172+ (sig == SIGABRT) || (sig == SIGBUS))) {
78173+ if (task_pid_nr(t) == task_pid_nr(current)) {
78174+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
78175+ } else {
78176+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
78177+ }
78178+ }
78179+#endif
78180+ return;
78181+}
78182+
78183+int
78184+gr_handle_signal(const struct task_struct *p, const int sig)
78185+{
78186+#ifdef CONFIG_GRKERNSEC
78187+ /* ignore the 0 signal for protected task checks */
78188+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
78189+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
78190+ return -EPERM;
78191+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
78192+ return -EPERM;
78193+ }
78194+#endif
78195+ return 0;
78196+}
78197+
78198+#ifdef CONFIG_GRKERNSEC
78199+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
78200+
78201+int gr_fake_force_sig(int sig, struct task_struct *t)
78202+{
78203+ unsigned long int flags;
78204+ int ret, blocked, ignored;
78205+ struct k_sigaction *action;
78206+
78207+ spin_lock_irqsave(&t->sighand->siglock, flags);
78208+ action = &t->sighand->action[sig-1];
78209+ ignored = action->sa.sa_handler == SIG_IGN;
78210+ blocked = sigismember(&t->blocked, sig);
78211+ if (blocked || ignored) {
78212+ action->sa.sa_handler = SIG_DFL;
78213+ if (blocked) {
78214+ sigdelset(&t->blocked, sig);
78215+ recalc_sigpending_and_wake(t);
78216+ }
78217+ }
78218+ if (action->sa.sa_handler == SIG_DFL)
78219+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78220+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78221+
78222+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78223+
78224+ return ret;
78225+}
78226+#endif
78227+
78228+#define GR_USER_BAN_TIME (15 * 60)
78229+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78230+
78231+void gr_handle_brute_attach(int dumpable)
78232+{
78233+#ifdef CONFIG_GRKERNSEC_BRUTE
78234+ struct task_struct *p = current;
78235+ kuid_t uid = GLOBAL_ROOT_UID;
78236+ int daemon = 0;
78237+
78238+ if (!grsec_enable_brute)
78239+ return;
78240+
78241+ rcu_read_lock();
78242+ read_lock(&tasklist_lock);
78243+ read_lock(&grsec_exec_file_lock);
78244+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78245+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78246+ p->real_parent->brute = 1;
78247+ daemon = 1;
78248+ } else {
78249+ const struct cred *cred = __task_cred(p), *cred2;
78250+ struct task_struct *tsk, *tsk2;
78251+
78252+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78253+ struct user_struct *user;
78254+
78255+ uid = cred->uid;
78256+
78257+ /* this is put upon execution past expiration */
78258+ user = find_user(uid);
78259+ if (user == NULL)
78260+ goto unlock;
78261+ user->suid_banned = 1;
78262+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78263+ if (user->suid_ban_expires == ~0UL)
78264+ user->suid_ban_expires--;
78265+
78266+ /* only kill other threads of the same binary, from the same user */
78267+ do_each_thread(tsk2, tsk) {
78268+ cred2 = __task_cred(tsk);
78269+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78270+ gr_fake_force_sig(SIGKILL, tsk);
78271+ } while_each_thread(tsk2, tsk);
78272+ }
78273+ }
78274+unlock:
78275+ read_unlock(&grsec_exec_file_lock);
78276+ read_unlock(&tasklist_lock);
78277+ rcu_read_unlock();
78278+
78279+ if (gr_is_global_nonroot(uid))
78280+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78281+ else if (daemon)
78282+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78283+
78284+#endif
78285+ return;
78286+}
78287+
78288+void gr_handle_brute_check(void)
78289+{
78290+#ifdef CONFIG_GRKERNSEC_BRUTE
78291+ struct task_struct *p = current;
78292+
78293+ if (unlikely(p->brute)) {
78294+ if (!grsec_enable_brute)
78295+ p->brute = 0;
78296+ else if (time_before(get_seconds(), p->brute_expires))
78297+ msleep(30 * 1000);
78298+ }
78299+#endif
78300+ return;
78301+}
78302+
78303+void gr_handle_kernel_exploit(void)
78304+{
78305+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78306+ const struct cred *cred;
78307+ struct task_struct *tsk, *tsk2;
78308+ struct user_struct *user;
78309+ kuid_t uid;
78310+
78311+ if (in_irq() || in_serving_softirq() || in_nmi())
78312+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78313+
78314+ uid = current_uid();
78315+
78316+ if (gr_is_global_root(uid))
78317+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78318+ else {
78319+ /* kill all the processes of this user, hold a reference
78320+ to their creds struct, and prevent them from creating
78321+ another process until system reset
78322+ */
78323+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78324+ GR_GLOBAL_UID(uid));
78325+ /* we intentionally leak this ref */
78326+ user = get_uid(current->cred->user);
78327+ if (user)
78328+ user->kernel_banned = 1;
78329+
78330+ /* kill all processes of this user */
78331+ read_lock(&tasklist_lock);
78332+ do_each_thread(tsk2, tsk) {
78333+ cred = __task_cred(tsk);
78334+ if (uid_eq(cred->uid, uid))
78335+ gr_fake_force_sig(SIGKILL, tsk);
78336+ } while_each_thread(tsk2, tsk);
78337+ read_unlock(&tasklist_lock);
78338+ }
78339+#endif
78340+}
78341+
78342+#ifdef CONFIG_GRKERNSEC_BRUTE
78343+static bool suid_ban_expired(struct user_struct *user)
78344+{
78345+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78346+ user->suid_banned = 0;
78347+ user->suid_ban_expires = 0;
78348+ free_uid(user);
78349+ return true;
78350+ }
78351+
78352+ return false;
78353+}
78354+#endif
78355+
78356+int gr_process_kernel_exec_ban(void)
78357+{
78358+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78359+ if (unlikely(current->cred->user->kernel_banned))
78360+ return -EPERM;
78361+#endif
78362+ return 0;
78363+}
78364+
78365+int gr_process_kernel_setuid_ban(struct user_struct *user)
78366+{
78367+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78368+ if (unlikely(user->kernel_banned))
78369+ gr_fake_force_sig(SIGKILL, current);
78370+#endif
78371+ return 0;
78372+}
78373+
78374+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78375+{
78376+#ifdef CONFIG_GRKERNSEC_BRUTE
78377+ struct user_struct *user = current->cred->user;
78378+ if (unlikely(user->suid_banned)) {
78379+ if (suid_ban_expired(user))
78380+ return 0;
78381+ /* disallow execution of suid binaries only */
78382+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78383+ return -EPERM;
78384+ }
78385+#endif
78386+ return 0;
78387+}
78388diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78389new file mode 100644
78390index 0000000..e3650b6
78391--- /dev/null
78392+++ b/grsecurity/grsec_sock.c
78393@@ -0,0 +1,244 @@
78394+#include <linux/kernel.h>
78395+#include <linux/module.h>
78396+#include <linux/sched.h>
78397+#include <linux/file.h>
78398+#include <linux/net.h>
78399+#include <linux/in.h>
78400+#include <linux/ip.h>
78401+#include <net/sock.h>
78402+#include <net/inet_sock.h>
78403+#include <linux/grsecurity.h>
78404+#include <linux/grinternal.h>
78405+#include <linux/gracl.h>
78406+
78407+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78408+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78409+
78410+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78411+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78412+
78413+#ifdef CONFIG_UNIX_MODULE
78414+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78415+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78416+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78417+EXPORT_SYMBOL_GPL(gr_handle_create);
78418+#endif
78419+
78420+#ifdef CONFIG_GRKERNSEC
78421+#define gr_conn_table_size 32749
78422+struct conn_table_entry {
78423+ struct conn_table_entry *next;
78424+ struct signal_struct *sig;
78425+};
78426+
78427+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78428+DEFINE_SPINLOCK(gr_conn_table_lock);
78429+
78430+extern const char * gr_socktype_to_name(unsigned char type);
78431+extern const char * gr_proto_to_name(unsigned char proto);
78432+extern const char * gr_sockfamily_to_name(unsigned char family);
78433+
78434+static __inline__ int
78435+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78436+{
78437+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78438+}
78439+
78440+static __inline__ int
78441+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78442+ __u16 sport, __u16 dport)
78443+{
78444+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78445+ sig->gr_sport == sport && sig->gr_dport == dport))
78446+ return 1;
78447+ else
78448+ return 0;
78449+}
78450+
78451+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78452+{
78453+ struct conn_table_entry **match;
78454+ unsigned int index;
78455+
78456+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78457+ sig->gr_sport, sig->gr_dport,
78458+ gr_conn_table_size);
78459+
78460+ newent->sig = sig;
78461+
78462+ match = &gr_conn_table[index];
78463+ newent->next = *match;
78464+ *match = newent;
78465+
78466+ return;
78467+}
78468+
78469+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78470+{
78471+ struct conn_table_entry *match, *last = NULL;
78472+ unsigned int index;
78473+
78474+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78475+ sig->gr_sport, sig->gr_dport,
78476+ gr_conn_table_size);
78477+
78478+ match = gr_conn_table[index];
78479+ while (match && !conn_match(match->sig,
78480+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78481+ sig->gr_dport)) {
78482+ last = match;
78483+ match = match->next;
78484+ }
78485+
78486+ if (match) {
78487+ if (last)
78488+ last->next = match->next;
78489+ else
78490+ gr_conn_table[index] = NULL;
78491+ kfree(match);
78492+ }
78493+
78494+ return;
78495+}
78496+
78497+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78498+ __u16 sport, __u16 dport)
78499+{
78500+ struct conn_table_entry *match;
78501+ unsigned int index;
78502+
78503+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78504+
78505+ match = gr_conn_table[index];
78506+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78507+ match = match->next;
78508+
78509+ if (match)
78510+ return match->sig;
78511+ else
78512+ return NULL;
78513+}
78514+
78515+#endif
78516+
78517+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78518+{
78519+#ifdef CONFIG_GRKERNSEC
78520+ struct signal_struct *sig = current->signal;
78521+ struct conn_table_entry *newent;
78522+
78523+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78524+ if (newent == NULL)
78525+ return;
78526+ /* no bh lock needed since we are called with bh disabled */
78527+ spin_lock(&gr_conn_table_lock);
78528+ gr_del_task_from_ip_table_nolock(sig);
78529+ sig->gr_saddr = inet->inet_rcv_saddr;
78530+ sig->gr_daddr = inet->inet_daddr;
78531+ sig->gr_sport = inet->inet_sport;
78532+ sig->gr_dport = inet->inet_dport;
78533+ gr_add_to_task_ip_table_nolock(sig, newent);
78534+ spin_unlock(&gr_conn_table_lock);
78535+#endif
78536+ return;
78537+}
78538+
78539+void gr_del_task_from_ip_table(struct task_struct *task)
78540+{
78541+#ifdef CONFIG_GRKERNSEC
78542+ spin_lock_bh(&gr_conn_table_lock);
78543+ gr_del_task_from_ip_table_nolock(task->signal);
78544+ spin_unlock_bh(&gr_conn_table_lock);
78545+#endif
78546+ return;
78547+}
78548+
78549+void
78550+gr_attach_curr_ip(const struct sock *sk)
78551+{
78552+#ifdef CONFIG_GRKERNSEC
78553+ struct signal_struct *p, *set;
78554+ const struct inet_sock *inet = inet_sk(sk);
78555+
78556+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78557+ return;
78558+
78559+ set = current->signal;
78560+
78561+ spin_lock_bh(&gr_conn_table_lock);
78562+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78563+ inet->inet_dport, inet->inet_sport);
78564+ if (unlikely(p != NULL)) {
78565+ set->curr_ip = p->curr_ip;
78566+ set->used_accept = 1;
78567+ gr_del_task_from_ip_table_nolock(p);
78568+ spin_unlock_bh(&gr_conn_table_lock);
78569+ return;
78570+ }
78571+ spin_unlock_bh(&gr_conn_table_lock);
78572+
78573+ set->curr_ip = inet->inet_daddr;
78574+ set->used_accept = 1;
78575+#endif
78576+ return;
78577+}
78578+
78579+int
78580+gr_handle_sock_all(const int family, const int type, const int protocol)
78581+{
78582+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78583+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
78584+ (family != AF_UNIX)) {
78585+ if (family == AF_INET)
78586+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
78587+ else
78588+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
78589+ return -EACCES;
78590+ }
78591+#endif
78592+ return 0;
78593+}
78594+
78595+int
78596+gr_handle_sock_server(const struct sockaddr *sck)
78597+{
78598+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78599+ if (grsec_enable_socket_server &&
78600+ in_group_p(grsec_socket_server_gid) &&
78601+ sck && (sck->sa_family != AF_UNIX) &&
78602+ (sck->sa_family != AF_LOCAL)) {
78603+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78604+ return -EACCES;
78605+ }
78606+#endif
78607+ return 0;
78608+}
78609+
78610+int
78611+gr_handle_sock_server_other(const struct sock *sck)
78612+{
78613+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78614+ if (grsec_enable_socket_server &&
78615+ in_group_p(grsec_socket_server_gid) &&
78616+ sck && (sck->sk_family != AF_UNIX) &&
78617+ (sck->sk_family != AF_LOCAL)) {
78618+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78619+ return -EACCES;
78620+ }
78621+#endif
78622+ return 0;
78623+}
78624+
78625+int
78626+gr_handle_sock_client(const struct sockaddr *sck)
78627+{
78628+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78629+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
78630+ sck && (sck->sa_family != AF_UNIX) &&
78631+ (sck->sa_family != AF_LOCAL)) {
78632+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
78633+ return -EACCES;
78634+ }
78635+#endif
78636+ return 0;
78637+}
78638diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
78639new file mode 100644
78640index 0000000..8159888
78641--- /dev/null
78642+++ b/grsecurity/grsec_sysctl.c
78643@@ -0,0 +1,479 @@
78644+#include <linux/kernel.h>
78645+#include <linux/sched.h>
78646+#include <linux/sysctl.h>
78647+#include <linux/grsecurity.h>
78648+#include <linux/grinternal.h>
78649+
78650+int
78651+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
78652+{
78653+#ifdef CONFIG_GRKERNSEC_SYSCTL
78654+ if (dirname == NULL || name == NULL)
78655+ return 0;
78656+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
78657+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
78658+ return -EACCES;
78659+ }
78660+#endif
78661+ return 0;
78662+}
78663+
78664+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
78665+static int __maybe_unused __read_only one = 1;
78666+#endif
78667+
78668+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
78669+ defined(CONFIG_GRKERNSEC_DENYUSB)
78670+struct ctl_table grsecurity_table[] = {
78671+#ifdef CONFIG_GRKERNSEC_SYSCTL
78672+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
78673+#ifdef CONFIG_GRKERNSEC_IO
78674+ {
78675+ .procname = "disable_priv_io",
78676+ .data = &grsec_disable_privio,
78677+ .maxlen = sizeof(int),
78678+ .mode = 0600,
78679+ .proc_handler = &proc_dointvec,
78680+ },
78681+#endif
78682+#endif
78683+#ifdef CONFIG_GRKERNSEC_LINK
78684+ {
78685+ .procname = "linking_restrictions",
78686+ .data = &grsec_enable_link,
78687+ .maxlen = sizeof(int),
78688+ .mode = 0600,
78689+ .proc_handler = &proc_dointvec,
78690+ },
78691+#endif
78692+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78693+ {
78694+ .procname = "enforce_symlinksifowner",
78695+ .data = &grsec_enable_symlinkown,
78696+ .maxlen = sizeof(int),
78697+ .mode = 0600,
78698+ .proc_handler = &proc_dointvec,
78699+ },
78700+ {
78701+ .procname = "symlinkown_gid",
78702+ .data = &grsec_symlinkown_gid,
78703+ .maxlen = sizeof(int),
78704+ .mode = 0600,
78705+ .proc_handler = &proc_dointvec,
78706+ },
78707+#endif
78708+#ifdef CONFIG_GRKERNSEC_BRUTE
78709+ {
78710+ .procname = "deter_bruteforce",
78711+ .data = &grsec_enable_brute,
78712+ .maxlen = sizeof(int),
78713+ .mode = 0600,
78714+ .proc_handler = &proc_dointvec,
78715+ },
78716+#endif
78717+#ifdef CONFIG_GRKERNSEC_FIFO
78718+ {
78719+ .procname = "fifo_restrictions",
78720+ .data = &grsec_enable_fifo,
78721+ .maxlen = sizeof(int),
78722+ .mode = 0600,
78723+ .proc_handler = &proc_dointvec,
78724+ },
78725+#endif
78726+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78727+ {
78728+ .procname = "ptrace_readexec",
78729+ .data = &grsec_enable_ptrace_readexec,
78730+ .maxlen = sizeof(int),
78731+ .mode = 0600,
78732+ .proc_handler = &proc_dointvec,
78733+ },
78734+#endif
78735+#ifdef CONFIG_GRKERNSEC_SETXID
78736+ {
78737+ .procname = "consistent_setxid",
78738+ .data = &grsec_enable_setxid,
78739+ .maxlen = sizeof(int),
78740+ .mode = 0600,
78741+ .proc_handler = &proc_dointvec,
78742+ },
78743+#endif
78744+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78745+ {
78746+ .procname = "ip_blackhole",
78747+ .data = &grsec_enable_blackhole,
78748+ .maxlen = sizeof(int),
78749+ .mode = 0600,
78750+ .proc_handler = &proc_dointvec,
78751+ },
78752+ {
78753+ .procname = "lastack_retries",
78754+ .data = &grsec_lastack_retries,
78755+ .maxlen = sizeof(int),
78756+ .mode = 0600,
78757+ .proc_handler = &proc_dointvec,
78758+ },
78759+#endif
78760+#ifdef CONFIG_GRKERNSEC_EXECLOG
78761+ {
78762+ .procname = "exec_logging",
78763+ .data = &grsec_enable_execlog,
78764+ .maxlen = sizeof(int),
78765+ .mode = 0600,
78766+ .proc_handler = &proc_dointvec,
78767+ },
78768+#endif
78769+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78770+ {
78771+ .procname = "rwxmap_logging",
78772+ .data = &grsec_enable_log_rwxmaps,
78773+ .maxlen = sizeof(int),
78774+ .mode = 0600,
78775+ .proc_handler = &proc_dointvec,
78776+ },
78777+#endif
78778+#ifdef CONFIG_GRKERNSEC_SIGNAL
78779+ {
78780+ .procname = "signal_logging",
78781+ .data = &grsec_enable_signal,
78782+ .maxlen = sizeof(int),
78783+ .mode = 0600,
78784+ .proc_handler = &proc_dointvec,
78785+ },
78786+#endif
78787+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78788+ {
78789+ .procname = "forkfail_logging",
78790+ .data = &grsec_enable_forkfail,
78791+ .maxlen = sizeof(int),
78792+ .mode = 0600,
78793+ .proc_handler = &proc_dointvec,
78794+ },
78795+#endif
78796+#ifdef CONFIG_GRKERNSEC_TIME
78797+ {
78798+ .procname = "timechange_logging",
78799+ .data = &grsec_enable_time,
78800+ .maxlen = sizeof(int),
78801+ .mode = 0600,
78802+ .proc_handler = &proc_dointvec,
78803+ },
78804+#endif
78805+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78806+ {
78807+ .procname = "chroot_deny_shmat",
78808+ .data = &grsec_enable_chroot_shmat,
78809+ .maxlen = sizeof(int),
78810+ .mode = 0600,
78811+ .proc_handler = &proc_dointvec,
78812+ },
78813+#endif
78814+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78815+ {
78816+ .procname = "chroot_deny_unix",
78817+ .data = &grsec_enable_chroot_unix,
78818+ .maxlen = sizeof(int),
78819+ .mode = 0600,
78820+ .proc_handler = &proc_dointvec,
78821+ },
78822+#endif
78823+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78824+ {
78825+ .procname = "chroot_deny_mount",
78826+ .data = &grsec_enable_chroot_mount,
78827+ .maxlen = sizeof(int),
78828+ .mode = 0600,
78829+ .proc_handler = &proc_dointvec,
78830+ },
78831+#endif
78832+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78833+ {
78834+ .procname = "chroot_deny_fchdir",
78835+ .data = &grsec_enable_chroot_fchdir,
78836+ .maxlen = sizeof(int),
78837+ .mode = 0600,
78838+ .proc_handler = &proc_dointvec,
78839+ },
78840+#endif
78841+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78842+ {
78843+ .procname = "chroot_deny_chroot",
78844+ .data = &grsec_enable_chroot_double,
78845+ .maxlen = sizeof(int),
78846+ .mode = 0600,
78847+ .proc_handler = &proc_dointvec,
78848+ },
78849+#endif
78850+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78851+ {
78852+ .procname = "chroot_deny_pivot",
78853+ .data = &grsec_enable_chroot_pivot,
78854+ .maxlen = sizeof(int),
78855+ .mode = 0600,
78856+ .proc_handler = &proc_dointvec,
78857+ },
78858+#endif
78859+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78860+ {
78861+ .procname = "chroot_enforce_chdir",
78862+ .data = &grsec_enable_chroot_chdir,
78863+ .maxlen = sizeof(int),
78864+ .mode = 0600,
78865+ .proc_handler = &proc_dointvec,
78866+ },
78867+#endif
78868+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78869+ {
78870+ .procname = "chroot_deny_chmod",
78871+ .data = &grsec_enable_chroot_chmod,
78872+ .maxlen = sizeof(int),
78873+ .mode = 0600,
78874+ .proc_handler = &proc_dointvec,
78875+ },
78876+#endif
78877+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78878+ {
78879+ .procname = "chroot_deny_mknod",
78880+ .data = &grsec_enable_chroot_mknod,
78881+ .maxlen = sizeof(int),
78882+ .mode = 0600,
78883+ .proc_handler = &proc_dointvec,
78884+ },
78885+#endif
78886+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78887+ {
78888+ .procname = "chroot_restrict_nice",
78889+ .data = &grsec_enable_chroot_nice,
78890+ .maxlen = sizeof(int),
78891+ .mode = 0600,
78892+ .proc_handler = &proc_dointvec,
78893+ },
78894+#endif
78895+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78896+ {
78897+ .procname = "chroot_execlog",
78898+ .data = &grsec_enable_chroot_execlog,
78899+ .maxlen = sizeof(int),
78900+ .mode = 0600,
78901+ .proc_handler = &proc_dointvec,
78902+ },
78903+#endif
78904+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78905+ {
78906+ .procname = "chroot_caps",
78907+ .data = &grsec_enable_chroot_caps,
78908+ .maxlen = sizeof(int),
78909+ .mode = 0600,
78910+ .proc_handler = &proc_dointvec,
78911+ },
78912+#endif
78913+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78914+ {
78915+ .procname = "chroot_deny_sysctl",
78916+ .data = &grsec_enable_chroot_sysctl,
78917+ .maxlen = sizeof(int),
78918+ .mode = 0600,
78919+ .proc_handler = &proc_dointvec,
78920+ },
78921+#endif
78922+#ifdef CONFIG_GRKERNSEC_TPE
78923+ {
78924+ .procname = "tpe",
78925+ .data = &grsec_enable_tpe,
78926+ .maxlen = sizeof(int),
78927+ .mode = 0600,
78928+ .proc_handler = &proc_dointvec,
78929+ },
78930+ {
78931+ .procname = "tpe_gid",
78932+ .data = &grsec_tpe_gid,
78933+ .maxlen = sizeof(int),
78934+ .mode = 0600,
78935+ .proc_handler = &proc_dointvec,
78936+ },
78937+#endif
78938+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78939+ {
78940+ .procname = "tpe_invert",
78941+ .data = &grsec_enable_tpe_invert,
78942+ .maxlen = sizeof(int),
78943+ .mode = 0600,
78944+ .proc_handler = &proc_dointvec,
78945+ },
78946+#endif
78947+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78948+ {
78949+ .procname = "tpe_restrict_all",
78950+ .data = &grsec_enable_tpe_all,
78951+ .maxlen = sizeof(int),
78952+ .mode = 0600,
78953+ .proc_handler = &proc_dointvec,
78954+ },
78955+#endif
78956+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78957+ {
78958+ .procname = "socket_all",
78959+ .data = &grsec_enable_socket_all,
78960+ .maxlen = sizeof(int),
78961+ .mode = 0600,
78962+ .proc_handler = &proc_dointvec,
78963+ },
78964+ {
78965+ .procname = "socket_all_gid",
78966+ .data = &grsec_socket_all_gid,
78967+ .maxlen = sizeof(int),
78968+ .mode = 0600,
78969+ .proc_handler = &proc_dointvec,
78970+ },
78971+#endif
78972+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78973+ {
78974+ .procname = "socket_client",
78975+ .data = &grsec_enable_socket_client,
78976+ .maxlen = sizeof(int),
78977+ .mode = 0600,
78978+ .proc_handler = &proc_dointvec,
78979+ },
78980+ {
78981+ .procname = "socket_client_gid",
78982+ .data = &grsec_socket_client_gid,
78983+ .maxlen = sizeof(int),
78984+ .mode = 0600,
78985+ .proc_handler = &proc_dointvec,
78986+ },
78987+#endif
78988+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78989+ {
78990+ .procname = "socket_server",
78991+ .data = &grsec_enable_socket_server,
78992+ .maxlen = sizeof(int),
78993+ .mode = 0600,
78994+ .proc_handler = &proc_dointvec,
78995+ },
78996+ {
78997+ .procname = "socket_server_gid",
78998+ .data = &grsec_socket_server_gid,
78999+ .maxlen = sizeof(int),
79000+ .mode = 0600,
79001+ .proc_handler = &proc_dointvec,
79002+ },
79003+#endif
79004+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
79005+ {
79006+ .procname = "audit_group",
79007+ .data = &grsec_enable_group,
79008+ .maxlen = sizeof(int),
79009+ .mode = 0600,
79010+ .proc_handler = &proc_dointvec,
79011+ },
79012+ {
79013+ .procname = "audit_gid",
79014+ .data = &grsec_audit_gid,
79015+ .maxlen = sizeof(int),
79016+ .mode = 0600,
79017+ .proc_handler = &proc_dointvec,
79018+ },
79019+#endif
79020+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
79021+ {
79022+ .procname = "audit_chdir",
79023+ .data = &grsec_enable_chdir,
79024+ .maxlen = sizeof(int),
79025+ .mode = 0600,
79026+ .proc_handler = &proc_dointvec,
79027+ },
79028+#endif
79029+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79030+ {
79031+ .procname = "audit_mount",
79032+ .data = &grsec_enable_mount,
79033+ .maxlen = sizeof(int),
79034+ .mode = 0600,
79035+ .proc_handler = &proc_dointvec,
79036+ },
79037+#endif
79038+#ifdef CONFIG_GRKERNSEC_DMESG
79039+ {
79040+ .procname = "dmesg",
79041+ .data = &grsec_enable_dmesg,
79042+ .maxlen = sizeof(int),
79043+ .mode = 0600,
79044+ .proc_handler = &proc_dointvec,
79045+ },
79046+#endif
79047+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79048+ {
79049+ .procname = "chroot_findtask",
79050+ .data = &grsec_enable_chroot_findtask,
79051+ .maxlen = sizeof(int),
79052+ .mode = 0600,
79053+ .proc_handler = &proc_dointvec,
79054+ },
79055+#endif
79056+#ifdef CONFIG_GRKERNSEC_RESLOG
79057+ {
79058+ .procname = "resource_logging",
79059+ .data = &grsec_resource_logging,
79060+ .maxlen = sizeof(int),
79061+ .mode = 0600,
79062+ .proc_handler = &proc_dointvec,
79063+ },
79064+#endif
79065+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79066+ {
79067+ .procname = "audit_ptrace",
79068+ .data = &grsec_enable_audit_ptrace,
79069+ .maxlen = sizeof(int),
79070+ .mode = 0600,
79071+ .proc_handler = &proc_dointvec,
79072+ },
79073+#endif
79074+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79075+ {
79076+ .procname = "harden_ptrace",
79077+ .data = &grsec_enable_harden_ptrace,
79078+ .maxlen = sizeof(int),
79079+ .mode = 0600,
79080+ .proc_handler = &proc_dointvec,
79081+ },
79082+#endif
79083+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
79084+ {
79085+ .procname = "harden_ipc",
79086+ .data = &grsec_enable_harden_ipc,
79087+ .maxlen = sizeof(int),
79088+ .mode = 0600,
79089+ .proc_handler = &proc_dointvec,
79090+ },
79091+#endif
79092+ {
79093+ .procname = "grsec_lock",
79094+ .data = &grsec_lock,
79095+ .maxlen = sizeof(int),
79096+ .mode = 0600,
79097+ .proc_handler = &proc_dointvec,
79098+ },
79099+#endif
79100+#ifdef CONFIG_GRKERNSEC_ROFS
79101+ {
79102+ .procname = "romount_protect",
79103+ .data = &grsec_enable_rofs,
79104+ .maxlen = sizeof(int),
79105+ .mode = 0600,
79106+ .proc_handler = &proc_dointvec_minmax,
79107+ .extra1 = &one,
79108+ .extra2 = &one,
79109+ },
79110+#endif
79111+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
79112+ {
79113+ .procname = "deny_new_usb",
79114+ .data = &grsec_deny_new_usb,
79115+ .maxlen = sizeof(int),
79116+ .mode = 0600,
79117+ .proc_handler = &proc_dointvec,
79118+ },
79119+#endif
79120+ { }
79121+};
79122+#endif
79123diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
79124new file mode 100644
79125index 0000000..61b514e
79126--- /dev/null
79127+++ b/grsecurity/grsec_time.c
79128@@ -0,0 +1,16 @@
79129+#include <linux/kernel.h>
79130+#include <linux/sched.h>
79131+#include <linux/grinternal.h>
79132+#include <linux/module.h>
79133+
79134+void
79135+gr_log_timechange(void)
79136+{
79137+#ifdef CONFIG_GRKERNSEC_TIME
79138+ if (grsec_enable_time)
79139+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
79140+#endif
79141+ return;
79142+}
79143+
79144+EXPORT_SYMBOL_GPL(gr_log_timechange);
79145diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
79146new file mode 100644
79147index 0000000..d1953de
79148--- /dev/null
79149+++ b/grsecurity/grsec_tpe.c
79150@@ -0,0 +1,78 @@
79151+#include <linux/kernel.h>
79152+#include <linux/sched.h>
79153+#include <linux/file.h>
79154+#include <linux/fs.h>
79155+#include <linux/grinternal.h>
79156+
79157+extern int gr_acl_tpe_check(void);
79158+
79159+int
79160+gr_tpe_allow(const struct file *file)
79161+{
79162+#ifdef CONFIG_GRKERNSEC
79163+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
79164+ struct inode *file_inode = file->f_path.dentry->d_inode;
79165+ const struct cred *cred = current_cred();
79166+ char *msg = NULL;
79167+ char *msg2 = NULL;
79168+
79169+ // never restrict root
79170+ if (gr_is_global_root(cred->uid))
79171+ return 1;
79172+
79173+ if (grsec_enable_tpe) {
79174+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79175+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
79176+ msg = "not being in trusted group";
79177+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
79178+ msg = "being in untrusted group";
79179+#else
79180+ if (in_group_p(grsec_tpe_gid))
79181+ msg = "being in untrusted group";
79182+#endif
79183+ }
79184+ if (!msg && gr_acl_tpe_check())
79185+ msg = "being in untrusted role";
79186+
79187+ // not in any affected group/role
79188+ if (!msg)
79189+ goto next_check;
79190+
79191+ if (gr_is_global_nonroot(inode->i_uid))
79192+ msg2 = "file in non-root-owned directory";
79193+ else if (inode->i_mode & S_IWOTH)
79194+ msg2 = "file in world-writable directory";
79195+ else if (inode->i_mode & S_IWGRP)
79196+ msg2 = "file in group-writable directory";
79197+ else if (file_inode->i_mode & S_IWOTH)
79198+ msg2 = "file is world-writable";
79199+
79200+ if (msg && msg2) {
79201+ char fullmsg[70] = {0};
79202+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79203+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79204+ return 0;
79205+ }
79206+ msg = NULL;
79207+next_check:
79208+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79209+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79210+ return 1;
79211+
79212+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79213+ msg = "directory not owned by user";
79214+ else if (inode->i_mode & S_IWOTH)
79215+ msg = "file in world-writable directory";
79216+ else if (inode->i_mode & S_IWGRP)
79217+ msg = "file in group-writable directory";
79218+ else if (file_inode->i_mode & S_IWOTH)
79219+ msg = "file is world-writable";
79220+
79221+ if (msg) {
79222+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79223+ return 0;
79224+ }
79225+#endif
79226+#endif
79227+ return 1;
79228+}
79229diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79230new file mode 100644
79231index 0000000..ae02d8e
79232--- /dev/null
79233+++ b/grsecurity/grsec_usb.c
79234@@ -0,0 +1,15 @@
79235+#include <linux/kernel.h>
79236+#include <linux/grinternal.h>
79237+#include <linux/module.h>
79238+
79239+int gr_handle_new_usb(void)
79240+{
79241+#ifdef CONFIG_GRKERNSEC_DENYUSB
79242+ if (grsec_deny_new_usb) {
79243+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79244+ return 1;
79245+ }
79246+#endif
79247+ return 0;
79248+}
79249+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79250diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79251new file mode 100644
79252index 0000000..158b330
79253--- /dev/null
79254+++ b/grsecurity/grsum.c
79255@@ -0,0 +1,64 @@
79256+#include <linux/err.h>
79257+#include <linux/kernel.h>
79258+#include <linux/sched.h>
79259+#include <linux/mm.h>
79260+#include <linux/scatterlist.h>
79261+#include <linux/crypto.h>
79262+#include <linux/gracl.h>
79263+
79264+
79265+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79266+#error "crypto and sha256 must be built into the kernel"
79267+#endif
79268+
79269+int
79270+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79271+{
79272+ struct crypto_hash *tfm;
79273+ struct hash_desc desc;
79274+ struct scatterlist sg[2];
79275+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79276+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79277+ unsigned long *sumptr = (unsigned long *)sum;
79278+ int cryptres;
79279+ int retval = 1;
79280+ volatile int mismatched = 0;
79281+ volatile int dummy = 0;
79282+ unsigned int i;
79283+
79284+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79285+ if (IS_ERR(tfm)) {
79286+ /* should never happen, since sha256 should be built in */
79287+ memset(entry->pw, 0, GR_PW_LEN);
79288+ return 1;
79289+ }
79290+
79291+ sg_init_table(sg, 2);
79292+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79293+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79294+
79295+ desc.tfm = tfm;
79296+ desc.flags = 0;
79297+
79298+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79299+ temp_sum);
79300+
79301+ memset(entry->pw, 0, GR_PW_LEN);
79302+
79303+ if (cryptres)
79304+ goto out;
79305+
79306+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79307+ if (sumptr[i] != tmpsumptr[i])
79308+ mismatched = 1;
79309+ else
79310+ dummy = 1; // waste a cycle
79311+
79312+ if (!mismatched)
79313+ retval = dummy - 1;
79314+
79315+out:
79316+ crypto_free_hash(tfm);
79317+
79318+ return retval;
79319+}
79320diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79321index 77ff547..181834f 100644
79322--- a/include/asm-generic/4level-fixup.h
79323+++ b/include/asm-generic/4level-fixup.h
79324@@ -13,8 +13,10 @@
79325 #define pmd_alloc(mm, pud, address) \
79326 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79327 NULL: pmd_offset(pud, address))
79328+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79329
79330 #define pud_alloc(mm, pgd, address) (pgd)
79331+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79332 #define pud_offset(pgd, start) (pgd)
79333 #define pud_none(pud) 0
79334 #define pud_bad(pud) 0
79335diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79336index b7babf0..1e4b4f1 100644
79337--- a/include/asm-generic/atomic-long.h
79338+++ b/include/asm-generic/atomic-long.h
79339@@ -22,6 +22,12 @@
79340
79341 typedef atomic64_t atomic_long_t;
79342
79343+#ifdef CONFIG_PAX_REFCOUNT
79344+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79345+#else
79346+typedef atomic64_t atomic_long_unchecked_t;
79347+#endif
79348+
79349 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79350
79351 static inline long atomic_long_read(atomic_long_t *l)
79352@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79353 return (long)atomic64_read(v);
79354 }
79355
79356+#ifdef CONFIG_PAX_REFCOUNT
79357+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79358+{
79359+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79360+
79361+ return (long)atomic64_read_unchecked(v);
79362+}
79363+#endif
79364+
79365 static inline void atomic_long_set(atomic_long_t *l, long i)
79366 {
79367 atomic64_t *v = (atomic64_t *)l;
79368@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79369 atomic64_set(v, i);
79370 }
79371
79372+#ifdef CONFIG_PAX_REFCOUNT
79373+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79374+{
79375+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79376+
79377+ atomic64_set_unchecked(v, i);
79378+}
79379+#endif
79380+
79381 static inline void atomic_long_inc(atomic_long_t *l)
79382 {
79383 atomic64_t *v = (atomic64_t *)l;
79384@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79385 atomic64_inc(v);
79386 }
79387
79388+#ifdef CONFIG_PAX_REFCOUNT
79389+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79390+{
79391+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79392+
79393+ atomic64_inc_unchecked(v);
79394+}
79395+#endif
79396+
79397 static inline void atomic_long_dec(atomic_long_t *l)
79398 {
79399 atomic64_t *v = (atomic64_t *)l;
79400@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79401 atomic64_dec(v);
79402 }
79403
79404+#ifdef CONFIG_PAX_REFCOUNT
79405+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79406+{
79407+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79408+
79409+ atomic64_dec_unchecked(v);
79410+}
79411+#endif
79412+
79413 static inline void atomic_long_add(long i, atomic_long_t *l)
79414 {
79415 atomic64_t *v = (atomic64_t *)l;
79416@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79417 atomic64_add(i, v);
79418 }
79419
79420+#ifdef CONFIG_PAX_REFCOUNT
79421+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79422+{
79423+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79424+
79425+ atomic64_add_unchecked(i, v);
79426+}
79427+#endif
79428+
79429 static inline void atomic_long_sub(long i, atomic_long_t *l)
79430 {
79431 atomic64_t *v = (atomic64_t *)l;
79432@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79433 atomic64_sub(i, v);
79434 }
79435
79436+#ifdef CONFIG_PAX_REFCOUNT
79437+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79438+{
79439+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79440+
79441+ atomic64_sub_unchecked(i, v);
79442+}
79443+#endif
79444+
79445 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79446 {
79447 atomic64_t *v = (atomic64_t *)l;
79448@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79449 return atomic64_add_negative(i, v);
79450 }
79451
79452-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79453+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79454 {
79455 atomic64_t *v = (atomic64_t *)l;
79456
79457 return (long)atomic64_add_return(i, v);
79458 }
79459
79460+#ifdef CONFIG_PAX_REFCOUNT
79461+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79462+{
79463+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79464+
79465+ return (long)atomic64_add_return_unchecked(i, v);
79466+}
79467+#endif
79468+
79469 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79470 {
79471 atomic64_t *v = (atomic64_t *)l;
79472@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79473 return (long)atomic64_inc_return(v);
79474 }
79475
79476+#ifdef CONFIG_PAX_REFCOUNT
79477+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79478+{
79479+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79480+
79481+ return (long)atomic64_inc_return_unchecked(v);
79482+}
79483+#endif
79484+
79485 static inline long atomic_long_dec_return(atomic_long_t *l)
79486 {
79487 atomic64_t *v = (atomic64_t *)l;
79488@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79489
79490 typedef atomic_t atomic_long_t;
79491
79492+#ifdef CONFIG_PAX_REFCOUNT
79493+typedef atomic_unchecked_t atomic_long_unchecked_t;
79494+#else
79495+typedef atomic_t atomic_long_unchecked_t;
79496+#endif
79497+
79498 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79499 static inline long atomic_long_read(atomic_long_t *l)
79500 {
79501@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79502 return (long)atomic_read(v);
79503 }
79504
79505+#ifdef CONFIG_PAX_REFCOUNT
79506+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79507+{
79508+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79509+
79510+ return (long)atomic_read_unchecked(v);
79511+}
79512+#endif
79513+
79514 static inline void atomic_long_set(atomic_long_t *l, long i)
79515 {
79516 atomic_t *v = (atomic_t *)l;
79517@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79518 atomic_set(v, i);
79519 }
79520
79521+#ifdef CONFIG_PAX_REFCOUNT
79522+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79523+{
79524+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79525+
79526+ atomic_set_unchecked(v, i);
79527+}
79528+#endif
79529+
79530 static inline void atomic_long_inc(atomic_long_t *l)
79531 {
79532 atomic_t *v = (atomic_t *)l;
79533@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79534 atomic_inc(v);
79535 }
79536
79537+#ifdef CONFIG_PAX_REFCOUNT
79538+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79539+{
79540+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79541+
79542+ atomic_inc_unchecked(v);
79543+}
79544+#endif
79545+
79546 static inline void atomic_long_dec(atomic_long_t *l)
79547 {
79548 atomic_t *v = (atomic_t *)l;
79549@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79550 atomic_dec(v);
79551 }
79552
79553+#ifdef CONFIG_PAX_REFCOUNT
79554+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79555+{
79556+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79557+
79558+ atomic_dec_unchecked(v);
79559+}
79560+#endif
79561+
79562 static inline void atomic_long_add(long i, atomic_long_t *l)
79563 {
79564 atomic_t *v = (atomic_t *)l;
79565@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79566 atomic_add(i, v);
79567 }
79568
79569+#ifdef CONFIG_PAX_REFCOUNT
79570+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79571+{
79572+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79573+
79574+ atomic_add_unchecked(i, v);
79575+}
79576+#endif
79577+
79578 static inline void atomic_long_sub(long i, atomic_long_t *l)
79579 {
79580 atomic_t *v = (atomic_t *)l;
79581@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79582 atomic_sub(i, v);
79583 }
79584
79585+#ifdef CONFIG_PAX_REFCOUNT
79586+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79587+{
79588+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79589+
79590+ atomic_sub_unchecked(i, v);
79591+}
79592+#endif
79593+
79594 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79595 {
79596 atomic_t *v = (atomic_t *)l;
79597@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79598 return atomic_add_negative(i, v);
79599 }
79600
79601-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79602+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79603 {
79604 atomic_t *v = (atomic_t *)l;
79605
79606 return (long)atomic_add_return(i, v);
79607 }
79608
79609+#ifdef CONFIG_PAX_REFCOUNT
79610+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79611+{
79612+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79613+
79614+ return (long)atomic_add_return_unchecked(i, v);
79615+}
79616+
79617+#endif
79618+
79619 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79620 {
79621 atomic_t *v = (atomic_t *)l;
79622@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79623 return (long)atomic_inc_return(v);
79624 }
79625
79626+#ifdef CONFIG_PAX_REFCOUNT
79627+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79628+{
79629+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79630+
79631+ return (long)atomic_inc_return_unchecked(v);
79632+}
79633+#endif
79634+
79635 static inline long atomic_long_dec_return(atomic_long_t *l)
79636 {
79637 atomic_t *v = (atomic_t *)l;
79638@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79639
79640 #endif /* BITS_PER_LONG == 64 */
79641
79642+#ifdef CONFIG_PAX_REFCOUNT
79643+static inline void pax_refcount_needs_these_functions(void)
79644+{
79645+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
79646+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
79647+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
79648+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
79649+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
79650+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
79651+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
79652+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
79653+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
79654+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
79655+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
79656+#ifdef CONFIG_X86
79657+ atomic_clear_mask_unchecked(0, NULL);
79658+ atomic_set_mask_unchecked(0, NULL);
79659+#endif
79660+
79661+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
79662+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
79663+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
79664+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
79665+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
79666+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
79667+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
79668+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
79669+}
79670+#else
79671+#define atomic_read_unchecked(v) atomic_read(v)
79672+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
79673+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
79674+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
79675+#define atomic_inc_unchecked(v) atomic_inc(v)
79676+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
79677+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
79678+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
79679+#define atomic_dec_unchecked(v) atomic_dec(v)
79680+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
79681+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
79682+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
79683+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
79684+
79685+#define atomic_long_read_unchecked(v) atomic_long_read(v)
79686+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
79687+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
79688+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
79689+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
79690+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
79691+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
79692+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
79693+#endif
79694+
79695 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
79696diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
79697index 9c79e76..9f7827d 100644
79698--- a/include/asm-generic/atomic.h
79699+++ b/include/asm-generic/atomic.h
79700@@ -154,7 +154,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
79701 * Atomically clears the bits set in @mask from @v
79702 */
79703 #ifndef atomic_clear_mask
79704-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
79705+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
79706 {
79707 unsigned long flags;
79708
79709diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
79710index b18ce4f..2ee2843 100644
79711--- a/include/asm-generic/atomic64.h
79712+++ b/include/asm-generic/atomic64.h
79713@@ -16,6 +16,8 @@ typedef struct {
79714 long long counter;
79715 } atomic64_t;
79716
79717+typedef atomic64_t atomic64_unchecked_t;
79718+
79719 #define ATOMIC64_INIT(i) { (i) }
79720
79721 extern long long atomic64_read(const atomic64_t *v);
79722@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
79723 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
79724 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
79725
79726+#define atomic64_read_unchecked(v) atomic64_read(v)
79727+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
79728+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
79729+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
79730+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
79731+#define atomic64_inc_unchecked(v) atomic64_inc(v)
79732+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
79733+#define atomic64_dec_unchecked(v) atomic64_dec(v)
79734+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
79735+
79736 #endif /* _ASM_GENERIC_ATOMIC64_H */
79737diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
79738index 1402fa8..025a736 100644
79739--- a/include/asm-generic/barrier.h
79740+++ b/include/asm-generic/barrier.h
79741@@ -74,7 +74,7 @@
79742 do { \
79743 compiletime_assert_atomic_type(*p); \
79744 smp_mb(); \
79745- ACCESS_ONCE(*p) = (v); \
79746+ ACCESS_ONCE_RW(*p) = (v); \
79747 } while (0)
79748
79749 #define smp_load_acquire(p) \
79750diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
79751index a60a7cc..0fe12f2 100644
79752--- a/include/asm-generic/bitops/__fls.h
79753+++ b/include/asm-generic/bitops/__fls.h
79754@@ -9,7 +9,7 @@
79755 *
79756 * Undefined if no set bit exists, so code should check against 0 first.
79757 */
79758-static __always_inline unsigned long __fls(unsigned long word)
79759+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
79760 {
79761 int num = BITS_PER_LONG - 1;
79762
79763diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
79764index 0576d1f..dad6c71 100644
79765--- a/include/asm-generic/bitops/fls.h
79766+++ b/include/asm-generic/bitops/fls.h
79767@@ -9,7 +9,7 @@
79768 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
79769 */
79770
79771-static __always_inline int fls(int x)
79772+static __always_inline int __intentional_overflow(-1) fls(int x)
79773 {
79774 int r = 32;
79775
79776diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
79777index b097cf8..3d40e14 100644
79778--- a/include/asm-generic/bitops/fls64.h
79779+++ b/include/asm-generic/bitops/fls64.h
79780@@ -15,7 +15,7 @@
79781 * at position 64.
79782 */
79783 #if BITS_PER_LONG == 32
79784-static __always_inline int fls64(__u64 x)
79785+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79786 {
79787 __u32 h = x >> 32;
79788 if (h)
79789@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
79790 return fls(x);
79791 }
79792 #elif BITS_PER_LONG == 64
79793-static __always_inline int fls64(__u64 x)
79794+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79795 {
79796 if (x == 0)
79797 return 0;
79798diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
79799index 1bfcfe5..e04c5c9 100644
79800--- a/include/asm-generic/cache.h
79801+++ b/include/asm-generic/cache.h
79802@@ -6,7 +6,7 @@
79803 * cache lines need to provide their own cache.h.
79804 */
79805
79806-#define L1_CACHE_SHIFT 5
79807-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
79808+#define L1_CACHE_SHIFT 5UL
79809+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
79810
79811 #endif /* __ASM_GENERIC_CACHE_H */
79812diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
79813index 0d68a1e..b74a761 100644
79814--- a/include/asm-generic/emergency-restart.h
79815+++ b/include/asm-generic/emergency-restart.h
79816@@ -1,7 +1,7 @@
79817 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
79818 #define _ASM_GENERIC_EMERGENCY_RESTART_H
79819
79820-static inline void machine_emergency_restart(void)
79821+static inline __noreturn void machine_emergency_restart(void)
79822 {
79823 machine_restart(NULL);
79824 }
79825diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
79826index 975e1cc..0b8a083 100644
79827--- a/include/asm-generic/io.h
79828+++ b/include/asm-generic/io.h
79829@@ -289,7 +289,7 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
79830 * These are pretty trivial
79831 */
79832 #ifndef virt_to_phys
79833-static inline unsigned long virt_to_phys(volatile void *address)
79834+static inline unsigned long __intentional_overflow(-1) virt_to_phys(volatile void *address)
79835 {
79836 return __pa((unsigned long)address);
79837 }
79838diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
79839index 90f99c7..00ce236 100644
79840--- a/include/asm-generic/kmap_types.h
79841+++ b/include/asm-generic/kmap_types.h
79842@@ -2,9 +2,9 @@
79843 #define _ASM_GENERIC_KMAP_TYPES_H
79844
79845 #ifdef __WITH_KM_FENCE
79846-# define KM_TYPE_NR 41
79847+# define KM_TYPE_NR 42
79848 #else
79849-# define KM_TYPE_NR 20
79850+# define KM_TYPE_NR 21
79851 #endif
79852
79853 #endif
79854diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
79855index 9ceb03b..62b0b8f 100644
79856--- a/include/asm-generic/local.h
79857+++ b/include/asm-generic/local.h
79858@@ -23,24 +23,37 @@ typedef struct
79859 atomic_long_t a;
79860 } local_t;
79861
79862+typedef struct {
79863+ atomic_long_unchecked_t a;
79864+} local_unchecked_t;
79865+
79866 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
79867
79868 #define local_read(l) atomic_long_read(&(l)->a)
79869+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
79870 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
79871+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
79872 #define local_inc(l) atomic_long_inc(&(l)->a)
79873+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
79874 #define local_dec(l) atomic_long_dec(&(l)->a)
79875+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
79876 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
79877+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
79878 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
79879+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
79880
79881 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
79882 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
79883 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
79884 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
79885 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
79886+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
79887 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
79888 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
79889+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
79890
79891 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79892+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79893 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
79894 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
79895 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
79896diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
79897index 725612b..9cc513a 100644
79898--- a/include/asm-generic/pgtable-nopmd.h
79899+++ b/include/asm-generic/pgtable-nopmd.h
79900@@ -1,14 +1,19 @@
79901 #ifndef _PGTABLE_NOPMD_H
79902 #define _PGTABLE_NOPMD_H
79903
79904-#ifndef __ASSEMBLY__
79905-
79906 #include <asm-generic/pgtable-nopud.h>
79907
79908-struct mm_struct;
79909-
79910 #define __PAGETABLE_PMD_FOLDED
79911
79912+#define PMD_SHIFT PUD_SHIFT
79913+#define PTRS_PER_PMD 1
79914+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
79915+#define PMD_MASK (~(PMD_SIZE-1))
79916+
79917+#ifndef __ASSEMBLY__
79918+
79919+struct mm_struct;
79920+
79921 /*
79922 * Having the pmd type consist of a pud gets the size right, and allows
79923 * us to conceptually access the pud entry that this pmd is folded into
79924@@ -16,11 +21,6 @@ struct mm_struct;
79925 */
79926 typedef struct { pud_t pud; } pmd_t;
79927
79928-#define PMD_SHIFT PUD_SHIFT
79929-#define PTRS_PER_PMD 1
79930-#define PMD_SIZE (1UL << PMD_SHIFT)
79931-#define PMD_MASK (~(PMD_SIZE-1))
79932-
79933 /*
79934 * The "pud_xxx()" functions here are trivial for a folded two-level
79935 * setup: the pmd is never bad, and a pmd always exists (as it's folded
79936diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
79937index 810431d..0ec4804f 100644
79938--- a/include/asm-generic/pgtable-nopud.h
79939+++ b/include/asm-generic/pgtable-nopud.h
79940@@ -1,10 +1,15 @@
79941 #ifndef _PGTABLE_NOPUD_H
79942 #define _PGTABLE_NOPUD_H
79943
79944-#ifndef __ASSEMBLY__
79945-
79946 #define __PAGETABLE_PUD_FOLDED
79947
79948+#define PUD_SHIFT PGDIR_SHIFT
79949+#define PTRS_PER_PUD 1
79950+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
79951+#define PUD_MASK (~(PUD_SIZE-1))
79952+
79953+#ifndef __ASSEMBLY__
79954+
79955 /*
79956 * Having the pud type consist of a pgd gets the size right, and allows
79957 * us to conceptually access the pgd entry that this pud is folded into
79958@@ -12,11 +17,6 @@
79959 */
79960 typedef struct { pgd_t pgd; } pud_t;
79961
79962-#define PUD_SHIFT PGDIR_SHIFT
79963-#define PTRS_PER_PUD 1
79964-#define PUD_SIZE (1UL << PUD_SHIFT)
79965-#define PUD_MASK (~(PUD_SIZE-1))
79966-
79967 /*
79968 * The "pgd_xxx()" functions here are trivial for a folded two-level
79969 * setup: the pud is never bad, and a pud always exists (as it's folded
79970@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
79971 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
79972
79973 #define pgd_populate(mm, pgd, pud) do { } while (0)
79974+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
79975 /*
79976 * (puds are folded into pgds so this doesn't get actually called,
79977 * but the define is needed for a generic inline function.)
79978diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
79979index 53b2acc..f4568e7 100644
79980--- a/include/asm-generic/pgtable.h
79981+++ b/include/asm-generic/pgtable.h
79982@@ -819,6 +819,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
79983 }
79984 #endif /* CONFIG_NUMA_BALANCING */
79985
79986+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
79987+#ifdef CONFIG_PAX_KERNEXEC
79988+#error KERNEXEC requires pax_open_kernel
79989+#else
79990+static inline unsigned long pax_open_kernel(void) { return 0; }
79991+#endif
79992+#endif
79993+
79994+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
79995+#ifdef CONFIG_PAX_KERNEXEC
79996+#error KERNEXEC requires pax_close_kernel
79997+#else
79998+static inline unsigned long pax_close_kernel(void) { return 0; }
79999+#endif
80000+#endif
80001+
80002 #endif /* CONFIG_MMU */
80003
80004 #endif /* !__ASSEMBLY__ */
80005diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
80006index 72d8803..cb9749c 100644
80007--- a/include/asm-generic/uaccess.h
80008+++ b/include/asm-generic/uaccess.h
80009@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
80010 return __clear_user(to, n);
80011 }
80012
80013+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
80014+#ifdef CONFIG_PAX_MEMORY_UDEREF
80015+#error UDEREF requires pax_open_userland
80016+#else
80017+static inline unsigned long pax_open_userland(void) { return 0; }
80018+#endif
80019+#endif
80020+
80021+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
80022+#ifdef CONFIG_PAX_MEMORY_UDEREF
80023+#error UDEREF requires pax_close_userland
80024+#else
80025+static inline unsigned long pax_close_userland(void) { return 0; }
80026+#endif
80027+#endif
80028+
80029 #endif /* __ASM_GENERIC_UACCESS_H */
80030diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
80031index 5ba0360..e85c934 100644
80032--- a/include/asm-generic/vmlinux.lds.h
80033+++ b/include/asm-generic/vmlinux.lds.h
80034@@ -231,6 +231,7 @@
80035 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
80036 VMLINUX_SYMBOL(__start_rodata) = .; \
80037 *(.rodata) *(.rodata.*) \
80038+ *(.data..read_only) \
80039 *(__vermagic) /* Kernel version magic */ \
80040 . = ALIGN(8); \
80041 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
80042@@ -722,17 +723,18 @@
80043 * section in the linker script will go there too. @phdr should have
80044 * a leading colon.
80045 *
80046- * Note that this macros defines __per_cpu_load as an absolute symbol.
80047+ * Note that this macros defines per_cpu_load as an absolute symbol.
80048 * If there is no need to put the percpu section at a predetermined
80049 * address, use PERCPU_SECTION.
80050 */
80051 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
80052- VMLINUX_SYMBOL(__per_cpu_load) = .; \
80053- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
80054+ per_cpu_load = .; \
80055+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
80056 - LOAD_OFFSET) { \
80057+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
80058 PERCPU_INPUT(cacheline) \
80059 } phdr \
80060- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
80061+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
80062
80063 /**
80064 * PERCPU_SECTION - define output section for percpu area, simple version
80065diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
80066index 623a59c..1e79ab9 100644
80067--- a/include/crypto/algapi.h
80068+++ b/include/crypto/algapi.h
80069@@ -34,7 +34,7 @@ struct crypto_type {
80070 unsigned int maskclear;
80071 unsigned int maskset;
80072 unsigned int tfmsize;
80073-};
80074+} __do_const;
80075
80076 struct crypto_instance {
80077 struct crypto_alg alg;
80078diff --git a/include/drm/drmP.h b/include/drm/drmP.h
80079index 1968907..7d9ed9f 100644
80080--- a/include/drm/drmP.h
80081+++ b/include/drm/drmP.h
80082@@ -68,6 +68,7 @@
80083 #include <linux/workqueue.h>
80084 #include <linux/poll.h>
80085 #include <asm/pgalloc.h>
80086+#include <asm/local.h>
80087 #include <drm/drm.h>
80088 #include <drm/drm_sarea.h>
80089 #include <drm/drm_vma_manager.h>
80090@@ -260,10 +261,12 @@ do { \
80091 * \param cmd command.
80092 * \param arg argument.
80093 */
80094-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
80095+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
80096+ struct drm_file *file_priv);
80097+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
80098 struct drm_file *file_priv);
80099
80100-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80101+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
80102 unsigned long arg);
80103
80104 #define DRM_IOCTL_NR(n) _IOC_NR(n)
80105@@ -279,10 +282,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80106 struct drm_ioctl_desc {
80107 unsigned int cmd;
80108 int flags;
80109- drm_ioctl_t *func;
80110+ drm_ioctl_t func;
80111 unsigned int cmd_drv;
80112 const char *name;
80113-};
80114+} __do_const;
80115
80116 /**
80117 * Creates a driver or general drm_ioctl_desc array entry for the given
80118@@ -946,7 +949,8 @@ struct drm_info_list {
80119 int (*show)(struct seq_file*, void*); /** show callback */
80120 u32 driver_features; /**< Required driver features for this entry */
80121 void *data;
80122-};
80123+} __do_const;
80124+typedef struct drm_info_list __no_const drm_info_list_no_const;
80125
80126 /**
80127 * debugfs node structure. This structure represents a debugfs file.
80128@@ -1030,7 +1034,7 @@ struct drm_device {
80129
80130 /** \name Usage Counters */
80131 /*@{ */
80132- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80133+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80134 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
80135 int buf_use; /**< Buffers in use -- cannot alloc */
80136 atomic_t buf_alloc; /**< Buffer allocation in progress */
80137diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
80138index a3d75fe..6802f9c 100644
80139--- a/include/drm/drm_crtc_helper.h
80140+++ b/include/drm/drm_crtc_helper.h
80141@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
80142 struct drm_connector *connector);
80143 /* disable encoder when not in use - more explicit than dpms off */
80144 void (*disable)(struct drm_encoder *encoder);
80145-};
80146+} __no_const;
80147
80148 /**
80149 * drm_connector_helper_funcs - helper operations for connectors
80150diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
80151index a70d456..6ea07cd 100644
80152--- a/include/drm/i915_pciids.h
80153+++ b/include/drm/i915_pciids.h
80154@@ -37,7 +37,7 @@
80155 */
80156 #define INTEL_VGA_DEVICE(id, info) { \
80157 0x8086, id, \
80158- ~0, ~0, \
80159+ PCI_ANY_ID, PCI_ANY_ID, \
80160 0x030000, 0xff0000, \
80161 (unsigned long) info }
80162
80163diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
80164index 72dcbe8..8db58d7 100644
80165--- a/include/drm/ttm/ttm_memory.h
80166+++ b/include/drm/ttm/ttm_memory.h
80167@@ -48,7 +48,7 @@
80168
80169 struct ttm_mem_shrink {
80170 int (*do_shrink) (struct ttm_mem_shrink *);
80171-};
80172+} __no_const;
80173
80174 /**
80175 * struct ttm_mem_global - Global memory accounting structure.
80176diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
80177index 49a8284..9643967 100644
80178--- a/include/drm/ttm/ttm_page_alloc.h
80179+++ b/include/drm/ttm/ttm_page_alloc.h
80180@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
80181 */
80182 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80183
80184+struct device;
80185 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80186 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80187
80188diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
80189index 4b840e8..155d235 100644
80190--- a/include/keys/asymmetric-subtype.h
80191+++ b/include/keys/asymmetric-subtype.h
80192@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
80193 /* Verify the signature on a key of this subtype (optional) */
80194 int (*verify_signature)(const struct key *key,
80195 const struct public_key_signature *sig);
80196-};
80197+} __do_const;
80198
80199 /**
80200 * asymmetric_key_subtype - Get the subtype from an asymmetric key
80201diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
80202index c1da539..1dcec55 100644
80203--- a/include/linux/atmdev.h
80204+++ b/include/linux/atmdev.h
80205@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
80206 #endif
80207
80208 struct k_atm_aal_stats {
80209-#define __HANDLE_ITEM(i) atomic_t i
80210+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80211 __AAL_STAT_ITEMS
80212 #undef __HANDLE_ITEM
80213 };
80214@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
80215 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
80216 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
80217 struct module *owner;
80218-};
80219+} __do_const ;
80220
80221 struct atmphy_ops {
80222 int (*start)(struct atm_dev *dev);
80223diff --git a/include/linux/audit.h b/include/linux/audit.h
80224index 22cfddb..1514eef 100644
80225--- a/include/linux/audit.h
80226+++ b/include/linux/audit.h
80227@@ -86,7 +86,7 @@ extern unsigned compat_dir_class[];
80228 extern unsigned compat_chattr_class[];
80229 extern unsigned compat_signal_class[];
80230
80231-extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall);
80232+extern int audit_classify_compat_syscall(int abi, unsigned syscall);
80233
80234 /* audit_names->type values */
80235 #define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
80236@@ -210,7 +210,7 @@ static inline void audit_ptrace(struct task_struct *t)
80237 extern unsigned int audit_serial(void);
80238 extern int auditsc_get_stamp(struct audit_context *ctx,
80239 struct timespec *t, unsigned int *serial);
80240-extern int audit_set_loginuid(kuid_t loginuid);
80241+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80242
80243 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80244 {
80245diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80246index 61f29e5..e67c658 100644
80247--- a/include/linux/binfmts.h
80248+++ b/include/linux/binfmts.h
80249@@ -44,7 +44,7 @@ struct linux_binprm {
80250 unsigned interp_flags;
80251 unsigned interp_data;
80252 unsigned long loader, exec;
80253-};
80254+} __randomize_layout;
80255
80256 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80257 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80258@@ -73,8 +73,10 @@ struct linux_binfmt {
80259 int (*load_binary)(struct linux_binprm *);
80260 int (*load_shlib)(struct file *);
80261 int (*core_dump)(struct coredump_params *cprm);
80262+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80263+ void (*handle_mmap)(struct file *);
80264 unsigned long min_coredump; /* minimal dump size */
80265-};
80266+} __do_const __randomize_layout;
80267
80268 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80269
80270diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80271index 38b5f5c..645018c 100644
80272--- a/include/linux/bitops.h
80273+++ b/include/linux/bitops.h
80274@@ -125,7 +125,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80275 * @word: value to rotate
80276 * @shift: bits to roll
80277 */
80278-static inline __u32 rol32(__u32 word, unsigned int shift)
80279+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80280 {
80281 return (word << shift) | (word >> (32 - shift));
80282 }
80283@@ -135,7 +135,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80284 * @word: value to rotate
80285 * @shift: bits to roll
80286 */
80287-static inline __u32 ror32(__u32 word, unsigned int shift)
80288+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80289 {
80290 return (word >> shift) | (word << (32 - shift));
80291 }
80292@@ -191,7 +191,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80293 return (__s32)(value << shift) >> shift;
80294 }
80295
80296-static inline unsigned fls_long(unsigned long l)
80297+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80298 {
80299 if (sizeof(l) == 4)
80300 return fls(l);
80301diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80302index f2057ff8..59dfa2d 100644
80303--- a/include/linux/blkdev.h
80304+++ b/include/linux/blkdev.h
80305@@ -1625,7 +1625,7 @@ struct block_device_operations {
80306 /* this callback is with swap_lock and sometimes page table lock held */
80307 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80308 struct module *owner;
80309-};
80310+} __do_const;
80311
80312 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80313 unsigned long);
80314diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80315index afc1343..9735539 100644
80316--- a/include/linux/blktrace_api.h
80317+++ b/include/linux/blktrace_api.h
80318@@ -25,7 +25,7 @@ struct blk_trace {
80319 struct dentry *dropped_file;
80320 struct dentry *msg_file;
80321 struct list_head running_list;
80322- atomic_t dropped;
80323+ atomic_unchecked_t dropped;
80324 };
80325
80326 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80327diff --git a/include/linux/cache.h b/include/linux/cache.h
80328index 17e7e82..1d7da26 100644
80329--- a/include/linux/cache.h
80330+++ b/include/linux/cache.h
80331@@ -16,6 +16,14 @@
80332 #define __read_mostly
80333 #endif
80334
80335+#ifndef __read_only
80336+#ifdef CONFIG_PAX_KERNEXEC
80337+#error KERNEXEC requires __read_only
80338+#else
80339+#define __read_only __read_mostly
80340+#endif
80341+#endif
80342+
80343 #ifndef ____cacheline_aligned
80344 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80345 #endif
80346diff --git a/include/linux/capability.h b/include/linux/capability.h
80347index aa93e5e..985a1b0 100644
80348--- a/include/linux/capability.h
80349+++ b/include/linux/capability.h
80350@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80351 extern bool capable(int cap);
80352 extern bool ns_capable(struct user_namespace *ns, int cap);
80353 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80354+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80355 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80356+extern bool capable_nolog(int cap);
80357+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80358
80359 /* audit system wants to get cap info from files as well */
80360 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80361
80362+extern int is_privileged_binary(const struct dentry *dentry);
80363+
80364 #endif /* !_LINUX_CAPABILITY_H */
80365diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80366index 8609d57..86e4d79 100644
80367--- a/include/linux/cdrom.h
80368+++ b/include/linux/cdrom.h
80369@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80370
80371 /* driver specifications */
80372 const int capability; /* capability flags */
80373- int n_minors; /* number of active minor devices */
80374 /* handle uniform packets for scsi type devices (scsi,atapi) */
80375 int (*generic_packet) (struct cdrom_device_info *,
80376 struct packet_command *);
80377diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80378index 4ce9056..86caac6 100644
80379--- a/include/linux/cleancache.h
80380+++ b/include/linux/cleancache.h
80381@@ -31,7 +31,7 @@ struct cleancache_ops {
80382 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80383 void (*invalidate_inode)(int, struct cleancache_filekey);
80384 void (*invalidate_fs)(int);
80385-};
80386+} __no_const;
80387
80388 extern struct cleancache_ops *
80389 cleancache_register_ops(struct cleancache_ops *ops);
80390diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80391index da6996e..9d13d5f 100644
80392--- a/include/linux/clk-provider.h
80393+++ b/include/linux/clk-provider.h
80394@@ -180,6 +180,7 @@ struct clk_ops {
80395 void (*init)(struct clk_hw *hw);
80396 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80397 };
80398+typedef struct clk_ops __no_const clk_ops_no_const;
80399
80400 /**
80401 * struct clk_init_data - holds init data that's common to all clocks and is
80402diff --git a/include/linux/compat.h b/include/linux/compat.h
80403index e649426..a74047b 100644
80404--- a/include/linux/compat.h
80405+++ b/include/linux/compat.h
80406@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80407 compat_size_t __user *len_ptr);
80408
80409 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80410-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80411+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80412 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80413 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80414 compat_ssize_t msgsz, int msgflg);
80415@@ -436,7 +436,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80416 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80417 compat_ulong_t addr, compat_ulong_t data);
80418 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80419- compat_long_t addr, compat_long_t data);
80420+ compat_ulong_t addr, compat_ulong_t data);
80421
80422 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80423 /*
80424diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80425index 2507fd2..55203f8 100644
80426--- a/include/linux/compiler-gcc4.h
80427+++ b/include/linux/compiler-gcc4.h
80428@@ -39,9 +39,34 @@
80429 # define __compiletime_warning(message) __attribute__((warning(message)))
80430 # define __compiletime_error(message) __attribute__((error(message)))
80431 #endif /* __CHECKER__ */
80432+
80433+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80434+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80435+#define __bos0(ptr) __bos((ptr), 0)
80436+#define __bos1(ptr) __bos((ptr), 1)
80437 #endif /* GCC_VERSION >= 40300 */
80438
80439 #if GCC_VERSION >= 40500
80440+
80441+#ifdef RANDSTRUCT_PLUGIN
80442+#define __randomize_layout __attribute__((randomize_layout))
80443+#define __no_randomize_layout __attribute__((no_randomize_layout))
80444+#endif
80445+
80446+#ifdef CONSTIFY_PLUGIN
80447+#define __no_const __attribute__((no_const))
80448+#define __do_const __attribute__((do_const))
80449+#endif
80450+
80451+#ifdef SIZE_OVERFLOW_PLUGIN
80452+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80453+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80454+#endif
80455+
80456+#ifdef LATENT_ENTROPY_PLUGIN
80457+#define __latent_entropy __attribute__((latent_entropy))
80458+#endif
80459+
80460 /*
80461 * Mark a position in code as unreachable. This can be used to
80462 * suppress control flow warnings after asm blocks that transfer
80463diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80464index d5ad7b1..3b74638 100644
80465--- a/include/linux/compiler.h
80466+++ b/include/linux/compiler.h
80467@@ -5,11 +5,14 @@
80468
80469 #ifdef __CHECKER__
80470 # define __user __attribute__((noderef, address_space(1)))
80471+# define __force_user __force __user
80472 # define __kernel __attribute__((address_space(0)))
80473+# define __force_kernel __force __kernel
80474 # define __safe __attribute__((safe))
80475 # define __force __attribute__((force))
80476 # define __nocast __attribute__((nocast))
80477 # define __iomem __attribute__((noderef, address_space(2)))
80478+# define __force_iomem __force __iomem
80479 # define __must_hold(x) __attribute__((context(x,1,1)))
80480 # define __acquires(x) __attribute__((context(x,0,1)))
80481 # define __releases(x) __attribute__((context(x,1,0)))
80482@@ -17,20 +20,37 @@
80483 # define __release(x) __context__(x,-1)
80484 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80485 # define __percpu __attribute__((noderef, address_space(3)))
80486+# define __force_percpu __force __percpu
80487 #ifdef CONFIG_SPARSE_RCU_POINTER
80488 # define __rcu __attribute__((noderef, address_space(4)))
80489+# define __force_rcu __force __rcu
80490 #else
80491 # define __rcu
80492+# define __force_rcu
80493 #endif
80494 extern void __chk_user_ptr(const volatile void __user *);
80495 extern void __chk_io_ptr(const volatile void __iomem *);
80496 #else
80497-# define __user
80498-# define __kernel
80499+# ifdef CHECKER_PLUGIN
80500+//# define __user
80501+//# define __force_user
80502+//# define __kernel
80503+//# define __force_kernel
80504+# else
80505+# ifdef STRUCTLEAK_PLUGIN
80506+# define __user __attribute__((user))
80507+# else
80508+# define __user
80509+# endif
80510+# define __force_user
80511+# define __kernel
80512+# define __force_kernel
80513+# endif
80514 # define __safe
80515 # define __force
80516 # define __nocast
80517 # define __iomem
80518+# define __force_iomem
80519 # define __chk_user_ptr(x) (void)0
80520 # define __chk_io_ptr(x) (void)0
80521 # define __builtin_warning(x, y...) (1)
80522@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80523 # define __release(x) (void)0
80524 # define __cond_lock(x,c) (c)
80525 # define __percpu
80526+# define __force_percpu
80527 # define __rcu
80528+# define __force_rcu
80529 #endif
80530
80531 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80532@@ -286,6 +308,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
80533 # define __attribute_const__ /* unimplemented */
80534 #endif
80535
80536+#ifndef __randomize_layout
80537+# define __randomize_layout
80538+#endif
80539+
80540+#ifndef __no_randomize_layout
80541+# define __no_randomize_layout
80542+#endif
80543+
80544+#ifndef __no_const
80545+# define __no_const
80546+#endif
80547+
80548+#ifndef __do_const
80549+# define __do_const
80550+#endif
80551+
80552+#ifndef __size_overflow
80553+# define __size_overflow(...)
80554+#endif
80555+
80556+#ifndef __intentional_overflow
80557+# define __intentional_overflow(...)
80558+#endif
80559+
80560+#ifndef __latent_entropy
80561+# define __latent_entropy
80562+#endif
80563+
80564 /*
80565 * Tell gcc if a function is cold. The compiler will assume any path
80566 * directly leading to the call is unlikely.
80567@@ -295,6 +345,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
80568 #define __cold
80569 #endif
80570
80571+#ifndef __alloc_size
80572+#define __alloc_size(...)
80573+#endif
80574+
80575+#ifndef __bos
80576+#define __bos(ptr, arg)
80577+#endif
80578+
80579+#ifndef __bos0
80580+#define __bos0(ptr)
80581+#endif
80582+
80583+#ifndef __bos1
80584+#define __bos1(ptr)
80585+#endif
80586+
80587 /* Simple shorthand for a section definition */
80588 #ifndef __section
80589 # define __section(S) __attribute__ ((__section__(#S)))
80590@@ -378,7 +444,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
80591 * use is to mediate communication between process-level code and irq/NMI
80592 * handlers, all running on the same CPU.
80593 */
80594-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
80595+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
80596+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
80597
80598 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
80599 #ifdef CONFIG_KPROBES
80600diff --git a/include/linux/completion.h b/include/linux/completion.h
80601index 5d5aaae..0ea9b84 100644
80602--- a/include/linux/completion.h
80603+++ b/include/linux/completion.h
80604@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
80605
80606 extern void wait_for_completion(struct completion *);
80607 extern void wait_for_completion_io(struct completion *);
80608-extern int wait_for_completion_interruptible(struct completion *x);
80609-extern int wait_for_completion_killable(struct completion *x);
80610+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
80611+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
80612 extern unsigned long wait_for_completion_timeout(struct completion *x,
80613- unsigned long timeout);
80614+ unsigned long timeout) __intentional_overflow(-1);
80615 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
80616- unsigned long timeout);
80617+ unsigned long timeout) __intentional_overflow(-1);
80618 extern long wait_for_completion_interruptible_timeout(
80619- struct completion *x, unsigned long timeout);
80620+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80621 extern long wait_for_completion_killable_timeout(
80622- struct completion *x, unsigned long timeout);
80623+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80624 extern bool try_wait_for_completion(struct completion *x);
80625 extern bool completion_done(struct completion *x);
80626
80627diff --git a/include/linux/configfs.h b/include/linux/configfs.h
80628index 34025df..d94bbbc 100644
80629--- a/include/linux/configfs.h
80630+++ b/include/linux/configfs.h
80631@@ -125,7 +125,7 @@ struct configfs_attribute {
80632 const char *ca_name;
80633 struct module *ca_owner;
80634 umode_t ca_mode;
80635-};
80636+} __do_const;
80637
80638 /*
80639 * Users often need to create attribute structures for their configurable
80640diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
80641index 7d1955a..d86a3ca 100644
80642--- a/include/linux/cpufreq.h
80643+++ b/include/linux/cpufreq.h
80644@@ -203,6 +203,7 @@ struct global_attr {
80645 ssize_t (*store)(struct kobject *a, struct attribute *b,
80646 const char *c, size_t count);
80647 };
80648+typedef struct global_attr __no_const global_attr_no_const;
80649
80650 #define define_one_global_ro(_name) \
80651 static struct global_attr _name = \
80652@@ -269,7 +270,7 @@ struct cpufreq_driver {
80653 bool boost_supported;
80654 bool boost_enabled;
80655 int (*set_boost) (int state);
80656-};
80657+} __do_const;
80658
80659 /* flags */
80660 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
80661diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
80662index 25e0df6..952dffd 100644
80663--- a/include/linux/cpuidle.h
80664+++ b/include/linux/cpuidle.h
80665@@ -50,7 +50,8 @@ struct cpuidle_state {
80666 int index);
80667
80668 int (*enter_dead) (struct cpuidle_device *dev, int index);
80669-};
80670+} __do_const;
80671+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
80672
80673 /* Idle State Flags */
80674 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
80675@@ -209,7 +210,7 @@ struct cpuidle_governor {
80676 void (*reflect) (struct cpuidle_device *dev, int index);
80677
80678 struct module *owner;
80679-};
80680+} __do_const;
80681
80682 #ifdef CONFIG_CPU_IDLE
80683 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
80684diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
80685index 2997af6..424ddc1 100644
80686--- a/include/linux/cpumask.h
80687+++ b/include/linux/cpumask.h
80688@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80689 }
80690
80691 /* Valid inputs for n are -1 and 0. */
80692-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80693+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80694 {
80695 return n+1;
80696 }
80697
80698-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80699+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80700 {
80701 return n+1;
80702 }
80703
80704-static inline unsigned int cpumask_next_and(int n,
80705+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
80706 const struct cpumask *srcp,
80707 const struct cpumask *andp)
80708 {
80709@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80710 *
80711 * Returns >= nr_cpu_ids if no further cpus set.
80712 */
80713-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80714+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80715 {
80716 /* -1 is a legal arg here. */
80717 if (n != -1)
80718@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80719 *
80720 * Returns >= nr_cpu_ids if no further cpus unset.
80721 */
80722-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80723+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80724 {
80725 /* -1 is a legal arg here. */
80726 if (n != -1)
80727@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80728 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
80729 }
80730
80731-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
80732+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
80733 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
80734 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
80735
80736diff --git a/include/linux/cred.h b/include/linux/cred.h
80737index b2d0820..2ecafd3 100644
80738--- a/include/linux/cred.h
80739+++ b/include/linux/cred.h
80740@@ -35,7 +35,7 @@ struct group_info {
80741 int nblocks;
80742 kgid_t small_block[NGROUPS_SMALL];
80743 kgid_t *blocks[0];
80744-};
80745+} __randomize_layout;
80746
80747 /**
80748 * get_group_info - Get a reference to a group info structure
80749@@ -136,7 +136,7 @@ struct cred {
80750 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
80751 struct group_info *group_info; /* supplementary groups for euid/fsgid */
80752 struct rcu_head rcu; /* RCU deletion hook */
80753-};
80754+} __randomize_layout;
80755
80756 extern void __put_cred(struct cred *);
80757 extern void exit_creds(struct task_struct *);
80758@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
80759 static inline void validate_process_creds(void)
80760 {
80761 }
80762+static inline void validate_task_creds(struct task_struct *task)
80763+{
80764+}
80765 #endif
80766
80767 /**
80768@@ -331,6 +334,7 @@ static inline void put_cred(const struct cred *_cred)
80769
80770 #define task_uid(task) (task_cred_xxx((task), uid))
80771 #define task_euid(task) (task_cred_xxx((task), euid))
80772+#define task_securebits(task) (task_cred_xxx((task), securebits))
80773
80774 #define current_cred_xxx(xxx) \
80775 ({ \
80776diff --git a/include/linux/crypto.h b/include/linux/crypto.h
80777index d45e949..51cf5ea 100644
80778--- a/include/linux/crypto.h
80779+++ b/include/linux/crypto.h
80780@@ -373,7 +373,7 @@ struct cipher_tfm {
80781 const u8 *key, unsigned int keylen);
80782 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80783 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80784-};
80785+} __no_const;
80786
80787 struct hash_tfm {
80788 int (*init)(struct hash_desc *desc);
80789@@ -394,13 +394,13 @@ struct compress_tfm {
80790 int (*cot_decompress)(struct crypto_tfm *tfm,
80791 const u8 *src, unsigned int slen,
80792 u8 *dst, unsigned int *dlen);
80793-};
80794+} __no_const;
80795
80796 struct rng_tfm {
80797 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
80798 unsigned int dlen);
80799 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
80800-};
80801+} __no_const;
80802
80803 #define crt_ablkcipher crt_u.ablkcipher
80804 #define crt_aead crt_u.aead
80805diff --git a/include/linux/ctype.h b/include/linux/ctype.h
80806index 653589e..4ef254a 100644
80807--- a/include/linux/ctype.h
80808+++ b/include/linux/ctype.h
80809@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
80810 * Fast implementation of tolower() for internal usage. Do not use in your
80811 * code.
80812 */
80813-static inline char _tolower(const char c)
80814+static inline unsigned char _tolower(const unsigned char c)
80815 {
80816 return c | 0x20;
80817 }
80818diff --git a/include/linux/dcache.h b/include/linux/dcache.h
80819index 75a227c..1456987 100644
80820--- a/include/linux/dcache.h
80821+++ b/include/linux/dcache.h
80822@@ -134,7 +134,7 @@ struct dentry {
80823 } d_u;
80824 struct list_head d_subdirs; /* our children */
80825 struct hlist_node d_alias; /* inode alias list */
80826-};
80827+} __randomize_layout;
80828
80829 /*
80830 * dentry->d_lock spinlock nesting subclasses:
80831diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
80832index 7925bf0..d5143d2 100644
80833--- a/include/linux/decompress/mm.h
80834+++ b/include/linux/decompress/mm.h
80835@@ -77,7 +77,7 @@ static void free(void *where)
80836 * warnings when not needed (indeed large_malloc / large_free are not
80837 * needed by inflate */
80838
80839-#define malloc(a) kmalloc(a, GFP_KERNEL)
80840+#define malloc(a) kmalloc((a), GFP_KERNEL)
80841 #define free(a) kfree(a)
80842
80843 #define large_malloc(a) vmalloc(a)
80844diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
80845index f1863dc..5c26074 100644
80846--- a/include/linux/devfreq.h
80847+++ b/include/linux/devfreq.h
80848@@ -114,7 +114,7 @@ struct devfreq_governor {
80849 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
80850 int (*event_handler)(struct devfreq *devfreq,
80851 unsigned int event, void *data);
80852-};
80853+} __do_const;
80854
80855 /**
80856 * struct devfreq - Device devfreq structure
80857diff --git a/include/linux/device.h b/include/linux/device.h
80858index 43d183a..03b6ba2 100644
80859--- a/include/linux/device.h
80860+++ b/include/linux/device.h
80861@@ -310,7 +310,7 @@ struct subsys_interface {
80862 struct list_head node;
80863 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
80864 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
80865-};
80866+} __do_const;
80867
80868 int subsys_interface_register(struct subsys_interface *sif);
80869 void subsys_interface_unregister(struct subsys_interface *sif);
80870@@ -506,7 +506,7 @@ struct device_type {
80871 void (*release)(struct device *dev);
80872
80873 const struct dev_pm_ops *pm;
80874-};
80875+} __do_const;
80876
80877 /* interface for exporting device attributes */
80878 struct device_attribute {
80879@@ -516,11 +516,12 @@ struct device_attribute {
80880 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
80881 const char *buf, size_t count);
80882 };
80883+typedef struct device_attribute __no_const device_attribute_no_const;
80884
80885 struct dev_ext_attribute {
80886 struct device_attribute attr;
80887 void *var;
80888-};
80889+} __do_const;
80890
80891 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
80892 char *buf);
80893diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
80894index 931b709..89b2d89 100644
80895--- a/include/linux/dma-mapping.h
80896+++ b/include/linux/dma-mapping.h
80897@@ -60,7 +60,7 @@ struct dma_map_ops {
80898 u64 (*get_required_mask)(struct device *dev);
80899 #endif
80900 int is_phys;
80901-};
80902+} __do_const;
80903
80904 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
80905
80906diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
80907index 1f9e642..39e4263 100644
80908--- a/include/linux/dmaengine.h
80909+++ b/include/linux/dmaengine.h
80910@@ -1147,9 +1147,9 @@ struct dma_pinned_list {
80911 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
80912 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
80913
80914-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80915+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80916 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
80917-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80918+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80919 struct dma_pinned_list *pinned_list, struct page *page,
80920 unsigned int offset, size_t len);
80921
80922diff --git a/include/linux/efi.h b/include/linux/efi.h
80923index 45cb4ff..c9b4912 100644
80924--- a/include/linux/efi.h
80925+++ b/include/linux/efi.h
80926@@ -1036,6 +1036,7 @@ struct efivar_operations {
80927 efi_set_variable_t *set_variable;
80928 efi_query_variable_store_t *query_variable_store;
80929 };
80930+typedef struct efivar_operations __no_const efivar_operations_no_const;
80931
80932 struct efivars {
80933 /*
80934diff --git a/include/linux/elf.h b/include/linux/elf.h
80935index 67a5fa7..b817372 100644
80936--- a/include/linux/elf.h
80937+++ b/include/linux/elf.h
80938@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
80939 #define elf_note elf32_note
80940 #define elf_addr_t Elf32_Off
80941 #define Elf_Half Elf32_Half
80942+#define elf_dyn Elf32_Dyn
80943
80944 #else
80945
80946@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
80947 #define elf_note elf64_note
80948 #define elf_addr_t Elf64_Off
80949 #define Elf_Half Elf64_Half
80950+#define elf_dyn Elf64_Dyn
80951
80952 #endif
80953
80954diff --git a/include/linux/err.h b/include/linux/err.h
80955index a729120..6ede2c9 100644
80956--- a/include/linux/err.h
80957+++ b/include/linux/err.h
80958@@ -20,12 +20,12 @@
80959
80960 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
80961
80962-static inline void * __must_check ERR_PTR(long error)
80963+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
80964 {
80965 return (void *) error;
80966 }
80967
80968-static inline long __must_check PTR_ERR(__force const void *ptr)
80969+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
80970 {
80971 return (long) ptr;
80972 }
80973diff --git a/include/linux/extcon.h b/include/linux/extcon.h
80974index 36f49c4..a2a1f4c 100644
80975--- a/include/linux/extcon.h
80976+++ b/include/linux/extcon.h
80977@@ -135,7 +135,7 @@ struct extcon_dev {
80978 /* /sys/class/extcon/.../mutually_exclusive/... */
80979 struct attribute_group attr_g_muex;
80980 struct attribute **attrs_muex;
80981- struct device_attribute *d_attrs_muex;
80982+ device_attribute_no_const *d_attrs_muex;
80983 };
80984
80985 /**
80986diff --git a/include/linux/fb.h b/include/linux/fb.h
80987index 09bb7a1..d98870a 100644
80988--- a/include/linux/fb.h
80989+++ b/include/linux/fb.h
80990@@ -305,7 +305,7 @@ struct fb_ops {
80991 /* called at KDB enter and leave time to prepare the console */
80992 int (*fb_debug_enter)(struct fb_info *info);
80993 int (*fb_debug_leave)(struct fb_info *info);
80994-};
80995+} __do_const;
80996
80997 #ifdef CONFIG_FB_TILEBLITTING
80998 #define FB_TILE_CURSOR_NONE 0
80999diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
81000index 230f87b..1fd0485 100644
81001--- a/include/linux/fdtable.h
81002+++ b/include/linux/fdtable.h
81003@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
81004 void put_files_struct(struct files_struct *fs);
81005 void reset_files_struct(struct files_struct *);
81006 int unshare_files(struct files_struct **);
81007-struct files_struct *dup_fd(struct files_struct *, int *);
81008+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
81009 void do_close_on_exec(struct files_struct *);
81010 int iterate_fd(struct files_struct *, unsigned,
81011 int (*)(const void *, struct file *, unsigned),
81012diff --git a/include/linux/filter.h b/include/linux/filter.h
81013index a5227ab..c789945 100644
81014--- a/include/linux/filter.h
81015+++ b/include/linux/filter.h
81016@@ -9,6 +9,11 @@
81017 #include <linux/skbuff.h>
81018 #include <linux/workqueue.h>
81019 #include <uapi/linux/filter.h>
81020+#include <asm/cacheflush.h>
81021+
81022+struct sk_buff;
81023+struct sock;
81024+struct seccomp_data;
81025
81026 /* Internally used and optimized filter representation with extended
81027 * instruction set based on top of classic BPF.
81028@@ -320,20 +325,23 @@ struct sock_fprog_kern {
81029 struct sock_filter *filter;
81030 };
81031
81032-struct sk_buff;
81033-struct sock;
81034-struct seccomp_data;
81035+struct bpf_work_struct {
81036+ struct bpf_prog *prog;
81037+ struct work_struct work;
81038+};
81039
81040 struct bpf_prog {
81041+ u32 pages; /* Number of allocated pages */
81042 u32 jited:1, /* Is our filter JIT'ed? */
81043 len:31; /* Number of filter blocks */
81044 struct sock_fprog_kern *orig_prog; /* Original BPF program */
81045+ struct bpf_work_struct *work; /* Deferred free work struct */
81046 unsigned int (*bpf_func)(const struct sk_buff *skb,
81047 const struct bpf_insn *filter);
81048+ /* Instructions for interpreter */
81049 union {
81050 struct sock_filter insns[0];
81051 struct bpf_insn insnsi[0];
81052- struct work_struct work;
81053 };
81054 };
81055
81056@@ -353,6 +361,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
81057
81058 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
81059
81060+#ifdef CONFIG_DEBUG_SET_MODULE_RONX
81061+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
81062+{
81063+ set_memory_ro((unsigned long)fp, fp->pages);
81064+}
81065+
81066+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
81067+{
81068+ set_memory_rw((unsigned long)fp, fp->pages);
81069+}
81070+#else
81071+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
81072+{
81073+}
81074+
81075+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
81076+{
81077+}
81078+#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
81079+
81080 int sk_filter(struct sock *sk, struct sk_buff *skb);
81081
81082 void bpf_prog_select_runtime(struct bpf_prog *fp);
81083@@ -361,6 +389,17 @@ void bpf_prog_free(struct bpf_prog *fp);
81084 int bpf_convert_filter(struct sock_filter *prog, int len,
81085 struct bpf_insn *new_prog, int *new_len);
81086
81087+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
81088+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
81089+ gfp_t gfp_extra_flags);
81090+void __bpf_prog_free(struct bpf_prog *fp);
81091+
81092+static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
81093+{
81094+ bpf_prog_unlock_ro(fp);
81095+ __bpf_prog_free(fp);
81096+}
81097+
81098 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
81099 void bpf_prog_destroy(struct bpf_prog *fp);
81100
81101@@ -450,7 +489,7 @@ static inline void bpf_jit_compile(struct bpf_prog *fp)
81102
81103 static inline void bpf_jit_free(struct bpf_prog *fp)
81104 {
81105- kfree(fp);
81106+ bpf_prog_unlock_free(fp);
81107 }
81108 #endif /* CONFIG_BPF_JIT */
81109
81110diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
81111index 8293262..2b3b8bd 100644
81112--- a/include/linux/frontswap.h
81113+++ b/include/linux/frontswap.h
81114@@ -11,7 +11,7 @@ struct frontswap_ops {
81115 int (*load)(unsigned, pgoff_t, struct page *);
81116 void (*invalidate_page)(unsigned, pgoff_t);
81117 void (*invalidate_area)(unsigned);
81118-};
81119+} __no_const;
81120
81121 extern bool frontswap_enabled;
81122 extern struct frontswap_ops *
81123diff --git a/include/linux/fs.h b/include/linux/fs.h
81124index 9418772..0155807 100644
81125--- a/include/linux/fs.h
81126+++ b/include/linux/fs.h
81127@@ -401,7 +401,7 @@ struct address_space {
81128 spinlock_t private_lock; /* for use by the address_space */
81129 struct list_head private_list; /* ditto */
81130 void *private_data; /* ditto */
81131-} __attribute__((aligned(sizeof(long))));
81132+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
81133 /*
81134 * On most architectures that alignment is already the case; but
81135 * must be enforced here for CRIS, to let the least significant bit
81136@@ -444,7 +444,7 @@ struct block_device {
81137 int bd_fsfreeze_count;
81138 /* Mutex for freeze */
81139 struct mutex bd_fsfreeze_mutex;
81140-};
81141+} __randomize_layout;
81142
81143 /*
81144 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
81145@@ -613,7 +613,7 @@ struct inode {
81146 #endif
81147
81148 void *i_private; /* fs or device private pointer */
81149-};
81150+} __randomize_layout;
81151
81152 static inline int inode_unhashed(struct inode *inode)
81153 {
81154@@ -806,7 +806,7 @@ struct file {
81155 struct list_head f_tfile_llink;
81156 #endif /* #ifdef CONFIG_EPOLL */
81157 struct address_space *f_mapping;
81158-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
81159+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
81160
81161 struct file_handle {
81162 __u32 handle_bytes;
81163@@ -934,7 +934,7 @@ struct file_lock {
81164 int state; /* state of grant or error if -ve */
81165 } afs;
81166 } fl_u;
81167-};
81168+} __randomize_layout;
81169
81170 /* The following constant reflects the upper bound of the file/locking space */
81171 #ifndef OFFSET_MAX
81172@@ -1284,7 +1284,7 @@ struct super_block {
81173 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
81174 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
81175 struct rcu_head rcu;
81176-};
81177+} __randomize_layout;
81178
81179 extern struct timespec current_fs_time(struct super_block *sb);
81180
81181@@ -1510,7 +1510,8 @@ struct file_operations {
81182 long (*fallocate)(struct file *file, int mode, loff_t offset,
81183 loff_t len);
81184 int (*show_fdinfo)(struct seq_file *m, struct file *f);
81185-};
81186+} __do_const __randomize_layout;
81187+typedef struct file_operations __no_const file_operations_no_const;
81188
81189 struct inode_operations {
81190 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
81191@@ -2796,4 +2797,14 @@ static inline bool dir_relax(struct inode *inode)
81192 return !IS_DEADDIR(inode);
81193 }
81194
81195+static inline bool is_sidechannel_device(const struct inode *inode)
81196+{
81197+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
81198+ umode_t mode = inode->i_mode;
81199+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
81200+#else
81201+ return false;
81202+#endif
81203+}
81204+
81205 #endif /* _LINUX_FS_H */
81206diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
81207index 0efc3e6..fd23610 100644
81208--- a/include/linux/fs_struct.h
81209+++ b/include/linux/fs_struct.h
81210@@ -6,13 +6,13 @@
81211 #include <linux/seqlock.h>
81212
81213 struct fs_struct {
81214- int users;
81215+ atomic_t users;
81216 spinlock_t lock;
81217 seqcount_t seq;
81218 int umask;
81219 int in_exec;
81220 struct path root, pwd;
81221-};
81222+} __randomize_layout;
81223
81224 extern struct kmem_cache *fs_cachep;
81225
81226diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81227index 7714849..a4a5c7a 100644
81228--- a/include/linux/fscache-cache.h
81229+++ b/include/linux/fscache-cache.h
81230@@ -113,7 +113,7 @@ struct fscache_operation {
81231 fscache_operation_release_t release;
81232 };
81233
81234-extern atomic_t fscache_op_debug_id;
81235+extern atomic_unchecked_t fscache_op_debug_id;
81236 extern void fscache_op_work_func(struct work_struct *work);
81237
81238 extern void fscache_enqueue_operation(struct fscache_operation *);
81239@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81240 INIT_WORK(&op->work, fscache_op_work_func);
81241 atomic_set(&op->usage, 1);
81242 op->state = FSCACHE_OP_ST_INITIALISED;
81243- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81244+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81245 op->processor = processor;
81246 op->release = release;
81247 INIT_LIST_HEAD(&op->pend_link);
81248diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81249index 115bb81..e7b812b 100644
81250--- a/include/linux/fscache.h
81251+++ b/include/linux/fscache.h
81252@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81253 * - this is mandatory for any object that may have data
81254 */
81255 void (*now_uncached)(void *cookie_netfs_data);
81256-};
81257+} __do_const;
81258
81259 /*
81260 * fscache cached network filesystem type
81261diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81262index 1c804b0..1432c2b 100644
81263--- a/include/linux/fsnotify.h
81264+++ b/include/linux/fsnotify.h
81265@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
81266 struct inode *inode = file_inode(file);
81267 __u32 mask = FS_ACCESS;
81268
81269+ if (is_sidechannel_device(inode))
81270+ return;
81271+
81272 if (S_ISDIR(inode->i_mode))
81273 mask |= FS_ISDIR;
81274
81275@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
81276 struct inode *inode = file_inode(file);
81277 __u32 mask = FS_MODIFY;
81278
81279+ if (is_sidechannel_device(inode))
81280+ return;
81281+
81282 if (S_ISDIR(inode->i_mode))
81283 mask |= FS_ISDIR;
81284
81285@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81286 */
81287 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81288 {
81289- return kstrdup(name, GFP_KERNEL);
81290+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81291 }
81292
81293 /*
81294diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81295index ec274e0..e678159 100644
81296--- a/include/linux/genhd.h
81297+++ b/include/linux/genhd.h
81298@@ -194,7 +194,7 @@ struct gendisk {
81299 struct kobject *slave_dir;
81300
81301 struct timer_rand_state *random;
81302- atomic_t sync_io; /* RAID */
81303+ atomic_unchecked_t sync_io; /* RAID */
81304 struct disk_events *ev;
81305 #ifdef CONFIG_BLK_DEV_INTEGRITY
81306 struct blk_integrity *integrity;
81307@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81308 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81309
81310 /* drivers/char/random.c */
81311-extern void add_disk_randomness(struct gendisk *disk);
81312+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81313 extern void rand_initialize_disk(struct gendisk *disk);
81314
81315 static inline sector_t get_start_sect(struct block_device *bdev)
81316diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81317index c0894dd..2fbf10c 100644
81318--- a/include/linux/genl_magic_func.h
81319+++ b/include/linux/genl_magic_func.h
81320@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81321 },
81322
81323 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81324-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81325+static struct genl_ops ZZZ_genl_ops[] = {
81326 #include GENL_MAGIC_INCLUDE_FILE
81327 };
81328
81329diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81330index 5e7219d..b1ed627 100644
81331--- a/include/linux/gfp.h
81332+++ b/include/linux/gfp.h
81333@@ -34,6 +34,13 @@ struct vm_area_struct;
81334 #define ___GFP_NO_KSWAPD 0x400000u
81335 #define ___GFP_OTHER_NODE 0x800000u
81336 #define ___GFP_WRITE 0x1000000u
81337+
81338+#ifdef CONFIG_PAX_USERCOPY_SLABS
81339+#define ___GFP_USERCOPY 0x2000000u
81340+#else
81341+#define ___GFP_USERCOPY 0
81342+#endif
81343+
81344 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81345
81346 /*
81347@@ -90,6 +97,7 @@ struct vm_area_struct;
81348 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81349 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81350 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81351+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81352
81353 /*
81354 * This may seem redundant, but it's a way of annotating false positives vs.
81355@@ -97,7 +105,7 @@ struct vm_area_struct;
81356 */
81357 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81358
81359-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81360+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81361 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81362
81363 /* This equals 0, but use constants in case they ever change */
81364@@ -155,6 +163,8 @@ struct vm_area_struct;
81365 /* 4GB DMA on some platforms */
81366 #define GFP_DMA32 __GFP_DMA32
81367
81368+#define GFP_USERCOPY __GFP_USERCOPY
81369+
81370 /* Convert GFP flags to their corresponding migrate type */
81371 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
81372 {
81373diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81374new file mode 100644
81375index 0000000..edb2cb6
81376--- /dev/null
81377+++ b/include/linux/gracl.h
81378@@ -0,0 +1,340 @@
81379+#ifndef GR_ACL_H
81380+#define GR_ACL_H
81381+
81382+#include <linux/grdefs.h>
81383+#include <linux/resource.h>
81384+#include <linux/capability.h>
81385+#include <linux/dcache.h>
81386+#include <asm/resource.h>
81387+
81388+/* Major status information */
81389+
81390+#define GR_VERSION "grsecurity 3.0"
81391+#define GRSECURITY_VERSION 0x3000
81392+
81393+enum {
81394+ GR_SHUTDOWN = 0,
81395+ GR_ENABLE = 1,
81396+ GR_SPROLE = 2,
81397+ GR_OLDRELOAD = 3,
81398+ GR_SEGVMOD = 4,
81399+ GR_STATUS = 5,
81400+ GR_UNSPROLE = 6,
81401+ GR_PASSSET = 7,
81402+ GR_SPROLEPAM = 8,
81403+ GR_RELOAD = 9,
81404+};
81405+
81406+/* Password setup definitions
81407+ * kernel/grhash.c */
81408+enum {
81409+ GR_PW_LEN = 128,
81410+ GR_SALT_LEN = 16,
81411+ GR_SHA_LEN = 32,
81412+};
81413+
81414+enum {
81415+ GR_SPROLE_LEN = 64,
81416+};
81417+
81418+enum {
81419+ GR_NO_GLOB = 0,
81420+ GR_REG_GLOB,
81421+ GR_CREATE_GLOB
81422+};
81423+
81424+#define GR_NLIMITS 32
81425+
81426+/* Begin Data Structures */
81427+
81428+struct sprole_pw {
81429+ unsigned char *rolename;
81430+ unsigned char salt[GR_SALT_LEN];
81431+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81432+};
81433+
81434+struct name_entry {
81435+ __u32 key;
81436+ ino_t inode;
81437+ dev_t device;
81438+ char *name;
81439+ __u16 len;
81440+ __u8 deleted;
81441+ struct name_entry *prev;
81442+ struct name_entry *next;
81443+};
81444+
81445+struct inodev_entry {
81446+ struct name_entry *nentry;
81447+ struct inodev_entry *prev;
81448+ struct inodev_entry *next;
81449+};
81450+
81451+struct acl_role_db {
81452+ struct acl_role_label **r_hash;
81453+ __u32 r_size;
81454+};
81455+
81456+struct inodev_db {
81457+ struct inodev_entry **i_hash;
81458+ __u32 i_size;
81459+};
81460+
81461+struct name_db {
81462+ struct name_entry **n_hash;
81463+ __u32 n_size;
81464+};
81465+
81466+struct crash_uid {
81467+ uid_t uid;
81468+ unsigned long expires;
81469+};
81470+
81471+struct gr_hash_struct {
81472+ void **table;
81473+ void **nametable;
81474+ void *first;
81475+ __u32 table_size;
81476+ __u32 used_size;
81477+ int type;
81478+};
81479+
81480+/* Userspace Grsecurity ACL data structures */
81481+
81482+struct acl_subject_label {
81483+ char *filename;
81484+ ino_t inode;
81485+ dev_t device;
81486+ __u32 mode;
81487+ kernel_cap_t cap_mask;
81488+ kernel_cap_t cap_lower;
81489+ kernel_cap_t cap_invert_audit;
81490+
81491+ struct rlimit res[GR_NLIMITS];
81492+ __u32 resmask;
81493+
81494+ __u8 user_trans_type;
81495+ __u8 group_trans_type;
81496+ uid_t *user_transitions;
81497+ gid_t *group_transitions;
81498+ __u16 user_trans_num;
81499+ __u16 group_trans_num;
81500+
81501+ __u32 sock_families[2];
81502+ __u32 ip_proto[8];
81503+ __u32 ip_type;
81504+ struct acl_ip_label **ips;
81505+ __u32 ip_num;
81506+ __u32 inaddr_any_override;
81507+
81508+ __u32 crashes;
81509+ unsigned long expires;
81510+
81511+ struct acl_subject_label *parent_subject;
81512+ struct gr_hash_struct *hash;
81513+ struct acl_subject_label *prev;
81514+ struct acl_subject_label *next;
81515+
81516+ struct acl_object_label **obj_hash;
81517+ __u32 obj_hash_size;
81518+ __u16 pax_flags;
81519+};
81520+
81521+struct role_allowed_ip {
81522+ __u32 addr;
81523+ __u32 netmask;
81524+
81525+ struct role_allowed_ip *prev;
81526+ struct role_allowed_ip *next;
81527+};
81528+
81529+struct role_transition {
81530+ char *rolename;
81531+
81532+ struct role_transition *prev;
81533+ struct role_transition *next;
81534+};
81535+
81536+struct acl_role_label {
81537+ char *rolename;
81538+ uid_t uidgid;
81539+ __u16 roletype;
81540+
81541+ __u16 auth_attempts;
81542+ unsigned long expires;
81543+
81544+ struct acl_subject_label *root_label;
81545+ struct gr_hash_struct *hash;
81546+
81547+ struct acl_role_label *prev;
81548+ struct acl_role_label *next;
81549+
81550+ struct role_transition *transitions;
81551+ struct role_allowed_ip *allowed_ips;
81552+ uid_t *domain_children;
81553+ __u16 domain_child_num;
81554+
81555+ umode_t umask;
81556+
81557+ struct acl_subject_label **subj_hash;
81558+ __u32 subj_hash_size;
81559+};
81560+
81561+struct user_acl_role_db {
81562+ struct acl_role_label **r_table;
81563+ __u32 num_pointers; /* Number of allocations to track */
81564+ __u32 num_roles; /* Number of roles */
81565+ __u32 num_domain_children; /* Number of domain children */
81566+ __u32 num_subjects; /* Number of subjects */
81567+ __u32 num_objects; /* Number of objects */
81568+};
81569+
81570+struct acl_object_label {
81571+ char *filename;
81572+ ino_t inode;
81573+ dev_t device;
81574+ __u32 mode;
81575+
81576+ struct acl_subject_label *nested;
81577+ struct acl_object_label *globbed;
81578+
81579+ /* next two structures not used */
81580+
81581+ struct acl_object_label *prev;
81582+ struct acl_object_label *next;
81583+};
81584+
81585+struct acl_ip_label {
81586+ char *iface;
81587+ __u32 addr;
81588+ __u32 netmask;
81589+ __u16 low, high;
81590+ __u8 mode;
81591+ __u32 type;
81592+ __u32 proto[8];
81593+
81594+ /* next two structures not used */
81595+
81596+ struct acl_ip_label *prev;
81597+ struct acl_ip_label *next;
81598+};
81599+
81600+struct gr_arg {
81601+ struct user_acl_role_db role_db;
81602+ unsigned char pw[GR_PW_LEN];
81603+ unsigned char salt[GR_SALT_LEN];
81604+ unsigned char sum[GR_SHA_LEN];
81605+ unsigned char sp_role[GR_SPROLE_LEN];
81606+ struct sprole_pw *sprole_pws;
81607+ dev_t segv_device;
81608+ ino_t segv_inode;
81609+ uid_t segv_uid;
81610+ __u16 num_sprole_pws;
81611+ __u16 mode;
81612+};
81613+
81614+struct gr_arg_wrapper {
81615+ struct gr_arg *arg;
81616+ __u32 version;
81617+ __u32 size;
81618+};
81619+
81620+struct subject_map {
81621+ struct acl_subject_label *user;
81622+ struct acl_subject_label *kernel;
81623+ struct subject_map *prev;
81624+ struct subject_map *next;
81625+};
81626+
81627+struct acl_subj_map_db {
81628+ struct subject_map **s_hash;
81629+ __u32 s_size;
81630+};
81631+
81632+struct gr_policy_state {
81633+ struct sprole_pw **acl_special_roles;
81634+ __u16 num_sprole_pws;
81635+ struct acl_role_label *kernel_role;
81636+ struct acl_role_label *role_list;
81637+ struct acl_role_label *default_role;
81638+ struct acl_role_db acl_role_set;
81639+ struct acl_subj_map_db subj_map_set;
81640+ struct name_db name_set;
81641+ struct inodev_db inodev_set;
81642+};
81643+
81644+struct gr_alloc_state {
81645+ unsigned long alloc_stack_next;
81646+ unsigned long alloc_stack_size;
81647+ void **alloc_stack;
81648+};
81649+
81650+struct gr_reload_state {
81651+ struct gr_policy_state oldpolicy;
81652+ struct gr_alloc_state oldalloc;
81653+ struct gr_policy_state newpolicy;
81654+ struct gr_alloc_state newalloc;
81655+ struct gr_policy_state *oldpolicy_ptr;
81656+ struct gr_alloc_state *oldalloc_ptr;
81657+ unsigned char oldmode;
81658+};
81659+
81660+/* End Data Structures Section */
81661+
81662+/* Hash functions generated by empirical testing by Brad Spengler
81663+ Makes good use of the low bits of the inode. Generally 0-1 times
81664+ in loop for successful match. 0-3 for unsuccessful match.
81665+ Shift/add algorithm with modulus of table size and an XOR*/
81666+
81667+static __inline__ unsigned int
81668+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
81669+{
81670+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
81671+}
81672+
81673+ static __inline__ unsigned int
81674+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
81675+{
81676+ return ((const unsigned long)userp % sz);
81677+}
81678+
81679+static __inline__ unsigned int
81680+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
81681+{
81682+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
81683+}
81684+
81685+static __inline__ unsigned int
81686+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
81687+{
81688+ return full_name_hash((const unsigned char *)name, len) % sz;
81689+}
81690+
81691+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
81692+ subj = NULL; \
81693+ iter = 0; \
81694+ while (iter < role->subj_hash_size) { \
81695+ if (subj == NULL) \
81696+ subj = role->subj_hash[iter]; \
81697+ if (subj == NULL) { \
81698+ iter++; \
81699+ continue; \
81700+ }
81701+
81702+#define FOR_EACH_SUBJECT_END(subj,iter) \
81703+ subj = subj->next; \
81704+ if (subj == NULL) \
81705+ iter++; \
81706+ }
81707+
81708+
81709+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
81710+ subj = role->hash->first; \
81711+ while (subj != NULL) {
81712+
81713+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
81714+ subj = subj->next; \
81715+ }
81716+
81717+#endif
81718+
81719diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
81720new file mode 100644
81721index 0000000..33ebd1f
81722--- /dev/null
81723+++ b/include/linux/gracl_compat.h
81724@@ -0,0 +1,156 @@
81725+#ifndef GR_ACL_COMPAT_H
81726+#define GR_ACL_COMPAT_H
81727+
81728+#include <linux/resource.h>
81729+#include <asm/resource.h>
81730+
81731+struct sprole_pw_compat {
81732+ compat_uptr_t rolename;
81733+ unsigned char salt[GR_SALT_LEN];
81734+ unsigned char sum[GR_SHA_LEN];
81735+};
81736+
81737+struct gr_hash_struct_compat {
81738+ compat_uptr_t table;
81739+ compat_uptr_t nametable;
81740+ compat_uptr_t first;
81741+ __u32 table_size;
81742+ __u32 used_size;
81743+ int type;
81744+};
81745+
81746+struct acl_subject_label_compat {
81747+ compat_uptr_t filename;
81748+ compat_ino_t inode;
81749+ __u32 device;
81750+ __u32 mode;
81751+ kernel_cap_t cap_mask;
81752+ kernel_cap_t cap_lower;
81753+ kernel_cap_t cap_invert_audit;
81754+
81755+ struct compat_rlimit res[GR_NLIMITS];
81756+ __u32 resmask;
81757+
81758+ __u8 user_trans_type;
81759+ __u8 group_trans_type;
81760+ compat_uptr_t user_transitions;
81761+ compat_uptr_t group_transitions;
81762+ __u16 user_trans_num;
81763+ __u16 group_trans_num;
81764+
81765+ __u32 sock_families[2];
81766+ __u32 ip_proto[8];
81767+ __u32 ip_type;
81768+ compat_uptr_t ips;
81769+ __u32 ip_num;
81770+ __u32 inaddr_any_override;
81771+
81772+ __u32 crashes;
81773+ compat_ulong_t expires;
81774+
81775+ compat_uptr_t parent_subject;
81776+ compat_uptr_t hash;
81777+ compat_uptr_t prev;
81778+ compat_uptr_t next;
81779+
81780+ compat_uptr_t obj_hash;
81781+ __u32 obj_hash_size;
81782+ __u16 pax_flags;
81783+};
81784+
81785+struct role_allowed_ip_compat {
81786+ __u32 addr;
81787+ __u32 netmask;
81788+
81789+ compat_uptr_t prev;
81790+ compat_uptr_t next;
81791+};
81792+
81793+struct role_transition_compat {
81794+ compat_uptr_t rolename;
81795+
81796+ compat_uptr_t prev;
81797+ compat_uptr_t next;
81798+};
81799+
81800+struct acl_role_label_compat {
81801+ compat_uptr_t rolename;
81802+ uid_t uidgid;
81803+ __u16 roletype;
81804+
81805+ __u16 auth_attempts;
81806+ compat_ulong_t expires;
81807+
81808+ compat_uptr_t root_label;
81809+ compat_uptr_t hash;
81810+
81811+ compat_uptr_t prev;
81812+ compat_uptr_t next;
81813+
81814+ compat_uptr_t transitions;
81815+ compat_uptr_t allowed_ips;
81816+ compat_uptr_t domain_children;
81817+ __u16 domain_child_num;
81818+
81819+ umode_t umask;
81820+
81821+ compat_uptr_t subj_hash;
81822+ __u32 subj_hash_size;
81823+};
81824+
81825+struct user_acl_role_db_compat {
81826+ compat_uptr_t r_table;
81827+ __u32 num_pointers;
81828+ __u32 num_roles;
81829+ __u32 num_domain_children;
81830+ __u32 num_subjects;
81831+ __u32 num_objects;
81832+};
81833+
81834+struct acl_object_label_compat {
81835+ compat_uptr_t filename;
81836+ compat_ino_t inode;
81837+ __u32 device;
81838+ __u32 mode;
81839+
81840+ compat_uptr_t nested;
81841+ compat_uptr_t globbed;
81842+
81843+ compat_uptr_t prev;
81844+ compat_uptr_t next;
81845+};
81846+
81847+struct acl_ip_label_compat {
81848+ compat_uptr_t iface;
81849+ __u32 addr;
81850+ __u32 netmask;
81851+ __u16 low, high;
81852+ __u8 mode;
81853+ __u32 type;
81854+ __u32 proto[8];
81855+
81856+ compat_uptr_t prev;
81857+ compat_uptr_t next;
81858+};
81859+
81860+struct gr_arg_compat {
81861+ struct user_acl_role_db_compat role_db;
81862+ unsigned char pw[GR_PW_LEN];
81863+ unsigned char salt[GR_SALT_LEN];
81864+ unsigned char sum[GR_SHA_LEN];
81865+ unsigned char sp_role[GR_SPROLE_LEN];
81866+ compat_uptr_t sprole_pws;
81867+ __u32 segv_device;
81868+ compat_ino_t segv_inode;
81869+ uid_t segv_uid;
81870+ __u16 num_sprole_pws;
81871+ __u16 mode;
81872+};
81873+
81874+struct gr_arg_wrapper_compat {
81875+ compat_uptr_t arg;
81876+ __u32 version;
81877+ __u32 size;
81878+};
81879+
81880+#endif
81881diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
81882new file mode 100644
81883index 0000000..323ecf2
81884--- /dev/null
81885+++ b/include/linux/gralloc.h
81886@@ -0,0 +1,9 @@
81887+#ifndef __GRALLOC_H
81888+#define __GRALLOC_H
81889+
81890+void acl_free_all(void);
81891+int acl_alloc_stack_init(unsigned long size);
81892+void *acl_alloc(unsigned long len);
81893+void *acl_alloc_num(unsigned long num, unsigned long len);
81894+
81895+#endif
81896diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
81897new file mode 100644
81898index 0000000..be66033
81899--- /dev/null
81900+++ b/include/linux/grdefs.h
81901@@ -0,0 +1,140 @@
81902+#ifndef GRDEFS_H
81903+#define GRDEFS_H
81904+
81905+/* Begin grsecurity status declarations */
81906+
81907+enum {
81908+ GR_READY = 0x01,
81909+ GR_STATUS_INIT = 0x00 // disabled state
81910+};
81911+
81912+/* Begin ACL declarations */
81913+
81914+/* Role flags */
81915+
81916+enum {
81917+ GR_ROLE_USER = 0x0001,
81918+ GR_ROLE_GROUP = 0x0002,
81919+ GR_ROLE_DEFAULT = 0x0004,
81920+ GR_ROLE_SPECIAL = 0x0008,
81921+ GR_ROLE_AUTH = 0x0010,
81922+ GR_ROLE_NOPW = 0x0020,
81923+ GR_ROLE_GOD = 0x0040,
81924+ GR_ROLE_LEARN = 0x0080,
81925+ GR_ROLE_TPE = 0x0100,
81926+ GR_ROLE_DOMAIN = 0x0200,
81927+ GR_ROLE_PAM = 0x0400,
81928+ GR_ROLE_PERSIST = 0x0800
81929+};
81930+
81931+/* ACL Subject and Object mode flags */
81932+enum {
81933+ GR_DELETED = 0x80000000
81934+};
81935+
81936+/* ACL Object-only mode flags */
81937+enum {
81938+ GR_READ = 0x00000001,
81939+ GR_APPEND = 0x00000002,
81940+ GR_WRITE = 0x00000004,
81941+ GR_EXEC = 0x00000008,
81942+ GR_FIND = 0x00000010,
81943+ GR_INHERIT = 0x00000020,
81944+ GR_SETID = 0x00000040,
81945+ GR_CREATE = 0x00000080,
81946+ GR_DELETE = 0x00000100,
81947+ GR_LINK = 0x00000200,
81948+ GR_AUDIT_READ = 0x00000400,
81949+ GR_AUDIT_APPEND = 0x00000800,
81950+ GR_AUDIT_WRITE = 0x00001000,
81951+ GR_AUDIT_EXEC = 0x00002000,
81952+ GR_AUDIT_FIND = 0x00004000,
81953+ GR_AUDIT_INHERIT= 0x00008000,
81954+ GR_AUDIT_SETID = 0x00010000,
81955+ GR_AUDIT_CREATE = 0x00020000,
81956+ GR_AUDIT_DELETE = 0x00040000,
81957+ GR_AUDIT_LINK = 0x00080000,
81958+ GR_PTRACERD = 0x00100000,
81959+ GR_NOPTRACE = 0x00200000,
81960+ GR_SUPPRESS = 0x00400000,
81961+ GR_NOLEARN = 0x00800000,
81962+ GR_INIT_TRANSFER= 0x01000000
81963+};
81964+
81965+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
81966+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
81967+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
81968+
81969+/* ACL subject-only mode flags */
81970+enum {
81971+ GR_KILL = 0x00000001,
81972+ GR_VIEW = 0x00000002,
81973+ GR_PROTECTED = 0x00000004,
81974+ GR_LEARN = 0x00000008,
81975+ GR_OVERRIDE = 0x00000010,
81976+ /* just a placeholder, this mode is only used in userspace */
81977+ GR_DUMMY = 0x00000020,
81978+ GR_PROTSHM = 0x00000040,
81979+ GR_KILLPROC = 0x00000080,
81980+ GR_KILLIPPROC = 0x00000100,
81981+ /* just a placeholder, this mode is only used in userspace */
81982+ GR_NOTROJAN = 0x00000200,
81983+ GR_PROTPROCFD = 0x00000400,
81984+ GR_PROCACCT = 0x00000800,
81985+ GR_RELAXPTRACE = 0x00001000,
81986+ //GR_NESTED = 0x00002000,
81987+ GR_INHERITLEARN = 0x00004000,
81988+ GR_PROCFIND = 0x00008000,
81989+ GR_POVERRIDE = 0x00010000,
81990+ GR_KERNELAUTH = 0x00020000,
81991+ GR_ATSECURE = 0x00040000,
81992+ GR_SHMEXEC = 0x00080000
81993+};
81994+
81995+enum {
81996+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
81997+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
81998+ GR_PAX_ENABLE_MPROTECT = 0x0004,
81999+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
82000+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
82001+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
82002+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
82003+ GR_PAX_DISABLE_MPROTECT = 0x0400,
82004+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
82005+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
82006+};
82007+
82008+enum {
82009+ GR_ID_USER = 0x01,
82010+ GR_ID_GROUP = 0x02,
82011+};
82012+
82013+enum {
82014+ GR_ID_ALLOW = 0x01,
82015+ GR_ID_DENY = 0x02,
82016+};
82017+
82018+#define GR_CRASH_RES 31
82019+#define GR_UIDTABLE_MAX 500
82020+
82021+/* begin resource learning section */
82022+enum {
82023+ GR_RLIM_CPU_BUMP = 60,
82024+ GR_RLIM_FSIZE_BUMP = 50000,
82025+ GR_RLIM_DATA_BUMP = 10000,
82026+ GR_RLIM_STACK_BUMP = 1000,
82027+ GR_RLIM_CORE_BUMP = 10000,
82028+ GR_RLIM_RSS_BUMP = 500000,
82029+ GR_RLIM_NPROC_BUMP = 1,
82030+ GR_RLIM_NOFILE_BUMP = 5,
82031+ GR_RLIM_MEMLOCK_BUMP = 50000,
82032+ GR_RLIM_AS_BUMP = 500000,
82033+ GR_RLIM_LOCKS_BUMP = 2,
82034+ GR_RLIM_SIGPENDING_BUMP = 5,
82035+ GR_RLIM_MSGQUEUE_BUMP = 10000,
82036+ GR_RLIM_NICE_BUMP = 1,
82037+ GR_RLIM_RTPRIO_BUMP = 1,
82038+ GR_RLIM_RTTIME_BUMP = 1000000
82039+};
82040+
82041+#endif
82042diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
82043new file mode 100644
82044index 0000000..d25522e
82045--- /dev/null
82046+++ b/include/linux/grinternal.h
82047@@ -0,0 +1,229 @@
82048+#ifndef __GRINTERNAL_H
82049+#define __GRINTERNAL_H
82050+
82051+#ifdef CONFIG_GRKERNSEC
82052+
82053+#include <linux/fs.h>
82054+#include <linux/mnt_namespace.h>
82055+#include <linux/nsproxy.h>
82056+#include <linux/gracl.h>
82057+#include <linux/grdefs.h>
82058+#include <linux/grmsg.h>
82059+
82060+void gr_add_learn_entry(const char *fmt, ...)
82061+ __attribute__ ((format (printf, 1, 2)));
82062+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
82063+ const struct vfsmount *mnt);
82064+__u32 gr_check_create(const struct dentry *new_dentry,
82065+ const struct dentry *parent,
82066+ const struct vfsmount *mnt, const __u32 mode);
82067+int gr_check_protected_task(const struct task_struct *task);
82068+__u32 to_gr_audit(const __u32 reqmode);
82069+int gr_set_acls(const int type);
82070+int gr_acl_is_enabled(void);
82071+char gr_roletype_to_char(void);
82072+
82073+void gr_handle_alertkill(struct task_struct *task);
82074+char *gr_to_filename(const struct dentry *dentry,
82075+ const struct vfsmount *mnt);
82076+char *gr_to_filename1(const struct dentry *dentry,
82077+ const struct vfsmount *mnt);
82078+char *gr_to_filename2(const struct dentry *dentry,
82079+ const struct vfsmount *mnt);
82080+char *gr_to_filename3(const struct dentry *dentry,
82081+ const struct vfsmount *mnt);
82082+
82083+extern int grsec_enable_ptrace_readexec;
82084+extern int grsec_enable_harden_ptrace;
82085+extern int grsec_enable_link;
82086+extern int grsec_enable_fifo;
82087+extern int grsec_enable_execve;
82088+extern int grsec_enable_shm;
82089+extern int grsec_enable_execlog;
82090+extern int grsec_enable_signal;
82091+extern int grsec_enable_audit_ptrace;
82092+extern int grsec_enable_forkfail;
82093+extern int grsec_enable_time;
82094+extern int grsec_enable_rofs;
82095+extern int grsec_deny_new_usb;
82096+extern int grsec_enable_chroot_shmat;
82097+extern int grsec_enable_chroot_mount;
82098+extern int grsec_enable_chroot_double;
82099+extern int grsec_enable_chroot_pivot;
82100+extern int grsec_enable_chroot_chdir;
82101+extern int grsec_enable_chroot_chmod;
82102+extern int grsec_enable_chroot_mknod;
82103+extern int grsec_enable_chroot_fchdir;
82104+extern int grsec_enable_chroot_nice;
82105+extern int grsec_enable_chroot_execlog;
82106+extern int grsec_enable_chroot_caps;
82107+extern int grsec_enable_chroot_sysctl;
82108+extern int grsec_enable_chroot_unix;
82109+extern int grsec_enable_symlinkown;
82110+extern kgid_t grsec_symlinkown_gid;
82111+extern int grsec_enable_tpe;
82112+extern kgid_t grsec_tpe_gid;
82113+extern int grsec_enable_tpe_all;
82114+extern int grsec_enable_tpe_invert;
82115+extern int grsec_enable_socket_all;
82116+extern kgid_t grsec_socket_all_gid;
82117+extern int grsec_enable_socket_client;
82118+extern kgid_t grsec_socket_client_gid;
82119+extern int grsec_enable_socket_server;
82120+extern kgid_t grsec_socket_server_gid;
82121+extern kgid_t grsec_audit_gid;
82122+extern int grsec_enable_group;
82123+extern int grsec_enable_log_rwxmaps;
82124+extern int grsec_enable_mount;
82125+extern int grsec_enable_chdir;
82126+extern int grsec_resource_logging;
82127+extern int grsec_enable_blackhole;
82128+extern int grsec_lastack_retries;
82129+extern int grsec_enable_brute;
82130+extern int grsec_enable_harden_ipc;
82131+extern int grsec_lock;
82132+
82133+extern spinlock_t grsec_alert_lock;
82134+extern unsigned long grsec_alert_wtime;
82135+extern unsigned long grsec_alert_fyet;
82136+
82137+extern spinlock_t grsec_audit_lock;
82138+
82139+extern rwlock_t grsec_exec_file_lock;
82140+
82141+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
82142+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
82143+ (tsk)->exec_file->f_path.mnt) : "/")
82144+
82145+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
82146+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
82147+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82148+
82149+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
82150+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
82151+ (tsk)->exec_file->f_path.mnt) : "/")
82152+
82153+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
82154+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
82155+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82156+
82157+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
82158+
82159+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
82160+
82161+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
82162+{
82163+ if (file1 && file2) {
82164+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
82165+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
82166+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
82167+ return true;
82168+ }
82169+
82170+ return false;
82171+}
82172+
82173+#define GR_CHROOT_CAPS {{ \
82174+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
82175+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
82176+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
82177+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
82178+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
82179+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
82180+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
82181+
82182+#define security_learn(normal_msg,args...) \
82183+({ \
82184+ read_lock(&grsec_exec_file_lock); \
82185+ gr_add_learn_entry(normal_msg "\n", ## args); \
82186+ read_unlock(&grsec_exec_file_lock); \
82187+})
82188+
82189+enum {
82190+ GR_DO_AUDIT,
82191+ GR_DONT_AUDIT,
82192+ /* used for non-audit messages that we shouldn't kill the task on */
82193+ GR_DONT_AUDIT_GOOD
82194+};
82195+
82196+enum {
82197+ GR_TTYSNIFF,
82198+ GR_RBAC,
82199+ GR_RBAC_STR,
82200+ GR_STR_RBAC,
82201+ GR_RBAC_MODE2,
82202+ GR_RBAC_MODE3,
82203+ GR_FILENAME,
82204+ GR_SYSCTL_HIDDEN,
82205+ GR_NOARGS,
82206+ GR_ONE_INT,
82207+ GR_ONE_INT_TWO_STR,
82208+ GR_ONE_STR,
82209+ GR_STR_INT,
82210+ GR_TWO_STR_INT,
82211+ GR_TWO_INT,
82212+ GR_TWO_U64,
82213+ GR_THREE_INT,
82214+ GR_FIVE_INT_TWO_STR,
82215+ GR_TWO_STR,
82216+ GR_THREE_STR,
82217+ GR_FOUR_STR,
82218+ GR_STR_FILENAME,
82219+ GR_FILENAME_STR,
82220+ GR_FILENAME_TWO_INT,
82221+ GR_FILENAME_TWO_INT_STR,
82222+ GR_TEXTREL,
82223+ GR_PTRACE,
82224+ GR_RESOURCE,
82225+ GR_CAP,
82226+ GR_SIG,
82227+ GR_SIG2,
82228+ GR_CRASH1,
82229+ GR_CRASH2,
82230+ GR_PSACCT,
82231+ GR_RWXMAP,
82232+ GR_RWXMAPVMA
82233+};
82234+
82235+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82236+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82237+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82238+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82239+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82240+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82241+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82242+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82243+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82244+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82245+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82246+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82247+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82248+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82249+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82250+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82251+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82252+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82253+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82254+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82255+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82256+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82257+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82258+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82259+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82260+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82261+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82262+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82263+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82264+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82265+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82266+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82267+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82268+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82269+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82270+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82271+
82272+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82273+
82274+#endif
82275+
82276+#endif
82277diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82278new file mode 100644
82279index 0000000..b02ba9d
82280--- /dev/null
82281+++ b/include/linux/grmsg.h
82282@@ -0,0 +1,117 @@
82283+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82284+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82285+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82286+#define GR_STOPMOD_MSG "denied modification of module state by "
82287+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82288+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82289+#define GR_IOPERM_MSG "denied use of ioperm() by "
82290+#define GR_IOPL_MSG "denied use of iopl() by "
82291+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82292+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82293+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82294+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82295+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82296+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82297+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82298+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82299+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82300+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82301+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82302+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82303+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82304+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82305+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82306+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82307+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82308+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82309+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82310+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82311+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82312+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82313+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82314+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82315+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82316+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82317+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82318+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82319+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82320+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82321+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82322+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82323+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82324+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82325+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82326+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82327+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82328+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82329+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82330+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82331+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82332+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82333+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82334+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82335+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82336+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82337+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82338+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82339+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82340+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82341+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82342+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82343+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82344+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82345+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82346+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82347+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82348+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82349+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82350+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82351+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82352+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82353+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82354+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82355+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82356+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82357+#define GR_NICE_CHROOT_MSG "denied priority change by "
82358+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82359+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82360+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82361+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82362+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82363+#define GR_TIME_MSG "time set by "
82364+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82365+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82366+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82367+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82368+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82369+#define GR_BIND_MSG "denied bind() by "
82370+#define GR_CONNECT_MSG "denied connect() by "
82371+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82372+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82373+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82374+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82375+#define GR_CAP_ACL_MSG "use of %s denied for "
82376+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82377+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82378+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82379+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82380+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82381+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82382+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82383+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82384+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82385+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82386+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82387+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82388+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82389+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82390+#define GR_VM86_MSG "denied use of vm86 by "
82391+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82392+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82393+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82394+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82395+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82396+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82397+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82398+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82399+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
82400diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82401new file mode 100644
82402index 0000000..10b9635
82403--- /dev/null
82404+++ b/include/linux/grsecurity.h
82405@@ -0,0 +1,254 @@
82406+#ifndef GR_SECURITY_H
82407+#define GR_SECURITY_H
82408+#include <linux/fs.h>
82409+#include <linux/fs_struct.h>
82410+#include <linux/binfmts.h>
82411+#include <linux/gracl.h>
82412+
82413+/* notify of brain-dead configs */
82414+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82415+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82416+#endif
82417+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82418+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82419+#endif
82420+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82421+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82422+#endif
82423+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82424+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82425+#endif
82426+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82427+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82428+#endif
82429+
82430+int gr_handle_new_usb(void);
82431+
82432+void gr_handle_brute_attach(int dumpable);
82433+void gr_handle_brute_check(void);
82434+void gr_handle_kernel_exploit(void);
82435+
82436+char gr_roletype_to_char(void);
82437+
82438+int gr_proc_is_restricted(void);
82439+
82440+int gr_acl_enable_at_secure(void);
82441+
82442+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82443+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82444+
82445+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82446+
82447+void gr_del_task_from_ip_table(struct task_struct *p);
82448+
82449+int gr_pid_is_chrooted(struct task_struct *p);
82450+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82451+int gr_handle_chroot_nice(void);
82452+int gr_handle_chroot_sysctl(const int op);
82453+int gr_handle_chroot_setpriority(struct task_struct *p,
82454+ const int niceval);
82455+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82456+int gr_chroot_fhandle(void);
82457+int gr_handle_chroot_chroot(const struct dentry *dentry,
82458+ const struct vfsmount *mnt);
82459+void gr_handle_chroot_chdir(const struct path *path);
82460+int gr_handle_chroot_chmod(const struct dentry *dentry,
82461+ const struct vfsmount *mnt, const int mode);
82462+int gr_handle_chroot_mknod(const struct dentry *dentry,
82463+ const struct vfsmount *mnt, const int mode);
82464+int gr_handle_chroot_mount(const struct dentry *dentry,
82465+ const struct vfsmount *mnt,
82466+ const char *dev_name);
82467+int gr_handle_chroot_pivot(void);
82468+int gr_handle_chroot_unix(const pid_t pid);
82469+
82470+int gr_handle_rawio(const struct inode *inode);
82471+
82472+void gr_handle_ioperm(void);
82473+void gr_handle_iopl(void);
82474+void gr_handle_msr_write(void);
82475+
82476+umode_t gr_acl_umask(void);
82477+
82478+int gr_tpe_allow(const struct file *file);
82479+
82480+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82481+void gr_clear_chroot_entries(struct task_struct *task);
82482+
82483+void gr_log_forkfail(const int retval);
82484+void gr_log_timechange(void);
82485+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82486+void gr_log_chdir(const struct dentry *dentry,
82487+ const struct vfsmount *mnt);
82488+void gr_log_chroot_exec(const struct dentry *dentry,
82489+ const struct vfsmount *mnt);
82490+void gr_log_remount(const char *devname, const int retval);
82491+void gr_log_unmount(const char *devname, const int retval);
82492+void gr_log_mount(const char *from, const char *to, const int retval);
82493+void gr_log_textrel(struct vm_area_struct *vma);
82494+void gr_log_ptgnustack(struct file *file);
82495+void gr_log_rwxmmap(struct file *file);
82496+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82497+
82498+int gr_handle_follow_link(const struct inode *parent,
82499+ const struct inode *inode,
82500+ const struct dentry *dentry,
82501+ const struct vfsmount *mnt);
82502+int gr_handle_fifo(const struct dentry *dentry,
82503+ const struct vfsmount *mnt,
82504+ const struct dentry *dir, const int flag,
82505+ const int acc_mode);
82506+int gr_handle_hardlink(const struct dentry *dentry,
82507+ const struct vfsmount *mnt,
82508+ struct inode *inode,
82509+ const int mode, const struct filename *to);
82510+
82511+int gr_is_capable(const int cap);
82512+int gr_is_capable_nolog(const int cap);
82513+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82514+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82515+
82516+void gr_copy_label(struct task_struct *tsk);
82517+void gr_handle_crash(struct task_struct *task, const int sig);
82518+int gr_handle_signal(const struct task_struct *p, const int sig);
82519+int gr_check_crash_uid(const kuid_t uid);
82520+int gr_check_protected_task(const struct task_struct *task);
82521+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82522+int gr_acl_handle_mmap(const struct file *file,
82523+ const unsigned long prot);
82524+int gr_acl_handle_mprotect(const struct file *file,
82525+ const unsigned long prot);
82526+int gr_check_hidden_task(const struct task_struct *tsk);
82527+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82528+ const struct vfsmount *mnt);
82529+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82530+ const struct vfsmount *mnt);
82531+__u32 gr_acl_handle_access(const struct dentry *dentry,
82532+ const struct vfsmount *mnt, const int fmode);
82533+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82534+ const struct vfsmount *mnt, umode_t *mode);
82535+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82536+ const struct vfsmount *mnt);
82537+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82538+ const struct vfsmount *mnt);
82539+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82540+ const struct vfsmount *mnt);
82541+int gr_handle_ptrace(struct task_struct *task, const long request);
82542+int gr_handle_proc_ptrace(struct task_struct *task);
82543+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82544+ const struct vfsmount *mnt);
82545+int gr_check_crash_exec(const struct file *filp);
82546+int gr_acl_is_enabled(void);
82547+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82548+ const kgid_t gid);
82549+int gr_set_proc_label(const struct dentry *dentry,
82550+ const struct vfsmount *mnt,
82551+ const int unsafe_flags);
82552+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
82553+ const struct vfsmount *mnt);
82554+__u32 gr_acl_handle_open(const struct dentry *dentry,
82555+ const struct vfsmount *mnt, int acc_mode);
82556+__u32 gr_acl_handle_creat(const struct dentry *dentry,
82557+ const struct dentry *p_dentry,
82558+ const struct vfsmount *p_mnt,
82559+ int open_flags, int acc_mode, const int imode);
82560+void gr_handle_create(const struct dentry *dentry,
82561+ const struct vfsmount *mnt);
82562+void gr_handle_proc_create(const struct dentry *dentry,
82563+ const struct inode *inode);
82564+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
82565+ const struct dentry *parent_dentry,
82566+ const struct vfsmount *parent_mnt,
82567+ const int mode);
82568+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
82569+ const struct dentry *parent_dentry,
82570+ const struct vfsmount *parent_mnt);
82571+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
82572+ const struct vfsmount *mnt);
82573+void gr_handle_delete(const ino_t ino, const dev_t dev);
82574+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
82575+ const struct vfsmount *mnt);
82576+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
82577+ const struct dentry *parent_dentry,
82578+ const struct vfsmount *parent_mnt,
82579+ const struct filename *from);
82580+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
82581+ const struct dentry *parent_dentry,
82582+ const struct vfsmount *parent_mnt,
82583+ const struct dentry *old_dentry,
82584+ const struct vfsmount *old_mnt, const struct filename *to);
82585+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
82586+int gr_acl_handle_rename(struct dentry *new_dentry,
82587+ struct dentry *parent_dentry,
82588+ const struct vfsmount *parent_mnt,
82589+ struct dentry *old_dentry,
82590+ struct inode *old_parent_inode,
82591+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
82592+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
82593+ struct dentry *old_dentry,
82594+ struct dentry *new_dentry,
82595+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
82596+__u32 gr_check_link(const struct dentry *new_dentry,
82597+ const struct dentry *parent_dentry,
82598+ const struct vfsmount *parent_mnt,
82599+ const struct dentry *old_dentry,
82600+ const struct vfsmount *old_mnt);
82601+int gr_acl_handle_filldir(const struct file *file, const char *name,
82602+ const unsigned int namelen, const ino_t ino);
82603+
82604+__u32 gr_acl_handle_unix(const struct dentry *dentry,
82605+ const struct vfsmount *mnt);
82606+void gr_acl_handle_exit(void);
82607+void gr_acl_handle_psacct(struct task_struct *task, const long code);
82608+int gr_acl_handle_procpidmem(const struct task_struct *task);
82609+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
82610+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
82611+void gr_audit_ptrace(struct task_struct *task);
82612+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
82613+void gr_put_exec_file(struct task_struct *task);
82614+
82615+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
82616+
82617+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
82618+extern void gr_learn_resource(const struct task_struct *task, const int res,
82619+ const unsigned long wanted, const int gt);
82620+#else
82621+static inline void gr_learn_resource(const struct task_struct *task, const int res,
82622+ const unsigned long wanted, const int gt)
82623+{
82624+}
82625+#endif
82626+
82627+#ifdef CONFIG_GRKERNSEC_RESLOG
82628+extern void gr_log_resource(const struct task_struct *task, const int res,
82629+ const unsigned long wanted, const int gt);
82630+#else
82631+static inline void gr_log_resource(const struct task_struct *task, const int res,
82632+ const unsigned long wanted, const int gt)
82633+{
82634+}
82635+#endif
82636+
82637+#ifdef CONFIG_GRKERNSEC
82638+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
82639+void gr_handle_vm86(void);
82640+void gr_handle_mem_readwrite(u64 from, u64 to);
82641+
82642+void gr_log_badprocpid(const char *entry);
82643+
82644+extern int grsec_enable_dmesg;
82645+extern int grsec_disable_privio;
82646+
82647+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
82648+extern kgid_t grsec_proc_gid;
82649+#endif
82650+
82651+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82652+extern int grsec_enable_chroot_findtask;
82653+#endif
82654+#ifdef CONFIG_GRKERNSEC_SETXID
82655+extern int grsec_enable_setxid;
82656+#endif
82657+#endif
82658+
82659+#endif
82660diff --git a/include/linux/grsock.h b/include/linux/grsock.h
82661new file mode 100644
82662index 0000000..e7ffaaf
82663--- /dev/null
82664+++ b/include/linux/grsock.h
82665@@ -0,0 +1,19 @@
82666+#ifndef __GRSOCK_H
82667+#define __GRSOCK_H
82668+
82669+extern void gr_attach_curr_ip(const struct sock *sk);
82670+extern int gr_handle_sock_all(const int family, const int type,
82671+ const int protocol);
82672+extern int gr_handle_sock_server(const struct sockaddr *sck);
82673+extern int gr_handle_sock_server_other(const struct sock *sck);
82674+extern int gr_handle_sock_client(const struct sockaddr *sck);
82675+extern int gr_search_connect(struct socket * sock,
82676+ struct sockaddr_in * addr);
82677+extern int gr_search_bind(struct socket * sock,
82678+ struct sockaddr_in * addr);
82679+extern int gr_search_listen(struct socket * sock);
82680+extern int gr_search_accept(struct socket * sock);
82681+extern int gr_search_socket(const int domain, const int type,
82682+ const int protocol);
82683+
82684+#endif
82685diff --git a/include/linux/hash.h b/include/linux/hash.h
82686index d0494c3..69b7715 100644
82687--- a/include/linux/hash.h
82688+++ b/include/linux/hash.h
82689@@ -87,7 +87,7 @@ static inline u32 hash32_ptr(const void *ptr)
82690 struct fast_hash_ops {
82691 u32 (*hash)(const void *data, u32 len, u32 seed);
82692 u32 (*hash2)(const u32 *data, u32 len, u32 seed);
82693-};
82694+} __no_const;
82695
82696 /**
82697 * arch_fast_hash - Caclulates a hash over a given buffer that can have
82698diff --git a/include/linux/highmem.h b/include/linux/highmem.h
82699index 9286a46..373f27f 100644
82700--- a/include/linux/highmem.h
82701+++ b/include/linux/highmem.h
82702@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
82703 kunmap_atomic(kaddr);
82704 }
82705
82706+static inline void sanitize_highpage(struct page *page)
82707+{
82708+ void *kaddr;
82709+ unsigned long flags;
82710+
82711+ local_irq_save(flags);
82712+ kaddr = kmap_atomic(page);
82713+ clear_page(kaddr);
82714+ kunmap_atomic(kaddr);
82715+ local_irq_restore(flags);
82716+}
82717+
82718 static inline void zero_user_segments(struct page *page,
82719 unsigned start1, unsigned end1,
82720 unsigned start2, unsigned end2)
82721diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
82722index 1c7b89a..7dda400 100644
82723--- a/include/linux/hwmon-sysfs.h
82724+++ b/include/linux/hwmon-sysfs.h
82725@@ -25,7 +25,8 @@
82726 struct sensor_device_attribute{
82727 struct device_attribute dev_attr;
82728 int index;
82729-};
82730+} __do_const;
82731+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
82732 #define to_sensor_dev_attr(_dev_attr) \
82733 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
82734
82735@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
82736 struct device_attribute dev_attr;
82737 u8 index;
82738 u8 nr;
82739-};
82740+} __do_const;
82741+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
82742 #define to_sensor_dev_attr_2(_dev_attr) \
82743 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
82744
82745diff --git a/include/linux/i2c.h b/include/linux/i2c.h
82746index b556e0a..c10a515 100644
82747--- a/include/linux/i2c.h
82748+++ b/include/linux/i2c.h
82749@@ -378,6 +378,7 @@ struct i2c_algorithm {
82750 /* To determine what the adapter supports */
82751 u32 (*functionality) (struct i2c_adapter *);
82752 };
82753+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
82754
82755 /**
82756 * struct i2c_bus_recovery_info - I2C bus recovery information
82757diff --git a/include/linux/i2o.h b/include/linux/i2o.h
82758index d23c3c2..eb63c81 100644
82759--- a/include/linux/i2o.h
82760+++ b/include/linux/i2o.h
82761@@ -565,7 +565,7 @@ struct i2o_controller {
82762 struct i2o_device *exec; /* Executive */
82763 #if BITS_PER_LONG == 64
82764 spinlock_t context_list_lock; /* lock for context_list */
82765- atomic_t context_list_counter; /* needed for unique contexts */
82766+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
82767 struct list_head context_list; /* list of context id's
82768 and pointers */
82769 #endif
82770diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
82771index aff7ad8..3942bbd 100644
82772--- a/include/linux/if_pppox.h
82773+++ b/include/linux/if_pppox.h
82774@@ -76,7 +76,7 @@ struct pppox_proto {
82775 int (*ioctl)(struct socket *sock, unsigned int cmd,
82776 unsigned long arg);
82777 struct module *owner;
82778-};
82779+} __do_const;
82780
82781 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
82782 extern void unregister_pppox_proto(int proto_num);
82783diff --git a/include/linux/init.h b/include/linux/init.h
82784index 2df8e8d..3e1280d 100644
82785--- a/include/linux/init.h
82786+++ b/include/linux/init.h
82787@@ -37,9 +37,17 @@
82788 * section.
82789 */
82790
82791+#define add_init_latent_entropy __latent_entropy
82792+
82793+#ifdef CONFIG_MEMORY_HOTPLUG
82794+#define add_meminit_latent_entropy
82795+#else
82796+#define add_meminit_latent_entropy __latent_entropy
82797+#endif
82798+
82799 /* These are for everybody (although not all archs will actually
82800 discard it in modules) */
82801-#define __init __section(.init.text) __cold notrace
82802+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
82803 #define __initdata __section(.init.data)
82804 #define __initconst __constsection(.init.rodata)
82805 #define __exitdata __section(.exit.data)
82806@@ -100,7 +108,7 @@
82807 #define __cpuexitconst
82808
82809 /* Used for MEMORY_HOTPLUG */
82810-#define __meminit __section(.meminit.text) __cold notrace
82811+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
82812 #define __meminitdata __section(.meminit.data)
82813 #define __meminitconst __constsection(.meminit.rodata)
82814 #define __memexit __section(.memexit.text) __exitused __cold notrace
82815diff --git a/include/linux/init_task.h b/include/linux/init_task.h
82816index 2bb4c4f3..e0fac69 100644
82817--- a/include/linux/init_task.h
82818+++ b/include/linux/init_task.h
82819@@ -149,6 +149,12 @@ extern struct task_group root_task_group;
82820
82821 #define INIT_TASK_COMM "swapper"
82822
82823+#ifdef CONFIG_X86
82824+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
82825+#else
82826+#define INIT_TASK_THREAD_INFO
82827+#endif
82828+
82829 #ifdef CONFIG_RT_MUTEXES
82830 # define INIT_RT_MUTEXES(tsk) \
82831 .pi_waiters = RB_ROOT, \
82832@@ -196,6 +202,7 @@ extern struct task_group root_task_group;
82833 RCU_POINTER_INITIALIZER(cred, &init_cred), \
82834 .comm = INIT_TASK_COMM, \
82835 .thread = INIT_THREAD, \
82836+ INIT_TASK_THREAD_INFO \
82837 .fs = &init_fs, \
82838 .files = &init_files, \
82839 .signal = &init_signals, \
82840diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
82841index 698ad05..8601bb7 100644
82842--- a/include/linux/interrupt.h
82843+++ b/include/linux/interrupt.h
82844@@ -418,8 +418,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
82845
82846 struct softirq_action
82847 {
82848- void (*action)(struct softirq_action *);
82849-};
82850+ void (*action)(void);
82851+} __no_const;
82852
82853 asmlinkage void do_softirq(void);
82854 asmlinkage void __do_softirq(void);
82855@@ -433,7 +433,7 @@ static inline void do_softirq_own_stack(void)
82856 }
82857 #endif
82858
82859-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
82860+extern void open_softirq(int nr, void (*action)(void));
82861 extern void softirq_init(void);
82862 extern void __raise_softirq_irqoff(unsigned int nr);
82863
82864diff --git a/include/linux/iommu.h b/include/linux/iommu.h
82865index 20f9a52..63ee2e3 100644
82866--- a/include/linux/iommu.h
82867+++ b/include/linux/iommu.h
82868@@ -131,7 +131,7 @@ struct iommu_ops {
82869 u32 (*domain_get_windows)(struct iommu_domain *domain);
82870
82871 unsigned long pgsize_bitmap;
82872-};
82873+} __do_const;
82874
82875 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
82876 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
82877diff --git a/include/linux/ioport.h b/include/linux/ioport.h
82878index 142ec54..873e033 100644
82879--- a/include/linux/ioport.h
82880+++ b/include/linux/ioport.h
82881@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
82882 int adjust_resource(struct resource *res, resource_size_t start,
82883 resource_size_t size);
82884 resource_size_t resource_alignment(struct resource *res);
82885-static inline resource_size_t resource_size(const struct resource *res)
82886+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
82887 {
82888 return res->end - res->start + 1;
82889 }
82890diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
82891index 35e7eca..6afb7ad 100644
82892--- a/include/linux/ipc_namespace.h
82893+++ b/include/linux/ipc_namespace.h
82894@@ -69,7 +69,7 @@ struct ipc_namespace {
82895 struct user_namespace *user_ns;
82896
82897 unsigned int proc_inum;
82898-};
82899+} __randomize_layout;
82900
82901 extern struct ipc_namespace init_ipc_ns;
82902 extern atomic_t nr_ipc_ns;
82903diff --git a/include/linux/irq.h b/include/linux/irq.h
82904index 62af592..cc3b0d0 100644
82905--- a/include/linux/irq.h
82906+++ b/include/linux/irq.h
82907@@ -344,7 +344,8 @@ struct irq_chip {
82908 void (*irq_release_resources)(struct irq_data *data);
82909
82910 unsigned long flags;
82911-};
82912+} __do_const;
82913+typedef struct irq_chip __no_const irq_chip_no_const;
82914
82915 /*
82916 * irq_chip specific flags
82917diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
82918index 45e2d8c..26d85da 100644
82919--- a/include/linux/irqchip/arm-gic.h
82920+++ b/include/linux/irqchip/arm-gic.h
82921@@ -75,9 +75,11 @@
82922
82923 #ifndef __ASSEMBLY__
82924
82925+#include <linux/irq.h>
82926+
82927 struct device_node;
82928
82929-extern struct irq_chip gic_arch_extn;
82930+extern irq_chip_no_const gic_arch_extn;
82931
82932 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
82933 u32 offset, struct device_node *);
82934diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
82935index c367cbd..c9b79e6 100644
82936--- a/include/linux/jiffies.h
82937+++ b/include/linux/jiffies.h
82938@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
82939 /*
82940 * Convert various time units to each other:
82941 */
82942-extern unsigned int jiffies_to_msecs(const unsigned long j);
82943-extern unsigned int jiffies_to_usecs(const unsigned long j);
82944+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
82945+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
82946
82947-static inline u64 jiffies_to_nsecs(const unsigned long j)
82948+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
82949 {
82950 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
82951 }
82952
82953-extern unsigned long msecs_to_jiffies(const unsigned int m);
82954-extern unsigned long usecs_to_jiffies(const unsigned int u);
82955+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
82956+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
82957 extern unsigned long timespec_to_jiffies(const struct timespec *value);
82958 extern void jiffies_to_timespec(const unsigned long jiffies,
82959- struct timespec *value);
82960-extern unsigned long timeval_to_jiffies(const struct timeval *value);
82961+ struct timespec *value) __intentional_overflow(-1);
82962+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
82963 extern void jiffies_to_timeval(const unsigned long jiffies,
82964 struct timeval *value);
82965
82966diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
82967index 6883e19..e854fcb 100644
82968--- a/include/linux/kallsyms.h
82969+++ b/include/linux/kallsyms.h
82970@@ -15,7 +15,8 @@
82971
82972 struct module;
82973
82974-#ifdef CONFIG_KALLSYMS
82975+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
82976+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
82977 /* Lookup the address for a symbol. Returns 0 if not found. */
82978 unsigned long kallsyms_lookup_name(const char *name);
82979
82980@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
82981 /* Stupid that this does nothing, but I didn't create this mess. */
82982 #define __print_symbol(fmt, addr)
82983 #endif /*CONFIG_KALLSYMS*/
82984+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
82985+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
82986+extern unsigned long kallsyms_lookup_name(const char *name);
82987+extern void __print_symbol(const char *fmt, unsigned long address);
82988+extern int sprint_backtrace(char *buffer, unsigned long address);
82989+extern int sprint_symbol(char *buffer, unsigned long address);
82990+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
82991+const char *kallsyms_lookup(unsigned long addr,
82992+ unsigned long *symbolsize,
82993+ unsigned long *offset,
82994+ char **modname, char *namebuf);
82995+extern int kallsyms_lookup_size_offset(unsigned long addr,
82996+ unsigned long *symbolsize,
82997+ unsigned long *offset);
82998+#endif
82999
83000 /* This macro allows us to keep printk typechecking */
83001 static __printf(1, 2)
83002diff --git a/include/linux/key-type.h b/include/linux/key-type.h
83003index 44792ee..6172f2a 100644
83004--- a/include/linux/key-type.h
83005+++ b/include/linux/key-type.h
83006@@ -132,7 +132,7 @@ struct key_type {
83007 /* internal fields */
83008 struct list_head link; /* link in types list */
83009 struct lock_class_key lock_class; /* key->sem lock class */
83010-};
83011+} __do_const;
83012
83013 extern struct key_type key_type_keyring;
83014
83015diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
83016index e465bb1..19f605f 100644
83017--- a/include/linux/kgdb.h
83018+++ b/include/linux/kgdb.h
83019@@ -52,7 +52,7 @@ extern int kgdb_connected;
83020 extern int kgdb_io_module_registered;
83021
83022 extern atomic_t kgdb_setting_breakpoint;
83023-extern atomic_t kgdb_cpu_doing_single_step;
83024+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
83025
83026 extern struct task_struct *kgdb_usethread;
83027 extern struct task_struct *kgdb_contthread;
83028@@ -254,7 +254,7 @@ struct kgdb_arch {
83029 void (*correct_hw_break)(void);
83030
83031 void (*enable_nmi)(bool on);
83032-};
83033+} __do_const;
83034
83035 /**
83036 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
83037@@ -279,7 +279,7 @@ struct kgdb_io {
83038 void (*pre_exception) (void);
83039 void (*post_exception) (void);
83040 int is_console;
83041-};
83042+} __do_const;
83043
83044 extern struct kgdb_arch arch_kgdb_ops;
83045
83046diff --git a/include/linux/kmod.h b/include/linux/kmod.h
83047index 0555cc6..40116ce 100644
83048--- a/include/linux/kmod.h
83049+++ b/include/linux/kmod.h
83050@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
83051 * usually useless though. */
83052 extern __printf(2, 3)
83053 int __request_module(bool wait, const char *name, ...);
83054+extern __printf(3, 4)
83055+int ___request_module(bool wait, char *param_name, const char *name, ...);
83056 #define request_module(mod...) __request_module(true, mod)
83057 #define request_module_nowait(mod...) __request_module(false, mod)
83058 #define try_then_request_module(x, mod...) \
83059@@ -57,6 +59,9 @@ struct subprocess_info {
83060 struct work_struct work;
83061 struct completion *complete;
83062 char *path;
83063+#ifdef CONFIG_GRKERNSEC
83064+ char *origpath;
83065+#endif
83066 char **argv;
83067 char **envp;
83068 int wait;
83069diff --git a/include/linux/kobject.h b/include/linux/kobject.h
83070index 2d61b90..a1d0a13 100644
83071--- a/include/linux/kobject.h
83072+++ b/include/linux/kobject.h
83073@@ -118,7 +118,7 @@ struct kobj_type {
83074 struct attribute **default_attrs;
83075 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
83076 const void *(*namespace)(struct kobject *kobj);
83077-};
83078+} __do_const;
83079
83080 struct kobj_uevent_env {
83081 char *argv[3];
83082@@ -142,6 +142,7 @@ struct kobj_attribute {
83083 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
83084 const char *buf, size_t count);
83085 };
83086+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
83087
83088 extern const struct sysfs_ops kobj_sysfs_ops;
83089
83090@@ -169,7 +170,7 @@ struct kset {
83091 spinlock_t list_lock;
83092 struct kobject kobj;
83093 const struct kset_uevent_ops *uevent_ops;
83094-};
83095+} __randomize_layout;
83096
83097 extern void kset_init(struct kset *kset);
83098 extern int __must_check kset_register(struct kset *kset);
83099diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
83100index df32d25..fb52e27 100644
83101--- a/include/linux/kobject_ns.h
83102+++ b/include/linux/kobject_ns.h
83103@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
83104 const void *(*netlink_ns)(struct sock *sk);
83105 const void *(*initial_ns)(void);
83106 void (*drop_ns)(void *);
83107-};
83108+} __do_const;
83109
83110 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
83111 int kobj_ns_type_registered(enum kobj_ns_type type);
83112diff --git a/include/linux/kref.h b/include/linux/kref.h
83113index 484604d..0f6c5b6 100644
83114--- a/include/linux/kref.h
83115+++ b/include/linux/kref.h
83116@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
83117 static inline int kref_sub(struct kref *kref, unsigned int count,
83118 void (*release)(struct kref *kref))
83119 {
83120- WARN_ON(release == NULL);
83121+ BUG_ON(release == NULL);
83122
83123 if (atomic_sub_and_test((int) count, &kref->refcount)) {
83124 release(kref);
83125diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
83126index a4c33b3..e854710 100644
83127--- a/include/linux/kvm_host.h
83128+++ b/include/linux/kvm_host.h
83129@@ -452,7 +452,7 @@ static inline void kvm_irqfd_exit(void)
83130 {
83131 }
83132 #endif
83133-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83134+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83135 struct module *module);
83136 void kvm_exit(void);
83137
83138@@ -618,7 +618,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
83139 struct kvm_guest_debug *dbg);
83140 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
83141
83142-int kvm_arch_init(void *opaque);
83143+int kvm_arch_init(const void *opaque);
83144 void kvm_arch_exit(void);
83145
83146 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
83147diff --git a/include/linux/libata.h b/include/linux/libata.h
83148index 92abb49..e7fff2a 100644
83149--- a/include/linux/libata.h
83150+++ b/include/linux/libata.h
83151@@ -976,7 +976,7 @@ struct ata_port_operations {
83152 * fields must be pointers.
83153 */
83154 const struct ata_port_operations *inherits;
83155-};
83156+} __do_const;
83157
83158 struct ata_port_info {
83159 unsigned long flags;
83160diff --git a/include/linux/linkage.h b/include/linux/linkage.h
83161index a6a42dd..6c5ebce 100644
83162--- a/include/linux/linkage.h
83163+++ b/include/linux/linkage.h
83164@@ -36,6 +36,7 @@
83165 #endif
83166
83167 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
83168+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
83169 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
83170
83171 /*
83172diff --git a/include/linux/list.h b/include/linux/list.h
83173index cbbb96f..602d023 100644
83174--- a/include/linux/list.h
83175+++ b/include/linux/list.h
83176@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
83177 extern void list_del(struct list_head *entry);
83178 #endif
83179
83180+extern void __pax_list_add(struct list_head *new,
83181+ struct list_head *prev,
83182+ struct list_head *next);
83183+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83184+{
83185+ __pax_list_add(new, head, head->next);
83186+}
83187+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83188+{
83189+ __pax_list_add(new, head->prev, head);
83190+}
83191+extern void pax_list_del(struct list_head *entry);
83192+
83193 /**
83194 * list_replace - replace old entry by new one
83195 * @old : the element to be replaced
83196@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
83197 INIT_LIST_HEAD(entry);
83198 }
83199
83200+extern void pax_list_del_init(struct list_head *entry);
83201+
83202 /**
83203 * list_move - delete from one list and add as another's head
83204 * @list: the entry to move
83205diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83206index 4bfde0e..d6e2e09 100644
83207--- a/include/linux/lockref.h
83208+++ b/include/linux/lockref.h
83209@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83210 return ((int)l->count < 0);
83211 }
83212
83213+static inline unsigned int __lockref_read(struct lockref *lockref)
83214+{
83215+ return lockref->count;
83216+}
83217+
83218+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83219+{
83220+ lockref->count = count;
83221+}
83222+
83223+static inline void __lockref_inc(struct lockref *lockref)
83224+{
83225+
83226+#ifdef CONFIG_PAX_REFCOUNT
83227+ atomic_inc((atomic_t *)&lockref->count);
83228+#else
83229+ lockref->count++;
83230+#endif
83231+
83232+}
83233+
83234+static inline void __lockref_dec(struct lockref *lockref)
83235+{
83236+
83237+#ifdef CONFIG_PAX_REFCOUNT
83238+ atomic_dec((atomic_t *)&lockref->count);
83239+#else
83240+ lockref->count--;
83241+#endif
83242+
83243+}
83244+
83245 #endif /* __LINUX_LOCKREF_H */
83246diff --git a/include/linux/math64.h b/include/linux/math64.h
83247index c45c089..298841c 100644
83248--- a/include/linux/math64.h
83249+++ b/include/linux/math64.h
83250@@ -15,7 +15,7 @@
83251 * This is commonly provided by 32bit archs to provide an optimized 64bit
83252 * divide.
83253 */
83254-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83255+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83256 {
83257 *remainder = dividend % divisor;
83258 return dividend / divisor;
83259@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83260 /**
83261 * div64_u64 - unsigned 64bit divide with 64bit divisor
83262 */
83263-static inline u64 div64_u64(u64 dividend, u64 divisor)
83264+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83265 {
83266 return dividend / divisor;
83267 }
83268@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83269 #define div64_ul(x, y) div_u64((x), (y))
83270
83271 #ifndef div_u64_rem
83272-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83273+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83274 {
83275 *remainder = do_div(dividend, divisor);
83276 return dividend;
83277@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83278 #endif
83279
83280 #ifndef div64_u64
83281-extern u64 div64_u64(u64 dividend, u64 divisor);
83282+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83283 #endif
83284
83285 #ifndef div64_s64
83286@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83287 * divide.
83288 */
83289 #ifndef div_u64
83290-static inline u64 div_u64(u64 dividend, u32 divisor)
83291+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83292 {
83293 u32 remainder;
83294 return div_u64_rem(dividend, divisor, &remainder);
83295diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83296index f230a97..714c006 100644
83297--- a/include/linux/mempolicy.h
83298+++ b/include/linux/mempolicy.h
83299@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83300 }
83301
83302 #define vma_policy(vma) ((vma)->vm_policy)
83303+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83304+{
83305+ vma->vm_policy = pol;
83306+}
83307
83308 static inline void mpol_get(struct mempolicy *pol)
83309 {
83310@@ -228,6 +232,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83311 }
83312
83313 #define vma_policy(vma) NULL
83314+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83315+{
83316+}
83317
83318 static inline int
83319 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
83320diff --git a/include/linux/mm.h b/include/linux/mm.h
83321index f952cc8..b9f6135 100644
83322--- a/include/linux/mm.h
83323+++ b/include/linux/mm.h
83324@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
83325 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83326 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
83327 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
83328+
83329+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83330+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
83331+#endif
83332+
83333 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
83334
83335 #ifdef CONFIG_MEM_SOFT_DIRTY
83336@@ -237,8 +242,8 @@ struct vm_operations_struct {
83337 /* called by access_process_vm when get_user_pages() fails, typically
83338 * for use by special VMAs that can switch between memory and hardware
83339 */
83340- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83341- void *buf, int len, int write);
83342+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83343+ void *buf, size_t len, int write);
83344
83345 /* Called by the /proc/PID/maps code to ask the vma whether it
83346 * has a special name. Returning non-NULL will also cause this
83347@@ -274,6 +279,7 @@ struct vm_operations_struct {
83348 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
83349 unsigned long size, pgoff_t pgoff);
83350 };
83351+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83352
83353 struct mmu_gather;
83354 struct inode;
83355@@ -1163,8 +1169,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83356 unsigned long *pfn);
83357 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83358 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83359-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83360- void *buf, int len, int write);
83361+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83362+ void *buf, size_t len, int write);
83363
83364 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83365 loff_t const holebegin, loff_t const holelen)
83366@@ -1204,9 +1210,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83367 }
83368 #endif
83369
83370-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83371-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83372- void *buf, int len, int write);
83373+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83374+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83375+ void *buf, size_t len, int write);
83376
83377 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83378 unsigned long start, unsigned long nr_pages,
83379@@ -1238,34 +1244,6 @@ int set_page_dirty_lock(struct page *page);
83380 int clear_page_dirty_for_io(struct page *page);
83381 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83382
83383-/* Is the vma a continuation of the stack vma above it? */
83384-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83385-{
83386- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83387-}
83388-
83389-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83390- unsigned long addr)
83391-{
83392- return (vma->vm_flags & VM_GROWSDOWN) &&
83393- (vma->vm_start == addr) &&
83394- !vma_growsdown(vma->vm_prev, addr);
83395-}
83396-
83397-/* Is the vma a continuation of the stack vma below it? */
83398-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83399-{
83400- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83401-}
83402-
83403-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83404- unsigned long addr)
83405-{
83406- return (vma->vm_flags & VM_GROWSUP) &&
83407- (vma->vm_end == addr) &&
83408- !vma_growsup(vma->vm_next, addr);
83409-}
83410-
83411 extern pid_t
83412 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
83413
83414@@ -1365,6 +1343,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
83415 }
83416 #endif
83417
83418+#ifdef CONFIG_MMU
83419+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
83420+#else
83421+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
83422+{
83423+ return __pgprot(0);
83424+}
83425+#endif
83426+
83427 int vma_wants_writenotify(struct vm_area_struct *vma);
83428
83429 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
83430@@ -1383,8 +1370,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83431 {
83432 return 0;
83433 }
83434+
83435+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83436+ unsigned long address)
83437+{
83438+ return 0;
83439+}
83440 #else
83441 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83442+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83443 #endif
83444
83445 #ifdef __PAGETABLE_PMD_FOLDED
83446@@ -1393,8 +1387,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83447 {
83448 return 0;
83449 }
83450+
83451+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83452+ unsigned long address)
83453+{
83454+ return 0;
83455+}
83456 #else
83457 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83458+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83459 #endif
83460
83461 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
83462@@ -1412,11 +1413,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83463 NULL: pud_offset(pgd, address);
83464 }
83465
83466+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83467+{
83468+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83469+ NULL: pud_offset(pgd, address);
83470+}
83471+
83472 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83473 {
83474 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83475 NULL: pmd_offset(pud, address);
83476 }
83477+
83478+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83479+{
83480+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83481+ NULL: pmd_offset(pud, address);
83482+}
83483 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83484
83485 #if USE_SPLIT_PTE_PTLOCKS
83486@@ -1815,7 +1828,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83487 unsigned long addr, unsigned long len,
83488 unsigned long flags, struct page **pages);
83489
83490-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83491+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83492
83493 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83494 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83495@@ -1823,6 +1836,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83496 unsigned long len, unsigned long prot, unsigned long flags,
83497 unsigned long pgoff, unsigned long *populate);
83498 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83499+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83500
83501 #ifdef CONFIG_MMU
83502 extern int __mm_populate(unsigned long addr, unsigned long len,
83503@@ -1851,10 +1865,11 @@ struct vm_unmapped_area_info {
83504 unsigned long high_limit;
83505 unsigned long align_mask;
83506 unsigned long align_offset;
83507+ unsigned long threadstack_offset;
83508 };
83509
83510-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83511-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83512+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83513+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83514
83515 /*
83516 * Search for an unmapped address range.
83517@@ -1866,7 +1881,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83518 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
83519 */
83520 static inline unsigned long
83521-vm_unmapped_area(struct vm_unmapped_area_info *info)
83522+vm_unmapped_area(const struct vm_unmapped_area_info *info)
83523 {
83524 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
83525 return unmapped_area(info);
83526@@ -1928,6 +1943,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
83527 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
83528 struct vm_area_struct **pprev);
83529
83530+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
83531+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
83532+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
83533+
83534 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
83535 NULL if none. Assume start_addr < end_addr. */
83536 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
83537@@ -1956,15 +1975,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
83538 return vma;
83539 }
83540
83541-#ifdef CONFIG_MMU
83542-pgprot_t vm_get_page_prot(unsigned long vm_flags);
83543-#else
83544-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
83545-{
83546- return __pgprot(0);
83547-}
83548-#endif
83549-
83550 #ifdef CONFIG_NUMA_BALANCING
83551 unsigned long change_prot_numa(struct vm_area_struct *vma,
83552 unsigned long start, unsigned long end);
83553@@ -2016,6 +2026,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
83554 static inline void vm_stat_account(struct mm_struct *mm,
83555 unsigned long flags, struct file *file, long pages)
83556 {
83557+
83558+#ifdef CONFIG_PAX_RANDMMAP
83559+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
83560+#endif
83561+
83562 mm->total_vm += pages;
83563 }
83564 #endif /* CONFIG_PROC_FS */
83565@@ -2104,7 +2119,7 @@ extern int unpoison_memory(unsigned long pfn);
83566 extern int sysctl_memory_failure_early_kill;
83567 extern int sysctl_memory_failure_recovery;
83568 extern void shake_page(struct page *p, int access);
83569-extern atomic_long_t num_poisoned_pages;
83570+extern atomic_long_unchecked_t num_poisoned_pages;
83571 extern int soft_offline_page(struct page *page, int flags);
83572
83573 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
83574@@ -2139,5 +2154,11 @@ void __init setup_nr_node_ids(void);
83575 static inline void setup_nr_node_ids(void) {}
83576 #endif
83577
83578+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83579+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
83580+#else
83581+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
83582+#endif
83583+
83584 #endif /* __KERNEL__ */
83585 #endif /* _LINUX_MM_H */
83586diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
83587index 6e0b286..90d9c0d 100644
83588--- a/include/linux/mm_types.h
83589+++ b/include/linux/mm_types.h
83590@@ -308,7 +308,9 @@ struct vm_area_struct {
83591 #ifdef CONFIG_NUMA
83592 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
83593 #endif
83594-};
83595+
83596+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
83597+} __randomize_layout;
83598
83599 struct core_thread {
83600 struct task_struct *task;
83601@@ -454,7 +456,25 @@ struct mm_struct {
83602 bool tlb_flush_pending;
83603 #endif
83604 struct uprobes_state uprobes_state;
83605-};
83606+
83607+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
83608+ unsigned long pax_flags;
83609+#endif
83610+
83611+#ifdef CONFIG_PAX_DLRESOLVE
83612+ unsigned long call_dl_resolve;
83613+#endif
83614+
83615+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
83616+ unsigned long call_syscall;
83617+#endif
83618+
83619+#ifdef CONFIG_PAX_ASLR
83620+ unsigned long delta_mmap; /* randomized offset */
83621+ unsigned long delta_stack; /* randomized offset */
83622+#endif
83623+
83624+} __randomize_layout;
83625
83626 static inline void mm_init_cpumask(struct mm_struct *mm)
83627 {
83628diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
83629index c5d5278..f0b68c8 100644
83630--- a/include/linux/mmiotrace.h
83631+++ b/include/linux/mmiotrace.h
83632@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
83633 /* Called from ioremap.c */
83634 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
83635 void __iomem *addr);
83636-extern void mmiotrace_iounmap(volatile void __iomem *addr);
83637+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
83638
83639 /* For anyone to insert markers. Remember trailing newline. */
83640 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
83641@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
83642 {
83643 }
83644
83645-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
83646+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
83647 {
83648 }
83649
83650diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
83651index b21bac4..94142ca 100644
83652--- a/include/linux/mmzone.h
83653+++ b/include/linux/mmzone.h
83654@@ -527,7 +527,7 @@ struct zone {
83655
83656 ZONE_PADDING(_pad3_)
83657 /* Zone statistics */
83658- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83659+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83660 } ____cacheline_internodealigned_in_smp;
83661
83662 typedef enum {
83663diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
83664index 44eeef0..a92d3f9 100644
83665--- a/include/linux/mod_devicetable.h
83666+++ b/include/linux/mod_devicetable.h
83667@@ -139,7 +139,7 @@ struct usb_device_id {
83668 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
83669 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
83670
83671-#define HID_ANY_ID (~0)
83672+#define HID_ANY_ID (~0U)
83673 #define HID_BUS_ANY 0xffff
83674 #define HID_GROUP_ANY 0x0000
83675
83676@@ -475,7 +475,7 @@ struct dmi_system_id {
83677 const char *ident;
83678 struct dmi_strmatch matches[4];
83679 void *driver_data;
83680-};
83681+} __do_const;
83682 /*
83683 * struct dmi_device_id appears during expansion of
83684 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
83685diff --git a/include/linux/module.h b/include/linux/module.h
83686index 71f282a..b2387e2 100644
83687--- a/include/linux/module.h
83688+++ b/include/linux/module.h
83689@@ -17,9 +17,11 @@
83690 #include <linux/moduleparam.h>
83691 #include <linux/jump_label.h>
83692 #include <linux/export.h>
83693+#include <linux/fs.h>
83694
83695 #include <linux/percpu.h>
83696 #include <asm/module.h>
83697+#include <asm/pgtable.h>
83698
83699 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
83700 #define MODULE_SIG_STRING "~Module signature appended~\n"
83701@@ -42,7 +44,7 @@ struct module_kobject {
83702 struct kobject *drivers_dir;
83703 struct module_param_attrs *mp;
83704 struct completion *kobj_completion;
83705-};
83706+} __randomize_layout;
83707
83708 struct module_attribute {
83709 struct attribute attr;
83710@@ -54,12 +56,13 @@ struct module_attribute {
83711 int (*test)(struct module *);
83712 void (*free)(struct module *);
83713 };
83714+typedef struct module_attribute __no_const module_attribute_no_const;
83715
83716 struct module_version_attribute {
83717 struct module_attribute mattr;
83718 const char *module_name;
83719 const char *version;
83720-} __attribute__ ((__aligned__(sizeof(void *))));
83721+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
83722
83723 extern ssize_t __modver_version_show(struct module_attribute *,
83724 struct module_kobject *, char *);
83725@@ -235,7 +238,7 @@ struct module {
83726
83727 /* Sysfs stuff. */
83728 struct module_kobject mkobj;
83729- struct module_attribute *modinfo_attrs;
83730+ module_attribute_no_const *modinfo_attrs;
83731 const char *version;
83732 const char *srcversion;
83733 struct kobject *holders_dir;
83734@@ -284,19 +287,16 @@ struct module {
83735 int (*init)(void);
83736
83737 /* If this is non-NULL, vfree after init() returns */
83738- void *module_init;
83739+ void *module_init_rx, *module_init_rw;
83740
83741 /* Here is the actual code + data, vfree'd on unload. */
83742- void *module_core;
83743+ void *module_core_rx, *module_core_rw;
83744
83745 /* Here are the sizes of the init and core sections */
83746- unsigned int init_size, core_size;
83747+ unsigned int init_size_rw, core_size_rw;
83748
83749 /* The size of the executable code in each section. */
83750- unsigned int init_text_size, core_text_size;
83751-
83752- /* Size of RO sections of the module (text+rodata) */
83753- unsigned int init_ro_size, core_ro_size;
83754+ unsigned int init_size_rx, core_size_rx;
83755
83756 /* Arch-specific module values */
83757 struct mod_arch_specific arch;
83758@@ -352,6 +352,10 @@ struct module {
83759 #ifdef CONFIG_EVENT_TRACING
83760 struct ftrace_event_call **trace_events;
83761 unsigned int num_trace_events;
83762+ struct file_operations trace_id;
83763+ struct file_operations trace_enable;
83764+ struct file_operations trace_format;
83765+ struct file_operations trace_filter;
83766 #endif
83767 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
83768 unsigned int num_ftrace_callsites;
83769@@ -375,7 +379,7 @@ struct module {
83770 ctor_fn_t *ctors;
83771 unsigned int num_ctors;
83772 #endif
83773-};
83774+} __randomize_layout;
83775 #ifndef MODULE_ARCH_INIT
83776 #define MODULE_ARCH_INIT {}
83777 #endif
83778@@ -396,18 +400,48 @@ bool is_module_address(unsigned long addr);
83779 bool is_module_percpu_address(unsigned long addr);
83780 bool is_module_text_address(unsigned long addr);
83781
83782+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
83783+{
83784+
83785+#ifdef CONFIG_PAX_KERNEXEC
83786+ if (ktla_ktva(addr) >= (unsigned long)start &&
83787+ ktla_ktva(addr) < (unsigned long)start + size)
83788+ return 1;
83789+#endif
83790+
83791+ return ((void *)addr >= start && (void *)addr < start + size);
83792+}
83793+
83794+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
83795+{
83796+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
83797+}
83798+
83799+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
83800+{
83801+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
83802+}
83803+
83804+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
83805+{
83806+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
83807+}
83808+
83809+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
83810+{
83811+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
83812+}
83813+
83814 static inline bool within_module_core(unsigned long addr,
83815 const struct module *mod)
83816 {
83817- return (unsigned long)mod->module_core <= addr &&
83818- addr < (unsigned long)mod->module_core + mod->core_size;
83819+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
83820 }
83821
83822 static inline bool within_module_init(unsigned long addr,
83823 const struct module *mod)
83824 {
83825- return (unsigned long)mod->module_init <= addr &&
83826- addr < (unsigned long)mod->module_init + mod->init_size;
83827+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
83828 }
83829
83830 static inline bool within_module(unsigned long addr, const struct module *mod)
83831diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
83832index 7eeb9bb..68f37e0 100644
83833--- a/include/linux/moduleloader.h
83834+++ b/include/linux/moduleloader.h
83835@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
83836 sections. Returns NULL on failure. */
83837 void *module_alloc(unsigned long size);
83838
83839+#ifdef CONFIG_PAX_KERNEXEC
83840+void *module_alloc_exec(unsigned long size);
83841+#else
83842+#define module_alloc_exec(x) module_alloc(x)
83843+#endif
83844+
83845 /* Free memory returned from module_alloc. */
83846 void module_free(struct module *mod, void *module_region);
83847
83848+#ifdef CONFIG_PAX_KERNEXEC
83849+void module_free_exec(struct module *mod, void *module_region);
83850+#else
83851+#define module_free_exec(x, y) module_free((x), (y))
83852+#endif
83853+
83854 /*
83855 * Apply the given relocation to the (simplified) ELF. Return -error
83856 * or 0.
83857@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
83858 unsigned int relsec,
83859 struct module *me)
83860 {
83861+#ifdef CONFIG_MODULES
83862 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83863 module_name(me));
83864+#endif
83865 return -ENOEXEC;
83866 }
83867 #endif
83868@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
83869 unsigned int relsec,
83870 struct module *me)
83871 {
83872+#ifdef CONFIG_MODULES
83873 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83874 module_name(me));
83875+#endif
83876 return -ENOEXEC;
83877 }
83878 #endif
83879diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
83880index 494f99e..5059f63 100644
83881--- a/include/linux/moduleparam.h
83882+++ b/include/linux/moduleparam.h
83883@@ -293,7 +293,7 @@ static inline void __kernel_param_unlock(void)
83884 * @len is usually just sizeof(string).
83885 */
83886 #define module_param_string(name, string, len, perm) \
83887- static const struct kparam_string __param_string_##name \
83888+ static const struct kparam_string __param_string_##name __used \
83889 = { len, string }; \
83890 __module_param_call(MODULE_PARAM_PREFIX, name, \
83891 &param_ops_string, \
83892@@ -437,7 +437,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
83893 */
83894 #define module_param_array_named(name, array, type, nump, perm) \
83895 param_check_##type(name, &(array)[0]); \
83896- static const struct kparam_array __param_arr_##name \
83897+ static const struct kparam_array __param_arr_##name __used \
83898 = { .max = ARRAY_SIZE(array), .num = nump, \
83899 .ops = &param_ops_##type, \
83900 .elemsize = sizeof(array[0]), .elem = array }; \
83901diff --git a/include/linux/mount.h b/include/linux/mount.h
83902index 9262e4b..0a45f98 100644
83903--- a/include/linux/mount.h
83904+++ b/include/linux/mount.h
83905@@ -66,7 +66,7 @@ struct vfsmount {
83906 struct dentry *mnt_root; /* root of the mounted tree */
83907 struct super_block *mnt_sb; /* pointer to superblock */
83908 int mnt_flags;
83909-};
83910+} __randomize_layout;
83911
83912 struct file; /* forward dec */
83913 struct path;
83914diff --git a/include/linux/namei.h b/include/linux/namei.h
83915index 492de72..1bddcd4 100644
83916--- a/include/linux/namei.h
83917+++ b/include/linux/namei.h
83918@@ -19,7 +19,7 @@ struct nameidata {
83919 unsigned seq, m_seq;
83920 int last_type;
83921 unsigned depth;
83922- char *saved_names[MAX_NESTED_LINKS + 1];
83923+ const char *saved_names[MAX_NESTED_LINKS + 1];
83924 };
83925
83926 /*
83927@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
83928
83929 extern void nd_jump_link(struct nameidata *nd, struct path *path);
83930
83931-static inline void nd_set_link(struct nameidata *nd, char *path)
83932+static inline void nd_set_link(struct nameidata *nd, const char *path)
83933 {
83934 nd->saved_names[nd->depth] = path;
83935 }
83936
83937-static inline char *nd_get_link(struct nameidata *nd)
83938+static inline const char *nd_get_link(const struct nameidata *nd)
83939 {
83940 return nd->saved_names[nd->depth];
83941 }
83942diff --git a/include/linux/net.h b/include/linux/net.h
83943index 17d8339..81656c0 100644
83944--- a/include/linux/net.h
83945+++ b/include/linux/net.h
83946@@ -192,7 +192,7 @@ struct net_proto_family {
83947 int (*create)(struct net *net, struct socket *sock,
83948 int protocol, int kern);
83949 struct module *owner;
83950-};
83951+} __do_const;
83952
83953 struct iovec;
83954 struct kvec;
83955diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
83956index c8e388e..5d8cd9b 100644
83957--- a/include/linux/netdevice.h
83958+++ b/include/linux/netdevice.h
83959@@ -1147,6 +1147,7 @@ struct net_device_ops {
83960 void *priv);
83961 int (*ndo_get_lock_subclass)(struct net_device *dev);
83962 };
83963+typedef struct net_device_ops __no_const net_device_ops_no_const;
83964
83965 /**
83966 * enum net_device_priv_flags - &struct net_device priv_flags
83967@@ -1485,10 +1486,10 @@ struct net_device {
83968
83969 struct net_device_stats stats;
83970
83971- atomic_long_t rx_dropped;
83972- atomic_long_t tx_dropped;
83973+ atomic_long_unchecked_t rx_dropped;
83974+ atomic_long_unchecked_t tx_dropped;
83975
83976- atomic_t carrier_changes;
83977+ atomic_unchecked_t carrier_changes;
83978
83979 #ifdef CONFIG_WIRELESS_EXT
83980 const struct iw_handler_def * wireless_handlers;
83981diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
83982index 2517ece..0bbfcfb 100644
83983--- a/include/linux/netfilter.h
83984+++ b/include/linux/netfilter.h
83985@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
83986 #endif
83987 /* Use the module struct to lock set/get code in place */
83988 struct module *owner;
83989-};
83990+} __do_const;
83991
83992 /* Function to register/unregister hook points. */
83993 int nf_register_hook(struct nf_hook_ops *reg);
83994diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
83995index e955d47..04a5338 100644
83996--- a/include/linux/netfilter/nfnetlink.h
83997+++ b/include/linux/netfilter/nfnetlink.h
83998@@ -19,7 +19,7 @@ struct nfnl_callback {
83999 const struct nlattr * const cda[]);
84000 const struct nla_policy *policy; /* netlink attribute policy */
84001 const u_int16_t attr_count; /* number of nlattr's */
84002-};
84003+} __do_const;
84004
84005 struct nfnetlink_subsystem {
84006 const char *name;
84007diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
84008new file mode 100644
84009index 0000000..33f4af8
84010--- /dev/null
84011+++ b/include/linux/netfilter/xt_gradm.h
84012@@ -0,0 +1,9 @@
84013+#ifndef _LINUX_NETFILTER_XT_GRADM_H
84014+#define _LINUX_NETFILTER_XT_GRADM_H 1
84015+
84016+struct xt_gradm_mtinfo {
84017+ __u16 flags;
84018+ __u16 invflags;
84019+};
84020+
84021+#endif
84022diff --git a/include/linux/nls.h b/include/linux/nls.h
84023index 520681b..2b7fabb 100644
84024--- a/include/linux/nls.h
84025+++ b/include/linux/nls.h
84026@@ -31,7 +31,7 @@ struct nls_table {
84027 const unsigned char *charset2upper;
84028 struct module *owner;
84029 struct nls_table *next;
84030-};
84031+} __do_const;
84032
84033 /* this value hold the maximum octet of charset */
84034 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
84035@@ -46,7 +46,7 @@ enum utf16_endian {
84036 /* nls_base.c */
84037 extern int __register_nls(struct nls_table *, struct module *);
84038 extern int unregister_nls(struct nls_table *);
84039-extern struct nls_table *load_nls(char *);
84040+extern struct nls_table *load_nls(const char *);
84041 extern void unload_nls(struct nls_table *);
84042 extern struct nls_table *load_nls_default(void);
84043 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
84044diff --git a/include/linux/notifier.h b/include/linux/notifier.h
84045index d14a4c3..a078786 100644
84046--- a/include/linux/notifier.h
84047+++ b/include/linux/notifier.h
84048@@ -54,7 +54,8 @@ struct notifier_block {
84049 notifier_fn_t notifier_call;
84050 struct notifier_block __rcu *next;
84051 int priority;
84052-};
84053+} __do_const;
84054+typedef struct notifier_block __no_const notifier_block_no_const;
84055
84056 struct atomic_notifier_head {
84057 spinlock_t lock;
84058diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
84059index b2a0f15..4d7da32 100644
84060--- a/include/linux/oprofile.h
84061+++ b/include/linux/oprofile.h
84062@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
84063 int oprofilefs_create_ro_ulong(struct dentry * root,
84064 char const * name, ulong * val);
84065
84066-/** Create a file for read-only access to an atomic_t. */
84067+/** Create a file for read-only access to an atomic_unchecked_t. */
84068 int oprofilefs_create_ro_atomic(struct dentry * root,
84069- char const * name, atomic_t * val);
84070+ char const * name, atomic_unchecked_t * val);
84071
84072 /** create a directory */
84073 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
84074diff --git a/include/linux/padata.h b/include/linux/padata.h
84075index 4386946..f50c615 100644
84076--- a/include/linux/padata.h
84077+++ b/include/linux/padata.h
84078@@ -129,7 +129,7 @@ struct parallel_data {
84079 struct padata_serial_queue __percpu *squeue;
84080 atomic_t reorder_objects;
84081 atomic_t refcnt;
84082- atomic_t seq_nr;
84083+ atomic_unchecked_t seq_nr;
84084 struct padata_cpumask cpumask;
84085 spinlock_t lock ____cacheline_aligned;
84086 unsigned int processed;
84087diff --git a/include/linux/path.h b/include/linux/path.h
84088index d137218..be0c176 100644
84089--- a/include/linux/path.h
84090+++ b/include/linux/path.h
84091@@ -1,13 +1,15 @@
84092 #ifndef _LINUX_PATH_H
84093 #define _LINUX_PATH_H
84094
84095+#include <linux/compiler.h>
84096+
84097 struct dentry;
84098 struct vfsmount;
84099
84100 struct path {
84101 struct vfsmount *mnt;
84102 struct dentry *dentry;
84103-};
84104+} __randomize_layout;
84105
84106 extern void path_get(const struct path *);
84107 extern void path_put(const struct path *);
84108diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
84109index 5f2e559..7d59314 100644
84110--- a/include/linux/pci_hotplug.h
84111+++ b/include/linux/pci_hotplug.h
84112@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
84113 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
84114 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
84115 int (*reset_slot) (struct hotplug_slot *slot, int probe);
84116-};
84117+} __do_const;
84118+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
84119
84120 /**
84121 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
84122diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
84123index 707617a..28a2e7e 100644
84124--- a/include/linux/perf_event.h
84125+++ b/include/linux/perf_event.h
84126@@ -339,8 +339,8 @@ struct perf_event {
84127
84128 enum perf_event_active_state state;
84129 unsigned int attach_state;
84130- local64_t count;
84131- atomic64_t child_count;
84132+ local64_t count; /* PaX: fix it one day */
84133+ atomic64_unchecked_t child_count;
84134
84135 /*
84136 * These are the total time in nanoseconds that the event
84137@@ -391,8 +391,8 @@ struct perf_event {
84138 * These accumulate total time (in nanoseconds) that children
84139 * events have been enabled and running, respectively.
84140 */
84141- atomic64_t child_total_time_enabled;
84142- atomic64_t child_total_time_running;
84143+ atomic64_unchecked_t child_total_time_enabled;
84144+ atomic64_unchecked_t child_total_time_running;
84145
84146 /*
84147 * Protect attach/detach and child_list:
84148@@ -722,7 +722,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
84149 entry->ip[entry->nr++] = ip;
84150 }
84151
84152-extern int sysctl_perf_event_paranoid;
84153+extern int sysctl_perf_event_legitimately_concerned;
84154 extern int sysctl_perf_event_mlock;
84155 extern int sysctl_perf_event_sample_rate;
84156 extern int sysctl_perf_cpu_time_max_percent;
84157@@ -737,19 +737,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
84158 loff_t *ppos);
84159
84160
84161+static inline bool perf_paranoid_any(void)
84162+{
84163+ return sysctl_perf_event_legitimately_concerned > 2;
84164+}
84165+
84166 static inline bool perf_paranoid_tracepoint_raw(void)
84167 {
84168- return sysctl_perf_event_paranoid > -1;
84169+ return sysctl_perf_event_legitimately_concerned > -1;
84170 }
84171
84172 static inline bool perf_paranoid_cpu(void)
84173 {
84174- return sysctl_perf_event_paranoid > 0;
84175+ return sysctl_perf_event_legitimately_concerned > 0;
84176 }
84177
84178 static inline bool perf_paranoid_kernel(void)
84179 {
84180- return sysctl_perf_event_paranoid > 1;
84181+ return sysctl_perf_event_legitimately_concerned > 1;
84182 }
84183
84184 extern void perf_event_init(void);
84185@@ -880,7 +885,7 @@ struct perf_pmu_events_attr {
84186 struct device_attribute attr;
84187 u64 id;
84188 const char *event_str;
84189-};
84190+} __do_const;
84191
84192 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
84193 static struct perf_pmu_events_attr _var = { \
84194diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84195index 1997ffc..4f1f44d 100644
84196--- a/include/linux/pid_namespace.h
84197+++ b/include/linux/pid_namespace.h
84198@@ -44,7 +44,7 @@ struct pid_namespace {
84199 int hide_pid;
84200 int reboot; /* group exit code if this pidns was rebooted */
84201 unsigned int proc_inum;
84202-};
84203+} __randomize_layout;
84204
84205 extern struct pid_namespace init_pid_ns;
84206
84207diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84208index eb8b8ac..62649e1 100644
84209--- a/include/linux/pipe_fs_i.h
84210+++ b/include/linux/pipe_fs_i.h
84211@@ -47,10 +47,10 @@ struct pipe_inode_info {
84212 struct mutex mutex;
84213 wait_queue_head_t wait;
84214 unsigned int nrbufs, curbuf, buffers;
84215- unsigned int readers;
84216- unsigned int writers;
84217- unsigned int files;
84218- unsigned int waiting_writers;
84219+ atomic_t readers;
84220+ atomic_t writers;
84221+ atomic_t files;
84222+ atomic_t waiting_writers;
84223 unsigned int r_counter;
84224 unsigned int w_counter;
84225 struct page *tmp_page;
84226diff --git a/include/linux/pm.h b/include/linux/pm.h
84227index 72c0fe0..26918ed 100644
84228--- a/include/linux/pm.h
84229+++ b/include/linux/pm.h
84230@@ -620,6 +620,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
84231 struct dev_pm_domain {
84232 struct dev_pm_ops ops;
84233 };
84234+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84235
84236 /*
84237 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84238diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84239index ebc4c76..7fab7b0 100644
84240--- a/include/linux/pm_domain.h
84241+++ b/include/linux/pm_domain.h
84242@@ -44,11 +44,11 @@ struct gpd_dev_ops {
84243 int (*thaw_early)(struct device *dev);
84244 int (*thaw)(struct device *dev);
84245 bool (*active_wakeup)(struct device *dev);
84246-};
84247+} __no_const;
84248
84249 struct gpd_cpu_data {
84250 unsigned int saved_exit_latency;
84251- struct cpuidle_state *idle_state;
84252+ cpuidle_state_no_const *idle_state;
84253 };
84254
84255 struct generic_pm_domain {
84256diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84257index 367f49b..d2f5a14 100644
84258--- a/include/linux/pm_runtime.h
84259+++ b/include/linux/pm_runtime.h
84260@@ -125,7 +125,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84261
84262 static inline void pm_runtime_mark_last_busy(struct device *dev)
84263 {
84264- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84265+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84266 }
84267
84268 #else /* !CONFIG_PM_RUNTIME */
84269diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84270index 195aafc..49a7bc2 100644
84271--- a/include/linux/pnp.h
84272+++ b/include/linux/pnp.h
84273@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84274 struct pnp_fixup {
84275 char id[7];
84276 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84277-};
84278+} __do_const;
84279
84280 /* config parameters */
84281 #define PNP_CONFIG_NORMAL 0x0001
84282diff --git a/include/linux/poison.h b/include/linux/poison.h
84283index 2110a81..13a11bb 100644
84284--- a/include/linux/poison.h
84285+++ b/include/linux/poison.h
84286@@ -19,8 +19,8 @@
84287 * under normal circumstances, used to verify that nobody uses
84288 * non-initialized list entries.
84289 */
84290-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84291-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84292+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84293+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84294
84295 /********** include/linux/timer.h **********/
84296 /*
84297diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84298index d8b187c3..9a9257a 100644
84299--- a/include/linux/power/smartreflex.h
84300+++ b/include/linux/power/smartreflex.h
84301@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84302 int (*notify)(struct omap_sr *sr, u32 status);
84303 u8 notify_flags;
84304 u8 class_type;
84305-};
84306+} __do_const;
84307
84308 /**
84309 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84310diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84311index 4ea1d37..80f4b33 100644
84312--- a/include/linux/ppp-comp.h
84313+++ b/include/linux/ppp-comp.h
84314@@ -84,7 +84,7 @@ struct compressor {
84315 struct module *owner;
84316 /* Extra skb space needed by the compressor algorithm */
84317 unsigned int comp_extra;
84318-};
84319+} __do_const;
84320
84321 /*
84322 * The return value from decompress routine is the length of the
84323diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84324index de83b4e..c4b997d 100644
84325--- a/include/linux/preempt.h
84326+++ b/include/linux/preempt.h
84327@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84328 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84329 #endif
84330
84331+#define raw_preempt_count_add(val) __preempt_count_add(val)
84332+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84333+
84334 #define __preempt_count_inc() __preempt_count_add(1)
84335 #define __preempt_count_dec() __preempt_count_sub(1)
84336
84337 #define preempt_count_inc() preempt_count_add(1)
84338+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84339 #define preempt_count_dec() preempt_count_sub(1)
84340+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84341
84342 #ifdef CONFIG_PREEMPT_COUNT
84343
84344@@ -41,6 +46,12 @@ do { \
84345 barrier(); \
84346 } while (0)
84347
84348+#define raw_preempt_disable() \
84349+do { \
84350+ raw_preempt_count_inc(); \
84351+ barrier(); \
84352+} while (0)
84353+
84354 #define sched_preempt_enable_no_resched() \
84355 do { \
84356 barrier(); \
84357@@ -49,6 +60,12 @@ do { \
84358
84359 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84360
84361+#define raw_preempt_enable_no_resched() \
84362+do { \
84363+ barrier(); \
84364+ raw_preempt_count_dec(); \
84365+} while (0)
84366+
84367 #ifdef CONFIG_PREEMPT
84368 #define preempt_enable() \
84369 do { \
84370@@ -113,8 +130,10 @@ do { \
84371 * region.
84372 */
84373 #define preempt_disable() barrier()
84374+#define raw_preempt_disable() barrier()
84375 #define sched_preempt_enable_no_resched() barrier()
84376 #define preempt_enable_no_resched() barrier()
84377+#define raw_preempt_enable_no_resched() barrier()
84378 #define preempt_enable() barrier()
84379 #define preempt_check_resched() do { } while (0)
84380
84381@@ -128,11 +147,13 @@ do { \
84382 /*
84383 * Modules have no business playing preemption tricks.
84384 */
84385+#ifndef CONFIG_PAX_KERNEXEC
84386 #undef sched_preempt_enable_no_resched
84387 #undef preempt_enable_no_resched
84388 #undef preempt_enable_no_resched_notrace
84389 #undef preempt_check_resched
84390 #endif
84391+#endif
84392
84393 #define preempt_set_need_resched() \
84394 do { \
84395diff --git a/include/linux/printk.h b/include/linux/printk.h
84396index d78125f..7f36596 100644
84397--- a/include/linux/printk.h
84398+++ b/include/linux/printk.h
84399@@ -124,6 +124,8 @@ static inline __printf(1, 2) __cold
84400 void early_printk(const char *s, ...) { }
84401 #endif
84402
84403+extern int kptr_restrict;
84404+
84405 #ifdef CONFIG_PRINTK
84406 asmlinkage __printf(5, 0)
84407 int vprintk_emit(int facility, int level,
84408@@ -158,7 +160,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84409
84410 extern int printk_delay_msec;
84411 extern int dmesg_restrict;
84412-extern int kptr_restrict;
84413
84414 extern void wake_up_klogd(void);
84415
84416diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84417index 9d117f6..d832b31 100644
84418--- a/include/linux/proc_fs.h
84419+++ b/include/linux/proc_fs.h
84420@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84421 extern struct proc_dir_entry *proc_symlink(const char *,
84422 struct proc_dir_entry *, const char *);
84423 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84424+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84425 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84426 struct proc_dir_entry *, void *);
84427+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84428+ struct proc_dir_entry *, void *);
84429 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84430 struct proc_dir_entry *);
84431
84432@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84433 return proc_create_data(name, mode, parent, proc_fops, NULL);
84434 }
84435
84436+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84437+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84438+{
84439+#ifdef CONFIG_GRKERNSEC_PROC_USER
84440+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84441+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84442+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84443+#else
84444+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84445+#endif
84446+}
84447+
84448+
84449 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84450 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84451 extern void *PDE_DATA(const struct inode *);
84452@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84453 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84454 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84455 struct proc_dir_entry *parent) {return NULL;}
84456+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84457+ struct proc_dir_entry *parent) { return NULL; }
84458 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84459 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84460+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84461+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84462 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84463 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84464 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84465@@ -77,7 +97,7 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
84466 static inline struct proc_dir_entry *proc_net_mkdir(
84467 struct net *net, const char *name, struct proc_dir_entry *parent)
84468 {
84469- return proc_mkdir_data(name, 0, parent, net);
84470+ return proc_mkdir_data_restrict(name, 0, parent, net);
84471 }
84472
84473 #endif /* _LINUX_PROC_FS_H */
84474diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84475index 34a1e10..70f6bde 100644
84476--- a/include/linux/proc_ns.h
84477+++ b/include/linux/proc_ns.h
84478@@ -14,7 +14,7 @@ struct proc_ns_operations {
84479 void (*put)(void *ns);
84480 int (*install)(struct nsproxy *nsproxy, void *ns);
84481 unsigned int (*inum)(void *ns);
84482-};
84483+} __do_const __randomize_layout;
84484
84485 struct proc_ns {
84486 void *ns;
84487diff --git a/include/linux/quota.h b/include/linux/quota.h
84488index 80d345a..9e89a9a 100644
84489--- a/include/linux/quota.h
84490+++ b/include/linux/quota.h
84491@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
84492
84493 extern bool qid_eq(struct kqid left, struct kqid right);
84494 extern bool qid_lt(struct kqid left, struct kqid right);
84495-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84496+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84497 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84498 extern bool qid_valid(struct kqid qid);
84499
84500diff --git a/include/linux/random.h b/include/linux/random.h
84501index 57fbbff..2170304 100644
84502--- a/include/linux/random.h
84503+++ b/include/linux/random.h
84504@@ -9,9 +9,19 @@
84505 #include <uapi/linux/random.h>
84506
84507 extern void add_device_randomness(const void *, unsigned int);
84508+
84509+static inline void add_latent_entropy(void)
84510+{
84511+
84512+#ifdef LATENT_ENTROPY_PLUGIN
84513+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84514+#endif
84515+
84516+}
84517+
84518 extern void add_input_randomness(unsigned int type, unsigned int code,
84519- unsigned int value);
84520-extern void add_interrupt_randomness(int irq, int irq_flags);
84521+ unsigned int value) __latent_entropy;
84522+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
84523
84524 extern void get_random_bytes(void *buf, int nbytes);
84525 extern void get_random_bytes_arch(void *buf, int nbytes);
84526@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
84527 extern const struct file_operations random_fops, urandom_fops;
84528 #endif
84529
84530-unsigned int get_random_int(void);
84531+unsigned int __intentional_overflow(-1) get_random_int(void);
84532 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
84533
84534-u32 prandom_u32(void);
84535+u32 prandom_u32(void) __intentional_overflow(-1);
84536 void prandom_bytes(void *buf, int nbytes);
84537 void prandom_seed(u32 seed);
84538 void prandom_reseed_late(void);
84539@@ -37,6 +47,11 @@ struct rnd_state {
84540 u32 prandom_u32_state(struct rnd_state *state);
84541 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
84542
84543+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
84544+{
84545+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
84546+}
84547+
84548 /**
84549 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
84550 * @ep_ro: right open interval endpoint
84551@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
84552 *
84553 * Returns: pseudo-random number in interval [0, ep_ro)
84554 */
84555-static inline u32 prandom_u32_max(u32 ep_ro)
84556+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
84557 {
84558 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
84559 }
84560diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
84561index fea49b5..2ac22bb 100644
84562--- a/include/linux/rbtree_augmented.h
84563+++ b/include/linux/rbtree_augmented.h
84564@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
84565 old->rbaugmented = rbcompute(old); \
84566 } \
84567 rbstatic const struct rb_augment_callbacks rbname = { \
84568- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
84569+ .propagate = rbname ## _propagate, \
84570+ .copy = rbname ## _copy, \
84571+ .rotate = rbname ## _rotate \
84572 };
84573
84574
84575diff --git a/include/linux/rculist.h b/include/linux/rculist.h
84576index 372ad5e..d4373f8 100644
84577--- a/include/linux/rculist.h
84578+++ b/include/linux/rculist.h
84579@@ -29,8 +29,8 @@
84580 */
84581 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
84582 {
84583- ACCESS_ONCE(list->next) = list;
84584- ACCESS_ONCE(list->prev) = list;
84585+ ACCESS_ONCE_RW(list->next) = list;
84586+ ACCESS_ONCE_RW(list->prev) = list;
84587 }
84588
84589 /*
84590@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
84591 struct list_head *prev, struct list_head *next);
84592 #endif
84593
84594+void __pax_list_add_rcu(struct list_head *new,
84595+ struct list_head *prev, struct list_head *next);
84596+
84597 /**
84598 * list_add_rcu - add a new entry to rcu-protected list
84599 * @new: new entry to be added
84600@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
84601 __list_add_rcu(new, head, head->next);
84602 }
84603
84604+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
84605+{
84606+ __pax_list_add_rcu(new, head, head->next);
84607+}
84608+
84609 /**
84610 * list_add_tail_rcu - add a new entry to rcu-protected list
84611 * @new: new entry to be added
84612@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
84613 __list_add_rcu(new, head->prev, head);
84614 }
84615
84616+static inline void pax_list_add_tail_rcu(struct list_head *new,
84617+ struct list_head *head)
84618+{
84619+ __pax_list_add_rcu(new, head->prev, head);
84620+}
84621+
84622 /**
84623 * list_del_rcu - deletes entry from list without re-initialization
84624 * @entry: the element to delete from the list.
84625@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
84626 entry->prev = LIST_POISON2;
84627 }
84628
84629+extern void pax_list_del_rcu(struct list_head *entry);
84630+
84631 /**
84632 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
84633 * @n: the element to delete from the hash list.
84634diff --git a/include/linux/reboot.h b/include/linux/reboot.h
84635index 48bf152..d38b785 100644
84636--- a/include/linux/reboot.h
84637+++ b/include/linux/reboot.h
84638@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
84639 */
84640
84641 extern void migrate_to_reboot_cpu(void);
84642-extern void machine_restart(char *cmd);
84643-extern void machine_halt(void);
84644-extern void machine_power_off(void);
84645+extern void machine_restart(char *cmd) __noreturn;
84646+extern void machine_halt(void) __noreturn;
84647+extern void machine_power_off(void) __noreturn;
84648
84649 extern void machine_shutdown(void);
84650 struct pt_regs;
84651@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
84652 */
84653
84654 extern void kernel_restart_prepare(char *cmd);
84655-extern void kernel_restart(char *cmd);
84656-extern void kernel_halt(void);
84657-extern void kernel_power_off(void);
84658+extern void kernel_restart(char *cmd) __noreturn;
84659+extern void kernel_halt(void) __noreturn;
84660+extern void kernel_power_off(void) __noreturn;
84661
84662 extern int C_A_D; /* for sysctl */
84663 void ctrl_alt_del(void);
84664@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
84665 * Emergency restart, callable from an interrupt handler.
84666 */
84667
84668-extern void emergency_restart(void);
84669+extern void emergency_restart(void) __noreturn;
84670 #include <asm/emergency-restart.h>
84671
84672 #endif /* _LINUX_REBOOT_H */
84673diff --git a/include/linux/regset.h b/include/linux/regset.h
84674index 8e0c9fe..ac4d221 100644
84675--- a/include/linux/regset.h
84676+++ b/include/linux/regset.h
84677@@ -161,7 +161,8 @@ struct user_regset {
84678 unsigned int align;
84679 unsigned int bias;
84680 unsigned int core_note_type;
84681-};
84682+} __do_const;
84683+typedef struct user_regset __no_const user_regset_no_const;
84684
84685 /**
84686 * struct user_regset_view - available regsets
84687diff --git a/include/linux/relay.h b/include/linux/relay.h
84688index d7c8359..818daf5 100644
84689--- a/include/linux/relay.h
84690+++ b/include/linux/relay.h
84691@@ -157,7 +157,7 @@ struct rchan_callbacks
84692 * The callback should return 0 if successful, negative if not.
84693 */
84694 int (*remove_buf_file)(struct dentry *dentry);
84695-};
84696+} __no_const;
84697
84698 /*
84699 * CONFIG_RELAY kernel API, kernel/relay.c
84700diff --git a/include/linux/rio.h b/include/linux/rio.h
84701index 6bda06f..bf39a9b 100644
84702--- a/include/linux/rio.h
84703+++ b/include/linux/rio.h
84704@@ -358,7 +358,7 @@ struct rio_ops {
84705 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
84706 u64 rstart, u32 size, u32 flags);
84707 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
84708-};
84709+} __no_const;
84710
84711 #define RIO_RESOURCE_MEM 0x00000100
84712 #define RIO_RESOURCE_DOORBELL 0x00000200
84713diff --git a/include/linux/rmap.h b/include/linux/rmap.h
84714index be57450..31cf65e 100644
84715--- a/include/linux/rmap.h
84716+++ b/include/linux/rmap.h
84717@@ -144,8 +144,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
84718 void anon_vma_init(void); /* create anon_vma_cachep */
84719 int anon_vma_prepare(struct vm_area_struct *);
84720 void unlink_anon_vmas(struct vm_area_struct *);
84721-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
84722-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
84723+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
84724+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
84725
84726 static inline void anon_vma_merge(struct vm_area_struct *vma,
84727 struct vm_area_struct *next)
84728diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
84729index ed8f9e7..999bc96 100644
84730--- a/include/linux/scatterlist.h
84731+++ b/include/linux/scatterlist.h
84732@@ -1,6 +1,7 @@
84733 #ifndef _LINUX_SCATTERLIST_H
84734 #define _LINUX_SCATTERLIST_H
84735
84736+#include <linux/sched.h>
84737 #include <linux/string.h>
84738 #include <linux/bug.h>
84739 #include <linux/mm.h>
84740@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
84741 #ifdef CONFIG_DEBUG_SG
84742 BUG_ON(!virt_addr_valid(buf));
84743 #endif
84744+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84745+ if (object_starts_on_stack(buf)) {
84746+ void *adjbuf = buf - current->stack + current->lowmem_stack;
84747+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
84748+ } else
84749+#endif
84750 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
84751 }
84752
84753diff --git a/include/linux/sched.h b/include/linux/sched.h
84754index 2b1d9e9..10ba706 100644
84755--- a/include/linux/sched.h
84756+++ b/include/linux/sched.h
84757@@ -132,6 +132,7 @@ struct fs_struct;
84758 struct perf_event_context;
84759 struct blk_plug;
84760 struct filename;
84761+struct linux_binprm;
84762
84763 #define VMACACHE_BITS 2
84764 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
84765@@ -374,7 +375,7 @@ extern char __sched_text_start[], __sched_text_end[];
84766 extern int in_sched_functions(unsigned long addr);
84767
84768 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
84769-extern signed long schedule_timeout(signed long timeout);
84770+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
84771 extern signed long schedule_timeout_interruptible(signed long timeout);
84772 extern signed long schedule_timeout_killable(signed long timeout);
84773 extern signed long schedule_timeout_uninterruptible(signed long timeout);
84774@@ -385,6 +386,19 @@ struct nsproxy;
84775 struct user_namespace;
84776
84777 #ifdef CONFIG_MMU
84778+
84779+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
84780+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
84781+#else
84782+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
84783+{
84784+ return 0;
84785+}
84786+#endif
84787+
84788+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
84789+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
84790+
84791 extern void arch_pick_mmap_layout(struct mm_struct *mm);
84792 extern unsigned long
84793 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
84794@@ -682,6 +696,17 @@ struct signal_struct {
84795 #ifdef CONFIG_TASKSTATS
84796 struct taskstats *stats;
84797 #endif
84798+
84799+#ifdef CONFIG_GRKERNSEC
84800+ u32 curr_ip;
84801+ u32 saved_ip;
84802+ u32 gr_saddr;
84803+ u32 gr_daddr;
84804+ u16 gr_sport;
84805+ u16 gr_dport;
84806+ u8 used_accept:1;
84807+#endif
84808+
84809 #ifdef CONFIG_AUDIT
84810 unsigned audit_tty;
84811 unsigned audit_tty_log_passwd;
84812@@ -708,7 +733,7 @@ struct signal_struct {
84813 struct mutex cred_guard_mutex; /* guard against foreign influences on
84814 * credential calculations
84815 * (notably. ptrace) */
84816-};
84817+} __randomize_layout;
84818
84819 /*
84820 * Bits in flags field of signal_struct.
84821@@ -761,6 +786,14 @@ struct user_struct {
84822 struct key *session_keyring; /* UID's default session keyring */
84823 #endif
84824
84825+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
84826+ unsigned char kernel_banned;
84827+#endif
84828+#ifdef CONFIG_GRKERNSEC_BRUTE
84829+ unsigned char suid_banned;
84830+ unsigned long suid_ban_expires;
84831+#endif
84832+
84833 /* Hash table maintenance information */
84834 struct hlist_node uidhash_node;
84835 kuid_t uid;
84836@@ -768,7 +801,7 @@ struct user_struct {
84837 #ifdef CONFIG_PERF_EVENTS
84838 atomic_long_t locked_vm;
84839 #endif
84840-};
84841+} __randomize_layout;
84842
84843 extern int uids_sysfs_init(void);
84844
84845@@ -1224,6 +1257,9 @@ enum perf_event_task_context {
84846 struct task_struct {
84847 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
84848 void *stack;
84849+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84850+ void *lowmem_stack;
84851+#endif
84852 atomic_t usage;
84853 unsigned int flags; /* per process flags, defined below */
84854 unsigned int ptrace;
84855@@ -1345,8 +1381,8 @@ struct task_struct {
84856 struct list_head thread_node;
84857
84858 struct completion *vfork_done; /* for vfork() */
84859- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
84860- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84861+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
84862+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84863
84864 cputime_t utime, stime, utimescaled, stimescaled;
84865 cputime_t gtime;
84866@@ -1371,11 +1407,6 @@ struct task_struct {
84867 struct task_cputime cputime_expires;
84868 struct list_head cpu_timers[3];
84869
84870-/* process credentials */
84871- const struct cred __rcu *real_cred; /* objective and real subjective task
84872- * credentials (COW) */
84873- const struct cred __rcu *cred; /* effective (overridable) subjective task
84874- * credentials (COW) */
84875 char comm[TASK_COMM_LEN]; /* executable name excluding path
84876 - access with [gs]et_task_comm (which lock
84877 it with task_lock())
84878@@ -1393,6 +1424,10 @@ struct task_struct {
84879 #endif
84880 /* CPU-specific state of this task */
84881 struct thread_struct thread;
84882+/* thread_info moved to task_struct */
84883+#ifdef CONFIG_X86
84884+ struct thread_info tinfo;
84885+#endif
84886 /* filesystem information */
84887 struct fs_struct *fs;
84888 /* open file information */
84889@@ -1467,6 +1502,10 @@ struct task_struct {
84890 gfp_t lockdep_reclaim_gfp;
84891 #endif
84892
84893+/* process credentials */
84894+ const struct cred __rcu *real_cred; /* objective and real subjective task
84895+ * credentials (COW) */
84896+
84897 /* journalling filesystem info */
84898 void *journal_info;
84899
84900@@ -1505,6 +1544,10 @@ struct task_struct {
84901 /* cg_list protected by css_set_lock and tsk->alloc_lock */
84902 struct list_head cg_list;
84903 #endif
84904+
84905+ const struct cred __rcu *cred; /* effective (overridable) subjective task
84906+ * credentials (COW) */
84907+
84908 #ifdef CONFIG_FUTEX
84909 struct robust_list_head __user *robust_list;
84910 #ifdef CONFIG_COMPAT
84911@@ -1644,7 +1687,78 @@ struct task_struct {
84912 unsigned int sequential_io;
84913 unsigned int sequential_io_avg;
84914 #endif
84915-};
84916+
84917+#ifdef CONFIG_GRKERNSEC
84918+ /* grsecurity */
84919+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84920+ u64 exec_id;
84921+#endif
84922+#ifdef CONFIG_GRKERNSEC_SETXID
84923+ const struct cred *delayed_cred;
84924+#endif
84925+ struct dentry *gr_chroot_dentry;
84926+ struct acl_subject_label *acl;
84927+ struct acl_subject_label *tmpacl;
84928+ struct acl_role_label *role;
84929+ struct file *exec_file;
84930+ unsigned long brute_expires;
84931+ u16 acl_role_id;
84932+ u8 inherited;
84933+ /* is this the task that authenticated to the special role */
84934+ u8 acl_sp_role;
84935+ u8 is_writable;
84936+ u8 brute;
84937+ u8 gr_is_chrooted;
84938+#endif
84939+
84940+} __randomize_layout;
84941+
84942+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
84943+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
84944+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
84945+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
84946+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
84947+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
84948+
84949+#ifdef CONFIG_PAX_SOFTMODE
84950+extern int pax_softmode;
84951+#endif
84952+
84953+extern int pax_check_flags(unsigned long *);
84954+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
84955+
84956+/* if tsk != current then task_lock must be held on it */
84957+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84958+static inline unsigned long pax_get_flags(struct task_struct *tsk)
84959+{
84960+ if (likely(tsk->mm))
84961+ return tsk->mm->pax_flags;
84962+ else
84963+ return 0UL;
84964+}
84965+
84966+/* if tsk != current then task_lock must be held on it */
84967+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
84968+{
84969+ if (likely(tsk->mm)) {
84970+ tsk->mm->pax_flags = flags;
84971+ return 0;
84972+ }
84973+ return -EINVAL;
84974+}
84975+#endif
84976+
84977+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84978+extern void pax_set_initial_flags(struct linux_binprm *bprm);
84979+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
84980+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
84981+#endif
84982+
84983+struct path;
84984+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
84985+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
84986+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
84987+extern void pax_report_refcount_overflow(struct pt_regs *regs);
84988
84989 /* Future-safe accessor for struct task_struct's cpus_allowed. */
84990 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
84991@@ -1726,7 +1840,7 @@ struct pid_namespace;
84992 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
84993 struct pid_namespace *ns);
84994
84995-static inline pid_t task_pid_nr(struct task_struct *tsk)
84996+static inline pid_t task_pid_nr(const struct task_struct *tsk)
84997 {
84998 return tsk->pid;
84999 }
85000@@ -2097,6 +2211,25 @@ extern u64 sched_clock_cpu(int cpu);
85001
85002 extern void sched_clock_init(void);
85003
85004+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85005+static inline void populate_stack(void)
85006+{
85007+ struct task_struct *curtask = current;
85008+ int c;
85009+ int *ptr = curtask->stack;
85010+ int *end = curtask->stack + THREAD_SIZE;
85011+
85012+ while (ptr < end) {
85013+ c = *(volatile int *)ptr;
85014+ ptr += PAGE_SIZE/sizeof(int);
85015+ }
85016+}
85017+#else
85018+static inline void populate_stack(void)
85019+{
85020+}
85021+#endif
85022+
85023 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
85024 static inline void sched_clock_tick(void)
85025 {
85026@@ -2230,7 +2363,9 @@ void yield(void);
85027 extern struct exec_domain default_exec_domain;
85028
85029 union thread_union {
85030+#ifndef CONFIG_X86
85031 struct thread_info thread_info;
85032+#endif
85033 unsigned long stack[THREAD_SIZE/sizeof(long)];
85034 };
85035
85036@@ -2263,6 +2398,7 @@ extern struct pid_namespace init_pid_ns;
85037 */
85038
85039 extern struct task_struct *find_task_by_vpid(pid_t nr);
85040+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
85041 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
85042 struct pid_namespace *ns);
85043
85044@@ -2427,7 +2563,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
85045 extern void exit_itimers(struct signal_struct *);
85046 extern void flush_itimer_signals(void);
85047
85048-extern void do_group_exit(int);
85049+extern __noreturn void do_group_exit(int);
85050
85051 extern int do_execve(struct filename *,
85052 const char __user * const __user *,
85053@@ -2642,9 +2778,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
85054
85055 #endif
85056
85057-static inline int object_is_on_stack(void *obj)
85058+static inline int object_starts_on_stack(const void *obj)
85059 {
85060- void *stack = task_stack_page(current);
85061+ const void *stack = task_stack_page(current);
85062
85063 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
85064 }
85065diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
85066index 596a0e0..bea77ec 100644
85067--- a/include/linux/sched/sysctl.h
85068+++ b/include/linux/sched/sysctl.h
85069@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
85070 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
85071
85072 extern int sysctl_max_map_count;
85073+extern unsigned long sysctl_heap_stack_gap;
85074
85075 extern unsigned int sysctl_sched_latency;
85076 extern unsigned int sysctl_sched_min_granularity;
85077diff --git a/include/linux/security.h b/include/linux/security.h
85078index 623f90e..90b39da 100644
85079--- a/include/linux/security.h
85080+++ b/include/linux/security.h
85081@@ -27,6 +27,7 @@
85082 #include <linux/slab.h>
85083 #include <linux/err.h>
85084 #include <linux/string.h>
85085+#include <linux/grsecurity.h>
85086
85087 struct linux_binprm;
85088 struct cred;
85089@@ -116,8 +117,6 @@ struct seq_file;
85090
85091 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
85092
85093-void reset_security_ops(void);
85094-
85095 #ifdef CONFIG_MMU
85096 extern unsigned long mmap_min_addr;
85097 extern unsigned long dac_mmap_min_addr;
85098@@ -1729,7 +1728,7 @@ struct security_operations {
85099 struct audit_context *actx);
85100 void (*audit_rule_free) (void *lsmrule);
85101 #endif /* CONFIG_AUDIT */
85102-};
85103+} __randomize_layout;
85104
85105 /* prototypes */
85106 extern int security_init(void);
85107diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
85108index dc368b8..e895209 100644
85109--- a/include/linux/semaphore.h
85110+++ b/include/linux/semaphore.h
85111@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
85112 }
85113
85114 extern void down(struct semaphore *sem);
85115-extern int __must_check down_interruptible(struct semaphore *sem);
85116+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
85117 extern int __must_check down_killable(struct semaphore *sem);
85118 extern int __must_check down_trylock(struct semaphore *sem);
85119 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
85120diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
85121index 52e0097..383f21d 100644
85122--- a/include/linux/seq_file.h
85123+++ b/include/linux/seq_file.h
85124@@ -27,6 +27,9 @@ struct seq_file {
85125 struct mutex lock;
85126 const struct seq_operations *op;
85127 int poll_event;
85128+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85129+ u64 exec_id;
85130+#endif
85131 #ifdef CONFIG_USER_NS
85132 struct user_namespace *user_ns;
85133 #endif
85134@@ -39,6 +42,7 @@ struct seq_operations {
85135 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
85136 int (*show) (struct seq_file *m, void *v);
85137 };
85138+typedef struct seq_operations __no_const seq_operations_no_const;
85139
85140 #define SEQ_SKIP 1
85141
85142@@ -96,6 +100,7 @@ void seq_pad(struct seq_file *m, char c);
85143
85144 char *mangle_path(char *s, const char *p, const char *esc);
85145 int seq_open(struct file *, const struct seq_operations *);
85146+int seq_open_restrict(struct file *, const struct seq_operations *);
85147 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
85148 loff_t seq_lseek(struct file *, loff_t, int);
85149 int seq_release(struct inode *, struct file *);
85150@@ -138,6 +143,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
85151 }
85152
85153 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85154+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85155 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85156 int single_release(struct inode *, struct file *);
85157 void *__seq_open_private(struct file *, const struct seq_operations *, int);
85158diff --git a/include/linux/shm.h b/include/linux/shm.h
85159index 6fb8016..ab4465e 100644
85160--- a/include/linux/shm.h
85161+++ b/include/linux/shm.h
85162@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85163 /* The task created the shm object. NULL if the task is dead. */
85164 struct task_struct *shm_creator;
85165 struct list_head shm_clist; /* list by creator */
85166+#ifdef CONFIG_GRKERNSEC
85167+ u64 shm_createtime;
85168+ pid_t shm_lapid;
85169+#endif
85170 };
85171
85172 /* shm_mode upper byte flags */
85173diff --git a/include/linux/signal.h b/include/linux/signal.h
85174index 750196f..ae7a3a4 100644
85175--- a/include/linux/signal.h
85176+++ b/include/linux/signal.h
85177@@ -292,7 +292,7 @@ static inline void allow_signal(int sig)
85178 * know it'll be handled, so that they don't get converted to
85179 * SIGKILL or just silently dropped.
85180 */
85181- kernel_sigaction(sig, (__force __sighandler_t)2);
85182+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85183 }
85184
85185 static inline void disallow_signal(int sig)
85186diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85187index abde271..bc9ece1 100644
85188--- a/include/linux/skbuff.h
85189+++ b/include/linux/skbuff.h
85190@@ -728,7 +728,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85191 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85192 int node);
85193 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85194-static inline struct sk_buff *alloc_skb(unsigned int size,
85195+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85196 gfp_t priority)
85197 {
85198 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85199@@ -1845,7 +1845,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85200 return skb->inner_transport_header - skb->inner_network_header;
85201 }
85202
85203-static inline int skb_network_offset(const struct sk_buff *skb)
85204+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85205 {
85206 return skb_network_header(skb) - skb->data;
85207 }
85208@@ -1917,7 +1917,7 @@ static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
85209 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85210 */
85211 #ifndef NET_SKB_PAD
85212-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85213+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85214 #endif
85215
85216 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85217@@ -2524,7 +2524,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85218 int *err);
85219 unsigned int datagram_poll(struct file *file, struct socket *sock,
85220 struct poll_table_struct *wait);
85221-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
85222+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
85223 struct iovec *to, int size);
85224 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
85225 struct iovec *iov);
85226@@ -2918,6 +2918,9 @@ static inline void nf_reset(struct sk_buff *skb)
85227 nf_bridge_put(skb->nf_bridge);
85228 skb->nf_bridge = NULL;
85229 #endif
85230+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85231+ skb->nf_trace = 0;
85232+#endif
85233 }
85234
85235 static inline void nf_reset_trace(struct sk_buff *skb)
85236diff --git a/include/linux/slab.h b/include/linux/slab.h
85237index 1d9abb7..b1e8b10 100644
85238--- a/include/linux/slab.h
85239+++ b/include/linux/slab.h
85240@@ -14,15 +14,29 @@
85241 #include <linux/gfp.h>
85242 #include <linux/types.h>
85243 #include <linux/workqueue.h>
85244-
85245+#include <linux/err.h>
85246
85247 /*
85248 * Flags to pass to kmem_cache_create().
85249 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85250 */
85251 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85252+
85253+#ifdef CONFIG_PAX_USERCOPY_SLABS
85254+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85255+#else
85256+#define SLAB_USERCOPY 0x00000000UL
85257+#endif
85258+
85259 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85260 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85261+
85262+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85263+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85264+#else
85265+#define SLAB_NO_SANITIZE 0x00000000UL
85266+#endif
85267+
85268 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85269 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85270 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85271@@ -98,10 +112,13 @@
85272 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85273 * Both make kfree a no-op.
85274 */
85275-#define ZERO_SIZE_PTR ((void *)16)
85276+#define ZERO_SIZE_PTR \
85277+({ \
85278+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85279+ (void *)(-MAX_ERRNO-1L); \
85280+})
85281
85282-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85283- (unsigned long)ZERO_SIZE_PTR)
85284+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85285
85286 #include <linux/kmemleak.h>
85287
85288@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85289 void kfree(const void *);
85290 void kzfree(const void *);
85291 size_t ksize(const void *);
85292+const char *check_heap_object(const void *ptr, unsigned long n);
85293+bool is_usercopy_object(const void *ptr);
85294
85295 /*
85296 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85297@@ -176,7 +195,7 @@ struct kmem_cache {
85298 unsigned int align; /* Alignment as calculated */
85299 unsigned long flags; /* Active flags on the slab */
85300 const char *name; /* Slab name for sysfs */
85301- int refcount; /* Use counter */
85302+ atomic_t refcount; /* Use counter */
85303 void (*ctor)(void *); /* Called on object slot creation */
85304 struct list_head list; /* List of all slab caches on the system */
85305 };
85306@@ -261,6 +280,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85307 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85308 #endif
85309
85310+#ifdef CONFIG_PAX_USERCOPY_SLABS
85311+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85312+#endif
85313+
85314 /*
85315 * Figure out which kmalloc slab an allocation of a certain size
85316 * belongs to.
85317@@ -269,7 +292,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85318 * 2 = 120 .. 192 bytes
85319 * n = 2^(n-1) .. 2^n -1
85320 */
85321-static __always_inline int kmalloc_index(size_t size)
85322+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85323 {
85324 if (!size)
85325 return 0;
85326@@ -312,11 +335,11 @@ static __always_inline int kmalloc_index(size_t size)
85327 }
85328 #endif /* !CONFIG_SLOB */
85329
85330-void *__kmalloc(size_t size, gfp_t flags);
85331+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
85332 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85333
85334 #ifdef CONFIG_NUMA
85335-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85336+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
85337 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85338 #else
85339 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85340diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85341index 8235dfb..47ce586 100644
85342--- a/include/linux/slab_def.h
85343+++ b/include/linux/slab_def.h
85344@@ -38,7 +38,7 @@ struct kmem_cache {
85345 /* 4) cache creation/removal */
85346 const char *name;
85347 struct list_head list;
85348- int refcount;
85349+ atomic_t refcount;
85350 int object_size;
85351 int align;
85352
85353@@ -54,10 +54,14 @@ struct kmem_cache {
85354 unsigned long node_allocs;
85355 unsigned long node_frees;
85356 unsigned long node_overflow;
85357- atomic_t allochit;
85358- atomic_t allocmiss;
85359- atomic_t freehit;
85360- atomic_t freemiss;
85361+ atomic_unchecked_t allochit;
85362+ atomic_unchecked_t allocmiss;
85363+ atomic_unchecked_t freehit;
85364+ atomic_unchecked_t freemiss;
85365+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85366+ atomic_unchecked_t sanitized;
85367+ atomic_unchecked_t not_sanitized;
85368+#endif
85369
85370 /*
85371 * If debugging is enabled, then the allocator can add additional
85372diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85373index d82abd4..408c3a0 100644
85374--- a/include/linux/slub_def.h
85375+++ b/include/linux/slub_def.h
85376@@ -74,7 +74,7 @@ struct kmem_cache {
85377 struct kmem_cache_order_objects max;
85378 struct kmem_cache_order_objects min;
85379 gfp_t allocflags; /* gfp flags to use on each alloc */
85380- int refcount; /* Refcount for slab cache destroy */
85381+ atomic_t refcount; /* Refcount for slab cache destroy */
85382 void (*ctor)(void *);
85383 int inuse; /* Offset to metadata */
85384 int align; /* Alignment */
85385diff --git a/include/linux/smp.h b/include/linux/smp.h
85386index 34347f2..8739978 100644
85387--- a/include/linux/smp.h
85388+++ b/include/linux/smp.h
85389@@ -174,7 +174,9 @@ static inline void kick_all_cpus_sync(void) { }
85390 #endif
85391
85392 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85393+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85394 #define put_cpu() preempt_enable()
85395+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85396
85397 /*
85398 * Callback to arch code if there's nosmp or maxcpus=0 on the
85399diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85400index 46cca4c..3323536 100644
85401--- a/include/linux/sock_diag.h
85402+++ b/include/linux/sock_diag.h
85403@@ -11,7 +11,7 @@ struct sock;
85404 struct sock_diag_handler {
85405 __u8 family;
85406 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85407-};
85408+} __do_const;
85409
85410 int sock_diag_register(const struct sock_diag_handler *h);
85411 void sock_diag_unregister(const struct sock_diag_handler *h);
85412diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85413index 680f9a3..f13aeb0 100644
85414--- a/include/linux/sonet.h
85415+++ b/include/linux/sonet.h
85416@@ -7,7 +7,7 @@
85417 #include <uapi/linux/sonet.h>
85418
85419 struct k_sonet_stats {
85420-#define __HANDLE_ITEM(i) atomic_t i
85421+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85422 __SONET_ITEMS
85423 #undef __HANDLE_ITEM
85424 };
85425diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85426index 07d8e53..dc934c9 100644
85427--- a/include/linux/sunrpc/addr.h
85428+++ b/include/linux/sunrpc/addr.h
85429@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85430 {
85431 switch (sap->sa_family) {
85432 case AF_INET:
85433- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85434+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85435 case AF_INET6:
85436- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85437+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85438 }
85439 return 0;
85440 }
85441@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85442 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85443 const struct sockaddr *src)
85444 {
85445- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85446+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85447 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85448
85449 dsin->sin_family = ssin->sin_family;
85450@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85451 if (sa->sa_family != AF_INET6)
85452 return 0;
85453
85454- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85455+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85456 }
85457
85458 #endif /* _LINUX_SUNRPC_ADDR_H */
85459diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85460index 70736b9..37f33db 100644
85461--- a/include/linux/sunrpc/clnt.h
85462+++ b/include/linux/sunrpc/clnt.h
85463@@ -97,7 +97,7 @@ struct rpc_procinfo {
85464 unsigned int p_timer; /* Which RTT timer to use */
85465 u32 p_statidx; /* Which procedure to account */
85466 const char * p_name; /* name of procedure */
85467-};
85468+} __do_const;
85469
85470 #ifdef __KERNEL__
85471
85472diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85473index cf61ecd..a4a9bc0 100644
85474--- a/include/linux/sunrpc/svc.h
85475+++ b/include/linux/sunrpc/svc.h
85476@@ -417,7 +417,7 @@ struct svc_procedure {
85477 unsigned int pc_count; /* call count */
85478 unsigned int pc_cachetype; /* cache info (NFS) */
85479 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85480-};
85481+} __do_const;
85482
85483 /*
85484 * Function prototypes.
85485diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85486index 975da75..318c083 100644
85487--- a/include/linux/sunrpc/svc_rdma.h
85488+++ b/include/linux/sunrpc/svc_rdma.h
85489@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
85490 extern unsigned int svcrdma_max_requests;
85491 extern unsigned int svcrdma_max_req_size;
85492
85493-extern atomic_t rdma_stat_recv;
85494-extern atomic_t rdma_stat_read;
85495-extern atomic_t rdma_stat_write;
85496-extern atomic_t rdma_stat_sq_starve;
85497-extern atomic_t rdma_stat_rq_starve;
85498-extern atomic_t rdma_stat_rq_poll;
85499-extern atomic_t rdma_stat_rq_prod;
85500-extern atomic_t rdma_stat_sq_poll;
85501-extern atomic_t rdma_stat_sq_prod;
85502+extern atomic_unchecked_t rdma_stat_recv;
85503+extern atomic_unchecked_t rdma_stat_read;
85504+extern atomic_unchecked_t rdma_stat_write;
85505+extern atomic_unchecked_t rdma_stat_sq_starve;
85506+extern atomic_unchecked_t rdma_stat_rq_starve;
85507+extern atomic_unchecked_t rdma_stat_rq_poll;
85508+extern atomic_unchecked_t rdma_stat_rq_prod;
85509+extern atomic_unchecked_t rdma_stat_sq_poll;
85510+extern atomic_unchecked_t rdma_stat_sq_prod;
85511
85512 #define RPCRDMA_VERSION 1
85513
85514diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
85515index 8d71d65..f79586e 100644
85516--- a/include/linux/sunrpc/svcauth.h
85517+++ b/include/linux/sunrpc/svcauth.h
85518@@ -120,7 +120,7 @@ struct auth_ops {
85519 int (*release)(struct svc_rqst *rq);
85520 void (*domain_release)(struct auth_domain *);
85521 int (*set_client)(struct svc_rqst *rq);
85522-};
85523+} __do_const;
85524
85525 #define SVC_GARBAGE 1
85526 #define SVC_SYSERR 2
85527diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
85528index e7a018e..49f8b17 100644
85529--- a/include/linux/swiotlb.h
85530+++ b/include/linux/swiotlb.h
85531@@ -60,7 +60,8 @@ extern void
85532
85533 extern void
85534 swiotlb_free_coherent(struct device *hwdev, size_t size,
85535- void *vaddr, dma_addr_t dma_handle);
85536+ void *vaddr, dma_addr_t dma_handle,
85537+ struct dma_attrs *attrs);
85538
85539 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
85540 unsigned long offset, size_t size,
85541diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
85542index 0f86d85..dff3419 100644
85543--- a/include/linux/syscalls.h
85544+++ b/include/linux/syscalls.h
85545@@ -98,10 +98,16 @@ struct sigaltstack;
85546 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
85547
85548 #define __SC_DECL(t, a) t a
85549+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
85550 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
85551 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
85552 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
85553-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
85554+#define __SC_LONG(t, a) __typeof( \
85555+ __builtin_choose_expr( \
85556+ sizeof(t) > sizeof(int), \
85557+ (t) 0, \
85558+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
85559+ )) a
85560 #define __SC_CAST(t, a) (t) a
85561 #define __SC_ARGS(t, a) a
85562 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
85563@@ -383,11 +389,11 @@ asmlinkage long sys_sync(void);
85564 asmlinkage long sys_fsync(unsigned int fd);
85565 asmlinkage long sys_fdatasync(unsigned int fd);
85566 asmlinkage long sys_bdflush(int func, long data);
85567-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
85568- char __user *type, unsigned long flags,
85569+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
85570+ const char __user *type, unsigned long flags,
85571 void __user *data);
85572-asmlinkage long sys_umount(char __user *name, int flags);
85573-asmlinkage long sys_oldumount(char __user *name);
85574+asmlinkage long sys_umount(const char __user *name, int flags);
85575+asmlinkage long sys_oldumount(const char __user *name);
85576 asmlinkage long sys_truncate(const char __user *path, long length);
85577 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
85578 asmlinkage long sys_stat(const char __user *filename,
85579@@ -599,7 +605,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
85580 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
85581 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
85582 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
85583- struct sockaddr __user *, int);
85584+ struct sockaddr __user *, int) __intentional_overflow(0);
85585 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
85586 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
85587 unsigned int vlen, unsigned flags);
85588diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
85589index 27b3b0b..e093dd9 100644
85590--- a/include/linux/syscore_ops.h
85591+++ b/include/linux/syscore_ops.h
85592@@ -16,7 +16,7 @@ struct syscore_ops {
85593 int (*suspend)(void);
85594 void (*resume)(void);
85595 void (*shutdown)(void);
85596-};
85597+} __do_const;
85598
85599 extern void register_syscore_ops(struct syscore_ops *ops);
85600 extern void unregister_syscore_ops(struct syscore_ops *ops);
85601diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
85602index b7361f8..341a15a 100644
85603--- a/include/linux/sysctl.h
85604+++ b/include/linux/sysctl.h
85605@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
85606
85607 extern int proc_dostring(struct ctl_table *, int,
85608 void __user *, size_t *, loff_t *);
85609+extern int proc_dostring_modpriv(struct ctl_table *, int,
85610+ void __user *, size_t *, loff_t *);
85611 extern int proc_dointvec(struct ctl_table *, int,
85612 void __user *, size_t *, loff_t *);
85613 extern int proc_dointvec_minmax(struct ctl_table *, int,
85614@@ -113,7 +115,8 @@ struct ctl_table
85615 struct ctl_table_poll *poll;
85616 void *extra1;
85617 void *extra2;
85618-};
85619+} __do_const __randomize_layout;
85620+typedef struct ctl_table __no_const ctl_table_no_const;
85621
85622 struct ctl_node {
85623 struct rb_node node;
85624diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
85625index f97d0db..c1187dc 100644
85626--- a/include/linux/sysfs.h
85627+++ b/include/linux/sysfs.h
85628@@ -34,7 +34,8 @@ struct attribute {
85629 struct lock_class_key *key;
85630 struct lock_class_key skey;
85631 #endif
85632-};
85633+} __do_const;
85634+typedef struct attribute __no_const attribute_no_const;
85635
85636 /**
85637 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
85638@@ -63,7 +64,8 @@ struct attribute_group {
85639 struct attribute *, int);
85640 struct attribute **attrs;
85641 struct bin_attribute **bin_attrs;
85642-};
85643+} __do_const;
85644+typedef struct attribute_group __no_const attribute_group_no_const;
85645
85646 /**
85647 * Use these macros to make defining attributes easier. See include/linux/device.h
85648@@ -128,7 +130,8 @@ struct bin_attribute {
85649 char *, loff_t, size_t);
85650 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
85651 struct vm_area_struct *vma);
85652-};
85653+} __do_const;
85654+typedef struct bin_attribute __no_const bin_attribute_no_const;
85655
85656 /**
85657 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
85658diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
85659index 387fa7d..3fcde6b 100644
85660--- a/include/linux/sysrq.h
85661+++ b/include/linux/sysrq.h
85662@@ -16,6 +16,7 @@
85663
85664 #include <linux/errno.h>
85665 #include <linux/types.h>
85666+#include <linux/compiler.h>
85667
85668 /* Possible values of bitmask for enabling sysrq functions */
85669 /* 0x0001 is reserved for enable everything */
85670@@ -33,7 +34,7 @@ struct sysrq_key_op {
85671 char *help_msg;
85672 char *action_msg;
85673 int enable_mask;
85674-};
85675+} __do_const;
85676
85677 #ifdef CONFIG_MAGIC_SYSRQ
85678
85679diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
85680index ff307b5..f1a4468 100644
85681--- a/include/linux/thread_info.h
85682+++ b/include/linux/thread_info.h
85683@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
85684 #error "no set_restore_sigmask() provided and default one won't work"
85685 #endif
85686
85687+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
85688+
85689+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
85690+{
85691+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
85692+}
85693+
85694 #endif /* __KERNEL__ */
85695
85696 #endif /* _LINUX_THREAD_INFO_H */
85697diff --git a/include/linux/tty.h b/include/linux/tty.h
85698index 8413294..44391c7 100644
85699--- a/include/linux/tty.h
85700+++ b/include/linux/tty.h
85701@@ -202,7 +202,7 @@ struct tty_port {
85702 const struct tty_port_operations *ops; /* Port operations */
85703 spinlock_t lock; /* Lock protecting tty field */
85704 int blocked_open; /* Waiting to open */
85705- int count; /* Usage count */
85706+ atomic_t count; /* Usage count */
85707 wait_queue_head_t open_wait; /* Open waiters */
85708 wait_queue_head_t close_wait; /* Close waiters */
85709 wait_queue_head_t delta_msr_wait; /* Modem status change */
85710@@ -284,7 +284,7 @@ struct tty_struct {
85711 /* If the tty has a pending do_SAK, queue it here - akpm */
85712 struct work_struct SAK_work;
85713 struct tty_port *port;
85714-};
85715+} __randomize_layout;
85716
85717 /* Each of a tty's open files has private_data pointing to tty_file_private */
85718 struct tty_file_private {
85719@@ -548,7 +548,7 @@ extern int tty_port_open(struct tty_port *port,
85720 struct tty_struct *tty, struct file *filp);
85721 static inline int tty_port_users(struct tty_port *port)
85722 {
85723- return port->count + port->blocked_open;
85724+ return atomic_read(&port->count) + port->blocked_open;
85725 }
85726
85727 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
85728diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
85729index e48c608..6a19af2 100644
85730--- a/include/linux/tty_driver.h
85731+++ b/include/linux/tty_driver.h
85732@@ -287,7 +287,7 @@ struct tty_operations {
85733 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
85734 #endif
85735 const struct file_operations *proc_fops;
85736-};
85737+} __do_const __randomize_layout;
85738
85739 struct tty_driver {
85740 int magic; /* magic number for this structure */
85741@@ -321,7 +321,7 @@ struct tty_driver {
85742
85743 const struct tty_operations *ops;
85744 struct list_head tty_drivers;
85745-};
85746+} __randomize_layout;
85747
85748 extern struct list_head tty_drivers;
85749
85750diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
85751index 00c9d68..bc0188b 100644
85752--- a/include/linux/tty_ldisc.h
85753+++ b/include/linux/tty_ldisc.h
85754@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
85755
85756 struct module *owner;
85757
85758- int refcount;
85759+ atomic_t refcount;
85760 };
85761
85762 struct tty_ldisc {
85763diff --git a/include/linux/types.h b/include/linux/types.h
85764index a0bb704..f511c77 100644
85765--- a/include/linux/types.h
85766+++ b/include/linux/types.h
85767@@ -177,10 +177,26 @@ typedef struct {
85768 int counter;
85769 } atomic_t;
85770
85771+#ifdef CONFIG_PAX_REFCOUNT
85772+typedef struct {
85773+ int counter;
85774+} atomic_unchecked_t;
85775+#else
85776+typedef atomic_t atomic_unchecked_t;
85777+#endif
85778+
85779 #ifdef CONFIG_64BIT
85780 typedef struct {
85781 long counter;
85782 } atomic64_t;
85783+
85784+#ifdef CONFIG_PAX_REFCOUNT
85785+typedef struct {
85786+ long counter;
85787+} atomic64_unchecked_t;
85788+#else
85789+typedef atomic64_t atomic64_unchecked_t;
85790+#endif
85791 #endif
85792
85793 struct list_head {
85794diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
85795index ecd3319..8a36ded 100644
85796--- a/include/linux/uaccess.h
85797+++ b/include/linux/uaccess.h
85798@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
85799 long ret; \
85800 mm_segment_t old_fs = get_fs(); \
85801 \
85802- set_fs(KERNEL_DS); \
85803 pagefault_disable(); \
85804- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
85805- pagefault_enable(); \
85806+ set_fs(KERNEL_DS); \
85807+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
85808 set_fs(old_fs); \
85809+ pagefault_enable(); \
85810 ret; \
85811 })
85812
85813diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
85814index 2d1f9b6..d7a9fce 100644
85815--- a/include/linux/uidgid.h
85816+++ b/include/linux/uidgid.h
85817@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
85818
85819 #endif /* CONFIG_USER_NS */
85820
85821+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
85822+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
85823+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
85824+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
85825+
85826 #endif /* _LINUX_UIDGID_H */
85827diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
85828index 99c1b4d..562e6f3 100644
85829--- a/include/linux/unaligned/access_ok.h
85830+++ b/include/linux/unaligned/access_ok.h
85831@@ -4,34 +4,34 @@
85832 #include <linux/kernel.h>
85833 #include <asm/byteorder.h>
85834
85835-static inline u16 get_unaligned_le16(const void *p)
85836+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
85837 {
85838- return le16_to_cpup((__le16 *)p);
85839+ return le16_to_cpup((const __le16 *)p);
85840 }
85841
85842-static inline u32 get_unaligned_le32(const void *p)
85843+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
85844 {
85845- return le32_to_cpup((__le32 *)p);
85846+ return le32_to_cpup((const __le32 *)p);
85847 }
85848
85849-static inline u64 get_unaligned_le64(const void *p)
85850+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
85851 {
85852- return le64_to_cpup((__le64 *)p);
85853+ return le64_to_cpup((const __le64 *)p);
85854 }
85855
85856-static inline u16 get_unaligned_be16(const void *p)
85857+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
85858 {
85859- return be16_to_cpup((__be16 *)p);
85860+ return be16_to_cpup((const __be16 *)p);
85861 }
85862
85863-static inline u32 get_unaligned_be32(const void *p)
85864+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
85865 {
85866- return be32_to_cpup((__be32 *)p);
85867+ return be32_to_cpup((const __be32 *)p);
85868 }
85869
85870-static inline u64 get_unaligned_be64(const void *p)
85871+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
85872 {
85873- return be64_to_cpup((__be64 *)p);
85874+ return be64_to_cpup((const __be64 *)p);
85875 }
85876
85877 static inline void put_unaligned_le16(u16 val, void *p)
85878diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
85879index 4f844c6..60beb5d 100644
85880--- a/include/linux/uprobes.h
85881+++ b/include/linux/uprobes.h
85882@@ -98,11 +98,11 @@ struct uprobes_state {
85883 struct xol_area *xol_area;
85884 };
85885
85886-extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
85887-extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
85888-extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
85889-extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
85890-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
85891+extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
85892+extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
85893+extern bool is_swbp_insn(uprobe_opcode_t *insn);
85894+extern bool is_trap_insn(uprobe_opcode_t *insn);
85895+extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
85896 extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
85897 extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
85898 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
85899@@ -128,8 +128,8 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
85900 extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
85901 extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
85902 extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
85903-extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
85904-extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
85905+extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
85906+extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
85907 void *src, unsigned long len);
85908 #else /* !CONFIG_UPROBES */
85909 struct uprobes_state {
85910diff --git a/include/linux/usb.h b/include/linux/usb.h
85911index d2465bc..5256de4 100644
85912--- a/include/linux/usb.h
85913+++ b/include/linux/usb.h
85914@@ -571,7 +571,7 @@ struct usb_device {
85915 int maxchild;
85916
85917 u32 quirks;
85918- atomic_t urbnum;
85919+ atomic_unchecked_t urbnum;
85920
85921 unsigned long active_duration;
85922
85923@@ -1655,7 +1655,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
85924
85925 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
85926 __u8 request, __u8 requesttype, __u16 value, __u16 index,
85927- void *data, __u16 size, int timeout);
85928+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
85929 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
85930 void *data, int len, int *actual_length, int timeout);
85931 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
85932diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
85933index d5952bb..9a626d4 100644
85934--- a/include/linux/usb/renesas_usbhs.h
85935+++ b/include/linux/usb/renesas_usbhs.h
85936@@ -39,7 +39,7 @@ enum {
85937 */
85938 struct renesas_usbhs_driver_callback {
85939 int (*notify_hotplug)(struct platform_device *pdev);
85940-};
85941+} __no_const;
85942
85943 /*
85944 * callback functions for platform
85945diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
85946index e953726..8edb26a 100644
85947--- a/include/linux/user_namespace.h
85948+++ b/include/linux/user_namespace.h
85949@@ -33,7 +33,7 @@ struct user_namespace {
85950 struct key *persistent_keyring_register;
85951 struct rw_semaphore persistent_keyring_register_sem;
85952 #endif
85953-};
85954+} __randomize_layout;
85955
85956 extern struct user_namespace init_user_ns;
85957
85958diff --git a/include/linux/utsname.h b/include/linux/utsname.h
85959index 239e277..22a5cf5 100644
85960--- a/include/linux/utsname.h
85961+++ b/include/linux/utsname.h
85962@@ -24,7 +24,7 @@ struct uts_namespace {
85963 struct new_utsname name;
85964 struct user_namespace *user_ns;
85965 unsigned int proc_inum;
85966-};
85967+} __randomize_layout;
85968 extern struct uts_namespace init_uts_ns;
85969
85970 #ifdef CONFIG_UTS_NS
85971diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
85972index 6f8fbcf..4efc177 100644
85973--- a/include/linux/vermagic.h
85974+++ b/include/linux/vermagic.h
85975@@ -25,9 +25,42 @@
85976 #define MODULE_ARCH_VERMAGIC ""
85977 #endif
85978
85979+#ifdef CONFIG_PAX_REFCOUNT
85980+#define MODULE_PAX_REFCOUNT "REFCOUNT "
85981+#else
85982+#define MODULE_PAX_REFCOUNT ""
85983+#endif
85984+
85985+#ifdef CONSTIFY_PLUGIN
85986+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
85987+#else
85988+#define MODULE_CONSTIFY_PLUGIN ""
85989+#endif
85990+
85991+#ifdef STACKLEAK_PLUGIN
85992+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
85993+#else
85994+#define MODULE_STACKLEAK_PLUGIN ""
85995+#endif
85996+
85997+#ifdef RANDSTRUCT_PLUGIN
85998+#include <generated/randomize_layout_hash.h>
85999+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
86000+#else
86001+#define MODULE_RANDSTRUCT_PLUGIN
86002+#endif
86003+
86004+#ifdef CONFIG_GRKERNSEC
86005+#define MODULE_GRSEC "GRSEC "
86006+#else
86007+#define MODULE_GRSEC ""
86008+#endif
86009+
86010 #define VERMAGIC_STRING \
86011 UTS_RELEASE " " \
86012 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
86013 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
86014- MODULE_ARCH_VERMAGIC
86015+ MODULE_ARCH_VERMAGIC \
86016+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
86017+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
86018
86019diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
86020index b483abd..af305ad 100644
86021--- a/include/linux/vga_switcheroo.h
86022+++ b/include/linux/vga_switcheroo.h
86023@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
86024
86025 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
86026
86027-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
86028+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
86029 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
86030-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
86031+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
86032 #else
86033
86034 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
86035@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
86036
86037 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
86038
86039-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86040+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86041 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
86042-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86043+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86044
86045 #endif
86046 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
86047diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
86048index b87696f..1d11de7 100644
86049--- a/include/linux/vmalloc.h
86050+++ b/include/linux/vmalloc.h
86051@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
86052 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
86053 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
86054 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
86055+
86056+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
86057+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
86058+#endif
86059+
86060 /* bits [20..32] reserved for arch specific ioremap internals */
86061
86062 /*
86063@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
86064 unsigned long flags, pgprot_t prot);
86065 extern void vunmap(const void *addr);
86066
86067+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86068+extern void unmap_process_stacks(struct task_struct *task);
86069+#endif
86070+
86071 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
86072 unsigned long uaddr, void *kaddr,
86073 unsigned long size);
86074@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
86075
86076 /* for /dev/kmem */
86077 extern long vread(char *buf, char *addr, unsigned long count);
86078-extern long vwrite(char *buf, char *addr, unsigned long count);
86079+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
86080
86081 /*
86082 * Internals. Dont't use..
86083diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
86084index 82e7db7..f8ce3d0 100644
86085--- a/include/linux/vmstat.h
86086+++ b/include/linux/vmstat.h
86087@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
86088 /*
86089 * Zone based page accounting with per cpu differentials.
86090 */
86091-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86092+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86093
86094 static inline void zone_page_state_add(long x, struct zone *zone,
86095 enum zone_stat_item item)
86096 {
86097- atomic_long_add(x, &zone->vm_stat[item]);
86098- atomic_long_add(x, &vm_stat[item]);
86099+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
86100+ atomic_long_add_unchecked(x, &vm_stat[item]);
86101 }
86102
86103-static inline unsigned long global_page_state(enum zone_stat_item item)
86104+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
86105 {
86106- long x = atomic_long_read(&vm_stat[item]);
86107+ long x = atomic_long_read_unchecked(&vm_stat[item]);
86108 #ifdef CONFIG_SMP
86109 if (x < 0)
86110 x = 0;
86111@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
86112 return x;
86113 }
86114
86115-static inline unsigned long zone_page_state(struct zone *zone,
86116+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
86117 enum zone_stat_item item)
86118 {
86119- long x = atomic_long_read(&zone->vm_stat[item]);
86120+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86121 #ifdef CONFIG_SMP
86122 if (x < 0)
86123 x = 0;
86124@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
86125 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
86126 enum zone_stat_item item)
86127 {
86128- long x = atomic_long_read(&zone->vm_stat[item]);
86129+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86130
86131 #ifdef CONFIG_SMP
86132 int cpu;
86133@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
86134
86135 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
86136 {
86137- atomic_long_inc(&zone->vm_stat[item]);
86138- atomic_long_inc(&vm_stat[item]);
86139+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
86140+ atomic_long_inc_unchecked(&vm_stat[item]);
86141 }
86142
86143 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
86144 {
86145- atomic_long_dec(&zone->vm_stat[item]);
86146- atomic_long_dec(&vm_stat[item]);
86147+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
86148+ atomic_long_dec_unchecked(&vm_stat[item]);
86149 }
86150
86151 static inline void __inc_zone_page_state(struct page *page,
86152diff --git a/include/linux/xattr.h b/include/linux/xattr.h
86153index 91b0a68..0e9adf6 100644
86154--- a/include/linux/xattr.h
86155+++ b/include/linux/xattr.h
86156@@ -28,7 +28,7 @@ struct xattr_handler {
86157 size_t size, int handler_flags);
86158 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
86159 size_t size, int flags, int handler_flags);
86160-};
86161+} __do_const;
86162
86163 struct xattr {
86164 const char *name;
86165@@ -37,6 +37,9 @@ struct xattr {
86166 };
86167
86168 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
86169+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
86170+ssize_t pax_getxattr(struct dentry *, void *, size_t);
86171+#endif
86172 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86173 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86174 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
86175diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86176index 92dbbd3..13ab0b3 100644
86177--- a/include/linux/zlib.h
86178+++ b/include/linux/zlib.h
86179@@ -31,6 +31,7 @@
86180 #define _ZLIB_H
86181
86182 #include <linux/zconf.h>
86183+#include <linux/compiler.h>
86184
86185 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86186 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86187@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86188
86189 /* basic functions */
86190
86191-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86192+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86193 /*
86194 Returns the number of bytes that needs to be allocated for a per-
86195 stream workspace with the specified parameters. A pointer to this
86196diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86197index eb76cfd..9fd0e7c 100644
86198--- a/include/media/v4l2-dev.h
86199+++ b/include/media/v4l2-dev.h
86200@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86201 int (*mmap) (struct file *, struct vm_area_struct *);
86202 int (*open) (struct file *);
86203 int (*release) (struct file *);
86204-};
86205+} __do_const;
86206
86207 /*
86208 * Newer version of video_device, handled by videodev2.c
86209diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86210index ffb69da..040393e 100644
86211--- a/include/media/v4l2-device.h
86212+++ b/include/media/v4l2-device.h
86213@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86214 this function returns 0. If the name ends with a digit (e.g. cx18),
86215 then the name will be set to cx18-0 since cx180 looks really odd. */
86216 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86217- atomic_t *instance);
86218+ atomic_unchecked_t *instance);
86219
86220 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86221 Since the parent disappears this ensures that v4l2_dev doesn't have an
86222diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86223index d9fa68f..45c88d1 100644
86224--- a/include/net/9p/transport.h
86225+++ b/include/net/9p/transport.h
86226@@ -63,7 +63,7 @@ struct p9_trans_module {
86227 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86228 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86229 char *, char *, int , int, int, int);
86230-};
86231+} __do_const;
86232
86233 void v9fs_register_trans(struct p9_trans_module *m);
86234 void v9fs_unregister_trans(struct p9_trans_module *m);
86235diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86236index a175ba4..196eb8242 100644
86237--- a/include/net/af_unix.h
86238+++ b/include/net/af_unix.h
86239@@ -36,7 +36,7 @@ struct unix_skb_parms {
86240 u32 secid; /* Security ID */
86241 #endif
86242 u32 consumed;
86243-};
86244+} __randomize_layout;
86245
86246 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86247 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86248diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86249index 8df15ad..837fbedd 100644
86250--- a/include/net/bluetooth/l2cap.h
86251+++ b/include/net/bluetooth/l2cap.h
86252@@ -608,7 +608,7 @@ struct l2cap_ops {
86253 unsigned char *kdata,
86254 struct iovec *iov,
86255 int len);
86256-};
86257+} __do_const;
86258
86259 struct l2cap_conn {
86260 struct hci_conn *hcon;
86261diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86262index f2ae33d..c457cf0 100644
86263--- a/include/net/caif/cfctrl.h
86264+++ b/include/net/caif/cfctrl.h
86265@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86266 void (*radioset_rsp)(void);
86267 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86268 struct cflayer *client_layer);
86269-};
86270+} __no_const;
86271
86272 /* Link Setup Parameters for CAIF-Links. */
86273 struct cfctrl_link_param {
86274@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86275 struct cfctrl {
86276 struct cfsrvl serv;
86277 struct cfctrl_rsp res;
86278- atomic_t req_seq_no;
86279- atomic_t rsp_seq_no;
86280+ atomic_unchecked_t req_seq_no;
86281+ atomic_unchecked_t rsp_seq_no;
86282 struct list_head list;
86283 /* Protects from simultaneous access to first_req list */
86284 spinlock_t info_list_lock;
86285diff --git a/include/net/flow.h b/include/net/flow.h
86286index 8109a15..504466d 100644
86287--- a/include/net/flow.h
86288+++ b/include/net/flow.h
86289@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86290
86291 void flow_cache_flush(struct net *net);
86292 void flow_cache_flush_deferred(struct net *net);
86293-extern atomic_t flow_cache_genid;
86294+extern atomic_unchecked_t flow_cache_genid;
86295
86296 #endif
86297diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86298index af10c2c..a431cc5 100644
86299--- a/include/net/genetlink.h
86300+++ b/include/net/genetlink.h
86301@@ -120,7 +120,7 @@ struct genl_ops {
86302 u8 cmd;
86303 u8 internal_flags;
86304 u8 flags;
86305-};
86306+} __do_const;
86307
86308 int __genl_register_family(struct genl_family *family);
86309
86310diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86311index 734d9b5..48a9a4b 100644
86312--- a/include/net/gro_cells.h
86313+++ b/include/net/gro_cells.h
86314@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86315 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
86316
86317 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86318- atomic_long_inc(&dev->rx_dropped);
86319+ atomic_long_inc_unchecked(&dev->rx_dropped);
86320 kfree_skb(skb);
86321 return;
86322 }
86323diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86324index 5fbe656..9ed3d8b 100644
86325--- a/include/net/inet_connection_sock.h
86326+++ b/include/net/inet_connection_sock.h
86327@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86328 int (*bind_conflict)(const struct sock *sk,
86329 const struct inet_bind_bucket *tb, bool relax);
86330 void (*mtu_reduced)(struct sock *sk);
86331-};
86332+} __do_const;
86333
86334 /** inet_connection_sock - INET connection oriented sock
86335 *
86336diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86337index 01d590e..f69c61d 100644
86338--- a/include/net/inetpeer.h
86339+++ b/include/net/inetpeer.h
86340@@ -47,7 +47,7 @@ struct inet_peer {
86341 */
86342 union {
86343 struct {
86344- atomic_t rid; /* Frag reception counter */
86345+ atomic_unchecked_t rid; /* Frag reception counter */
86346 };
86347 struct rcu_head rcu;
86348 struct inet_peer *gc_next;
86349diff --git a/include/net/ip.h b/include/net/ip.h
86350index db4a771..965a42a 100644
86351--- a/include/net/ip.h
86352+++ b/include/net/ip.h
86353@@ -316,7 +316,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86354 }
86355 }
86356
86357-u32 ip_idents_reserve(u32 hash, int segs);
86358+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86359 void __ip_select_ident(struct iphdr *iph, int segs);
86360
86361 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86362diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86363index 9922093..a1755d6 100644
86364--- a/include/net/ip_fib.h
86365+++ b/include/net/ip_fib.h
86366@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86367
86368 #define FIB_RES_SADDR(net, res) \
86369 ((FIB_RES_NH(res).nh_saddr_genid == \
86370- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86371+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86372 FIB_RES_NH(res).nh_saddr : \
86373 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86374 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86375diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86376index 624a8a5..b1e2a24 100644
86377--- a/include/net/ip_vs.h
86378+++ b/include/net/ip_vs.h
86379@@ -558,7 +558,7 @@ struct ip_vs_conn {
86380 struct ip_vs_conn *control; /* Master control connection */
86381 atomic_t n_control; /* Number of controlled ones */
86382 struct ip_vs_dest *dest; /* real server */
86383- atomic_t in_pkts; /* incoming packet counter */
86384+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86385
86386 /* packet transmitter for different forwarding methods. If it
86387 mangles the packet, it must return NF_DROP or better NF_STOLEN,
86388@@ -705,7 +705,7 @@ struct ip_vs_dest {
86389 __be16 port; /* port number of the server */
86390 union nf_inet_addr addr; /* IP address of the server */
86391 volatile unsigned int flags; /* dest status flags */
86392- atomic_t conn_flags; /* flags to copy to conn */
86393+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86394 atomic_t weight; /* server weight */
86395
86396 atomic_t refcnt; /* reference counter */
86397@@ -960,11 +960,11 @@ struct netns_ipvs {
86398 /* ip_vs_lblc */
86399 int sysctl_lblc_expiration;
86400 struct ctl_table_header *lblc_ctl_header;
86401- struct ctl_table *lblc_ctl_table;
86402+ ctl_table_no_const *lblc_ctl_table;
86403 /* ip_vs_lblcr */
86404 int sysctl_lblcr_expiration;
86405 struct ctl_table_header *lblcr_ctl_header;
86406- struct ctl_table *lblcr_ctl_table;
86407+ ctl_table_no_const *lblcr_ctl_table;
86408 /* ip_vs_est */
86409 struct list_head est_list; /* estimator list */
86410 spinlock_t est_lock;
86411diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86412index 8d4f588..2e37ad2 100644
86413--- a/include/net/irda/ircomm_tty.h
86414+++ b/include/net/irda/ircomm_tty.h
86415@@ -33,6 +33,7 @@
86416 #include <linux/termios.h>
86417 #include <linux/timer.h>
86418 #include <linux/tty.h> /* struct tty_struct */
86419+#include <asm/local.h>
86420
86421 #include <net/irda/irias_object.h>
86422 #include <net/irda/ircomm_core.h>
86423diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86424index 714cc9a..ea05f3e 100644
86425--- a/include/net/iucv/af_iucv.h
86426+++ b/include/net/iucv/af_iucv.h
86427@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86428 struct iucv_sock_list {
86429 struct hlist_head head;
86430 rwlock_t lock;
86431- atomic_t autobind_name;
86432+ atomic_unchecked_t autobind_name;
86433 };
86434
86435 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86436diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86437index f3be818..bf46196 100644
86438--- a/include/net/llc_c_ac.h
86439+++ b/include/net/llc_c_ac.h
86440@@ -87,7 +87,7 @@
86441 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86442 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86443
86444-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86445+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86446
86447 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86448 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86449diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86450index 3948cf1..83b28c4 100644
86451--- a/include/net/llc_c_ev.h
86452+++ b/include/net/llc_c_ev.h
86453@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86454 return (struct llc_conn_state_ev *)skb->cb;
86455 }
86456
86457-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86458-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86459+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86460+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86461
86462 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86463 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86464diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86465index 0e79cfb..f46db31 100644
86466--- a/include/net/llc_c_st.h
86467+++ b/include/net/llc_c_st.h
86468@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86469 u8 next_state;
86470 llc_conn_ev_qfyr_t *ev_qualifiers;
86471 llc_conn_action_t *ev_actions;
86472-};
86473+} __do_const;
86474
86475 struct llc_conn_state {
86476 u8 current_state;
86477diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86478index a61b98c..aade1eb 100644
86479--- a/include/net/llc_s_ac.h
86480+++ b/include/net/llc_s_ac.h
86481@@ -23,7 +23,7 @@
86482 #define SAP_ACT_TEST_IND 9
86483
86484 /* All action functions must look like this */
86485-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86486+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86487
86488 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86489 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
86490diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86491index 567c681..cd73ac02 100644
86492--- a/include/net/llc_s_st.h
86493+++ b/include/net/llc_s_st.h
86494@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86495 llc_sap_ev_t ev;
86496 u8 next_state;
86497 llc_sap_action_t *ev_actions;
86498-};
86499+} __do_const;
86500
86501 struct llc_sap_state {
86502 u8 curr_state;
86503diff --git a/include/net/mac80211.h b/include/net/mac80211.h
86504index dae2e24..89336e6 100644
86505--- a/include/net/mac80211.h
86506+++ b/include/net/mac80211.h
86507@@ -4650,7 +4650,7 @@ struct rate_control_ops {
86508 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
86509
86510 u32 (*get_expected_throughput)(void *priv_sta);
86511-};
86512+} __do_const;
86513
86514 static inline int rate_supported(struct ieee80211_sta *sta,
86515 enum ieee80211_band band,
86516diff --git a/include/net/neighbour.h b/include/net/neighbour.h
86517index 47f4254..fd095bc 100644
86518--- a/include/net/neighbour.h
86519+++ b/include/net/neighbour.h
86520@@ -163,7 +163,7 @@ struct neigh_ops {
86521 void (*error_report)(struct neighbour *, struct sk_buff *);
86522 int (*output)(struct neighbour *, struct sk_buff *);
86523 int (*connected_output)(struct neighbour *, struct sk_buff *);
86524-};
86525+} __do_const;
86526
86527 struct pneigh_entry {
86528 struct pneigh_entry *next;
86529@@ -217,7 +217,7 @@ struct neigh_table {
86530 struct neigh_statistics __percpu *stats;
86531 struct neigh_hash_table __rcu *nht;
86532 struct pneigh_entry **phash_buckets;
86533-};
86534+} __randomize_layout;
86535
86536 static inline int neigh_parms_family(struct neigh_parms *p)
86537 {
86538diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
86539index e0d6466..e2f3003 100644
86540--- a/include/net/net_namespace.h
86541+++ b/include/net/net_namespace.h
86542@@ -129,8 +129,8 @@ struct net {
86543 struct netns_ipvs *ipvs;
86544 #endif
86545 struct sock *diag_nlsk;
86546- atomic_t fnhe_genid;
86547-};
86548+ atomic_unchecked_t fnhe_genid;
86549+} __randomize_layout;
86550
86551 #include <linux/seq_file_net.h>
86552
86553@@ -286,7 +286,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
86554 #define __net_init __init
86555 #define __net_exit __exit_refok
86556 #define __net_initdata __initdata
86557+#ifdef CONSTIFY_PLUGIN
86558 #define __net_initconst __initconst
86559+#else
86560+#define __net_initconst __initdata
86561+#endif
86562 #endif
86563
86564 struct pernet_operations {
86565@@ -296,7 +300,7 @@ struct pernet_operations {
86566 void (*exit_batch)(struct list_head *net_exit_list);
86567 int *id;
86568 size_t size;
86569-};
86570+} __do_const;
86571
86572 /*
86573 * Use these carefully. If you implement a network device and it
86574@@ -344,12 +348,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
86575
86576 static inline int rt_genid_ipv4(struct net *net)
86577 {
86578- return atomic_read(&net->ipv4.rt_genid);
86579+ return atomic_read_unchecked(&net->ipv4.rt_genid);
86580 }
86581
86582 static inline void rt_genid_bump_ipv4(struct net *net)
86583 {
86584- atomic_inc(&net->ipv4.rt_genid);
86585+ atomic_inc_unchecked(&net->ipv4.rt_genid);
86586 }
86587
86588 extern void (*__fib6_flush_trees)(struct net *net);
86589@@ -376,12 +380,12 @@ static inline void rt_genid_bump_all(struct net *net)
86590
86591 static inline int fnhe_genid(struct net *net)
86592 {
86593- return atomic_read(&net->fnhe_genid);
86594+ return atomic_read_unchecked(&net->fnhe_genid);
86595 }
86596
86597 static inline void fnhe_genid_bump(struct net *net)
86598 {
86599- atomic_inc(&net->fnhe_genid);
86600+ atomic_inc_unchecked(&net->fnhe_genid);
86601 }
86602
86603 #endif /* __NET_NET_NAMESPACE_H */
86604diff --git a/include/net/netdma.h b/include/net/netdma.h
86605index 8ba8ce2..99b7fff 100644
86606--- a/include/net/netdma.h
86607+++ b/include/net/netdma.h
86608@@ -24,7 +24,7 @@
86609 #include <linux/dmaengine.h>
86610 #include <linux/skbuff.h>
86611
86612-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
86613+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
86614 struct sk_buff *skb, int offset, struct iovec *to,
86615 size_t len, struct dma_pinned_list *pinned_list);
86616
86617diff --git a/include/net/netlink.h b/include/net/netlink.h
86618index 6c10762..3e5de0c 100644
86619--- a/include/net/netlink.h
86620+++ b/include/net/netlink.h
86621@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
86622 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
86623 {
86624 if (mark)
86625- skb_trim(skb, (unsigned char *) mark - skb->data);
86626+ skb_trim(skb, (const unsigned char *) mark - skb->data);
86627 }
86628
86629 /**
86630diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
86631index 29d6a94..235d3d8 100644
86632--- a/include/net/netns/conntrack.h
86633+++ b/include/net/netns/conntrack.h
86634@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
86635 struct nf_proto_net {
86636 #ifdef CONFIG_SYSCTL
86637 struct ctl_table_header *ctl_table_header;
86638- struct ctl_table *ctl_table;
86639+ ctl_table_no_const *ctl_table;
86640 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
86641 struct ctl_table_header *ctl_compat_header;
86642- struct ctl_table *ctl_compat_table;
86643+ ctl_table_no_const *ctl_compat_table;
86644 #endif
86645 #endif
86646 unsigned int users;
86647@@ -60,7 +60,7 @@ struct nf_ip_net {
86648 struct nf_icmp_net icmpv6;
86649 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
86650 struct ctl_table_header *ctl_table_header;
86651- struct ctl_table *ctl_table;
86652+ ctl_table_no_const *ctl_table;
86653 #endif
86654 };
86655
86656diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
86657index aec5e12..807233f 100644
86658--- a/include/net/netns/ipv4.h
86659+++ b/include/net/netns/ipv4.h
86660@@ -82,7 +82,7 @@ struct netns_ipv4 {
86661
86662 struct ping_group_range ping_group_range;
86663
86664- atomic_t dev_addr_genid;
86665+ atomic_unchecked_t dev_addr_genid;
86666
86667 #ifdef CONFIG_SYSCTL
86668 unsigned long *sysctl_local_reserved_ports;
86669@@ -96,6 +96,6 @@ struct netns_ipv4 {
86670 struct fib_rules_ops *mr_rules_ops;
86671 #endif
86672 #endif
86673- atomic_t rt_genid;
86674+ atomic_unchecked_t rt_genid;
86675 };
86676 #endif
86677diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
86678index eade27a..42894dd 100644
86679--- a/include/net/netns/ipv6.h
86680+++ b/include/net/netns/ipv6.h
86681@@ -75,8 +75,8 @@ struct netns_ipv6 {
86682 struct fib_rules_ops *mr6_rules_ops;
86683 #endif
86684 #endif
86685- atomic_t dev_addr_genid;
86686- atomic_t rt_genid;
86687+ atomic_unchecked_t dev_addr_genid;
86688+ atomic_unchecked_t rt_genid;
86689 };
86690
86691 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
86692diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
86693index 3492434..209f58c 100644
86694--- a/include/net/netns/xfrm.h
86695+++ b/include/net/netns/xfrm.h
86696@@ -64,7 +64,7 @@ struct netns_xfrm {
86697
86698 /* flow cache part */
86699 struct flow_cache flow_cache_global;
86700- atomic_t flow_cache_genid;
86701+ atomic_unchecked_t flow_cache_genid;
86702 struct list_head flow_cache_gc_list;
86703 spinlock_t flow_cache_gc_lock;
86704 struct work_struct flow_cache_gc_work;
86705diff --git a/include/net/ping.h b/include/net/ping.h
86706index 026479b..d9b2829 100644
86707--- a/include/net/ping.h
86708+++ b/include/net/ping.h
86709@@ -54,7 +54,7 @@ struct ping_iter_state {
86710
86711 extern struct proto ping_prot;
86712 #if IS_ENABLED(CONFIG_IPV6)
86713-extern struct pingv6_ops pingv6_ops;
86714+extern struct pingv6_ops *pingv6_ops;
86715 #endif
86716
86717 struct pingfakehdr {
86718diff --git a/include/net/protocol.h b/include/net/protocol.h
86719index d6fcc1f..ca277058 100644
86720--- a/include/net/protocol.h
86721+++ b/include/net/protocol.h
86722@@ -49,7 +49,7 @@ struct net_protocol {
86723 * socket lookup?
86724 */
86725 icmp_strict_tag_validation:1;
86726-};
86727+} __do_const;
86728
86729 #if IS_ENABLED(CONFIG_IPV6)
86730 struct inet6_protocol {
86731@@ -62,7 +62,7 @@ struct inet6_protocol {
86732 u8 type, u8 code, int offset,
86733 __be32 info);
86734 unsigned int flags; /* INET6_PROTO_xxx */
86735-};
86736+} __do_const;
86737
86738 #define INET6_PROTO_NOPOLICY 0x1
86739 #define INET6_PROTO_FINAL 0x2
86740diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
86741index e21b9f9..0191ef0 100644
86742--- a/include/net/rtnetlink.h
86743+++ b/include/net/rtnetlink.h
86744@@ -93,7 +93,7 @@ struct rtnl_link_ops {
86745 int (*fill_slave_info)(struct sk_buff *skb,
86746 const struct net_device *dev,
86747 const struct net_device *slave_dev);
86748-};
86749+} __do_const;
86750
86751 int __rtnl_link_register(struct rtnl_link_ops *ops);
86752 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
86753diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
86754index 4a5b9a3..ca27d73 100644
86755--- a/include/net/sctp/checksum.h
86756+++ b/include/net/sctp/checksum.h
86757@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
86758 unsigned int offset)
86759 {
86760 struct sctphdr *sh = sctp_hdr(skb);
86761- __le32 ret, old = sh->checksum;
86762- const struct skb_checksum_ops ops = {
86763+ __le32 ret, old = sh->checksum;
86764+ static const struct skb_checksum_ops ops = {
86765 .update = sctp_csum_update,
86766 .combine = sctp_csum_combine,
86767 };
86768diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
86769index 72a31db..aaa63d9 100644
86770--- a/include/net/sctp/sm.h
86771+++ b/include/net/sctp/sm.h
86772@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
86773 typedef struct {
86774 sctp_state_fn_t *fn;
86775 const char *name;
86776-} sctp_sm_table_entry_t;
86777+} __do_const sctp_sm_table_entry_t;
86778
86779 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
86780 * currently in use.
86781@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
86782 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
86783
86784 /* Extern declarations for major data structures. */
86785-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86786+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86787
86788
86789 /* Get the size of a DATA chunk payload. */
86790diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
86791index 4ff3f67..89ae38e 100644
86792--- a/include/net/sctp/structs.h
86793+++ b/include/net/sctp/structs.h
86794@@ -509,7 +509,7 @@ struct sctp_pf {
86795 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
86796 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
86797 struct sctp_af *af;
86798-};
86799+} __do_const;
86800
86801
86802 /* Structure to track chunk fragments that have been acked, but peer
86803diff --git a/include/net/sock.h b/include/net/sock.h
86804index b9a5bd0..dcd5f3c 100644
86805--- a/include/net/sock.h
86806+++ b/include/net/sock.h
86807@@ -356,7 +356,7 @@ struct sock {
86808 unsigned int sk_napi_id;
86809 unsigned int sk_ll_usec;
86810 #endif
86811- atomic_t sk_drops;
86812+ atomic_unchecked_t sk_drops;
86813 int sk_rcvbuf;
86814
86815 struct sk_filter __rcu *sk_filter;
86816@@ -1053,7 +1053,7 @@ struct proto {
86817 void (*destroy_cgroup)(struct mem_cgroup *memcg);
86818 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
86819 #endif
86820-};
86821+} __randomize_layout;
86822
86823 /*
86824 * Bits in struct cg_proto.flags
86825@@ -1240,7 +1240,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
86826 return ret >> PAGE_SHIFT;
86827 }
86828
86829-static inline long
86830+static inline long __intentional_overflow(-1)
86831 sk_memory_allocated(const struct sock *sk)
86832 {
86833 struct proto *prot = sk->sk_prot;
86834@@ -1385,7 +1385,7 @@ struct sock_iocb {
86835 struct scm_cookie *scm;
86836 struct msghdr *msg, async_msg;
86837 struct kiocb *kiocb;
86838-};
86839+} __randomize_layout;
86840
86841 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
86842 {
86843@@ -1820,7 +1820,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
86844 }
86845
86846 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
86847- char __user *from, char *to,
86848+ char __user *from, unsigned char *to,
86849 int copy, int offset)
86850 {
86851 if (skb->ip_summed == CHECKSUM_NONE) {
86852@@ -2091,7 +2091,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
86853 }
86854 }
86855
86856-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86857+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86858
86859 /**
86860 * sk_page_frag - return an appropriate page_frag
86861diff --git a/include/net/tcp.h b/include/net/tcp.h
86862index 590e01a..76498f3 100644
86863--- a/include/net/tcp.h
86864+++ b/include/net/tcp.h
86865@@ -523,7 +523,7 @@ void tcp_retransmit_timer(struct sock *sk);
86866 void tcp_xmit_retransmit_queue(struct sock *);
86867 void tcp_simple_retransmit(struct sock *);
86868 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
86869-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86870+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86871
86872 void tcp_send_probe0(struct sock *);
86873 void tcp_send_partial(struct sock *);
86874@@ -696,8 +696,8 @@ struct tcp_skb_cb {
86875 struct inet6_skb_parm h6;
86876 #endif
86877 } header; /* For incoming frames */
86878- __u32 seq; /* Starting sequence number */
86879- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
86880+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
86881+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
86882 __u32 when; /* used to compute rtt's */
86883 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
86884
86885@@ -713,7 +713,7 @@ struct tcp_skb_cb {
86886
86887 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
86888 /* 1 byte hole */
86889- __u32 ack_seq; /* Sequence number ACK'd */
86890+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
86891 };
86892
86893 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
86894diff --git a/include/net/xfrm.h b/include/net/xfrm.h
86895index 721e9c3b..3c81bbf 100644
86896--- a/include/net/xfrm.h
86897+++ b/include/net/xfrm.h
86898@@ -285,7 +285,6 @@ struct xfrm_dst;
86899 struct xfrm_policy_afinfo {
86900 unsigned short family;
86901 struct dst_ops *dst_ops;
86902- void (*garbage_collect)(struct net *net);
86903 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
86904 const xfrm_address_t *saddr,
86905 const xfrm_address_t *daddr);
86906@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
86907 struct net_device *dev,
86908 const struct flowi *fl);
86909 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
86910-};
86911+} __do_const;
86912
86913 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
86914 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
86915@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
86916 int (*transport_finish)(struct sk_buff *skb,
86917 int async);
86918 void (*local_error)(struct sk_buff *skb, u32 mtu);
86919-};
86920+} __do_const;
86921
86922 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
86923 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
86924@@ -437,7 +436,7 @@ struct xfrm_mode {
86925 struct module *owner;
86926 unsigned int encap;
86927 int flags;
86928-};
86929+} __do_const;
86930
86931 /* Flags for xfrm_mode. */
86932 enum {
86933@@ -534,7 +533,7 @@ struct xfrm_policy {
86934 struct timer_list timer;
86935
86936 struct flow_cache_object flo;
86937- atomic_t genid;
86938+ atomic_unchecked_t genid;
86939 u32 priority;
86940 u32 index;
86941 struct xfrm_mark mark;
86942@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
86943 }
86944
86945 void xfrm_garbage_collect(struct net *net);
86946+void xfrm_garbage_collect_deferred(struct net *net);
86947
86948 #else
86949
86950@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
86951 static inline void xfrm_garbage_collect(struct net *net)
86952 {
86953 }
86954+static inline void xfrm_garbage_collect_deferred(struct net *net)
86955+{
86956+}
86957 #endif
86958
86959 static __inline__
86960diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
86961index 1017e0b..227aa4d 100644
86962--- a/include/rdma/iw_cm.h
86963+++ b/include/rdma/iw_cm.h
86964@@ -122,7 +122,7 @@ struct iw_cm_verbs {
86965 int backlog);
86966
86967 int (*destroy_listen)(struct iw_cm_id *cm_id);
86968-};
86969+} __no_const;
86970
86971 /**
86972 * iw_create_cm_id - Create an IW CM identifier.
86973diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
86974index 52beadf..598734c 100644
86975--- a/include/scsi/libfc.h
86976+++ b/include/scsi/libfc.h
86977@@ -771,6 +771,7 @@ struct libfc_function_template {
86978 */
86979 void (*disc_stop_final) (struct fc_lport *);
86980 };
86981+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
86982
86983 /**
86984 * struct fc_disc - Discovery context
86985@@ -875,7 +876,7 @@ struct fc_lport {
86986 struct fc_vport *vport;
86987
86988 /* Operational Information */
86989- struct libfc_function_template tt;
86990+ libfc_function_template_no_const tt;
86991 u8 link_up;
86992 u8 qfull;
86993 enum fc_lport_state state;
86994diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
86995index 1a0d184..4fb841f 100644
86996--- a/include/scsi/scsi_device.h
86997+++ b/include/scsi/scsi_device.h
86998@@ -185,9 +185,9 @@ struct scsi_device {
86999 unsigned int max_device_blocked; /* what device_blocked counts down from */
87000 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
87001
87002- atomic_t iorequest_cnt;
87003- atomic_t iodone_cnt;
87004- atomic_t ioerr_cnt;
87005+ atomic_unchecked_t iorequest_cnt;
87006+ atomic_unchecked_t iodone_cnt;
87007+ atomic_unchecked_t ioerr_cnt;
87008
87009 struct device sdev_gendev,
87010 sdev_dev;
87011diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
87012index 007a0bc..7188db8 100644
87013--- a/include/scsi/scsi_transport_fc.h
87014+++ b/include/scsi/scsi_transport_fc.h
87015@@ -756,7 +756,8 @@ struct fc_function_template {
87016 unsigned long show_host_system_hostname:1;
87017
87018 unsigned long disable_target_scan:1;
87019-};
87020+} __do_const;
87021+typedef struct fc_function_template __no_const fc_function_template_no_const;
87022
87023
87024 /**
87025diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
87026index ae6c3b8..fd748ac 100644
87027--- a/include/sound/compress_driver.h
87028+++ b/include/sound/compress_driver.h
87029@@ -128,7 +128,7 @@ struct snd_compr_ops {
87030 struct snd_compr_caps *caps);
87031 int (*get_codec_caps) (struct snd_compr_stream *stream,
87032 struct snd_compr_codec_caps *codec);
87033-};
87034+} __no_const;
87035
87036 /**
87037 * struct snd_compr: Compressed device
87038diff --git a/include/sound/soc.h b/include/sound/soc.h
87039index c83a334..27c8038 100644
87040--- a/include/sound/soc.h
87041+++ b/include/sound/soc.h
87042@@ -817,7 +817,7 @@ struct snd_soc_codec_driver {
87043 /* probe ordering - for components with runtime dependencies */
87044 int probe_order;
87045 int remove_order;
87046-};
87047+} __do_const;
87048
87049 /* SoC platform interface */
87050 struct snd_soc_platform_driver {
87051@@ -861,7 +861,7 @@ struct snd_soc_platform_driver {
87052 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
87053 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
87054 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
87055-};
87056+} __do_const;
87057
87058 struct snd_soc_dai_link_component {
87059 const char *name;
87060diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
87061index 9ec9864..e2ee1ee 100644
87062--- a/include/target/target_core_base.h
87063+++ b/include/target/target_core_base.h
87064@@ -761,7 +761,7 @@ struct se_device {
87065 atomic_long_t write_bytes;
87066 /* Active commands on this virtual SE device */
87067 atomic_t simple_cmds;
87068- atomic_t dev_ordered_id;
87069+ atomic_unchecked_t dev_ordered_id;
87070 atomic_t dev_ordered_sync;
87071 atomic_t dev_qf_count;
87072 int export_count;
87073diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
87074new file mode 100644
87075index 0000000..fb634b7
87076--- /dev/null
87077+++ b/include/trace/events/fs.h
87078@@ -0,0 +1,53 @@
87079+#undef TRACE_SYSTEM
87080+#define TRACE_SYSTEM fs
87081+
87082+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
87083+#define _TRACE_FS_H
87084+
87085+#include <linux/fs.h>
87086+#include <linux/tracepoint.h>
87087+
87088+TRACE_EVENT(do_sys_open,
87089+
87090+ TP_PROTO(const char *filename, int flags, int mode),
87091+
87092+ TP_ARGS(filename, flags, mode),
87093+
87094+ TP_STRUCT__entry(
87095+ __string( filename, filename )
87096+ __field( int, flags )
87097+ __field( int, mode )
87098+ ),
87099+
87100+ TP_fast_assign(
87101+ __assign_str(filename, filename);
87102+ __entry->flags = flags;
87103+ __entry->mode = mode;
87104+ ),
87105+
87106+ TP_printk("\"%s\" %x %o",
87107+ __get_str(filename), __entry->flags, __entry->mode)
87108+);
87109+
87110+TRACE_EVENT(open_exec,
87111+
87112+ TP_PROTO(const char *filename),
87113+
87114+ TP_ARGS(filename),
87115+
87116+ TP_STRUCT__entry(
87117+ __string( filename, filename )
87118+ ),
87119+
87120+ TP_fast_assign(
87121+ __assign_str(filename, filename);
87122+ ),
87123+
87124+ TP_printk("\"%s\"",
87125+ __get_str(filename))
87126+);
87127+
87128+#endif /* _TRACE_FS_H */
87129+
87130+/* This part must be outside protection */
87131+#include <trace/define_trace.h>
87132diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
87133index 3608beb..df39d8a 100644
87134--- a/include/trace/events/irq.h
87135+++ b/include/trace/events/irq.h
87136@@ -36,7 +36,7 @@ struct softirq_action;
87137 */
87138 TRACE_EVENT(irq_handler_entry,
87139
87140- TP_PROTO(int irq, struct irqaction *action),
87141+ TP_PROTO(int irq, const struct irqaction *action),
87142
87143 TP_ARGS(irq, action),
87144
87145@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
87146 */
87147 TRACE_EVENT(irq_handler_exit,
87148
87149- TP_PROTO(int irq, struct irqaction *action, int ret),
87150+ TP_PROTO(int irq, const struct irqaction *action, int ret),
87151
87152 TP_ARGS(irq, action, ret),
87153
87154diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
87155index 7caf44c..23c6f27 100644
87156--- a/include/uapi/linux/a.out.h
87157+++ b/include/uapi/linux/a.out.h
87158@@ -39,6 +39,14 @@ enum machine_type {
87159 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
87160 };
87161
87162+/* Constants for the N_FLAGS field */
87163+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87164+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
87165+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
87166+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
87167+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87168+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87169+
87170 #if !defined (N_MAGIC)
87171 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
87172 #endif
87173diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87174index 22b6ad3..aeba37e 100644
87175--- a/include/uapi/linux/bcache.h
87176+++ b/include/uapi/linux/bcache.h
87177@@ -5,6 +5,7 @@
87178 * Bcache on disk data structures
87179 */
87180
87181+#include <linux/compiler.h>
87182 #include <asm/types.h>
87183
87184 #define BITMASK(name, type, field, offset, size) \
87185@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87186 /* Btree keys - all units are in sectors */
87187
87188 struct bkey {
87189- __u64 high;
87190- __u64 low;
87191+ __u64 high __intentional_overflow(-1);
87192+ __u64 low __intentional_overflow(-1);
87193 __u64 ptr[];
87194 };
87195
87196diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87197index d876736..ccce5c0 100644
87198--- a/include/uapi/linux/byteorder/little_endian.h
87199+++ b/include/uapi/linux/byteorder/little_endian.h
87200@@ -42,51 +42,51 @@
87201
87202 static inline __le64 __cpu_to_le64p(const __u64 *p)
87203 {
87204- return (__force __le64)*p;
87205+ return (__force const __le64)*p;
87206 }
87207-static inline __u64 __le64_to_cpup(const __le64 *p)
87208+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87209 {
87210- return (__force __u64)*p;
87211+ return (__force const __u64)*p;
87212 }
87213 static inline __le32 __cpu_to_le32p(const __u32 *p)
87214 {
87215- return (__force __le32)*p;
87216+ return (__force const __le32)*p;
87217 }
87218 static inline __u32 __le32_to_cpup(const __le32 *p)
87219 {
87220- return (__force __u32)*p;
87221+ return (__force const __u32)*p;
87222 }
87223 static inline __le16 __cpu_to_le16p(const __u16 *p)
87224 {
87225- return (__force __le16)*p;
87226+ return (__force const __le16)*p;
87227 }
87228 static inline __u16 __le16_to_cpup(const __le16 *p)
87229 {
87230- return (__force __u16)*p;
87231+ return (__force const __u16)*p;
87232 }
87233 static inline __be64 __cpu_to_be64p(const __u64 *p)
87234 {
87235- return (__force __be64)__swab64p(p);
87236+ return (__force const __be64)__swab64p(p);
87237 }
87238 static inline __u64 __be64_to_cpup(const __be64 *p)
87239 {
87240- return __swab64p((__u64 *)p);
87241+ return __swab64p((const __u64 *)p);
87242 }
87243 static inline __be32 __cpu_to_be32p(const __u32 *p)
87244 {
87245- return (__force __be32)__swab32p(p);
87246+ return (__force const __be32)__swab32p(p);
87247 }
87248-static inline __u32 __be32_to_cpup(const __be32 *p)
87249+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87250 {
87251- return __swab32p((__u32 *)p);
87252+ return __swab32p((const __u32 *)p);
87253 }
87254 static inline __be16 __cpu_to_be16p(const __u16 *p)
87255 {
87256- return (__force __be16)__swab16p(p);
87257+ return (__force const __be16)__swab16p(p);
87258 }
87259 static inline __u16 __be16_to_cpup(const __be16 *p)
87260 {
87261- return __swab16p((__u16 *)p);
87262+ return __swab16p((const __u16 *)p);
87263 }
87264 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87265 #define __le64_to_cpus(x) do { (void)(x); } while (0)
87266diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87267index ef6103b..d4e65dd 100644
87268--- a/include/uapi/linux/elf.h
87269+++ b/include/uapi/linux/elf.h
87270@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87271 #define PT_GNU_EH_FRAME 0x6474e550
87272
87273 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87274+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87275+
87276+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87277+
87278+/* Constants for the e_flags field */
87279+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87280+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87281+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87282+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87283+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87284+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87285
87286 /*
87287 * Extended Numbering
87288@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87289 #define DT_DEBUG 21
87290 #define DT_TEXTREL 22
87291 #define DT_JMPREL 23
87292+#define DT_FLAGS 30
87293+ #define DF_TEXTREL 0x00000004
87294 #define DT_ENCODING 32
87295 #define OLD_DT_LOOS 0x60000000
87296 #define DT_LOOS 0x6000000d
87297@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87298 #define PF_W 0x2
87299 #define PF_X 0x1
87300
87301+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87302+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87303+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87304+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87305+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87306+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87307+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87308+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87309+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87310+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87311+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87312+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87313+
87314 typedef struct elf32_phdr{
87315 Elf32_Word p_type;
87316 Elf32_Off p_offset;
87317@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87318 #define EI_OSABI 7
87319 #define EI_PAD 8
87320
87321+#define EI_PAX 14
87322+
87323 #define ELFMAG0 0x7f /* EI_MAG */
87324 #define ELFMAG1 'E'
87325 #define ELFMAG2 'L'
87326diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87327index aa169c4..6a2771d 100644
87328--- a/include/uapi/linux/personality.h
87329+++ b/include/uapi/linux/personality.h
87330@@ -30,6 +30,7 @@ enum {
87331 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87332 ADDR_NO_RANDOMIZE | \
87333 ADDR_COMPAT_LAYOUT | \
87334+ ADDR_LIMIT_3GB | \
87335 MMAP_PAGE_ZERO)
87336
87337 /*
87338diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87339index 7530e74..e714828 100644
87340--- a/include/uapi/linux/screen_info.h
87341+++ b/include/uapi/linux/screen_info.h
87342@@ -43,7 +43,8 @@ struct screen_info {
87343 __u16 pages; /* 0x32 */
87344 __u16 vesa_attributes; /* 0x34 */
87345 __u32 capabilities; /* 0x36 */
87346- __u8 _reserved[6]; /* 0x3a */
87347+ __u16 vesapm_size; /* 0x3a */
87348+ __u8 _reserved[4]; /* 0x3c */
87349 } __attribute__((packed));
87350
87351 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87352diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87353index 0e011eb..82681b1 100644
87354--- a/include/uapi/linux/swab.h
87355+++ b/include/uapi/linux/swab.h
87356@@ -43,7 +43,7 @@
87357 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87358 */
87359
87360-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87361+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87362 {
87363 #ifdef __HAVE_BUILTIN_BSWAP16__
87364 return __builtin_bswap16(val);
87365@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87366 #endif
87367 }
87368
87369-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87370+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87371 {
87372 #ifdef __HAVE_BUILTIN_BSWAP32__
87373 return __builtin_bswap32(val);
87374@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87375 #endif
87376 }
87377
87378-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87379+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87380 {
87381 #ifdef __HAVE_BUILTIN_BSWAP64__
87382 return __builtin_bswap64(val);
87383diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
87384index 43aaba1..1c30b48 100644
87385--- a/include/uapi/linux/sysctl.h
87386+++ b/include/uapi/linux/sysctl.h
87387@@ -155,8 +155,6 @@ enum
87388 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
87389 };
87390
87391-
87392-
87393 /* CTL_VM names: */
87394 enum
87395 {
87396diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
87397index 778a329..1416ffb 100644
87398--- a/include/uapi/linux/videodev2.h
87399+++ b/include/uapi/linux/videodev2.h
87400@@ -1285,7 +1285,7 @@ struct v4l2_ext_control {
87401 union {
87402 __s32 value;
87403 __s64 value64;
87404- char *string;
87405+ char __user *string;
87406 __u8 *p_u8;
87407 __u16 *p_u16;
87408 __u32 *p_u32;
87409diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87410index 1590c49..5eab462 100644
87411--- a/include/uapi/linux/xattr.h
87412+++ b/include/uapi/linux/xattr.h
87413@@ -73,5 +73,9 @@
87414 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87415 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87416
87417+/* User namespace */
87418+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87419+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87420+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87421
87422 #endif /* _UAPI_LINUX_XATTR_H */
87423diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87424index f9466fa..f4e2b81 100644
87425--- a/include/video/udlfb.h
87426+++ b/include/video/udlfb.h
87427@@ -53,10 +53,10 @@ struct dlfb_data {
87428 u32 pseudo_palette[256];
87429 int blank_mode; /*one of FB_BLANK_ */
87430 /* blit-only rendering path metrics, exposed through sysfs */
87431- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87432- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87433- atomic_t bytes_sent; /* to usb, after compression including overhead */
87434- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87435+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87436+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87437+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87438+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87439 };
87440
87441 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87442diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87443index 30f5362..8ed8ac9 100644
87444--- a/include/video/uvesafb.h
87445+++ b/include/video/uvesafb.h
87446@@ -122,6 +122,7 @@ struct uvesafb_par {
87447 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87448 u8 pmi_setpal; /* PMI for palette changes */
87449 u16 *pmi_base; /* protected mode interface location */
87450+ u8 *pmi_code; /* protected mode code location */
87451 void *pmi_start;
87452 void *pmi_pal;
87453 u8 *vbe_state_orig; /*
87454diff --git a/init/Kconfig b/init/Kconfig
87455index 80a6907..baf7d53 100644
87456--- a/init/Kconfig
87457+++ b/init/Kconfig
87458@@ -1150,6 +1150,7 @@ endif # CGROUPS
87459
87460 config CHECKPOINT_RESTORE
87461 bool "Checkpoint/restore support" if EXPERT
87462+ depends on !GRKERNSEC
87463 default n
87464 help
87465 Enables additional kernel features in a sake of checkpoint/restore.
87466@@ -1635,7 +1636,7 @@ config SLUB_DEBUG
87467
87468 config COMPAT_BRK
87469 bool "Disable heap randomization"
87470- default y
87471+ default n
87472 help
87473 Randomizing heap placement makes heap exploits harder, but it
87474 also breaks ancient binaries (including anything libc5 based).
87475@@ -1923,7 +1924,7 @@ config INIT_ALL_POSSIBLE
87476 config STOP_MACHINE
87477 bool
87478 default y
87479- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87480+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87481 help
87482 Need stop_machine() primitive.
87483
87484diff --git a/init/Makefile b/init/Makefile
87485index 7bc47ee..6da2dc7 100644
87486--- a/init/Makefile
87487+++ b/init/Makefile
87488@@ -2,6 +2,9 @@
87489 # Makefile for the linux kernel.
87490 #
87491
87492+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87493+asflags-y := $(GCC_PLUGINS_AFLAGS)
87494+
87495 obj-y := main.o version.o mounts.o
87496 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87497 obj-y += noinitramfs.o
87498diff --git a/init/do_mounts.c b/init/do_mounts.c
87499index 82f2288..ea1430a 100644
87500--- a/init/do_mounts.c
87501+++ b/init/do_mounts.c
87502@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
87503 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87504 {
87505 struct super_block *s;
87506- int err = sys_mount(name, "/root", fs, flags, data);
87507+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87508 if (err)
87509 return err;
87510
87511- sys_chdir("/root");
87512+ sys_chdir((const char __force_user *)"/root");
87513 s = current->fs->pwd.dentry->d_sb;
87514 ROOT_DEV = s->s_dev;
87515 printk(KERN_INFO
87516@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
87517 va_start(args, fmt);
87518 vsprintf(buf, fmt, args);
87519 va_end(args);
87520- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87521+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87522 if (fd >= 0) {
87523 sys_ioctl(fd, FDEJECT, 0);
87524 sys_close(fd);
87525 }
87526 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87527- fd = sys_open("/dev/console", O_RDWR, 0);
87528+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87529 if (fd >= 0) {
87530 sys_ioctl(fd, TCGETS, (long)&termios);
87531 termios.c_lflag &= ~ICANON;
87532 sys_ioctl(fd, TCSETSF, (long)&termios);
87533- sys_read(fd, &c, 1);
87534+ sys_read(fd, (char __user *)&c, 1);
87535 termios.c_lflag |= ICANON;
87536 sys_ioctl(fd, TCSETSF, (long)&termios);
87537 sys_close(fd);
87538@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
87539 mount_root();
87540 out:
87541 devtmpfs_mount("dev");
87542- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87543- sys_chroot(".");
87544+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87545+ sys_chroot((const char __force_user *)".");
87546 }
87547
87548 static bool is_tmpfs;
87549diff --git a/init/do_mounts.h b/init/do_mounts.h
87550index f5b978a..69dbfe8 100644
87551--- a/init/do_mounts.h
87552+++ b/init/do_mounts.h
87553@@ -15,15 +15,15 @@ extern int root_mountflags;
87554
87555 static inline int create_dev(char *name, dev_t dev)
87556 {
87557- sys_unlink(name);
87558- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
87559+ sys_unlink((char __force_user *)name);
87560+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
87561 }
87562
87563 #if BITS_PER_LONG == 32
87564 static inline u32 bstat(char *name)
87565 {
87566 struct stat64 stat;
87567- if (sys_stat64(name, &stat) != 0)
87568+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
87569 return 0;
87570 if (!S_ISBLK(stat.st_mode))
87571 return 0;
87572@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
87573 static inline u32 bstat(char *name)
87574 {
87575 struct stat stat;
87576- if (sys_newstat(name, &stat) != 0)
87577+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
87578 return 0;
87579 if (!S_ISBLK(stat.st_mode))
87580 return 0;
87581diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
87582index 3e0878e..8a9d7a0 100644
87583--- a/init/do_mounts_initrd.c
87584+++ b/init/do_mounts_initrd.c
87585@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
87586 {
87587 sys_unshare(CLONE_FS | CLONE_FILES);
87588 /* stdin/stdout/stderr for /linuxrc */
87589- sys_open("/dev/console", O_RDWR, 0);
87590+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
87591 sys_dup(0);
87592 sys_dup(0);
87593 /* move initrd over / and chdir/chroot in initrd root */
87594- sys_chdir("/root");
87595- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87596- sys_chroot(".");
87597+ sys_chdir((const char __force_user *)"/root");
87598+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87599+ sys_chroot((const char __force_user *)".");
87600 sys_setsid();
87601 return 0;
87602 }
87603@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
87604 create_dev("/dev/root.old", Root_RAM0);
87605 /* mount initrd on rootfs' /root */
87606 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
87607- sys_mkdir("/old", 0700);
87608- sys_chdir("/old");
87609+ sys_mkdir((const char __force_user *)"/old", 0700);
87610+ sys_chdir((const char __force_user *)"/old");
87611
87612 /* try loading default modules from initrd */
87613 load_default_modules();
87614@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
87615 current->flags &= ~PF_FREEZER_SKIP;
87616
87617 /* move initrd to rootfs' /old */
87618- sys_mount("..", ".", NULL, MS_MOVE, NULL);
87619+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
87620 /* switch root and cwd back to / of rootfs */
87621- sys_chroot("..");
87622+ sys_chroot((const char __force_user *)"..");
87623
87624 if (new_decode_dev(real_root_dev) == Root_RAM0) {
87625- sys_chdir("/old");
87626+ sys_chdir((const char __force_user *)"/old");
87627 return;
87628 }
87629
87630- sys_chdir("/");
87631+ sys_chdir((const char __force_user *)"/");
87632 ROOT_DEV = new_decode_dev(real_root_dev);
87633 mount_root();
87634
87635 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
87636- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
87637+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
87638 if (!error)
87639 printk("okay\n");
87640 else {
87641- int fd = sys_open("/dev/root.old", O_RDWR, 0);
87642+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
87643 if (error == -ENOENT)
87644 printk("/initrd does not exist. Ignored.\n");
87645 else
87646 printk("failed\n");
87647 printk(KERN_NOTICE "Unmounting old root\n");
87648- sys_umount("/old", MNT_DETACH);
87649+ sys_umount((char __force_user *)"/old", MNT_DETACH);
87650 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
87651 if (fd < 0) {
87652 error = fd;
87653@@ -127,11 +127,11 @@ int __init initrd_load(void)
87654 * mounted in the normal path.
87655 */
87656 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
87657- sys_unlink("/initrd.image");
87658+ sys_unlink((const char __force_user *)"/initrd.image");
87659 handle_initrd();
87660 return 1;
87661 }
87662 }
87663- sys_unlink("/initrd.image");
87664+ sys_unlink((const char __force_user *)"/initrd.image");
87665 return 0;
87666 }
87667diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
87668index 8cb6db5..d729f50 100644
87669--- a/init/do_mounts_md.c
87670+++ b/init/do_mounts_md.c
87671@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
87672 partitioned ? "_d" : "", minor,
87673 md_setup_args[ent].device_names);
87674
87675- fd = sys_open(name, 0, 0);
87676+ fd = sys_open((char __force_user *)name, 0, 0);
87677 if (fd < 0) {
87678 printk(KERN_ERR "md: open failed - cannot start "
87679 "array %s\n", name);
87680@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
87681 * array without it
87682 */
87683 sys_close(fd);
87684- fd = sys_open(name, 0, 0);
87685+ fd = sys_open((char __force_user *)name, 0, 0);
87686 sys_ioctl(fd, BLKRRPART, 0);
87687 }
87688 sys_close(fd);
87689@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
87690
87691 wait_for_device_probe();
87692
87693- fd = sys_open("/dev/md0", 0, 0);
87694+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
87695 if (fd >= 0) {
87696 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
87697 sys_close(fd);
87698diff --git a/init/init_task.c b/init/init_task.c
87699index ba0a7f36..2bcf1d5 100644
87700--- a/init/init_task.c
87701+++ b/init/init_task.c
87702@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
87703 * Initial thread structure. Alignment of this is handled by a special
87704 * linker map entry.
87705 */
87706+#ifdef CONFIG_X86
87707+union thread_union init_thread_union __init_task_data;
87708+#else
87709 union thread_union init_thread_union __init_task_data =
87710 { INIT_THREAD_INFO(init_task) };
87711+#endif
87712diff --git a/init/initramfs.c b/init/initramfs.c
87713index bece48c..e911bd8 100644
87714--- a/init/initramfs.c
87715+++ b/init/initramfs.c
87716@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
87717
87718 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
87719 while (count) {
87720- ssize_t rv = sys_write(fd, p, count);
87721+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
87722
87723 if (rv < 0) {
87724 if (rv == -EINTR || rv == -EAGAIN)
87725@@ -107,7 +107,7 @@ static void __init free_hash(void)
87726 }
87727 }
87728
87729-static long __init do_utime(char *filename, time_t mtime)
87730+static long __init do_utime(char __force_user *filename, time_t mtime)
87731 {
87732 struct timespec t[2];
87733
87734@@ -142,7 +142,7 @@ static void __init dir_utime(void)
87735 struct dir_entry *de, *tmp;
87736 list_for_each_entry_safe(de, tmp, &dir_list, list) {
87737 list_del(&de->list);
87738- do_utime(de->name, de->mtime);
87739+ do_utime((char __force_user *)de->name, de->mtime);
87740 kfree(de->name);
87741 kfree(de);
87742 }
87743@@ -304,7 +304,7 @@ static int __init maybe_link(void)
87744 if (nlink >= 2) {
87745 char *old = find_link(major, minor, ino, mode, collected);
87746 if (old)
87747- return (sys_link(old, collected) < 0) ? -1 : 1;
87748+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
87749 }
87750 return 0;
87751 }
87752@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t mode)
87753 {
87754 struct stat st;
87755
87756- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
87757+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
87758 if (S_ISDIR(st.st_mode))
87759- sys_rmdir(path);
87760+ sys_rmdir((char __force_user *)path);
87761 else
87762- sys_unlink(path);
87763+ sys_unlink((char __force_user *)path);
87764 }
87765 }
87766
87767@@ -338,7 +338,7 @@ static int __init do_name(void)
87768 int openflags = O_WRONLY|O_CREAT;
87769 if (ml != 1)
87770 openflags |= O_TRUNC;
87771- wfd = sys_open(collected, openflags, mode);
87772+ wfd = sys_open((char __force_user *)collected, openflags, mode);
87773
87774 if (wfd >= 0) {
87775 sys_fchown(wfd, uid, gid);
87776@@ -350,17 +350,17 @@ static int __init do_name(void)
87777 }
87778 }
87779 } else if (S_ISDIR(mode)) {
87780- sys_mkdir(collected, mode);
87781- sys_chown(collected, uid, gid);
87782- sys_chmod(collected, mode);
87783+ sys_mkdir((char __force_user *)collected, mode);
87784+ sys_chown((char __force_user *)collected, uid, gid);
87785+ sys_chmod((char __force_user *)collected, mode);
87786 dir_add(collected, mtime);
87787 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
87788 S_ISFIFO(mode) || S_ISSOCK(mode)) {
87789 if (maybe_link() == 0) {
87790- sys_mknod(collected, mode, rdev);
87791- sys_chown(collected, uid, gid);
87792- sys_chmod(collected, mode);
87793- do_utime(collected, mtime);
87794+ sys_mknod((char __force_user *)collected, mode, rdev);
87795+ sys_chown((char __force_user *)collected, uid, gid);
87796+ sys_chmod((char __force_user *)collected, mode);
87797+ do_utime((char __force_user *)collected, mtime);
87798 }
87799 }
87800 return 0;
87801@@ -372,7 +372,7 @@ static int __init do_copy(void)
87802 if (xwrite(wfd, victim, body_len) != body_len)
87803 error("write error");
87804 sys_close(wfd);
87805- do_utime(vcollected, mtime);
87806+ do_utime((char __force_user *)vcollected, mtime);
87807 kfree(vcollected);
87808 eat(body_len);
87809 state = SkipIt;
87810@@ -390,9 +390,9 @@ static int __init do_symlink(void)
87811 {
87812 collected[N_ALIGN(name_len) + body_len] = '\0';
87813 clean_path(collected, 0);
87814- sys_symlink(collected + N_ALIGN(name_len), collected);
87815- sys_lchown(collected, uid, gid);
87816- do_utime(collected, mtime);
87817+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
87818+ sys_lchown((char __force_user *)collected, uid, gid);
87819+ do_utime((char __force_user *)collected, mtime);
87820 state = SkipIt;
87821 next_state = Reset;
87822 return 0;
87823diff --git a/init/main.c b/init/main.c
87824index d0f4b59..0c4b184 100644
87825--- a/init/main.c
87826+++ b/init/main.c
87827@@ -98,6 +98,8 @@ extern void radix_tree_init(void);
87828 static inline void mark_rodata_ro(void) { }
87829 #endif
87830
87831+extern void grsecurity_init(void);
87832+
87833 /*
87834 * Debug helper: via this flag we know that we are in 'early bootup code'
87835 * where only the boot processor is running with IRQ disabled. This means
87836@@ -159,6 +161,75 @@ static int __init set_reset_devices(char *str)
87837
87838 __setup("reset_devices", set_reset_devices);
87839
87840+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
87841+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
87842+static int __init setup_grsec_proc_gid(char *str)
87843+{
87844+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
87845+ return 1;
87846+}
87847+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
87848+#endif
87849+
87850+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
87851+unsigned long pax_user_shadow_base __read_only;
87852+EXPORT_SYMBOL(pax_user_shadow_base);
87853+extern char pax_enter_kernel_user[];
87854+extern char pax_exit_kernel_user[];
87855+#endif
87856+
87857+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
87858+static int __init setup_pax_nouderef(char *str)
87859+{
87860+#ifdef CONFIG_X86_32
87861+ unsigned int cpu;
87862+ struct desc_struct *gdt;
87863+
87864+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
87865+ gdt = get_cpu_gdt_table(cpu);
87866+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
87867+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
87868+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
87869+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
87870+ }
87871+ loadsegment(ds, __KERNEL_DS);
87872+ loadsegment(es, __KERNEL_DS);
87873+ loadsegment(ss, __KERNEL_DS);
87874+#else
87875+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
87876+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
87877+ clone_pgd_mask = ~(pgdval_t)0UL;
87878+ pax_user_shadow_base = 0UL;
87879+ setup_clear_cpu_cap(X86_FEATURE_PCID);
87880+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
87881+#endif
87882+
87883+ return 0;
87884+}
87885+early_param("pax_nouderef", setup_pax_nouderef);
87886+
87887+#ifdef CONFIG_X86_64
87888+static int __init setup_pax_weakuderef(char *str)
87889+{
87890+ if (clone_pgd_mask != ~(pgdval_t)0UL)
87891+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
87892+ return 1;
87893+}
87894+__setup("pax_weakuderef", setup_pax_weakuderef);
87895+#endif
87896+#endif
87897+
87898+#ifdef CONFIG_PAX_SOFTMODE
87899+int pax_softmode;
87900+
87901+static int __init setup_pax_softmode(char *str)
87902+{
87903+ get_option(&str, &pax_softmode);
87904+ return 1;
87905+}
87906+__setup("pax_softmode=", setup_pax_softmode);
87907+#endif
87908+
87909 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
87910 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
87911 static const char *panic_later, *panic_param;
87912@@ -728,7 +799,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
87913 struct blacklist_entry *entry;
87914 char *fn_name;
87915
87916- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
87917+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
87918 if (!fn_name)
87919 return false;
87920
87921@@ -780,7 +851,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
87922 {
87923 int count = preempt_count();
87924 int ret;
87925- char msgbuf[64];
87926+ const char *msg1 = "", *msg2 = "";
87927
87928 if (initcall_blacklisted(fn))
87929 return -EPERM;
87930@@ -790,18 +861,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
87931 else
87932 ret = fn();
87933
87934- msgbuf[0] = 0;
87935-
87936 if (preempt_count() != count) {
87937- sprintf(msgbuf, "preemption imbalance ");
87938+ msg1 = " preemption imbalance";
87939 preempt_count_set(count);
87940 }
87941 if (irqs_disabled()) {
87942- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
87943+ msg2 = " disabled interrupts";
87944 local_irq_enable();
87945 }
87946- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
87947+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
87948
87949+ add_latent_entropy();
87950 return ret;
87951 }
87952
87953@@ -908,8 +978,8 @@ static int run_init_process(const char *init_filename)
87954 {
87955 argv_init[0] = init_filename;
87956 return do_execve(getname_kernel(init_filename),
87957- (const char __user *const __user *)argv_init,
87958- (const char __user *const __user *)envp_init);
87959+ (const char __user *const __force_user *)argv_init,
87960+ (const char __user *const __force_user *)envp_init);
87961 }
87962
87963 static int try_to_run_init_process(const char *init_filename)
87964@@ -926,6 +996,10 @@ static int try_to_run_init_process(const char *init_filename)
87965 return ret;
87966 }
87967
87968+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87969+extern int gr_init_ran;
87970+#endif
87971+
87972 static noinline void __init kernel_init_freeable(void);
87973
87974 static int __ref kernel_init(void *unused)
87975@@ -950,6 +1024,11 @@ static int __ref kernel_init(void *unused)
87976 ramdisk_execute_command, ret);
87977 }
87978
87979+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87980+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
87981+ gr_init_ran = 1;
87982+#endif
87983+
87984 /*
87985 * We try each of these until one succeeds.
87986 *
87987@@ -1005,7 +1084,7 @@ static noinline void __init kernel_init_freeable(void)
87988 do_basic_setup();
87989
87990 /* Open the /dev/console on the rootfs, this should never fail */
87991- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
87992+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
87993 pr_err("Warning: unable to open an initial console.\n");
87994
87995 (void) sys_dup(0);
87996@@ -1018,11 +1097,13 @@ static noinline void __init kernel_init_freeable(void)
87997 if (!ramdisk_execute_command)
87998 ramdisk_execute_command = "/init";
87999
88000- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
88001+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
88002 ramdisk_execute_command = NULL;
88003 prepare_namespace();
88004 }
88005
88006+ grsecurity_init();
88007+
88008 /*
88009 * Ok, we have completed the initial bootup, and
88010 * we're essentially up and running. Get rid of the
88011diff --git a/ipc/compat.c b/ipc/compat.c
88012index b5ef4f7..ff31d87 100644
88013--- a/ipc/compat.c
88014+++ b/ipc/compat.c
88015@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
88016 COMPAT_SHMLBA);
88017 if (err < 0)
88018 return err;
88019- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
88020+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
88021 }
88022 case SHMDT:
88023 return sys_shmdt(compat_ptr(ptr));
88024diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
88025index e8075b2..76f2c6a 100644
88026--- a/ipc/ipc_sysctl.c
88027+++ b/ipc/ipc_sysctl.c
88028@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
88029 static int proc_ipc_dointvec(struct ctl_table *table, int write,
88030 void __user *buffer, size_t *lenp, loff_t *ppos)
88031 {
88032- struct ctl_table ipc_table;
88033+ ctl_table_no_const ipc_table;
88034
88035 memcpy(&ipc_table, table, sizeof(ipc_table));
88036 ipc_table.data = get_ipc(table);
88037@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
88038 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
88039 void __user *buffer, size_t *lenp, loff_t *ppos)
88040 {
88041- struct ctl_table ipc_table;
88042+ ctl_table_no_const ipc_table;
88043
88044 memcpy(&ipc_table, table, sizeof(ipc_table));
88045 ipc_table.data = get_ipc(table);
88046@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
88047 static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
88048 void __user *buffer, size_t *lenp, loff_t *ppos)
88049 {
88050- struct ctl_table ipc_table;
88051+ ctl_table_no_const ipc_table;
88052 size_t lenp_bef = *lenp;
88053 int rc;
88054
88055@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
88056 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88057 void __user *buffer, size_t *lenp, loff_t *ppos)
88058 {
88059- struct ctl_table ipc_table;
88060+ ctl_table_no_const ipc_table;
88061 memcpy(&ipc_table, table, sizeof(ipc_table));
88062 ipc_table.data = get_ipc(table);
88063
88064@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
88065 static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
88066 void __user *buffer, size_t *lenp, loff_t *ppos)
88067 {
88068- struct ctl_table ipc_table;
88069+ ctl_table_no_const ipc_table;
88070 int oldval;
88071 int rc;
88072
88073diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
88074index 68d4e95..1477ded 100644
88075--- a/ipc/mq_sysctl.c
88076+++ b/ipc/mq_sysctl.c
88077@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
88078 static int proc_mq_dointvec(struct ctl_table *table, int write,
88079 void __user *buffer, size_t *lenp, loff_t *ppos)
88080 {
88081- struct ctl_table mq_table;
88082+ ctl_table_no_const mq_table;
88083 memcpy(&mq_table, table, sizeof(mq_table));
88084 mq_table.data = get_mq(table);
88085
88086@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
88087 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
88088 void __user *buffer, size_t *lenp, loff_t *ppos)
88089 {
88090- struct ctl_table mq_table;
88091+ ctl_table_no_const mq_table;
88092 memcpy(&mq_table, table, sizeof(mq_table));
88093 mq_table.data = get_mq(table);
88094
88095diff --git a/ipc/mqueue.c b/ipc/mqueue.c
88096index 4fcf39a..d3cc2ec 100644
88097--- a/ipc/mqueue.c
88098+++ b/ipc/mqueue.c
88099@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
88100 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
88101 info->attr.mq_msgsize);
88102
88103+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
88104 spin_lock(&mq_lock);
88105 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
88106 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
88107diff --git a/ipc/shm.c b/ipc/shm.c
88108index 7fc9f9f..95e201f 100644
88109--- a/ipc/shm.c
88110+++ b/ipc/shm.c
88111@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
88112 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
88113 #endif
88114
88115+#ifdef CONFIG_GRKERNSEC
88116+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88117+ const u64 shm_createtime, const kuid_t cuid,
88118+ const int shmid);
88119+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88120+ const u64 shm_createtime);
88121+#endif
88122+
88123 void shm_init_ns(struct ipc_namespace *ns)
88124 {
88125 ns->shm_ctlmax = SHMMAX;
88126@@ -559,6 +567,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
88127 shp->shm_lprid = 0;
88128 shp->shm_atim = shp->shm_dtim = 0;
88129 shp->shm_ctim = get_seconds();
88130+#ifdef CONFIG_GRKERNSEC
88131+ shp->shm_createtime = ktime_get_ns();
88132+#endif
88133 shp->shm_segsz = size;
88134 shp->shm_nattch = 0;
88135 shp->shm_file = file;
88136@@ -1095,6 +1106,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88137 f_mode = FMODE_READ | FMODE_WRITE;
88138 }
88139 if (shmflg & SHM_EXEC) {
88140+
88141+#ifdef CONFIG_PAX_MPROTECT
88142+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
88143+ goto out;
88144+#endif
88145+
88146 prot |= PROT_EXEC;
88147 acc_mode |= S_IXUGO;
88148 }
88149@@ -1119,6 +1136,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88150 if (err)
88151 goto out_unlock;
88152
88153+#ifdef CONFIG_GRKERNSEC
88154+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
88155+ shp->shm_perm.cuid, shmid) ||
88156+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
88157+ err = -EACCES;
88158+ goto out_unlock;
88159+ }
88160+#endif
88161+
88162 ipc_lock_object(&shp->shm_perm);
88163
88164 /* check if shm_destroy() is tearing down shp */
88165@@ -1131,6 +1157,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88166 path = shp->shm_file->f_path;
88167 path_get(&path);
88168 shp->shm_nattch++;
88169+#ifdef CONFIG_GRKERNSEC
88170+ shp->shm_lapid = current->pid;
88171+#endif
88172 size = i_size_read(path.dentry->d_inode);
88173 ipc_unlock_object(&shp->shm_perm);
88174 rcu_read_unlock();
88175diff --git a/ipc/util.c b/ipc/util.c
88176index 27d74e6..8be0be2 100644
88177--- a/ipc/util.c
88178+++ b/ipc/util.c
88179@@ -71,6 +71,8 @@ struct ipc_proc_iface {
88180 int (*show)(struct seq_file *, void *);
88181 };
88182
88183+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
88184+
88185 static void ipc_memory_notifier(struct work_struct *work)
88186 {
88187 ipcns_notify(IPCNS_MEMCHANGED);
88188@@ -537,6 +539,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
88189 granted_mode >>= 6;
88190 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
88191 granted_mode >>= 3;
88192+
88193+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
88194+ return -1;
88195+
88196 /* is there some bit set in requested_mode but not in granted_mode? */
88197 if ((requested_mode & ~granted_mode & 0007) &&
88198 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
88199diff --git a/kernel/audit.c b/kernel/audit.c
88200index 6726aa6..bb864a9 100644
88201--- a/kernel/audit.c
88202+++ b/kernel/audit.c
88203@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
88204 3) suppressed due to audit_rate_limit
88205 4) suppressed due to audit_backlog_limit
88206 */
88207-static atomic_t audit_lost = ATOMIC_INIT(0);
88208+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88209
88210 /* The netlink socket. */
88211 static struct sock *audit_sock;
88212@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88213 unsigned long now;
88214 int print;
88215
88216- atomic_inc(&audit_lost);
88217+ atomic_inc_unchecked(&audit_lost);
88218
88219 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88220
88221@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88222 if (print) {
88223 if (printk_ratelimit())
88224 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88225- atomic_read(&audit_lost),
88226+ atomic_read_unchecked(&audit_lost),
88227 audit_rate_limit,
88228 audit_backlog_limit);
88229 audit_panic(message);
88230@@ -840,7 +840,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88231 s.pid = audit_pid;
88232 s.rate_limit = audit_rate_limit;
88233 s.backlog_limit = audit_backlog_limit;
88234- s.lost = atomic_read(&audit_lost);
88235+ s.lost = atomic_read_unchecked(&audit_lost);
88236 s.backlog = skb_queue_len(&audit_skb_queue);
88237 s.version = AUDIT_VERSION_LATEST;
88238 s.backlog_wait_time = audit_backlog_wait_time;
88239diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88240index 21eae3c..66db239 100644
88241--- a/kernel/auditsc.c
88242+++ b/kernel/auditsc.c
88243@@ -2023,7 +2023,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88244 }
88245
88246 /* global counter which is incremented every time something logs in */
88247-static atomic_t session_id = ATOMIC_INIT(0);
88248+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88249
88250 static int audit_set_loginuid_perm(kuid_t loginuid)
88251 {
88252@@ -2090,7 +2090,7 @@ int audit_set_loginuid(kuid_t loginuid)
88253
88254 /* are we setting or clearing? */
88255 if (uid_valid(loginuid))
88256- sessionid = (unsigned int)atomic_inc_return(&session_id);
88257+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88258
88259 task->sessionid = sessionid;
88260 task->loginuid = loginuid;
88261diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88262index 7f0dbcb..b54bb2c 100644
88263--- a/kernel/bpf/core.c
88264+++ b/kernel/bpf/core.c
88265@@ -22,6 +22,7 @@
88266 */
88267 #include <linux/filter.h>
88268 #include <linux/skbuff.h>
88269+#include <linux/vmalloc.h>
88270 #include <asm/unaligned.h>
88271
88272 /* Registers */
88273@@ -63,6 +64,67 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
88274 return NULL;
88275 }
88276
88277+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
88278+{
88279+ gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
88280+ gfp_extra_flags;
88281+ struct bpf_work_struct *ws;
88282+ struct bpf_prog *fp;
88283+
88284+ size = round_up(size, PAGE_SIZE);
88285+ fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
88286+ if (fp == NULL)
88287+ return NULL;
88288+
88289+ ws = kmalloc(sizeof(*ws), GFP_KERNEL | gfp_extra_flags);
88290+ if (ws == NULL) {
88291+ vfree(fp);
88292+ return NULL;
88293+ }
88294+
88295+ fp->pages = size / PAGE_SIZE;
88296+ fp->work = ws;
88297+
88298+ return fp;
88299+}
88300+EXPORT_SYMBOL_GPL(bpf_prog_alloc);
88301+
88302+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
88303+ gfp_t gfp_extra_flags)
88304+{
88305+ gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
88306+ gfp_extra_flags;
88307+ struct bpf_prog *fp;
88308+
88309+ BUG_ON(fp_old == NULL);
88310+
88311+ size = round_up(size, PAGE_SIZE);
88312+ if (size <= fp_old->pages * PAGE_SIZE)
88313+ return fp_old;
88314+
88315+ fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
88316+ if (fp != NULL) {
88317+ memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
88318+ fp->pages = size / PAGE_SIZE;
88319+
88320+ /* We keep fp->work from fp_old around in the new
88321+ * reallocated structure.
88322+ */
88323+ fp_old->work = NULL;
88324+ __bpf_prog_free(fp_old);
88325+ }
88326+
88327+ return fp;
88328+}
88329+EXPORT_SYMBOL_GPL(bpf_prog_realloc);
88330+
88331+void __bpf_prog_free(struct bpf_prog *fp)
88332+{
88333+ kfree(fp->work);
88334+ vfree(fp);
88335+}
88336+EXPORT_SYMBOL_GPL(__bpf_prog_free);
88337+
88338 /* Base function for offset calculation. Needs to go into .text section,
88339 * therefore keeping it non-static as well; will also be used by JITs
88340 * anyway later on, so do not let the compiler omit it.
88341@@ -523,12 +585,26 @@ void bpf_prog_select_runtime(struct bpf_prog *fp)
88342
88343 /* Probe if internal BPF can be JITed */
88344 bpf_int_jit_compile(fp);
88345+ /* Lock whole bpf_prog as read-only */
88346+ bpf_prog_lock_ro(fp);
88347 }
88348 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
88349
88350-/* free internal BPF program */
88351+static void bpf_prog_free_deferred(struct work_struct *work)
88352+{
88353+ struct bpf_work_struct *ws;
88354+
88355+ ws = container_of(work, struct bpf_work_struct, work);
88356+ bpf_jit_free(ws->prog);
88357+}
88358+
88359+/* Free internal BPF program */
88360 void bpf_prog_free(struct bpf_prog *fp)
88361 {
88362- bpf_jit_free(fp);
88363+ struct bpf_work_struct *ws = fp->work;
88364+
88365+ INIT_WORK(&ws->work, bpf_prog_free_deferred);
88366+ ws->prog = fp;
88367+ schedule_work(&ws->work);
88368 }
88369 EXPORT_SYMBOL_GPL(bpf_prog_free);
88370diff --git a/kernel/capability.c b/kernel/capability.c
88371index 989f5bf..d317ca0 100644
88372--- a/kernel/capability.c
88373+++ b/kernel/capability.c
88374@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88375 * before modification is attempted and the application
88376 * fails.
88377 */
88378+ if (tocopy > ARRAY_SIZE(kdata))
88379+ return -EFAULT;
88380+
88381 if (copy_to_user(dataptr, kdata, tocopy
88382 * sizeof(struct __user_cap_data_struct))) {
88383 return -EFAULT;
88384@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88385 int ret;
88386
88387 rcu_read_lock();
88388- ret = security_capable(__task_cred(t), ns, cap);
88389+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88390+ gr_task_is_capable(t, __task_cred(t), cap);
88391 rcu_read_unlock();
88392
88393- return (ret == 0);
88394+ return ret;
88395 }
88396
88397 /**
88398@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88399 int ret;
88400
88401 rcu_read_lock();
88402- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88403+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88404 rcu_read_unlock();
88405
88406- return (ret == 0);
88407+ return ret;
88408 }
88409
88410 /**
88411@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88412 BUG();
88413 }
88414
88415- if (security_capable(current_cred(), ns, cap) == 0) {
88416+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88417 current->flags |= PF_SUPERPRIV;
88418 return true;
88419 }
88420@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88421 }
88422 EXPORT_SYMBOL(ns_capable);
88423
88424+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88425+{
88426+ if (unlikely(!cap_valid(cap))) {
88427+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88428+ BUG();
88429+ }
88430+
88431+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88432+ current->flags |= PF_SUPERPRIV;
88433+ return true;
88434+ }
88435+ return false;
88436+}
88437+EXPORT_SYMBOL(ns_capable_nolog);
88438+
88439 /**
88440 * file_ns_capable - Determine if the file's opener had a capability in effect
88441 * @file: The file we want to check
88442@@ -427,6 +446,12 @@ bool capable(int cap)
88443 }
88444 EXPORT_SYMBOL(capable);
88445
88446+bool capable_nolog(int cap)
88447+{
88448+ return ns_capable_nolog(&init_user_ns, cap);
88449+}
88450+EXPORT_SYMBOL(capable_nolog);
88451+
88452 /**
88453 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88454 * @inode: The inode in question
88455@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88456 kgid_has_mapping(ns, inode->i_gid);
88457 }
88458 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88459+
88460+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88461+{
88462+ struct user_namespace *ns = current_user_ns();
88463+
88464+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88465+ kgid_has_mapping(ns, inode->i_gid);
88466+}
88467+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
88468diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88469index 3a73f99..4f29fea 100644
88470--- a/kernel/cgroup.c
88471+++ b/kernel/cgroup.c
88472@@ -5341,6 +5341,14 @@ static void cgroup_release_agent(struct work_struct *work)
88473 release_list);
88474 list_del_init(&cgrp->release_list);
88475 raw_spin_unlock(&release_list_lock);
88476+
88477+ /*
88478+ * don't bother calling call_usermodehelper if we haven't
88479+ * configured a binary to execute
88480+ */
88481+ if (cgrp->root->release_agent_path[0] == '\0')
88482+ goto continue_free;
88483+
88484 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
88485 if (!pathbuf)
88486 goto continue_free;
88487@@ -5539,7 +5547,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88488 struct task_struct *task;
88489 int count = 0;
88490
88491- seq_printf(seq, "css_set %p\n", cset);
88492+ seq_printf(seq, "css_set %pK\n", cset);
88493
88494 list_for_each_entry(task, &cset->tasks, cg_list) {
88495 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
88496diff --git a/kernel/compat.c b/kernel/compat.c
88497index ebb3c36..1df606e 100644
88498--- a/kernel/compat.c
88499+++ b/kernel/compat.c
88500@@ -13,6 +13,7 @@
88501
88502 #include <linux/linkage.h>
88503 #include <linux/compat.h>
88504+#include <linux/module.h>
88505 #include <linux/errno.h>
88506 #include <linux/time.h>
88507 #include <linux/signal.h>
88508@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88509 mm_segment_t oldfs;
88510 long ret;
88511
88512- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88513+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88514 oldfs = get_fs();
88515 set_fs(KERNEL_DS);
88516 ret = hrtimer_nanosleep_restart(restart);
88517@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88518 oldfs = get_fs();
88519 set_fs(KERNEL_DS);
88520 ret = hrtimer_nanosleep(&tu,
88521- rmtp ? (struct timespec __user *)&rmt : NULL,
88522+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88523 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88524 set_fs(oldfs);
88525
88526@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88527 mm_segment_t old_fs = get_fs();
88528
88529 set_fs(KERNEL_DS);
88530- ret = sys_sigpending((old_sigset_t __user *) &s);
88531+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88532 set_fs(old_fs);
88533 if (ret == 0)
88534 ret = put_user(s, set);
88535@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88536 mm_segment_t old_fs = get_fs();
88537
88538 set_fs(KERNEL_DS);
88539- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88540+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88541 set_fs(old_fs);
88542
88543 if (!ret) {
88544@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88545 set_fs (KERNEL_DS);
88546 ret = sys_wait4(pid,
88547 (stat_addr ?
88548- (unsigned int __user *) &status : NULL),
88549- options, (struct rusage __user *) &r);
88550+ (unsigned int __force_user *) &status : NULL),
88551+ options, (struct rusage __force_user *) &r);
88552 set_fs (old_fs);
88553
88554 if (ret > 0) {
88555@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88556 memset(&info, 0, sizeof(info));
88557
88558 set_fs(KERNEL_DS);
88559- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88560- uru ? (struct rusage __user *)&ru : NULL);
88561+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88562+ uru ? (struct rusage __force_user *)&ru : NULL);
88563 set_fs(old_fs);
88564
88565 if ((ret < 0) || (info.si_signo == 0))
88566@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88567 oldfs = get_fs();
88568 set_fs(KERNEL_DS);
88569 err = sys_timer_settime(timer_id, flags,
88570- (struct itimerspec __user *) &newts,
88571- (struct itimerspec __user *) &oldts);
88572+ (struct itimerspec __force_user *) &newts,
88573+ (struct itimerspec __force_user *) &oldts);
88574 set_fs(oldfs);
88575 if (!err && old && put_compat_itimerspec(old, &oldts))
88576 return -EFAULT;
88577@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88578 oldfs = get_fs();
88579 set_fs(KERNEL_DS);
88580 err = sys_timer_gettime(timer_id,
88581- (struct itimerspec __user *) &ts);
88582+ (struct itimerspec __force_user *) &ts);
88583 set_fs(oldfs);
88584 if (!err && put_compat_itimerspec(setting, &ts))
88585 return -EFAULT;
88586@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88587 oldfs = get_fs();
88588 set_fs(KERNEL_DS);
88589 err = sys_clock_settime(which_clock,
88590- (struct timespec __user *) &ts);
88591+ (struct timespec __force_user *) &ts);
88592 set_fs(oldfs);
88593 return err;
88594 }
88595@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88596 oldfs = get_fs();
88597 set_fs(KERNEL_DS);
88598 err = sys_clock_gettime(which_clock,
88599- (struct timespec __user *) &ts);
88600+ (struct timespec __force_user *) &ts);
88601 set_fs(oldfs);
88602 if (!err && compat_put_timespec(&ts, tp))
88603 return -EFAULT;
88604@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88605
88606 oldfs = get_fs();
88607 set_fs(KERNEL_DS);
88608- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
88609+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
88610 set_fs(oldfs);
88611
88612 err = compat_put_timex(utp, &txc);
88613@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
88614 oldfs = get_fs();
88615 set_fs(KERNEL_DS);
88616 err = sys_clock_getres(which_clock,
88617- (struct timespec __user *) &ts);
88618+ (struct timespec __force_user *) &ts);
88619 set_fs(oldfs);
88620 if (!err && tp && compat_put_timespec(&ts, tp))
88621 return -EFAULT;
88622@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
88623 struct timespec tu;
88624 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
88625
88626- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
88627+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
88628 oldfs = get_fs();
88629 set_fs(KERNEL_DS);
88630 err = clock_nanosleep_restart(restart);
88631@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
88632 oldfs = get_fs();
88633 set_fs(KERNEL_DS);
88634 err = sys_clock_nanosleep(which_clock, flags,
88635- (struct timespec __user *) &in,
88636- (struct timespec __user *) &out);
88637+ (struct timespec __force_user *) &in,
88638+ (struct timespec __force_user *) &out);
88639 set_fs(oldfs);
88640
88641 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
88642@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
88643 mm_segment_t old_fs = get_fs();
88644
88645 set_fs(KERNEL_DS);
88646- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
88647+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
88648 set_fs(old_fs);
88649 if (compat_put_timespec(&t, interval))
88650 return -EFAULT;
88651diff --git a/kernel/configs.c b/kernel/configs.c
88652index c18b1f1..b9a0132 100644
88653--- a/kernel/configs.c
88654+++ b/kernel/configs.c
88655@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
88656 struct proc_dir_entry *entry;
88657
88658 /* create the current config file */
88659+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
88660+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
88661+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
88662+ &ikconfig_file_ops);
88663+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88664+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
88665+ &ikconfig_file_ops);
88666+#endif
88667+#else
88668 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
88669 &ikconfig_file_ops);
88670+#endif
88671+
88672 if (!entry)
88673 return -ENOMEM;
88674
88675diff --git a/kernel/cred.c b/kernel/cred.c
88676index e0573a4..26c0fd3 100644
88677--- a/kernel/cred.c
88678+++ b/kernel/cred.c
88679@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
88680 validate_creds(cred);
88681 alter_cred_subscribers(cred, -1);
88682 put_cred(cred);
88683+
88684+#ifdef CONFIG_GRKERNSEC_SETXID
88685+ cred = (struct cred *) tsk->delayed_cred;
88686+ if (cred != NULL) {
88687+ tsk->delayed_cred = NULL;
88688+ validate_creds(cred);
88689+ alter_cred_subscribers(cred, -1);
88690+ put_cred(cred);
88691+ }
88692+#endif
88693 }
88694
88695 /**
88696@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
88697 * Always returns 0 thus allowing this function to be tail-called at the end
88698 * of, say, sys_setgid().
88699 */
88700-int commit_creds(struct cred *new)
88701+static int __commit_creds(struct cred *new)
88702 {
88703 struct task_struct *task = current;
88704 const struct cred *old = task->real_cred;
88705@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
88706
88707 get_cred(new); /* we will require a ref for the subj creds too */
88708
88709+ gr_set_role_label(task, new->uid, new->gid);
88710+
88711 /* dumpability changes */
88712 if (!uid_eq(old->euid, new->euid) ||
88713 !gid_eq(old->egid, new->egid) ||
88714@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
88715 put_cred(old);
88716 return 0;
88717 }
88718+#ifdef CONFIG_GRKERNSEC_SETXID
88719+extern int set_user(struct cred *new);
88720+
88721+void gr_delayed_cred_worker(void)
88722+{
88723+ const struct cred *new = current->delayed_cred;
88724+ struct cred *ncred;
88725+
88726+ current->delayed_cred = NULL;
88727+
88728+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
88729+ // from doing get_cred on it when queueing this
88730+ put_cred(new);
88731+ return;
88732+ } else if (new == NULL)
88733+ return;
88734+
88735+ ncred = prepare_creds();
88736+ if (!ncred)
88737+ goto die;
88738+ // uids
88739+ ncred->uid = new->uid;
88740+ ncred->euid = new->euid;
88741+ ncred->suid = new->suid;
88742+ ncred->fsuid = new->fsuid;
88743+ // gids
88744+ ncred->gid = new->gid;
88745+ ncred->egid = new->egid;
88746+ ncred->sgid = new->sgid;
88747+ ncred->fsgid = new->fsgid;
88748+ // groups
88749+ set_groups(ncred, new->group_info);
88750+ // caps
88751+ ncred->securebits = new->securebits;
88752+ ncred->cap_inheritable = new->cap_inheritable;
88753+ ncred->cap_permitted = new->cap_permitted;
88754+ ncred->cap_effective = new->cap_effective;
88755+ ncred->cap_bset = new->cap_bset;
88756+
88757+ if (set_user(ncred)) {
88758+ abort_creds(ncred);
88759+ goto die;
88760+ }
88761+
88762+ // from doing get_cred on it when queueing this
88763+ put_cred(new);
88764+
88765+ __commit_creds(ncred);
88766+ return;
88767+die:
88768+ // from doing get_cred on it when queueing this
88769+ put_cred(new);
88770+ do_group_exit(SIGKILL);
88771+}
88772+#endif
88773+
88774+int commit_creds(struct cred *new)
88775+{
88776+#ifdef CONFIG_GRKERNSEC_SETXID
88777+ int ret;
88778+ int schedule_it = 0;
88779+ struct task_struct *t;
88780+ unsigned oldsecurebits = current_cred()->securebits;
88781+
88782+ /* we won't get called with tasklist_lock held for writing
88783+ and interrupts disabled as the cred struct in that case is
88784+ init_cred
88785+ */
88786+ if (grsec_enable_setxid && !current_is_single_threaded() &&
88787+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
88788+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
88789+ schedule_it = 1;
88790+ }
88791+ ret = __commit_creds(new);
88792+ if (schedule_it) {
88793+ rcu_read_lock();
88794+ read_lock(&tasklist_lock);
88795+ for (t = next_thread(current); t != current;
88796+ t = next_thread(t)) {
88797+ /* we'll check if the thread has uid 0 in
88798+ * the delayed worker routine
88799+ */
88800+ if (task_securebits(t) == oldsecurebits &&
88801+ t->delayed_cred == NULL) {
88802+ t->delayed_cred = get_cred(new);
88803+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
88804+ set_tsk_need_resched(t);
88805+ }
88806+ }
88807+ read_unlock(&tasklist_lock);
88808+ rcu_read_unlock();
88809+ }
88810+
88811+ return ret;
88812+#else
88813+ return __commit_creds(new);
88814+#endif
88815+}
88816+
88817 EXPORT_SYMBOL(commit_creds);
88818
88819 /**
88820diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
88821index 1adf62b..7736e06 100644
88822--- a/kernel/debug/debug_core.c
88823+++ b/kernel/debug/debug_core.c
88824@@ -124,7 +124,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
88825 */
88826 static atomic_t masters_in_kgdb;
88827 static atomic_t slaves_in_kgdb;
88828-static atomic_t kgdb_break_tasklet_var;
88829+static atomic_unchecked_t kgdb_break_tasklet_var;
88830 atomic_t kgdb_setting_breakpoint;
88831
88832 struct task_struct *kgdb_usethread;
88833@@ -134,7 +134,7 @@ int kgdb_single_step;
88834 static pid_t kgdb_sstep_pid;
88835
88836 /* to keep track of the CPU which is doing the single stepping*/
88837-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88838+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88839
88840 /*
88841 * If you are debugging a problem where roundup (the collection of
88842@@ -549,7 +549,7 @@ return_normal:
88843 * kernel will only try for the value of sstep_tries before
88844 * giving up and continuing on.
88845 */
88846- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
88847+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
88848 (kgdb_info[cpu].task &&
88849 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
88850 atomic_set(&kgdb_active, -1);
88851@@ -647,8 +647,8 @@ cpu_master_loop:
88852 }
88853
88854 kgdb_restore:
88855- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
88856- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
88857+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
88858+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
88859 if (kgdb_info[sstep_cpu].task)
88860 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
88861 else
88862@@ -925,18 +925,18 @@ static void kgdb_unregister_callbacks(void)
88863 static void kgdb_tasklet_bpt(unsigned long ing)
88864 {
88865 kgdb_breakpoint();
88866- atomic_set(&kgdb_break_tasklet_var, 0);
88867+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
88868 }
88869
88870 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
88871
88872 void kgdb_schedule_breakpoint(void)
88873 {
88874- if (atomic_read(&kgdb_break_tasklet_var) ||
88875+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
88876 atomic_read(&kgdb_active) != -1 ||
88877 atomic_read(&kgdb_setting_breakpoint))
88878 return;
88879- atomic_inc(&kgdb_break_tasklet_var);
88880+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
88881 tasklet_schedule(&kgdb_tasklet_breakpoint);
88882 }
88883 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
88884diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
88885index 379650b..30c5180 100644
88886--- a/kernel/debug/kdb/kdb_main.c
88887+++ b/kernel/debug/kdb/kdb_main.c
88888@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
88889 continue;
88890
88891 kdb_printf("%-20s%8u 0x%p ", mod->name,
88892- mod->core_size, (void *)mod);
88893+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
88894 #ifdef CONFIG_MODULE_UNLOAD
88895 kdb_printf("%4ld ", module_refcount(mod));
88896 #endif
88897@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
88898 kdb_printf(" (Loading)");
88899 else
88900 kdb_printf(" (Live)");
88901- kdb_printf(" 0x%p", mod->module_core);
88902+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
88903
88904 #ifdef CONFIG_MODULE_UNLOAD
88905 {
88906diff --git a/kernel/events/core.c b/kernel/events/core.c
88907index 658f232..32e9595 100644
88908--- a/kernel/events/core.c
88909+++ b/kernel/events/core.c
88910@@ -161,8 +161,15 @@ static struct srcu_struct pmus_srcu;
88911 * 0 - disallow raw tracepoint access for unpriv
88912 * 1 - disallow cpu events for unpriv
88913 * 2 - disallow kernel profiling for unpriv
88914+ * 3 - disallow all unpriv perf event use
88915 */
88916-int sysctl_perf_event_paranoid __read_mostly = 1;
88917+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88918+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
88919+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
88920+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
88921+#else
88922+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
88923+#endif
88924
88925 /* Minimum for 512 kiB + 1 user control page */
88926 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
88927@@ -188,7 +195,7 @@ void update_perf_cpu_limits(void)
88928
88929 tmp *= sysctl_perf_cpu_time_max_percent;
88930 do_div(tmp, 100);
88931- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
88932+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
88933 }
88934
88935 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
88936@@ -294,7 +301,7 @@ void perf_sample_event_took(u64 sample_len_ns)
88937 }
88938 }
88939
88940-static atomic64_t perf_event_id;
88941+static atomic64_unchecked_t perf_event_id;
88942
88943 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
88944 enum event_type_t event_type);
88945@@ -3051,7 +3058,7 @@ static void __perf_event_read(void *info)
88946
88947 static inline u64 perf_event_count(struct perf_event *event)
88948 {
88949- return local64_read(&event->count) + atomic64_read(&event->child_count);
88950+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
88951 }
88952
88953 static u64 perf_event_read(struct perf_event *event)
88954@@ -3430,9 +3437,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
88955 mutex_lock(&event->child_mutex);
88956 total += perf_event_read(event);
88957 *enabled += event->total_time_enabled +
88958- atomic64_read(&event->child_total_time_enabled);
88959+ atomic64_read_unchecked(&event->child_total_time_enabled);
88960 *running += event->total_time_running +
88961- atomic64_read(&event->child_total_time_running);
88962+ atomic64_read_unchecked(&event->child_total_time_running);
88963
88964 list_for_each_entry(child, &event->child_list, child_list) {
88965 total += perf_event_read(child);
88966@@ -3881,10 +3888,10 @@ void perf_event_update_userpage(struct perf_event *event)
88967 userpg->offset -= local64_read(&event->hw.prev_count);
88968
88969 userpg->time_enabled = enabled +
88970- atomic64_read(&event->child_total_time_enabled);
88971+ atomic64_read_unchecked(&event->child_total_time_enabled);
88972
88973 userpg->time_running = running +
88974- atomic64_read(&event->child_total_time_running);
88975+ atomic64_read_unchecked(&event->child_total_time_running);
88976
88977 arch_perf_update_userpage(userpg, now);
88978
88979@@ -4448,7 +4455,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
88980
88981 /* Data. */
88982 sp = perf_user_stack_pointer(regs);
88983- rem = __output_copy_user(handle, (void *) sp, dump_size);
88984+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
88985 dyn_size = dump_size - rem;
88986
88987 perf_output_skip(handle, rem);
88988@@ -4539,11 +4546,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
88989 values[n++] = perf_event_count(event);
88990 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
88991 values[n++] = enabled +
88992- atomic64_read(&event->child_total_time_enabled);
88993+ atomic64_read_unchecked(&event->child_total_time_enabled);
88994 }
88995 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
88996 values[n++] = running +
88997- atomic64_read(&event->child_total_time_running);
88998+ atomic64_read_unchecked(&event->child_total_time_running);
88999 }
89000 if (read_format & PERF_FORMAT_ID)
89001 values[n++] = primary_event_id(event);
89002@@ -6858,7 +6865,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
89003 event->parent = parent_event;
89004
89005 event->ns = get_pid_ns(task_active_pid_ns(current));
89006- event->id = atomic64_inc_return(&perf_event_id);
89007+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
89008
89009 event->state = PERF_EVENT_STATE_INACTIVE;
89010
89011@@ -7137,6 +7144,11 @@ SYSCALL_DEFINE5(perf_event_open,
89012 if (flags & ~PERF_FLAG_ALL)
89013 return -EINVAL;
89014
89015+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89016+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
89017+ return -EACCES;
89018+#endif
89019+
89020 err = perf_copy_attr(attr_uptr, &attr);
89021 if (err)
89022 return err;
89023@@ -7489,10 +7501,10 @@ static void sync_child_event(struct perf_event *child_event,
89024 /*
89025 * Add back the child's count to the parent's count:
89026 */
89027- atomic64_add(child_val, &parent_event->child_count);
89028- atomic64_add(child_event->total_time_enabled,
89029+ atomic64_add_unchecked(child_val, &parent_event->child_count);
89030+ atomic64_add_unchecked(child_event->total_time_enabled,
89031 &parent_event->child_total_time_enabled);
89032- atomic64_add(child_event->total_time_running,
89033+ atomic64_add_unchecked(child_event->total_time_running,
89034 &parent_event->child_total_time_running);
89035
89036 /*
89037diff --git a/kernel/events/internal.h b/kernel/events/internal.h
89038index 569b2187..19940d9 100644
89039--- a/kernel/events/internal.h
89040+++ b/kernel/events/internal.h
89041@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
89042 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
89043 }
89044
89045-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
89046+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
89047 static inline unsigned long \
89048 func_name(struct perf_output_handle *handle, \
89049- const void *buf, unsigned long len) \
89050+ const void user *buf, unsigned long len) \
89051 { \
89052 unsigned long size, written; \
89053 \
89054@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
89055 return 0;
89056 }
89057
89058-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
89059+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
89060
89061 static inline unsigned long
89062 memcpy_skip(void *dst, const void *src, unsigned long n)
89063@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
89064 return 0;
89065 }
89066
89067-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
89068+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
89069
89070 #ifndef arch_perf_out_copy_user
89071 #define arch_perf_out_copy_user arch_perf_out_copy_user
89072@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
89073 }
89074 #endif
89075
89076-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
89077+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
89078
89079 /* Callchain handling */
89080 extern struct perf_callchain_entry *
89081diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
89082index ed8f2cd..fe8030c 100644
89083--- a/kernel/events/uprobes.c
89084+++ b/kernel/events/uprobes.c
89085@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
89086 {
89087 struct page *page;
89088 uprobe_opcode_t opcode;
89089- int result;
89090+ long result;
89091
89092 pagefault_disable();
89093 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
89094diff --git a/kernel/exit.c b/kernel/exit.c
89095index 32c58f7..9eb6907 100644
89096--- a/kernel/exit.c
89097+++ b/kernel/exit.c
89098@@ -173,6 +173,10 @@ void release_task(struct task_struct *p)
89099 struct task_struct *leader;
89100 int zap_leader;
89101 repeat:
89102+#ifdef CONFIG_NET
89103+ gr_del_task_from_ip_table(p);
89104+#endif
89105+
89106 /* don't need to get the RCU readlock here - the process is dead and
89107 * can't be modifying its own credentials. But shut RCU-lockdep up */
89108 rcu_read_lock();
89109@@ -668,6 +672,8 @@ void do_exit(long code)
89110 struct task_struct *tsk = current;
89111 int group_dead;
89112
89113+ set_fs(USER_DS);
89114+
89115 profile_task_exit(tsk);
89116
89117 WARN_ON(blk_needs_flush_plug(tsk));
89118@@ -684,7 +690,6 @@ void do_exit(long code)
89119 * mm_release()->clear_child_tid() from writing to a user-controlled
89120 * kernel address.
89121 */
89122- set_fs(USER_DS);
89123
89124 ptrace_event(PTRACE_EVENT_EXIT, code);
89125
89126@@ -742,6 +747,9 @@ void do_exit(long code)
89127 tsk->exit_code = code;
89128 taskstats_exit(tsk, group_dead);
89129
89130+ gr_acl_handle_psacct(tsk, code);
89131+ gr_acl_handle_exit();
89132+
89133 exit_mm(tsk);
89134
89135 if (group_dead)
89136@@ -859,7 +867,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
89137 * Take down every thread in the group. This is called by fatal signals
89138 * as well as by sys_exit_group (below).
89139 */
89140-void
89141+__noreturn void
89142 do_group_exit(int exit_code)
89143 {
89144 struct signal_struct *sig = current->signal;
89145diff --git a/kernel/fork.c b/kernel/fork.c
89146index a91e47d..71c9064 100644
89147--- a/kernel/fork.c
89148+++ b/kernel/fork.c
89149@@ -183,6 +183,48 @@ void thread_info_cache_init(void)
89150 # endif
89151 #endif
89152
89153+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89154+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89155+ int node, void **lowmem_stack)
89156+{
89157+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
89158+ void *ret = NULL;
89159+ unsigned int i;
89160+
89161+ *lowmem_stack = alloc_thread_info_node(tsk, node);
89162+ if (*lowmem_stack == NULL)
89163+ goto out;
89164+
89165+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
89166+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
89167+
89168+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
89169+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
89170+ if (ret == NULL) {
89171+ free_thread_info(*lowmem_stack);
89172+ *lowmem_stack = NULL;
89173+ }
89174+
89175+out:
89176+ return ret;
89177+}
89178+
89179+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89180+{
89181+ unmap_process_stacks(tsk);
89182+}
89183+#else
89184+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89185+ int node, void **lowmem_stack)
89186+{
89187+ return alloc_thread_info_node(tsk, node);
89188+}
89189+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89190+{
89191+ free_thread_info(ti);
89192+}
89193+#endif
89194+
89195 /* SLAB cache for signal_struct structures (tsk->signal) */
89196 static struct kmem_cache *signal_cachep;
89197
89198@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
89199 /* SLAB cache for mm_struct structures (tsk->mm) */
89200 static struct kmem_cache *mm_cachep;
89201
89202-static void account_kernel_stack(struct thread_info *ti, int account)
89203+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
89204 {
89205+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89206+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
89207+#else
89208 struct zone *zone = page_zone(virt_to_page(ti));
89209+#endif
89210
89211 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
89212 }
89213
89214 void free_task(struct task_struct *tsk)
89215 {
89216- account_kernel_stack(tsk->stack, -1);
89217+ account_kernel_stack(tsk, tsk->stack, -1);
89218 arch_release_thread_info(tsk->stack);
89219- free_thread_info(tsk->stack);
89220+ gr_free_thread_info(tsk, tsk->stack);
89221 rt_mutex_debug_task_free(tsk);
89222 ftrace_graph_exit_task(tsk);
89223 put_seccomp_filter(tsk);
89224@@ -299,6 +345,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89225 struct task_struct *tsk;
89226 struct thread_info *ti;
89227 unsigned long *stackend;
89228+ void *lowmem_stack;
89229 int node = tsk_fork_get_node(orig);
89230 int err;
89231
89232@@ -306,7 +353,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89233 if (!tsk)
89234 return NULL;
89235
89236- ti = alloc_thread_info_node(tsk, node);
89237+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
89238 if (!ti)
89239 goto free_tsk;
89240
89241@@ -315,6 +362,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89242 goto free_ti;
89243
89244 tsk->stack = ti;
89245+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89246+ tsk->lowmem_stack = lowmem_stack;
89247+#endif
89248 #ifdef CONFIG_SECCOMP
89249 /*
89250 * We must handle setting up seccomp filters once we're under
89251@@ -332,7 +382,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89252 *stackend = STACK_END_MAGIC; /* for overflow detection */
89253
89254 #ifdef CONFIG_CC_STACKPROTECTOR
89255- tsk->stack_canary = get_random_int();
89256+ tsk->stack_canary = pax_get_random_long();
89257 #endif
89258
89259 /*
89260@@ -346,24 +396,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89261 tsk->splice_pipe = NULL;
89262 tsk->task_frag.page = NULL;
89263
89264- account_kernel_stack(ti, 1);
89265+ account_kernel_stack(tsk, ti, 1);
89266
89267 return tsk;
89268
89269 free_ti:
89270- free_thread_info(ti);
89271+ gr_free_thread_info(tsk, ti);
89272 free_tsk:
89273 free_task_struct(tsk);
89274 return NULL;
89275 }
89276
89277 #ifdef CONFIG_MMU
89278-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89279+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
89280+{
89281+ struct vm_area_struct *tmp;
89282+ unsigned long charge;
89283+ struct file *file;
89284+ int retval;
89285+
89286+ charge = 0;
89287+ if (mpnt->vm_flags & VM_ACCOUNT) {
89288+ unsigned long len = vma_pages(mpnt);
89289+
89290+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89291+ goto fail_nomem;
89292+ charge = len;
89293+ }
89294+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89295+ if (!tmp)
89296+ goto fail_nomem;
89297+ *tmp = *mpnt;
89298+ tmp->vm_mm = mm;
89299+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89300+ retval = vma_dup_policy(mpnt, tmp);
89301+ if (retval)
89302+ goto fail_nomem_policy;
89303+ if (anon_vma_fork(tmp, mpnt))
89304+ goto fail_nomem_anon_vma_fork;
89305+ tmp->vm_flags &= ~VM_LOCKED;
89306+ tmp->vm_next = tmp->vm_prev = NULL;
89307+ tmp->vm_mirror = NULL;
89308+ file = tmp->vm_file;
89309+ if (file) {
89310+ struct inode *inode = file_inode(file);
89311+ struct address_space *mapping = file->f_mapping;
89312+
89313+ get_file(file);
89314+ if (tmp->vm_flags & VM_DENYWRITE)
89315+ atomic_dec(&inode->i_writecount);
89316+ mutex_lock(&mapping->i_mmap_mutex);
89317+ if (tmp->vm_flags & VM_SHARED)
89318+ atomic_inc(&mapping->i_mmap_writable);
89319+ flush_dcache_mmap_lock(mapping);
89320+ /* insert tmp into the share list, just after mpnt */
89321+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89322+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
89323+ else
89324+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89325+ flush_dcache_mmap_unlock(mapping);
89326+ mutex_unlock(&mapping->i_mmap_mutex);
89327+ }
89328+
89329+ /*
89330+ * Clear hugetlb-related page reserves for children. This only
89331+ * affects MAP_PRIVATE mappings. Faults generated by the child
89332+ * are not guaranteed to succeed, even if read-only
89333+ */
89334+ if (is_vm_hugetlb_page(tmp))
89335+ reset_vma_resv_huge_pages(tmp);
89336+
89337+ return tmp;
89338+
89339+fail_nomem_anon_vma_fork:
89340+ mpol_put(vma_policy(tmp));
89341+fail_nomem_policy:
89342+ kmem_cache_free(vm_area_cachep, tmp);
89343+fail_nomem:
89344+ vm_unacct_memory(charge);
89345+ return NULL;
89346+}
89347+
89348+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89349 {
89350 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89351 struct rb_node **rb_link, *rb_parent;
89352 int retval;
89353- unsigned long charge;
89354
89355 uprobe_start_dup_mmap();
89356 down_write(&oldmm->mmap_sem);
89357@@ -391,55 +509,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89358
89359 prev = NULL;
89360 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89361- struct file *file;
89362-
89363 if (mpnt->vm_flags & VM_DONTCOPY) {
89364 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89365 -vma_pages(mpnt));
89366 continue;
89367 }
89368- charge = 0;
89369- if (mpnt->vm_flags & VM_ACCOUNT) {
89370- unsigned long len = vma_pages(mpnt);
89371-
89372- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89373- goto fail_nomem;
89374- charge = len;
89375- }
89376- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89377- if (!tmp)
89378- goto fail_nomem;
89379- *tmp = *mpnt;
89380- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89381- retval = vma_dup_policy(mpnt, tmp);
89382- if (retval)
89383- goto fail_nomem_policy;
89384- tmp->vm_mm = mm;
89385- if (anon_vma_fork(tmp, mpnt))
89386- goto fail_nomem_anon_vma_fork;
89387- tmp->vm_flags &= ~VM_LOCKED;
89388- tmp->vm_next = tmp->vm_prev = NULL;
89389- file = tmp->vm_file;
89390- if (file) {
89391- struct inode *inode = file_inode(file);
89392- struct address_space *mapping = file->f_mapping;
89393-
89394- get_file(file);
89395- if (tmp->vm_flags & VM_DENYWRITE)
89396- atomic_dec(&inode->i_writecount);
89397- mutex_lock(&mapping->i_mmap_mutex);
89398- if (tmp->vm_flags & VM_SHARED)
89399- atomic_inc(&mapping->i_mmap_writable);
89400- flush_dcache_mmap_lock(mapping);
89401- /* insert tmp into the share list, just after mpnt */
89402- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89403- vma_nonlinear_insert(tmp,
89404- &mapping->i_mmap_nonlinear);
89405- else
89406- vma_interval_tree_insert_after(tmp, mpnt,
89407- &mapping->i_mmap);
89408- flush_dcache_mmap_unlock(mapping);
89409- mutex_unlock(&mapping->i_mmap_mutex);
89410+ tmp = dup_vma(mm, oldmm, mpnt);
89411+ if (!tmp) {
89412+ retval = -ENOMEM;
89413+ goto out;
89414 }
89415
89416 /*
89417@@ -471,6 +549,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89418 if (retval)
89419 goto out;
89420 }
89421+
89422+#ifdef CONFIG_PAX_SEGMEXEC
89423+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89424+ struct vm_area_struct *mpnt_m;
89425+
89426+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89427+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89428+
89429+ if (!mpnt->vm_mirror)
89430+ continue;
89431+
89432+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89433+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89434+ mpnt->vm_mirror = mpnt_m;
89435+ } else {
89436+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89437+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89438+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89439+ mpnt->vm_mirror->vm_mirror = mpnt;
89440+ }
89441+ }
89442+ BUG_ON(mpnt_m);
89443+ }
89444+#endif
89445+
89446 /* a new mm has just been created */
89447 arch_dup_mmap(oldmm, mm);
89448 retval = 0;
89449@@ -480,14 +583,6 @@ out:
89450 up_write(&oldmm->mmap_sem);
89451 uprobe_end_dup_mmap();
89452 return retval;
89453-fail_nomem_anon_vma_fork:
89454- mpol_put(vma_policy(tmp));
89455-fail_nomem_policy:
89456- kmem_cache_free(vm_area_cachep, tmp);
89457-fail_nomem:
89458- retval = -ENOMEM;
89459- vm_unacct_memory(charge);
89460- goto out;
89461 }
89462
89463 static inline int mm_alloc_pgd(struct mm_struct *mm)
89464@@ -729,8 +824,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89465 return ERR_PTR(err);
89466
89467 mm = get_task_mm(task);
89468- if (mm && mm != current->mm &&
89469- !ptrace_may_access(task, mode)) {
89470+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89471+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89472 mmput(mm);
89473 mm = ERR_PTR(-EACCES);
89474 }
89475@@ -933,13 +1028,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89476 spin_unlock(&fs->lock);
89477 return -EAGAIN;
89478 }
89479- fs->users++;
89480+ atomic_inc(&fs->users);
89481 spin_unlock(&fs->lock);
89482 return 0;
89483 }
89484 tsk->fs = copy_fs_struct(fs);
89485 if (!tsk->fs)
89486 return -ENOMEM;
89487+ /* Carry through gr_chroot_dentry and is_chrooted instead
89488+ of recomputing it here. Already copied when the task struct
89489+ is duplicated. This allows pivot_root to not be treated as
89490+ a chroot
89491+ */
89492+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89493+
89494 return 0;
89495 }
89496
89497@@ -1173,7 +1275,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89498 * parts of the process environment (as per the clone
89499 * flags). The actual kick-off is left to the caller.
89500 */
89501-static struct task_struct *copy_process(unsigned long clone_flags,
89502+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89503 unsigned long stack_start,
89504 unsigned long stack_size,
89505 int __user *child_tidptr,
89506@@ -1244,6 +1346,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89507 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89508 #endif
89509 retval = -EAGAIN;
89510+
89511+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89512+
89513 if (atomic_read(&p->real_cred->user->processes) >=
89514 task_rlimit(p, RLIMIT_NPROC)) {
89515 if (p->real_cred->user != INIT_USER &&
89516@@ -1493,6 +1598,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89517 goto bad_fork_free_pid;
89518 }
89519
89520+ /* synchronizes with gr_set_acls()
89521+ we need to call this past the point of no return for fork()
89522+ */
89523+ gr_copy_label(p);
89524+
89525 if (likely(p->pid)) {
89526 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89527
89528@@ -1583,6 +1693,8 @@ bad_fork_cleanup_count:
89529 bad_fork_free:
89530 free_task(p);
89531 fork_out:
89532+ gr_log_forkfail(retval);
89533+
89534 return ERR_PTR(retval);
89535 }
89536
89537@@ -1644,6 +1756,7 @@ long do_fork(unsigned long clone_flags,
89538
89539 p = copy_process(clone_flags, stack_start, stack_size,
89540 child_tidptr, NULL, trace);
89541+ add_latent_entropy();
89542 /*
89543 * Do this prior waking up the new thread - the thread pointer
89544 * might get invalid after that point, if the thread exits quickly.
89545@@ -1660,6 +1773,8 @@ long do_fork(unsigned long clone_flags,
89546 if (clone_flags & CLONE_PARENT_SETTID)
89547 put_user(nr, parent_tidptr);
89548
89549+ gr_handle_brute_check();
89550+
89551 if (clone_flags & CLONE_VFORK) {
89552 p->vfork_done = &vfork;
89553 init_completion(&vfork);
89554@@ -1778,7 +1893,7 @@ void __init proc_caches_init(void)
89555 mm_cachep = kmem_cache_create("mm_struct",
89556 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89557 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89558- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89559+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89560 mmap_init();
89561 nsproxy_cache_init();
89562 }
89563@@ -1818,7 +1933,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89564 return 0;
89565
89566 /* don't need lock here; in the worst case we'll do useless copy */
89567- if (fs->users == 1)
89568+ if (atomic_read(&fs->users) == 1)
89569 return 0;
89570
89571 *new_fsp = copy_fs_struct(fs);
89572@@ -1930,7 +2045,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89573 fs = current->fs;
89574 spin_lock(&fs->lock);
89575 current->fs = new_fs;
89576- if (--fs->users)
89577+ gr_set_chroot_entries(current, &current->fs->root);
89578+ if (atomic_dec_return(&fs->users))
89579 new_fs = NULL;
89580 else
89581 new_fs = fs;
89582diff --git a/kernel/futex.c b/kernel/futex.c
89583index 22b3f1b..6820bc0 100644
89584--- a/kernel/futex.c
89585+++ b/kernel/futex.c
89586@@ -202,7 +202,7 @@ struct futex_pi_state {
89587 atomic_t refcount;
89588
89589 union futex_key key;
89590-};
89591+} __randomize_layout;
89592
89593 /**
89594 * struct futex_q - The hashed futex queue entry, one per waiting task
89595@@ -236,7 +236,7 @@ struct futex_q {
89596 struct rt_mutex_waiter *rt_waiter;
89597 union futex_key *requeue_pi_key;
89598 u32 bitset;
89599-};
89600+} __randomize_layout;
89601
89602 static const struct futex_q futex_q_init = {
89603 /* list gets initialized in queue_me()*/
89604@@ -396,6 +396,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89605 struct page *page, *page_head;
89606 int err, ro = 0;
89607
89608+#ifdef CONFIG_PAX_SEGMEXEC
89609+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
89610+ return -EFAULT;
89611+#endif
89612+
89613 /*
89614 * The futex address must be "naturally" aligned.
89615 */
89616@@ -595,7 +600,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
89617
89618 static int get_futex_value_locked(u32 *dest, u32 __user *from)
89619 {
89620- int ret;
89621+ unsigned long ret;
89622
89623 pagefault_disable();
89624 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
89625@@ -3000,6 +3005,7 @@ static void __init futex_detect_cmpxchg(void)
89626 {
89627 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
89628 u32 curval;
89629+ mm_segment_t oldfs;
89630
89631 /*
89632 * This will fail and we want it. Some arch implementations do
89633@@ -3011,8 +3017,11 @@ static void __init futex_detect_cmpxchg(void)
89634 * implementation, the non-functional ones will return
89635 * -ENOSYS.
89636 */
89637+ oldfs = get_fs();
89638+ set_fs(USER_DS);
89639 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
89640 futex_cmpxchg_enabled = 1;
89641+ set_fs(oldfs);
89642 #endif
89643 }
89644
89645diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
89646index 55c8c93..9ba7ad6 100644
89647--- a/kernel/futex_compat.c
89648+++ b/kernel/futex_compat.c
89649@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
89650 return 0;
89651 }
89652
89653-static void __user *futex_uaddr(struct robust_list __user *entry,
89654+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
89655 compat_long_t futex_offset)
89656 {
89657 compat_uptr_t base = ptr_to_compat(entry);
89658diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
89659index b358a80..fc25240 100644
89660--- a/kernel/gcov/base.c
89661+++ b/kernel/gcov/base.c
89662@@ -114,11 +114,6 @@ void gcov_enable_events(void)
89663 }
89664
89665 #ifdef CONFIG_MODULES
89666-static inline int within(void *addr, void *start, unsigned long size)
89667-{
89668- return ((addr >= start) && (addr < start + size));
89669-}
89670-
89671 /* Update list and generate events when modules are unloaded. */
89672 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89673 void *data)
89674@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89675
89676 /* Remove entries located in module from linked list. */
89677 while ((info = gcov_info_next(info))) {
89678- if (within(info, mod->module_core, mod->core_size)) {
89679+ if (within_module_core_rw((unsigned long)info, mod)) {
89680 gcov_info_unlink(prev, info);
89681 if (gcov_events_enabled)
89682 gcov_event(GCOV_REMOVE, info);
89683diff --git a/kernel/jump_label.c b/kernel/jump_label.c
89684index 9019f15..9a3c42e 100644
89685--- a/kernel/jump_label.c
89686+++ b/kernel/jump_label.c
89687@@ -14,6 +14,7 @@
89688 #include <linux/err.h>
89689 #include <linux/static_key.h>
89690 #include <linux/jump_label_ratelimit.h>
89691+#include <linux/mm.h>
89692
89693 #ifdef HAVE_JUMP_LABEL
89694
89695@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
89696
89697 size = (((unsigned long)stop - (unsigned long)start)
89698 / sizeof(struct jump_entry));
89699+ pax_open_kernel();
89700 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
89701+ pax_close_kernel();
89702 }
89703
89704 static void jump_label_update(struct static_key *key, int enable);
89705@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
89706 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
89707 struct jump_entry *iter;
89708
89709+ pax_open_kernel();
89710 for (iter = iter_start; iter < iter_stop; iter++) {
89711 if (within_module_init(iter->code, mod))
89712 iter->code = 0;
89713 }
89714+ pax_close_kernel();
89715 }
89716
89717 static int
89718diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
89719index ae51670..c1a9796 100644
89720--- a/kernel/kallsyms.c
89721+++ b/kernel/kallsyms.c
89722@@ -11,6 +11,9 @@
89723 * Changed the compression method from stem compression to "table lookup"
89724 * compression (see scripts/kallsyms.c for a more complete description)
89725 */
89726+#ifdef CONFIG_GRKERNSEC_HIDESYM
89727+#define __INCLUDED_BY_HIDESYM 1
89728+#endif
89729 #include <linux/kallsyms.h>
89730 #include <linux/module.h>
89731 #include <linux/init.h>
89732@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
89733
89734 static inline int is_kernel_inittext(unsigned long addr)
89735 {
89736+ if (system_state != SYSTEM_BOOTING)
89737+ return 0;
89738+
89739 if (addr >= (unsigned long)_sinittext
89740 && addr <= (unsigned long)_einittext)
89741 return 1;
89742 return 0;
89743 }
89744
89745+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89746+#ifdef CONFIG_MODULES
89747+static inline int is_module_text(unsigned long addr)
89748+{
89749+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
89750+ return 1;
89751+
89752+ addr = ktla_ktva(addr);
89753+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
89754+}
89755+#else
89756+static inline int is_module_text(unsigned long addr)
89757+{
89758+ return 0;
89759+}
89760+#endif
89761+#endif
89762+
89763 static inline int is_kernel_text(unsigned long addr)
89764 {
89765 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
89766@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
89767
89768 static inline int is_kernel(unsigned long addr)
89769 {
89770+
89771+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89772+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
89773+ return 1;
89774+
89775+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
89776+#else
89777 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
89778+#endif
89779+
89780 return 1;
89781 return in_gate_area_no_mm(addr);
89782 }
89783
89784 static int is_ksym_addr(unsigned long addr)
89785 {
89786+
89787+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89788+ if (is_module_text(addr))
89789+ return 0;
89790+#endif
89791+
89792 if (all_var)
89793 return is_kernel(addr);
89794
89795@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
89796
89797 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
89798 {
89799- iter->name[0] = '\0';
89800 iter->nameoff = get_symbol_offset(new_pos);
89801 iter->pos = new_pos;
89802 }
89803@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
89804 {
89805 struct kallsym_iter *iter = m->private;
89806
89807+#ifdef CONFIG_GRKERNSEC_HIDESYM
89808+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
89809+ return 0;
89810+#endif
89811+
89812 /* Some debugging symbols have no name. Ignore them. */
89813 if (!iter->name[0])
89814 return 0;
89815@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
89816 */
89817 type = iter->exported ? toupper(iter->type) :
89818 tolower(iter->type);
89819+
89820 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
89821 type, iter->name, iter->module_name);
89822 } else
89823@@ -567,7 +611,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
89824 struct kallsym_iter *iter;
89825 int ret;
89826
89827- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
89828+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
89829 if (!iter)
89830 return -ENOMEM;
89831 reset_iter(iter, 0);
89832diff --git a/kernel/kcmp.c b/kernel/kcmp.c
89833index 0aa69ea..a7fcafb 100644
89834--- a/kernel/kcmp.c
89835+++ b/kernel/kcmp.c
89836@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
89837 struct task_struct *task1, *task2;
89838 int ret;
89839
89840+#ifdef CONFIG_GRKERNSEC
89841+ return -ENOSYS;
89842+#endif
89843+
89844 rcu_read_lock();
89845
89846 /*
89847diff --git a/kernel/kexec.c b/kernel/kexec.c
89848index 2bee072..8979af8 100644
89849--- a/kernel/kexec.c
89850+++ b/kernel/kexec.c
89851@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
89852 compat_ulong_t, flags)
89853 {
89854 struct compat_kexec_segment in;
89855- struct kexec_segment out, __user *ksegments;
89856+ struct kexec_segment out;
89857+ struct kexec_segment __user *ksegments;
89858 unsigned long i, result;
89859
89860 /* Don't allow clients that don't understand the native
89861diff --git a/kernel/kmod.c b/kernel/kmod.c
89862index 8637e04..8b1d0d8 100644
89863--- a/kernel/kmod.c
89864+++ b/kernel/kmod.c
89865@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
89866 kfree(info->argv);
89867 }
89868
89869-static int call_modprobe(char *module_name, int wait)
89870+static int call_modprobe(char *module_name, char *module_param, int wait)
89871 {
89872 struct subprocess_info *info;
89873 static char *envp[] = {
89874@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
89875 NULL
89876 };
89877
89878- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
89879+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
89880 if (!argv)
89881 goto out;
89882
89883@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
89884 argv[1] = "-q";
89885 argv[2] = "--";
89886 argv[3] = module_name; /* check free_modprobe_argv() */
89887- argv[4] = NULL;
89888+ argv[4] = module_param;
89889+ argv[5] = NULL;
89890
89891 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
89892 NULL, free_modprobe_argv, NULL);
89893@@ -129,9 +130,8 @@ out:
89894 * If module auto-loading support is disabled then this function
89895 * becomes a no-operation.
89896 */
89897-int __request_module(bool wait, const char *fmt, ...)
89898+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
89899 {
89900- va_list args;
89901 char module_name[MODULE_NAME_LEN];
89902 unsigned int max_modprobes;
89903 int ret;
89904@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
89905 if (!modprobe_path[0])
89906 return 0;
89907
89908- va_start(args, fmt);
89909- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89910- va_end(args);
89911+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
89912 if (ret >= MODULE_NAME_LEN)
89913 return -ENAMETOOLONG;
89914
89915@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
89916 if (ret)
89917 return ret;
89918
89919+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89920+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89921+ /* hack to workaround consolekit/udisks stupidity */
89922+ read_lock(&tasklist_lock);
89923+ if (!strcmp(current->comm, "mount") &&
89924+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
89925+ read_unlock(&tasklist_lock);
89926+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
89927+ return -EPERM;
89928+ }
89929+ read_unlock(&tasklist_lock);
89930+ }
89931+#endif
89932+
89933 /* If modprobe needs a service that is in a module, we get a recursive
89934 * loop. Limit the number of running kmod threads to max_threads/2 or
89935 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
89936@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
89937
89938 trace_module_request(module_name, wait, _RET_IP_);
89939
89940- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89941+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89942
89943 atomic_dec(&kmod_concurrent);
89944 return ret;
89945 }
89946+
89947+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
89948+{
89949+ va_list args;
89950+ int ret;
89951+
89952+ va_start(args, fmt);
89953+ ret = ____request_module(wait, module_param, fmt, args);
89954+ va_end(args);
89955+
89956+ return ret;
89957+}
89958+
89959+int __request_module(bool wait, const char *fmt, ...)
89960+{
89961+ va_list args;
89962+ int ret;
89963+
89964+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89965+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89966+ char module_param[MODULE_NAME_LEN];
89967+
89968+ memset(module_param, 0, sizeof(module_param));
89969+
89970+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
89971+
89972+ va_start(args, fmt);
89973+ ret = ____request_module(wait, module_param, fmt, args);
89974+ va_end(args);
89975+
89976+ return ret;
89977+ }
89978+#endif
89979+
89980+ va_start(args, fmt);
89981+ ret = ____request_module(wait, NULL, fmt, args);
89982+ va_end(args);
89983+
89984+ return ret;
89985+}
89986+
89987 EXPORT_SYMBOL(__request_module);
89988 #endif /* CONFIG_MODULES */
89989
89990@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data)
89991 */
89992 set_user_nice(current, 0);
89993
89994+#ifdef CONFIG_GRKERNSEC
89995+ /* this is race-free as far as userland is concerned as we copied
89996+ out the path to be used prior to this point and are now operating
89997+ on that copy
89998+ */
89999+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
90000+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
90001+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
90002+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
90003+ retval = -EPERM;
90004+ goto fail;
90005+ }
90006+#endif
90007+
90008 retval = -ENOMEM;
90009 new = prepare_kernel_cred(current);
90010 if (!new)
90011@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data)
90012 commit_creds(new);
90013
90014 retval = do_execve(getname_kernel(sub_info->path),
90015- (const char __user *const __user *)sub_info->argv,
90016- (const char __user *const __user *)sub_info->envp);
90017+ (const char __user *const __force_user *)sub_info->argv,
90018+ (const char __user *const __force_user *)sub_info->envp);
90019 if (!retval)
90020 return 0;
90021
90022@@ -260,6 +327,10 @@ static int call_helper(void *data)
90023
90024 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
90025 {
90026+#ifdef CONFIG_GRKERNSEC
90027+ kfree(info->path);
90028+ info->path = info->origpath;
90029+#endif
90030 if (info->cleanup)
90031 (*info->cleanup)(info);
90032 kfree(info);
90033@@ -300,7 +371,7 @@ static int wait_for_helper(void *data)
90034 *
90035 * Thus the __user pointer cast is valid here.
90036 */
90037- sys_wait4(pid, (int __user *)&ret, 0, NULL);
90038+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
90039
90040 /*
90041 * If ret is 0, either ____call_usermodehelper failed and the
90042@@ -539,7 +610,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
90043 goto out;
90044
90045 INIT_WORK(&sub_info->work, __call_usermodehelper);
90046+#ifdef CONFIG_GRKERNSEC
90047+ sub_info->origpath = path;
90048+ sub_info->path = kstrdup(path, gfp_mask);
90049+#else
90050 sub_info->path = path;
90051+#endif
90052 sub_info->argv = argv;
90053 sub_info->envp = envp;
90054
90055@@ -647,7 +723,7 @@ EXPORT_SYMBOL(call_usermodehelper);
90056 static int proc_cap_handler(struct ctl_table *table, int write,
90057 void __user *buffer, size_t *lenp, loff_t *ppos)
90058 {
90059- struct ctl_table t;
90060+ ctl_table_no_const t;
90061 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
90062 kernel_cap_t new_cap;
90063 int err, i;
90064diff --git a/kernel/kprobes.c b/kernel/kprobes.c
90065index 3995f54..e247879 100644
90066--- a/kernel/kprobes.c
90067+++ b/kernel/kprobes.c
90068@@ -31,6 +31,9 @@
90069 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
90070 * <prasanna@in.ibm.com> added function-return probes.
90071 */
90072+#ifdef CONFIG_GRKERNSEC_HIDESYM
90073+#define __INCLUDED_BY_HIDESYM 1
90074+#endif
90075 #include <linux/kprobes.h>
90076 #include <linux/hash.h>
90077 #include <linux/init.h>
90078@@ -122,12 +125,12 @@ enum kprobe_slot_state {
90079
90080 static void *alloc_insn_page(void)
90081 {
90082- return module_alloc(PAGE_SIZE);
90083+ return module_alloc_exec(PAGE_SIZE);
90084 }
90085
90086 static void free_insn_page(void *page)
90087 {
90088- module_free(NULL, page);
90089+ module_free_exec(NULL, page);
90090 }
90091
90092 struct kprobe_insn_cache kprobe_insn_slots = {
90093@@ -2187,11 +2190,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
90094 kprobe_type = "k";
90095
90096 if (sym)
90097- seq_printf(pi, "%p %s %s+0x%x %s ",
90098+ seq_printf(pi, "%pK %s %s+0x%x %s ",
90099 p->addr, kprobe_type, sym, offset,
90100 (modname ? modname : " "));
90101 else
90102- seq_printf(pi, "%p %s %p ",
90103+ seq_printf(pi, "%pK %s %pK ",
90104 p->addr, kprobe_type, p->addr);
90105
90106 if (!pp)
90107diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
90108index 6683cce..daf8999 100644
90109--- a/kernel/ksysfs.c
90110+++ b/kernel/ksysfs.c
90111@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
90112 {
90113 if (count+1 > UEVENT_HELPER_PATH_LEN)
90114 return -ENOENT;
90115+ if (!capable(CAP_SYS_ADMIN))
90116+ return -EPERM;
90117 memcpy(uevent_helper, buf, count);
90118 uevent_helper[count] = '\0';
90119 if (count && uevent_helper[count-1] == '\n')
90120@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
90121 return count;
90122 }
90123
90124-static struct bin_attribute notes_attr = {
90125+static bin_attribute_no_const notes_attr __read_only = {
90126 .attr = {
90127 .name = "notes",
90128 .mode = S_IRUGO,
90129diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
90130index 88d0d44..e9ce0ee 100644
90131--- a/kernel/locking/lockdep.c
90132+++ b/kernel/locking/lockdep.c
90133@@ -599,6 +599,10 @@ static int static_obj(void *obj)
90134 end = (unsigned long) &_end,
90135 addr = (unsigned long) obj;
90136
90137+#ifdef CONFIG_PAX_KERNEXEC
90138+ start = ktla_ktva(start);
90139+#endif
90140+
90141 /*
90142 * static variable?
90143 */
90144@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
90145 if (!static_obj(lock->key)) {
90146 debug_locks_off();
90147 printk("INFO: trying to register non-static key.\n");
90148+ printk("lock:%pS key:%pS.\n", lock, lock->key);
90149 printk("the code is fine but needs lockdep annotation.\n");
90150 printk("turning off the locking correctness validator.\n");
90151 dump_stack();
90152@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
90153 if (!class)
90154 return 0;
90155 }
90156- atomic_inc((atomic_t *)&class->ops);
90157+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
90158 if (very_verbose(class)) {
90159 printk("\nacquire class [%p] %s", class->key, class->name);
90160 if (class->name_version > 1)
90161diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
90162index ef43ac4..2720dfa 100644
90163--- a/kernel/locking/lockdep_proc.c
90164+++ b/kernel/locking/lockdep_proc.c
90165@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
90166 return 0;
90167 }
90168
90169- seq_printf(m, "%p", class->key);
90170+ seq_printf(m, "%pK", class->key);
90171 #ifdef CONFIG_DEBUG_LOCKDEP
90172 seq_printf(m, " OPS:%8ld", class->ops);
90173 #endif
90174@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
90175
90176 list_for_each_entry(entry, &class->locks_after, entry) {
90177 if (entry->distance == 1) {
90178- seq_printf(m, " -> [%p] ", entry->class->key);
90179+ seq_printf(m, " -> [%pK] ", entry->class->key);
90180 print_name(m, entry->class);
90181 seq_puts(m, "\n");
90182 }
90183@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
90184 if (!class->key)
90185 continue;
90186
90187- seq_printf(m, "[%p] ", class->key);
90188+ seq_printf(m, "[%pK] ", class->key);
90189 print_name(m, class);
90190 seq_puts(m, "\n");
90191 }
90192@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90193 if (!i)
90194 seq_line(m, '-', 40-namelen, namelen);
90195
90196- snprintf(ip, sizeof(ip), "[<%p>]",
90197+ snprintf(ip, sizeof(ip), "[<%pK>]",
90198 (void *)class->contention_point[i]);
90199 seq_printf(m, "%40s %14lu %29s %pS\n",
90200 name, stats->contention_point[i],
90201@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90202 if (!i)
90203 seq_line(m, '-', 40-namelen, namelen);
90204
90205- snprintf(ip, sizeof(ip), "[<%p>]",
90206+ snprintf(ip, sizeof(ip), "[<%pK>]",
90207 (void *)class->contending_point[i]);
90208 seq_printf(m, "%40s %14lu %29s %pS\n",
90209 name, stats->contending_point[i],
90210diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
90211index 9887a90..0cd2b1d 100644
90212--- a/kernel/locking/mcs_spinlock.c
90213+++ b/kernel/locking/mcs_spinlock.c
90214@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
90215
90216 prev = decode_cpu(old);
90217 node->prev = prev;
90218- ACCESS_ONCE(prev->next) = node;
90219+ ACCESS_ONCE_RW(prev->next) = node;
90220
90221 /*
90222 * Normally @prev is untouchable after the above store; because at that
90223@@ -172,8 +172,8 @@ unqueue:
90224 * it will wait in Step-A.
90225 */
90226
90227- ACCESS_ONCE(next->prev) = prev;
90228- ACCESS_ONCE(prev->next) = next;
90229+ ACCESS_ONCE_RW(next->prev) = prev;
90230+ ACCESS_ONCE_RW(prev->next) = next;
90231
90232 return false;
90233 }
90234@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
90235 node = this_cpu_ptr(&osq_node);
90236 next = xchg(&node->next, NULL);
90237 if (next) {
90238- ACCESS_ONCE(next->locked) = 1;
90239+ ACCESS_ONCE_RW(next->locked) = 1;
90240 return;
90241 }
90242
90243 next = osq_wait_next(lock, node, NULL);
90244 if (next)
90245- ACCESS_ONCE(next->locked) = 1;
90246+ ACCESS_ONCE_RW(next->locked) = 1;
90247 }
90248
90249 #endif
90250diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
90251index 23e89c5..8558eac 100644
90252--- a/kernel/locking/mcs_spinlock.h
90253+++ b/kernel/locking/mcs_spinlock.h
90254@@ -81,7 +81,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
90255 */
90256 return;
90257 }
90258- ACCESS_ONCE(prev->next) = node;
90259+ ACCESS_ONCE_RW(prev->next) = node;
90260
90261 /* Wait until the lock holder passes the lock down. */
90262 arch_mcs_spin_lock_contended(&node->locked);
90263diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
90264index 5cf6731..ce3bc5a 100644
90265--- a/kernel/locking/mutex-debug.c
90266+++ b/kernel/locking/mutex-debug.c
90267@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
90268 }
90269
90270 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90271- struct thread_info *ti)
90272+ struct task_struct *task)
90273 {
90274 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90275
90276 /* Mark the current thread as blocked on the lock: */
90277- ti->task->blocked_on = waiter;
90278+ task->blocked_on = waiter;
90279 }
90280
90281 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90282- struct thread_info *ti)
90283+ struct task_struct *task)
90284 {
90285 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90286- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90287- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90288- ti->task->blocked_on = NULL;
90289+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90290+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90291+ task->blocked_on = NULL;
90292
90293 list_del_init(&waiter->list);
90294 waiter->task = NULL;
90295diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90296index 0799fd3..d06ae3b 100644
90297--- a/kernel/locking/mutex-debug.h
90298+++ b/kernel/locking/mutex-debug.h
90299@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90300 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90301 extern void debug_mutex_add_waiter(struct mutex *lock,
90302 struct mutex_waiter *waiter,
90303- struct thread_info *ti);
90304+ struct task_struct *task);
90305 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90306- struct thread_info *ti);
90307+ struct task_struct *task);
90308 extern void debug_mutex_unlock(struct mutex *lock);
90309 extern void debug_mutex_init(struct mutex *lock, const char *name,
90310 struct lock_class_key *key);
90311diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90312index ae712b2..d0d4a41 100644
90313--- a/kernel/locking/mutex.c
90314+++ b/kernel/locking/mutex.c
90315@@ -486,7 +486,7 @@ slowpath:
90316 goto skip_wait;
90317
90318 debug_mutex_lock_common(lock, &waiter);
90319- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90320+ debug_mutex_add_waiter(lock, &waiter, task);
90321
90322 /* add waiting tasks to the end of the waitqueue (FIFO): */
90323 list_add_tail(&waiter.list, &lock->wait_list);
90324@@ -531,7 +531,7 @@ slowpath:
90325 schedule_preempt_disabled();
90326 spin_lock_mutex(&lock->wait_lock, flags);
90327 }
90328- mutex_remove_waiter(lock, &waiter, current_thread_info());
90329+ mutex_remove_waiter(lock, &waiter, task);
90330 /* set it to 0 if there are no waiters left: */
90331 if (likely(list_empty(&lock->wait_list)))
90332 atomic_set(&lock->count, 0);
90333@@ -568,7 +568,7 @@ skip_wait:
90334 return 0;
90335
90336 err:
90337- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90338+ mutex_remove_waiter(lock, &waiter, task);
90339 spin_unlock_mutex(&lock->wait_lock, flags);
90340 debug_mutex_free_waiter(&waiter);
90341 mutex_release(&lock->dep_map, 1, ip);
90342diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90343index 1d96dd0..994ff19 100644
90344--- a/kernel/locking/rtmutex-tester.c
90345+++ b/kernel/locking/rtmutex-tester.c
90346@@ -22,7 +22,7 @@
90347 #define MAX_RT_TEST_MUTEXES 8
90348
90349 static spinlock_t rttest_lock;
90350-static atomic_t rttest_event;
90351+static atomic_unchecked_t rttest_event;
90352
90353 struct test_thread_data {
90354 int opcode;
90355@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90356
90357 case RTTEST_LOCKCONT:
90358 td->mutexes[td->opdata] = 1;
90359- td->event = atomic_add_return(1, &rttest_event);
90360+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90361 return 0;
90362
90363 case RTTEST_RESET:
90364@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90365 return 0;
90366
90367 case RTTEST_RESETEVENT:
90368- atomic_set(&rttest_event, 0);
90369+ atomic_set_unchecked(&rttest_event, 0);
90370 return 0;
90371
90372 default:
90373@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90374 return ret;
90375
90376 td->mutexes[id] = 1;
90377- td->event = atomic_add_return(1, &rttest_event);
90378+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90379 rt_mutex_lock(&mutexes[id]);
90380- td->event = atomic_add_return(1, &rttest_event);
90381+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90382 td->mutexes[id] = 4;
90383 return 0;
90384
90385@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90386 return ret;
90387
90388 td->mutexes[id] = 1;
90389- td->event = atomic_add_return(1, &rttest_event);
90390+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90391 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90392- td->event = atomic_add_return(1, &rttest_event);
90393+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90394 td->mutexes[id] = ret ? 0 : 4;
90395 return ret ? -EINTR : 0;
90396
90397@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90398 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90399 return ret;
90400
90401- td->event = atomic_add_return(1, &rttest_event);
90402+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90403 rt_mutex_unlock(&mutexes[id]);
90404- td->event = atomic_add_return(1, &rttest_event);
90405+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90406 td->mutexes[id] = 0;
90407 return 0;
90408
90409@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90410 break;
90411
90412 td->mutexes[dat] = 2;
90413- td->event = atomic_add_return(1, &rttest_event);
90414+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90415 break;
90416
90417 default:
90418@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90419 return;
90420
90421 td->mutexes[dat] = 3;
90422- td->event = atomic_add_return(1, &rttest_event);
90423+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90424 break;
90425
90426 case RTTEST_LOCKNOWAIT:
90427@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90428 return;
90429
90430 td->mutexes[dat] = 1;
90431- td->event = atomic_add_return(1, &rttest_event);
90432+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90433 return;
90434
90435 default:
90436diff --git a/kernel/module.c b/kernel/module.c
90437index 1c47139..6242887 100644
90438--- a/kernel/module.c
90439+++ b/kernel/module.c
90440@@ -60,6 +60,7 @@
90441 #include <linux/jump_label.h>
90442 #include <linux/pfn.h>
90443 #include <linux/bsearch.h>
90444+#include <linux/grsecurity.h>
90445 #include <uapi/linux/module.h>
90446 #include "module-internal.h"
90447
90448@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90449
90450 /* Bounds of module allocation, for speeding __module_address.
90451 * Protected by module_mutex. */
90452-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90453+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90454+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90455
90456 int register_module_notifier(struct notifier_block * nb)
90457 {
90458@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90459 return true;
90460
90461 list_for_each_entry_rcu(mod, &modules, list) {
90462- struct symsearch arr[] = {
90463+ struct symsearch modarr[] = {
90464 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90465 NOT_GPL_ONLY, false },
90466 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90467@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90468 if (mod->state == MODULE_STATE_UNFORMED)
90469 continue;
90470
90471- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90472+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90473 return true;
90474 }
90475 return false;
90476@@ -488,7 +490,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90477 if (!pcpusec->sh_size)
90478 return 0;
90479
90480- if (align > PAGE_SIZE) {
90481+ if (align-1 >= PAGE_SIZE) {
90482 pr_warn("%s: per-cpu alignment %li > %li\n",
90483 mod->name, align, PAGE_SIZE);
90484 align = PAGE_SIZE;
90485@@ -1060,7 +1062,7 @@ struct module_attribute module_uevent =
90486 static ssize_t show_coresize(struct module_attribute *mattr,
90487 struct module_kobject *mk, char *buffer)
90488 {
90489- return sprintf(buffer, "%u\n", mk->mod->core_size);
90490+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90491 }
90492
90493 static struct module_attribute modinfo_coresize =
90494@@ -1069,7 +1071,7 @@ static struct module_attribute modinfo_coresize =
90495 static ssize_t show_initsize(struct module_attribute *mattr,
90496 struct module_kobject *mk, char *buffer)
90497 {
90498- return sprintf(buffer, "%u\n", mk->mod->init_size);
90499+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90500 }
90501
90502 static struct module_attribute modinfo_initsize =
90503@@ -1161,12 +1163,29 @@ static int check_version(Elf_Shdr *sechdrs,
90504 goto bad_version;
90505 }
90506
90507+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90508+ /*
90509+ * avoid potentially printing jibberish on attempted load
90510+ * of a module randomized with a different seed
90511+ */
90512+ pr_warn("no symbol version for %s\n", symname);
90513+#else
90514 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90515+#endif
90516 return 0;
90517
90518 bad_version:
90519+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90520+ /*
90521+ * avoid potentially printing jibberish on attempted load
90522+ * of a module randomized with a different seed
90523+ */
90524+ printk("attempted module disagrees about version of symbol %s\n",
90525+ symname);
90526+#else
90527 printk("%s: disagrees about version of symbol %s\n",
90528 mod->name, symname);
90529+#endif
90530 return 0;
90531 }
90532
90533@@ -1282,7 +1301,7 @@ resolve_symbol_wait(struct module *mod,
90534 */
90535 #ifdef CONFIG_SYSFS
90536
90537-#ifdef CONFIG_KALLSYMS
90538+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90539 static inline bool sect_empty(const Elf_Shdr *sect)
90540 {
90541 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90542@@ -1422,7 +1441,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90543 {
90544 unsigned int notes, loaded, i;
90545 struct module_notes_attrs *notes_attrs;
90546- struct bin_attribute *nattr;
90547+ bin_attribute_no_const *nattr;
90548
90549 /* failed to create section attributes, so can't create notes */
90550 if (!mod->sect_attrs)
90551@@ -1534,7 +1553,7 @@ static void del_usage_links(struct module *mod)
90552 static int module_add_modinfo_attrs(struct module *mod)
90553 {
90554 struct module_attribute *attr;
90555- struct module_attribute *temp_attr;
90556+ module_attribute_no_const *temp_attr;
90557 int error = 0;
90558 int i;
90559
90560@@ -1755,21 +1774,21 @@ static void set_section_ro_nx(void *base,
90561
90562 static void unset_module_core_ro_nx(struct module *mod)
90563 {
90564- set_page_attributes(mod->module_core + mod->core_text_size,
90565- mod->module_core + mod->core_size,
90566+ set_page_attributes(mod->module_core_rw,
90567+ mod->module_core_rw + mod->core_size_rw,
90568 set_memory_x);
90569- set_page_attributes(mod->module_core,
90570- mod->module_core + mod->core_ro_size,
90571+ set_page_attributes(mod->module_core_rx,
90572+ mod->module_core_rx + mod->core_size_rx,
90573 set_memory_rw);
90574 }
90575
90576 static void unset_module_init_ro_nx(struct module *mod)
90577 {
90578- set_page_attributes(mod->module_init + mod->init_text_size,
90579- mod->module_init + mod->init_size,
90580+ set_page_attributes(mod->module_init_rw,
90581+ mod->module_init_rw + mod->init_size_rw,
90582 set_memory_x);
90583- set_page_attributes(mod->module_init,
90584- mod->module_init + mod->init_ro_size,
90585+ set_page_attributes(mod->module_init_rx,
90586+ mod->module_init_rx + mod->init_size_rx,
90587 set_memory_rw);
90588 }
90589
90590@@ -1782,14 +1801,14 @@ void set_all_modules_text_rw(void)
90591 list_for_each_entry_rcu(mod, &modules, list) {
90592 if (mod->state == MODULE_STATE_UNFORMED)
90593 continue;
90594- if ((mod->module_core) && (mod->core_text_size)) {
90595- set_page_attributes(mod->module_core,
90596- mod->module_core + mod->core_text_size,
90597+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90598+ set_page_attributes(mod->module_core_rx,
90599+ mod->module_core_rx + mod->core_size_rx,
90600 set_memory_rw);
90601 }
90602- if ((mod->module_init) && (mod->init_text_size)) {
90603- set_page_attributes(mod->module_init,
90604- mod->module_init + mod->init_text_size,
90605+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90606+ set_page_attributes(mod->module_init_rx,
90607+ mod->module_init_rx + mod->init_size_rx,
90608 set_memory_rw);
90609 }
90610 }
90611@@ -1805,14 +1824,14 @@ void set_all_modules_text_ro(void)
90612 list_for_each_entry_rcu(mod, &modules, list) {
90613 if (mod->state == MODULE_STATE_UNFORMED)
90614 continue;
90615- if ((mod->module_core) && (mod->core_text_size)) {
90616- set_page_attributes(mod->module_core,
90617- mod->module_core + mod->core_text_size,
90618+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90619+ set_page_attributes(mod->module_core_rx,
90620+ mod->module_core_rx + mod->core_size_rx,
90621 set_memory_ro);
90622 }
90623- if ((mod->module_init) && (mod->init_text_size)) {
90624- set_page_attributes(mod->module_init,
90625- mod->module_init + mod->init_text_size,
90626+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90627+ set_page_attributes(mod->module_init_rx,
90628+ mod->module_init_rx + mod->init_size_rx,
90629 set_memory_ro);
90630 }
90631 }
90632@@ -1865,16 +1884,19 @@ static void free_module(struct module *mod)
90633
90634 /* This may be NULL, but that's OK */
90635 unset_module_init_ro_nx(mod);
90636- module_free(mod, mod->module_init);
90637+ module_free(mod, mod->module_init_rw);
90638+ module_free_exec(mod, mod->module_init_rx);
90639 kfree(mod->args);
90640 percpu_modfree(mod);
90641
90642 /* Free lock-classes: */
90643- lockdep_free_key_range(mod->module_core, mod->core_size);
90644+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
90645+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
90646
90647 /* Finally, free the core (containing the module structure) */
90648 unset_module_core_ro_nx(mod);
90649- module_free(mod, mod->module_core);
90650+ module_free_exec(mod, mod->module_core_rx);
90651+ module_free(mod, mod->module_core_rw);
90652
90653 #ifdef CONFIG_MPU
90654 update_protections(current->mm);
90655@@ -1943,9 +1965,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90656 int ret = 0;
90657 const struct kernel_symbol *ksym;
90658
90659+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90660+ int is_fs_load = 0;
90661+ int register_filesystem_found = 0;
90662+ char *p;
90663+
90664+ p = strstr(mod->args, "grsec_modharden_fs");
90665+ if (p) {
90666+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
90667+ /* copy \0 as well */
90668+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
90669+ is_fs_load = 1;
90670+ }
90671+#endif
90672+
90673 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
90674 const char *name = info->strtab + sym[i].st_name;
90675
90676+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90677+ /* it's a real shame this will never get ripped and copied
90678+ upstream! ;(
90679+ */
90680+ if (is_fs_load && !strcmp(name, "register_filesystem"))
90681+ register_filesystem_found = 1;
90682+#endif
90683+
90684 switch (sym[i].st_shndx) {
90685 case SHN_COMMON:
90686 /* Ignore common symbols */
90687@@ -1970,7 +2014,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90688 ksym = resolve_symbol_wait(mod, info, name);
90689 /* Ok if resolved. */
90690 if (ksym && !IS_ERR(ksym)) {
90691+ pax_open_kernel();
90692 sym[i].st_value = ksym->value;
90693+ pax_close_kernel();
90694 break;
90695 }
90696
90697@@ -1989,11 +2035,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90698 secbase = (unsigned long)mod_percpu(mod);
90699 else
90700 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
90701+ pax_open_kernel();
90702 sym[i].st_value += secbase;
90703+ pax_close_kernel();
90704 break;
90705 }
90706 }
90707
90708+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90709+ if (is_fs_load && !register_filesystem_found) {
90710+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
90711+ ret = -EPERM;
90712+ }
90713+#endif
90714+
90715 return ret;
90716 }
90717
90718@@ -2077,22 +2132,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
90719 || s->sh_entsize != ~0UL
90720 || strstarts(sname, ".init"))
90721 continue;
90722- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
90723+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90724+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
90725+ else
90726+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
90727 pr_debug("\t%s\n", sname);
90728 }
90729- switch (m) {
90730- case 0: /* executable */
90731- mod->core_size = debug_align(mod->core_size);
90732- mod->core_text_size = mod->core_size;
90733- break;
90734- case 1: /* RO: text and ro-data */
90735- mod->core_size = debug_align(mod->core_size);
90736- mod->core_ro_size = mod->core_size;
90737- break;
90738- case 3: /* whole core */
90739- mod->core_size = debug_align(mod->core_size);
90740- break;
90741- }
90742 }
90743
90744 pr_debug("Init section allocation order:\n");
90745@@ -2106,23 +2151,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
90746 || s->sh_entsize != ~0UL
90747 || !strstarts(sname, ".init"))
90748 continue;
90749- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
90750- | INIT_OFFSET_MASK);
90751+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90752+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
90753+ else
90754+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
90755+ s->sh_entsize |= INIT_OFFSET_MASK;
90756 pr_debug("\t%s\n", sname);
90757 }
90758- switch (m) {
90759- case 0: /* executable */
90760- mod->init_size = debug_align(mod->init_size);
90761- mod->init_text_size = mod->init_size;
90762- break;
90763- case 1: /* RO: text and ro-data */
90764- mod->init_size = debug_align(mod->init_size);
90765- mod->init_ro_size = mod->init_size;
90766- break;
90767- case 3: /* whole init */
90768- mod->init_size = debug_align(mod->init_size);
90769- break;
90770- }
90771 }
90772 }
90773
90774@@ -2295,7 +2330,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90775
90776 /* Put symbol section at end of init part of module. */
90777 symsect->sh_flags |= SHF_ALLOC;
90778- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
90779+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
90780 info->index.sym) | INIT_OFFSET_MASK;
90781 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
90782
90783@@ -2312,13 +2347,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90784 }
90785
90786 /* Append room for core symbols at end of core part. */
90787- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
90788- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
90789- mod->core_size += strtab_size;
90790+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
90791+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
90792+ mod->core_size_rx += strtab_size;
90793
90794 /* Put string table section at end of init part of module. */
90795 strsect->sh_flags |= SHF_ALLOC;
90796- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
90797+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
90798 info->index.str) | INIT_OFFSET_MASK;
90799 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
90800 }
90801@@ -2336,12 +2371,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90802 /* Make sure we get permanent strtab: don't use info->strtab. */
90803 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
90804
90805+ pax_open_kernel();
90806+
90807 /* Set types up while we still have access to sections. */
90808 for (i = 0; i < mod->num_symtab; i++)
90809 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
90810
90811- mod->core_symtab = dst = mod->module_core + info->symoffs;
90812- mod->core_strtab = s = mod->module_core + info->stroffs;
90813+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
90814+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
90815 src = mod->symtab;
90816 for (ndst = i = 0; i < mod->num_symtab; i++) {
90817 if (i == 0 ||
90818@@ -2353,6 +2390,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90819 }
90820 }
90821 mod->core_num_syms = ndst;
90822+
90823+ pax_close_kernel();
90824 }
90825 #else
90826 static inline void layout_symtab(struct module *mod, struct load_info *info)
90827@@ -2386,17 +2425,33 @@ void * __weak module_alloc(unsigned long size)
90828 return vmalloc_exec(size);
90829 }
90830
90831-static void *module_alloc_update_bounds(unsigned long size)
90832+static void *module_alloc_update_bounds_rw(unsigned long size)
90833 {
90834 void *ret = module_alloc(size);
90835
90836 if (ret) {
90837 mutex_lock(&module_mutex);
90838 /* Update module bounds. */
90839- if ((unsigned long)ret < module_addr_min)
90840- module_addr_min = (unsigned long)ret;
90841- if ((unsigned long)ret + size > module_addr_max)
90842- module_addr_max = (unsigned long)ret + size;
90843+ if ((unsigned long)ret < module_addr_min_rw)
90844+ module_addr_min_rw = (unsigned long)ret;
90845+ if ((unsigned long)ret + size > module_addr_max_rw)
90846+ module_addr_max_rw = (unsigned long)ret + size;
90847+ mutex_unlock(&module_mutex);
90848+ }
90849+ return ret;
90850+}
90851+
90852+static void *module_alloc_update_bounds_rx(unsigned long size)
90853+{
90854+ void *ret = module_alloc_exec(size);
90855+
90856+ if (ret) {
90857+ mutex_lock(&module_mutex);
90858+ /* Update module bounds. */
90859+ if ((unsigned long)ret < module_addr_min_rx)
90860+ module_addr_min_rx = (unsigned long)ret;
90861+ if ((unsigned long)ret + size > module_addr_max_rx)
90862+ module_addr_max_rx = (unsigned long)ret + size;
90863 mutex_unlock(&module_mutex);
90864 }
90865 return ret;
90866@@ -2650,7 +2705,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90867 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
90868
90869 if (info->index.sym == 0) {
90870+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90871+ /*
90872+ * avoid potentially printing jibberish on attempted load
90873+ * of a module randomized with a different seed
90874+ */
90875+ pr_warn("module has no symbols (stripped?)\n");
90876+#else
90877 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
90878+#endif
90879 return ERR_PTR(-ENOEXEC);
90880 }
90881
90882@@ -2666,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90883 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90884 {
90885 const char *modmagic = get_modinfo(info, "vermagic");
90886+ const char *license = get_modinfo(info, "license");
90887 int err;
90888
90889+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
90890+ if (!license || !license_is_gpl_compatible(license))
90891+ return -ENOEXEC;
90892+#endif
90893+
90894 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
90895 modmagic = NULL;
90896
90897@@ -2692,7 +2761,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90898 }
90899
90900 /* Set up license info based on the info section */
90901- set_license(mod, get_modinfo(info, "license"));
90902+ set_license(mod, license);
90903
90904 return 0;
90905 }
90906@@ -2786,7 +2855,7 @@ static int move_module(struct module *mod, struct load_info *info)
90907 void *ptr;
90908
90909 /* Do the allocs. */
90910- ptr = module_alloc_update_bounds(mod->core_size);
90911+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
90912 /*
90913 * The pointer to this block is stored in the module structure
90914 * which is inside the block. Just mark it as not being a
90915@@ -2796,11 +2865,11 @@ static int move_module(struct module *mod, struct load_info *info)
90916 if (!ptr)
90917 return -ENOMEM;
90918
90919- memset(ptr, 0, mod->core_size);
90920- mod->module_core = ptr;
90921+ memset(ptr, 0, mod->core_size_rw);
90922+ mod->module_core_rw = ptr;
90923
90924- if (mod->init_size) {
90925- ptr = module_alloc_update_bounds(mod->init_size);
90926+ if (mod->init_size_rw) {
90927+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
90928 /*
90929 * The pointer to this block is stored in the module structure
90930 * which is inside the block. This block doesn't need to be
90931@@ -2809,13 +2878,45 @@ static int move_module(struct module *mod, struct load_info *info)
90932 */
90933 kmemleak_ignore(ptr);
90934 if (!ptr) {
90935- module_free(mod, mod->module_core);
90936+ module_free(mod, mod->module_core_rw);
90937 return -ENOMEM;
90938 }
90939- memset(ptr, 0, mod->init_size);
90940- mod->module_init = ptr;
90941+ memset(ptr, 0, mod->init_size_rw);
90942+ mod->module_init_rw = ptr;
90943 } else
90944- mod->module_init = NULL;
90945+ mod->module_init_rw = NULL;
90946+
90947+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
90948+ kmemleak_not_leak(ptr);
90949+ if (!ptr) {
90950+ if (mod->module_init_rw)
90951+ module_free(mod, mod->module_init_rw);
90952+ module_free(mod, mod->module_core_rw);
90953+ return -ENOMEM;
90954+ }
90955+
90956+ pax_open_kernel();
90957+ memset(ptr, 0, mod->core_size_rx);
90958+ pax_close_kernel();
90959+ mod->module_core_rx = ptr;
90960+
90961+ if (mod->init_size_rx) {
90962+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
90963+ kmemleak_ignore(ptr);
90964+ if (!ptr && mod->init_size_rx) {
90965+ module_free_exec(mod, mod->module_core_rx);
90966+ if (mod->module_init_rw)
90967+ module_free(mod, mod->module_init_rw);
90968+ module_free(mod, mod->module_core_rw);
90969+ return -ENOMEM;
90970+ }
90971+
90972+ pax_open_kernel();
90973+ memset(ptr, 0, mod->init_size_rx);
90974+ pax_close_kernel();
90975+ mod->module_init_rx = ptr;
90976+ } else
90977+ mod->module_init_rx = NULL;
90978
90979 /* Transfer each section which specifies SHF_ALLOC */
90980 pr_debug("final section addresses:\n");
90981@@ -2826,16 +2927,45 @@ static int move_module(struct module *mod, struct load_info *info)
90982 if (!(shdr->sh_flags & SHF_ALLOC))
90983 continue;
90984
90985- if (shdr->sh_entsize & INIT_OFFSET_MASK)
90986- dest = mod->module_init
90987- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90988- else
90989- dest = mod->module_core + shdr->sh_entsize;
90990+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
90991+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90992+ dest = mod->module_init_rw
90993+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90994+ else
90995+ dest = mod->module_init_rx
90996+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90997+ } else {
90998+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
90999+ dest = mod->module_core_rw + shdr->sh_entsize;
91000+ else
91001+ dest = mod->module_core_rx + shdr->sh_entsize;
91002+ }
91003+
91004+ if (shdr->sh_type != SHT_NOBITS) {
91005+
91006+#ifdef CONFIG_PAX_KERNEXEC
91007+#ifdef CONFIG_X86_64
91008+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
91009+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
91010+#endif
91011+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
91012+ pax_open_kernel();
91013+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91014+ pax_close_kernel();
91015+ } else
91016+#endif
91017
91018- if (shdr->sh_type != SHT_NOBITS)
91019 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91020+ }
91021 /* Update sh_addr to point to copy in image. */
91022- shdr->sh_addr = (unsigned long)dest;
91023+
91024+#ifdef CONFIG_PAX_KERNEXEC
91025+ if (shdr->sh_flags & SHF_EXECINSTR)
91026+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
91027+ else
91028+#endif
91029+
91030+ shdr->sh_addr = (unsigned long)dest;
91031 pr_debug("\t0x%lx %s\n",
91032 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
91033 }
91034@@ -2892,12 +3022,12 @@ static void flush_module_icache(const struct module *mod)
91035 * Do it before processing of module parameters, so the module
91036 * can provide parameter accessor functions of its own.
91037 */
91038- if (mod->module_init)
91039- flush_icache_range((unsigned long)mod->module_init,
91040- (unsigned long)mod->module_init
91041- + mod->init_size);
91042- flush_icache_range((unsigned long)mod->module_core,
91043- (unsigned long)mod->module_core + mod->core_size);
91044+ if (mod->module_init_rx)
91045+ flush_icache_range((unsigned long)mod->module_init_rx,
91046+ (unsigned long)mod->module_init_rx
91047+ + mod->init_size_rx);
91048+ flush_icache_range((unsigned long)mod->module_core_rx,
91049+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
91050
91051 set_fs(old_fs);
91052 }
91053@@ -2954,8 +3084,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
91054 static void module_deallocate(struct module *mod, struct load_info *info)
91055 {
91056 percpu_modfree(mod);
91057- module_free(mod, mod->module_init);
91058- module_free(mod, mod->module_core);
91059+ module_free_exec(mod, mod->module_init_rx);
91060+ module_free_exec(mod, mod->module_core_rx);
91061+ module_free(mod, mod->module_init_rw);
91062+ module_free(mod, mod->module_core_rw);
91063 }
91064
91065 int __weak module_finalize(const Elf_Ehdr *hdr,
91066@@ -2968,7 +3100,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
91067 static int post_relocation(struct module *mod, const struct load_info *info)
91068 {
91069 /* Sort exception table now relocations are done. */
91070+ pax_open_kernel();
91071 sort_extable(mod->extable, mod->extable + mod->num_exentries);
91072+ pax_close_kernel();
91073
91074 /* Copy relocated percpu area over. */
91075 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
91076@@ -3077,11 +3211,12 @@ static int do_init_module(struct module *mod)
91077 mod->strtab = mod->core_strtab;
91078 #endif
91079 unset_module_init_ro_nx(mod);
91080- module_free(mod, mod->module_init);
91081- mod->module_init = NULL;
91082- mod->init_size = 0;
91083- mod->init_ro_size = 0;
91084- mod->init_text_size = 0;
91085+ module_free(mod, mod->module_init_rw);
91086+ module_free_exec(mod, mod->module_init_rx);
91087+ mod->module_init_rw = NULL;
91088+ mod->module_init_rx = NULL;
91089+ mod->init_size_rw = 0;
91090+ mod->init_size_rx = 0;
91091 mutex_unlock(&module_mutex);
91092 wake_up_all(&module_wq);
91093
91094@@ -3149,16 +3284,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
91095 module_bug_finalize(info->hdr, info->sechdrs, mod);
91096
91097 /* Set RO and NX regions for core */
91098- set_section_ro_nx(mod->module_core,
91099- mod->core_text_size,
91100- mod->core_ro_size,
91101- mod->core_size);
91102+ set_section_ro_nx(mod->module_core_rx,
91103+ mod->core_size_rx,
91104+ mod->core_size_rx,
91105+ mod->core_size_rx);
91106
91107 /* Set RO and NX regions for init */
91108- set_section_ro_nx(mod->module_init,
91109- mod->init_text_size,
91110- mod->init_ro_size,
91111- mod->init_size);
91112+ set_section_ro_nx(mod->module_init_rx,
91113+ mod->init_size_rx,
91114+ mod->init_size_rx,
91115+ mod->init_size_rx);
91116
91117 /* Mark state as coming so strong_try_module_get() ignores us,
91118 * but kallsyms etc. can see us. */
91119@@ -3242,9 +3377,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
91120 if (err)
91121 goto free_unload;
91122
91123+ /* Now copy in args */
91124+ mod->args = strndup_user(uargs, ~0UL >> 1);
91125+ if (IS_ERR(mod->args)) {
91126+ err = PTR_ERR(mod->args);
91127+ goto free_unload;
91128+ }
91129+
91130 /* Set up MODINFO_ATTR fields */
91131 setup_modinfo(mod, info);
91132
91133+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91134+ {
91135+ char *p, *p2;
91136+
91137+ if (strstr(mod->args, "grsec_modharden_netdev")) {
91138+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
91139+ err = -EPERM;
91140+ goto free_modinfo;
91141+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
91142+ p += sizeof("grsec_modharden_normal") - 1;
91143+ p2 = strstr(p, "_");
91144+ if (p2) {
91145+ *p2 = '\0';
91146+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
91147+ *p2 = '_';
91148+ }
91149+ err = -EPERM;
91150+ goto free_modinfo;
91151+ }
91152+ }
91153+#endif
91154+
91155 /* Fix up syms, so that st_value is a pointer to location. */
91156 err = simplify_symbols(mod, info);
91157 if (err < 0)
91158@@ -3260,13 +3424,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
91159
91160 flush_module_icache(mod);
91161
91162- /* Now copy in args */
91163- mod->args = strndup_user(uargs, ~0UL >> 1);
91164- if (IS_ERR(mod->args)) {
91165- err = PTR_ERR(mod->args);
91166- goto free_arch_cleanup;
91167- }
91168-
91169 dynamic_debug_setup(info->debug, info->num_debug);
91170
91171 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
91172@@ -3314,11 +3471,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
91173 ddebug_cleanup:
91174 dynamic_debug_remove(info->debug);
91175 synchronize_sched();
91176- kfree(mod->args);
91177- free_arch_cleanup:
91178 module_arch_cleanup(mod);
91179 free_modinfo:
91180 free_modinfo(mod);
91181+ kfree(mod->args);
91182 free_unload:
91183 module_unload_free(mod);
91184 unlink_mod:
91185@@ -3403,10 +3559,16 @@ static const char *get_ksymbol(struct module *mod,
91186 unsigned long nextval;
91187
91188 /* At worse, next value is at end of module */
91189- if (within_module_init(addr, mod))
91190- nextval = (unsigned long)mod->module_init+mod->init_text_size;
91191+ if (within_module_init_rx(addr, mod))
91192+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
91193+ else if (within_module_init_rw(addr, mod))
91194+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
91195+ else if (within_module_core_rx(addr, mod))
91196+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
91197+ else if (within_module_core_rw(addr, mod))
91198+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
91199 else
91200- nextval = (unsigned long)mod->module_core+mod->core_text_size;
91201+ return NULL;
91202
91203 /* Scan for closest preceding symbol, and next symbol. (ELF
91204 starts real symbols at 1). */
91205@@ -3654,7 +3816,7 @@ static int m_show(struct seq_file *m, void *p)
91206 return 0;
91207
91208 seq_printf(m, "%s %u",
91209- mod->name, mod->init_size + mod->core_size);
91210+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
91211 print_unload_info(m, mod);
91212
91213 /* Informative for users. */
91214@@ -3663,7 +3825,7 @@ static int m_show(struct seq_file *m, void *p)
91215 mod->state == MODULE_STATE_COMING ? "Loading":
91216 "Live");
91217 /* Used by oprofile and other similar tools. */
91218- seq_printf(m, " 0x%pK", mod->module_core);
91219+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
91220
91221 /* Taints info */
91222 if (mod->taints)
91223@@ -3699,7 +3861,17 @@ static const struct file_operations proc_modules_operations = {
91224
91225 static int __init proc_modules_init(void)
91226 {
91227+#ifndef CONFIG_GRKERNSEC_HIDESYM
91228+#ifdef CONFIG_GRKERNSEC_PROC_USER
91229+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91230+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91231+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91232+#else
91233 proc_create("modules", 0, NULL, &proc_modules_operations);
91234+#endif
91235+#else
91236+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91237+#endif
91238 return 0;
91239 }
91240 module_init(proc_modules_init);
91241@@ -3760,7 +3932,8 @@ struct module *__module_address(unsigned long addr)
91242 {
91243 struct module *mod;
91244
91245- if (addr < module_addr_min || addr > module_addr_max)
91246+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91247+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91248 return NULL;
91249
91250 list_for_each_entry_rcu(mod, &modules, list) {
91251@@ -3801,11 +3974,20 @@ bool is_module_text_address(unsigned long addr)
91252 */
91253 struct module *__module_text_address(unsigned long addr)
91254 {
91255- struct module *mod = __module_address(addr);
91256+ struct module *mod;
91257+
91258+#ifdef CONFIG_X86_32
91259+ addr = ktla_ktva(addr);
91260+#endif
91261+
91262+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91263+ return NULL;
91264+
91265+ mod = __module_address(addr);
91266+
91267 if (mod) {
91268 /* Make sure it's within the text section. */
91269- if (!within(addr, mod->module_init, mod->init_text_size)
91270- && !within(addr, mod->module_core, mod->core_text_size))
91271+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91272 mod = NULL;
91273 }
91274 return mod;
91275diff --git a/kernel/notifier.c b/kernel/notifier.c
91276index 4803da6..1c5eea6 100644
91277--- a/kernel/notifier.c
91278+++ b/kernel/notifier.c
91279@@ -5,6 +5,7 @@
91280 #include <linux/rcupdate.h>
91281 #include <linux/vmalloc.h>
91282 #include <linux/reboot.h>
91283+#include <linux/mm.h>
91284
91285 /*
91286 * Notifier list for kernel code which wants to be called
91287@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91288 while ((*nl) != NULL) {
91289 if (n->priority > (*nl)->priority)
91290 break;
91291- nl = &((*nl)->next);
91292+ nl = (struct notifier_block **)&((*nl)->next);
91293 }
91294- n->next = *nl;
91295+ pax_open_kernel();
91296+ *(const void **)&n->next = *nl;
91297 rcu_assign_pointer(*nl, n);
91298+ pax_close_kernel();
91299 return 0;
91300 }
91301
91302@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91303 return 0;
91304 if (n->priority > (*nl)->priority)
91305 break;
91306- nl = &((*nl)->next);
91307+ nl = (struct notifier_block **)&((*nl)->next);
91308 }
91309- n->next = *nl;
91310+ pax_open_kernel();
91311+ *(const void **)&n->next = *nl;
91312 rcu_assign_pointer(*nl, n);
91313+ pax_close_kernel();
91314 return 0;
91315 }
91316
91317@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91318 {
91319 while ((*nl) != NULL) {
91320 if ((*nl) == n) {
91321+ pax_open_kernel();
91322 rcu_assign_pointer(*nl, n->next);
91323+ pax_close_kernel();
91324 return 0;
91325 }
91326- nl = &((*nl)->next);
91327+ nl = (struct notifier_block **)&((*nl)->next);
91328 }
91329 return -ENOENT;
91330 }
91331diff --git a/kernel/padata.c b/kernel/padata.c
91332index 161402f..598814c 100644
91333--- a/kernel/padata.c
91334+++ b/kernel/padata.c
91335@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91336 * seq_nr mod. number of cpus in use.
91337 */
91338
91339- seq_nr = atomic_inc_return(&pd->seq_nr);
91340+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91341 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91342
91343 return padata_index_to_cpu(pd, cpu_index);
91344@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91345 padata_init_pqueues(pd);
91346 padata_init_squeues(pd);
91347 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91348- atomic_set(&pd->seq_nr, -1);
91349+ atomic_set_unchecked(&pd->seq_nr, -1);
91350 atomic_set(&pd->reorder_objects, 0);
91351 atomic_set(&pd->refcnt, 0);
91352 pd->pinst = pinst;
91353diff --git a/kernel/panic.c b/kernel/panic.c
91354index d09dc5c..9abbdff 100644
91355--- a/kernel/panic.c
91356+++ b/kernel/panic.c
91357@@ -53,7 +53,7 @@ EXPORT_SYMBOL(panic_blink);
91358 /*
91359 * Stop ourself in panic -- architecture code may override this
91360 */
91361-void __weak panic_smp_self_stop(void)
91362+void __weak __noreturn panic_smp_self_stop(void)
91363 {
91364 while (1)
91365 cpu_relax();
91366@@ -421,7 +421,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91367 disable_trace_on_warning();
91368
91369 pr_warn("------------[ cut here ]------------\n");
91370- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91371+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91372 raw_smp_processor_id(), current->pid, file, line, caller);
91373
91374 if (args)
91375@@ -475,7 +475,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91376 */
91377 __visible void __stack_chk_fail(void)
91378 {
91379- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91380+ dump_stack();
91381+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91382 __builtin_return_address(0));
91383 }
91384 EXPORT_SYMBOL(__stack_chk_fail);
91385diff --git a/kernel/pid.c b/kernel/pid.c
91386index 9b9a266..c20ef80 100644
91387--- a/kernel/pid.c
91388+++ b/kernel/pid.c
91389@@ -33,6 +33,7 @@
91390 #include <linux/rculist.h>
91391 #include <linux/bootmem.h>
91392 #include <linux/hash.h>
91393+#include <linux/security.h>
91394 #include <linux/pid_namespace.h>
91395 #include <linux/init_task.h>
91396 #include <linux/syscalls.h>
91397@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91398
91399 int pid_max = PID_MAX_DEFAULT;
91400
91401-#define RESERVED_PIDS 300
91402+#define RESERVED_PIDS 500
91403
91404 int pid_max_min = RESERVED_PIDS + 1;
91405 int pid_max_max = PID_MAX_LIMIT;
91406@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
91407 */
91408 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91409 {
91410+ struct task_struct *task;
91411+
91412 rcu_lockdep_assert(rcu_read_lock_held(),
91413 "find_task_by_pid_ns() needs rcu_read_lock()"
91414 " protection");
91415- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91416+
91417+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91418+
91419+ if (gr_pid_is_chrooted(task))
91420+ return NULL;
91421+
91422+ return task;
91423 }
91424
91425 struct task_struct *find_task_by_vpid(pid_t vnr)
91426@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91427 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91428 }
91429
91430+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91431+{
91432+ rcu_lockdep_assert(rcu_read_lock_held(),
91433+ "find_task_by_pid_ns() needs rcu_read_lock()"
91434+ " protection");
91435+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91436+}
91437+
91438 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91439 {
91440 struct pid *pid;
91441diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91442index db95d8e..a0ca23f 100644
91443--- a/kernel/pid_namespace.c
91444+++ b/kernel/pid_namespace.c
91445@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91446 void __user *buffer, size_t *lenp, loff_t *ppos)
91447 {
91448 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91449- struct ctl_table tmp = *table;
91450+ ctl_table_no_const tmp = *table;
91451
91452 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91453 return -EPERM;
91454diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91455index e4e4121..71faf14 100644
91456--- a/kernel/power/Kconfig
91457+++ b/kernel/power/Kconfig
91458@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91459 config HIBERNATION
91460 bool "Hibernation (aka 'suspend to disk')"
91461 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91462+ depends on !GRKERNSEC_KMEM
91463+ depends on !PAX_MEMORY_SANITIZE
91464 select HIBERNATE_CALLBACKS
91465 select LZO_COMPRESS
91466 select LZO_DECOMPRESS
91467diff --git a/kernel/power/process.c b/kernel/power/process.c
91468index 7a37cf3..3e4c1c8 100644
91469--- a/kernel/power/process.c
91470+++ b/kernel/power/process.c
91471@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91472 unsigned int elapsed_msecs;
91473 bool wakeup = false;
91474 int sleep_usecs = USEC_PER_MSEC;
91475+ bool timedout = false;
91476
91477 do_gettimeofday(&start);
91478
91479@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91480
91481 while (true) {
91482 todo = 0;
91483+ if (time_after(jiffies, end_time))
91484+ timedout = true;
91485 read_lock(&tasklist_lock);
91486 do_each_thread(g, p) {
91487 if (p == current || !freeze_task(p))
91488 continue;
91489
91490- if (!freezer_should_skip(p))
91491+ if (!freezer_should_skip(p)) {
91492 todo++;
91493+ if (timedout) {
91494+ printk(KERN_ERR "Task refusing to freeze:\n");
91495+ sched_show_task(p);
91496+ }
91497+ }
91498 } while_each_thread(g, p);
91499 read_unlock(&tasklist_lock);
91500
91501@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91502 todo += wq_busy;
91503 }
91504
91505- if (!todo || time_after(jiffies, end_time))
91506+ if (!todo || timedout)
91507 break;
91508
91509 if (pm_wakeup_pending()) {
91510diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91511index 1ce7706..3b07c49 100644
91512--- a/kernel/printk/printk.c
91513+++ b/kernel/printk/printk.c
91514@@ -490,6 +490,11 @@ static int check_syslog_permissions(int type, bool from_file)
91515 if (from_file && type != SYSLOG_ACTION_OPEN)
91516 return 0;
91517
91518+#ifdef CONFIG_GRKERNSEC_DMESG
91519+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91520+ return -EPERM;
91521+#endif
91522+
91523 if (syslog_action_restricted(type)) {
91524 if (capable(CAP_SYSLOG))
91525 return 0;
91526diff --git a/kernel/profile.c b/kernel/profile.c
91527index 54bf5ba..df6e0a2 100644
91528--- a/kernel/profile.c
91529+++ b/kernel/profile.c
91530@@ -37,7 +37,7 @@ struct profile_hit {
91531 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91532 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91533
91534-static atomic_t *prof_buffer;
91535+static atomic_unchecked_t *prof_buffer;
91536 static unsigned long prof_len, prof_shift;
91537
91538 int prof_on __read_mostly;
91539@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
91540 hits[i].pc = 0;
91541 continue;
91542 }
91543- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91544+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91545 hits[i].hits = hits[i].pc = 0;
91546 }
91547 }
91548@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91549 * Add the current hit(s) and flush the write-queue out
91550 * to the global buffer:
91551 */
91552- atomic_add(nr_hits, &prof_buffer[pc]);
91553+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
91554 for (i = 0; i < NR_PROFILE_HIT; ++i) {
91555- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91556+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91557 hits[i].pc = hits[i].hits = 0;
91558 }
91559 out:
91560@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91561 {
91562 unsigned long pc;
91563 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
91564- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91565+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91566 }
91567 #endif /* !CONFIG_SMP */
91568
91569@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
91570 return -EFAULT;
91571 buf++; p++; count--; read++;
91572 }
91573- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
91574+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
91575 if (copy_to_user(buf, (void *)pnt, count))
91576 return -EFAULT;
91577 read += count;
91578@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
91579 }
91580 #endif
91581 profile_discard_flip_buffers();
91582- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
91583+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
91584 return count;
91585 }
91586
91587diff --git a/kernel/ptrace.c b/kernel/ptrace.c
91588index 54e7522..5b82dd6 100644
91589--- a/kernel/ptrace.c
91590+++ b/kernel/ptrace.c
91591@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
91592 if (seize)
91593 flags |= PT_SEIZED;
91594 rcu_read_lock();
91595- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91596+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91597 flags |= PT_PTRACE_CAP;
91598 rcu_read_unlock();
91599 task->ptrace = flags;
91600@@ -532,7 +532,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
91601 break;
91602 return -EIO;
91603 }
91604- if (copy_to_user(dst, buf, retval))
91605+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
91606 return -EFAULT;
91607 copied += retval;
91608 src += retval;
91609@@ -800,7 +800,7 @@ int ptrace_request(struct task_struct *child, long request,
91610 bool seized = child->ptrace & PT_SEIZED;
91611 int ret = -EIO;
91612 siginfo_t siginfo, *si;
91613- void __user *datavp = (void __user *) data;
91614+ void __user *datavp = (__force void __user *) data;
91615 unsigned long __user *datalp = datavp;
91616 unsigned long flags;
91617
91618@@ -1046,14 +1046,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
91619 goto out;
91620 }
91621
91622+ if (gr_handle_ptrace(child, request)) {
91623+ ret = -EPERM;
91624+ goto out_put_task_struct;
91625+ }
91626+
91627 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91628 ret = ptrace_attach(child, request, addr, data);
91629 /*
91630 * Some architectures need to do book-keeping after
91631 * a ptrace attach.
91632 */
91633- if (!ret)
91634+ if (!ret) {
91635 arch_ptrace_attach(child);
91636+ gr_audit_ptrace(child);
91637+ }
91638 goto out_put_task_struct;
91639 }
91640
91641@@ -1081,7 +1088,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
91642 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
91643 if (copied != sizeof(tmp))
91644 return -EIO;
91645- return put_user(tmp, (unsigned long __user *)data);
91646+ return put_user(tmp, (__force unsigned long __user *)data);
91647 }
91648
91649 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
91650@@ -1175,7 +1182,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
91651 }
91652
91653 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91654- compat_long_t, addr, compat_long_t, data)
91655+ compat_ulong_t, addr, compat_ulong_t, data)
91656 {
91657 struct task_struct *child;
91658 long ret;
91659@@ -1191,14 +1198,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91660 goto out;
91661 }
91662
91663+ if (gr_handle_ptrace(child, request)) {
91664+ ret = -EPERM;
91665+ goto out_put_task_struct;
91666+ }
91667+
91668 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91669 ret = ptrace_attach(child, request, addr, data);
91670 /*
91671 * Some architectures need to do book-keeping after
91672 * a ptrace attach.
91673 */
91674- if (!ret)
91675+ if (!ret) {
91676 arch_ptrace_attach(child);
91677+ gr_audit_ptrace(child);
91678+ }
91679 goto out_put_task_struct;
91680 }
91681
91682diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
91683index 948a769..5ca842b 100644
91684--- a/kernel/rcu/rcutorture.c
91685+++ b/kernel/rcu/rcutorture.c
91686@@ -124,12 +124,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91687 rcu_torture_count) = { 0 };
91688 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91689 rcu_torture_batch) = { 0 };
91690-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91691-static atomic_t n_rcu_torture_alloc;
91692-static atomic_t n_rcu_torture_alloc_fail;
91693-static atomic_t n_rcu_torture_free;
91694-static atomic_t n_rcu_torture_mberror;
91695-static atomic_t n_rcu_torture_error;
91696+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91697+static atomic_unchecked_t n_rcu_torture_alloc;
91698+static atomic_unchecked_t n_rcu_torture_alloc_fail;
91699+static atomic_unchecked_t n_rcu_torture_free;
91700+static atomic_unchecked_t n_rcu_torture_mberror;
91701+static atomic_unchecked_t n_rcu_torture_error;
91702 static long n_rcu_torture_barrier_error;
91703 static long n_rcu_torture_boost_ktrerror;
91704 static long n_rcu_torture_boost_rterror;
91705@@ -200,11 +200,11 @@ rcu_torture_alloc(void)
91706
91707 spin_lock_bh(&rcu_torture_lock);
91708 if (list_empty(&rcu_torture_freelist)) {
91709- atomic_inc(&n_rcu_torture_alloc_fail);
91710+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
91711 spin_unlock_bh(&rcu_torture_lock);
91712 return NULL;
91713 }
91714- atomic_inc(&n_rcu_torture_alloc);
91715+ atomic_inc_unchecked(&n_rcu_torture_alloc);
91716 p = rcu_torture_freelist.next;
91717 list_del_init(p);
91718 spin_unlock_bh(&rcu_torture_lock);
91719@@ -217,7 +217,7 @@ rcu_torture_alloc(void)
91720 static void
91721 rcu_torture_free(struct rcu_torture *p)
91722 {
91723- atomic_inc(&n_rcu_torture_free);
91724+ atomic_inc_unchecked(&n_rcu_torture_free);
91725 spin_lock_bh(&rcu_torture_lock);
91726 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
91727 spin_unlock_bh(&rcu_torture_lock);
91728@@ -301,7 +301,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
91729 i = rp->rtort_pipe_count;
91730 if (i > RCU_TORTURE_PIPE_LEN)
91731 i = RCU_TORTURE_PIPE_LEN;
91732- atomic_inc(&rcu_torture_wcount[i]);
91733+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91734 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
91735 rp->rtort_mbtest = 0;
91736 return true;
91737@@ -808,7 +808,7 @@ rcu_torture_writer(void *arg)
91738 i = old_rp->rtort_pipe_count;
91739 if (i > RCU_TORTURE_PIPE_LEN)
91740 i = RCU_TORTURE_PIPE_LEN;
91741- atomic_inc(&rcu_torture_wcount[i]);
91742+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91743 old_rp->rtort_pipe_count++;
91744 switch (synctype[torture_random(&rand) % nsynctypes]) {
91745 case RTWS_DEF_FREE:
91746@@ -926,7 +926,7 @@ static void rcu_torture_timer(unsigned long unused)
91747 return;
91748 }
91749 if (p->rtort_mbtest == 0)
91750- atomic_inc(&n_rcu_torture_mberror);
91751+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91752 spin_lock(&rand_lock);
91753 cur_ops->read_delay(&rand);
91754 n_rcu_torture_timers++;
91755@@ -996,7 +996,7 @@ rcu_torture_reader(void *arg)
91756 continue;
91757 }
91758 if (p->rtort_mbtest == 0)
91759- atomic_inc(&n_rcu_torture_mberror);
91760+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91761 cur_ops->read_delay(&rand);
91762 preempt_disable();
91763 pipe_count = p->rtort_pipe_count;
91764@@ -1054,15 +1054,15 @@ rcu_torture_printk(char *page)
91765 }
91766 page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
91767 page += sprintf(page,
91768- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
91769+ "rtc: %pP ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
91770 rcu_torture_current,
91771 rcu_torture_current_version,
91772 list_empty(&rcu_torture_freelist),
91773- atomic_read(&n_rcu_torture_alloc),
91774- atomic_read(&n_rcu_torture_alloc_fail),
91775- atomic_read(&n_rcu_torture_free));
91776+ atomic_read_unchecked(&n_rcu_torture_alloc),
91777+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
91778+ atomic_read_unchecked(&n_rcu_torture_free));
91779 page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
91780- atomic_read(&n_rcu_torture_mberror),
91781+ atomic_read_unchecked(&n_rcu_torture_mberror),
91782 n_rcu_torture_boost_ktrerror,
91783 n_rcu_torture_boost_rterror);
91784 page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
91785@@ -1075,14 +1075,14 @@ rcu_torture_printk(char *page)
91786 n_barrier_attempts,
91787 n_rcu_torture_barrier_error);
91788 page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
91789- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
91790+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
91791 n_rcu_torture_barrier_error != 0 ||
91792 n_rcu_torture_boost_ktrerror != 0 ||
91793 n_rcu_torture_boost_rterror != 0 ||
91794 n_rcu_torture_boost_failure != 0 ||
91795 i > 1) {
91796 page += sprintf(page, "!!! ");
91797- atomic_inc(&n_rcu_torture_error);
91798+ atomic_inc_unchecked(&n_rcu_torture_error);
91799 WARN_ON_ONCE(1);
91800 }
91801 page += sprintf(page, "Reader Pipe: ");
91802@@ -1096,7 +1096,7 @@ rcu_torture_printk(char *page)
91803 page += sprintf(page, "Free-Block Circulation: ");
91804 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91805 page += sprintf(page, " %d",
91806- atomic_read(&rcu_torture_wcount[i]));
91807+ atomic_read_unchecked(&rcu_torture_wcount[i]));
91808 }
91809 page += sprintf(page, "\n");
91810 if (cur_ops->stats)
91811@@ -1461,7 +1461,7 @@ rcu_torture_cleanup(void)
91812
91813 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
91814
91815- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91816+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91817 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
91818 else if (torture_onoff_failures())
91819 rcu_torture_print_module_parms(cur_ops,
91820@@ -1584,18 +1584,18 @@ rcu_torture_init(void)
91821
91822 rcu_torture_current = NULL;
91823 rcu_torture_current_version = 0;
91824- atomic_set(&n_rcu_torture_alloc, 0);
91825- atomic_set(&n_rcu_torture_alloc_fail, 0);
91826- atomic_set(&n_rcu_torture_free, 0);
91827- atomic_set(&n_rcu_torture_mberror, 0);
91828- atomic_set(&n_rcu_torture_error, 0);
91829+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
91830+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
91831+ atomic_set_unchecked(&n_rcu_torture_free, 0);
91832+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
91833+ atomic_set_unchecked(&n_rcu_torture_error, 0);
91834 n_rcu_torture_barrier_error = 0;
91835 n_rcu_torture_boost_ktrerror = 0;
91836 n_rcu_torture_boost_rterror = 0;
91837 n_rcu_torture_boost_failure = 0;
91838 n_rcu_torture_boosts = 0;
91839 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
91840- atomic_set(&rcu_torture_wcount[i], 0);
91841+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
91842 for_each_possible_cpu(cpu) {
91843 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91844 per_cpu(rcu_torture_count, cpu)[i] = 0;
91845diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
91846index d9efcc1..ea543e9 100644
91847--- a/kernel/rcu/tiny.c
91848+++ b/kernel/rcu/tiny.c
91849@@ -42,7 +42,7 @@
91850 /* Forward declarations for tiny_plugin.h. */
91851 struct rcu_ctrlblk;
91852 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
91853-static void rcu_process_callbacks(struct softirq_action *unused);
91854+static void rcu_process_callbacks(void);
91855 static void __call_rcu(struct rcu_head *head,
91856 void (*func)(struct rcu_head *rcu),
91857 struct rcu_ctrlblk *rcp);
91858@@ -308,7 +308,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
91859 false));
91860 }
91861
91862-static void rcu_process_callbacks(struct softirq_action *unused)
91863+static __latent_entropy void rcu_process_callbacks(void)
91864 {
91865 __rcu_process_callbacks(&rcu_sched_ctrlblk);
91866 __rcu_process_callbacks(&rcu_bh_ctrlblk);
91867diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
91868index 858c565..7efd915 100644
91869--- a/kernel/rcu/tiny_plugin.h
91870+++ b/kernel/rcu/tiny_plugin.h
91871@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
91872 dump_stack();
91873 }
91874 if (*rcp->curtail && ULONG_CMP_GE(j, js))
91875- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
91876+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
91877 3 * rcu_jiffies_till_stall_check() + 3;
91878 else if (ULONG_CMP_GE(j, js))
91879- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91880+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91881 }
91882
91883 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
91884 {
91885 rcp->ticks_this_gp = 0;
91886 rcp->gp_start = jiffies;
91887- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91888+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91889 }
91890
91891 static void check_cpu_stalls(void)
91892diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
91893index 89a404a..f42a019 100644
91894--- a/kernel/rcu/tree.c
91895+++ b/kernel/rcu/tree.c
91896@@ -263,7 +263,7 @@ static void rcu_momentary_dyntick_idle(void)
91897 */
91898 rdtp = this_cpu_ptr(&rcu_dynticks);
91899 smp_mb__before_atomic(); /* Earlier stuff before QS. */
91900- atomic_add(2, &rdtp->dynticks); /* QS. */
91901+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
91902 smp_mb__after_atomic(); /* Later stuff after QS. */
91903 break;
91904 }
91905@@ -523,9 +523,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
91906 rcu_prepare_for_idle(smp_processor_id());
91907 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91908 smp_mb__before_atomic(); /* See above. */
91909- atomic_inc(&rdtp->dynticks);
91910+ atomic_inc_unchecked(&rdtp->dynticks);
91911 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
91912- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91913+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91914
91915 /*
91916 * It is illegal to enter an extended quiescent state while
91917@@ -643,10 +643,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
91918 int user)
91919 {
91920 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
91921- atomic_inc(&rdtp->dynticks);
91922+ atomic_inc_unchecked(&rdtp->dynticks);
91923 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91924 smp_mb__after_atomic(); /* See above. */
91925- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91926+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91927 rcu_cleanup_after_idle(smp_processor_id());
91928 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
91929 if (!user && !is_idle_task(current)) {
91930@@ -767,14 +767,14 @@ void rcu_nmi_enter(void)
91931 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
91932
91933 if (rdtp->dynticks_nmi_nesting == 0 &&
91934- (atomic_read(&rdtp->dynticks) & 0x1))
91935+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
91936 return;
91937 rdtp->dynticks_nmi_nesting++;
91938 smp_mb__before_atomic(); /* Force delay from prior write. */
91939- atomic_inc(&rdtp->dynticks);
91940+ atomic_inc_unchecked(&rdtp->dynticks);
91941 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91942 smp_mb__after_atomic(); /* See above. */
91943- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91944+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91945 }
91946
91947 /**
91948@@ -793,9 +793,9 @@ void rcu_nmi_exit(void)
91949 return;
91950 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91951 smp_mb__before_atomic(); /* See above. */
91952- atomic_inc(&rdtp->dynticks);
91953+ atomic_inc_unchecked(&rdtp->dynticks);
91954 smp_mb__after_atomic(); /* Force delay to next write. */
91955- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91956+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91957 }
91958
91959 /**
91960@@ -808,7 +808,7 @@ void rcu_nmi_exit(void)
91961 */
91962 bool notrace __rcu_is_watching(void)
91963 {
91964- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91965+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91966 }
91967
91968 /**
91969@@ -891,7 +891,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
91970 static int dyntick_save_progress_counter(struct rcu_data *rdp,
91971 bool *isidle, unsigned long *maxj)
91972 {
91973- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
91974+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91975 rcu_sysidle_check_cpu(rdp, isidle, maxj);
91976 if ((rdp->dynticks_snap & 0x1) == 0) {
91977 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
91978@@ -920,7 +920,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91979 int *rcrmp;
91980 unsigned int snap;
91981
91982- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
91983+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91984 snap = (unsigned int)rdp->dynticks_snap;
91985
91986 /*
91987@@ -983,10 +983,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91988 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
91989 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
91990 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
91991- ACCESS_ONCE(rdp->cond_resched_completed) =
91992+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
91993 ACCESS_ONCE(rdp->mynode->completed);
91994 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
91995- ACCESS_ONCE(*rcrmp) =
91996+ ACCESS_ONCE_RW(*rcrmp) =
91997 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
91998 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
91999 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
92000@@ -1008,7 +1008,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
92001 rsp->gp_start = j;
92002 smp_wmb(); /* Record start time before stall time. */
92003 j1 = rcu_jiffies_till_stall_check();
92004- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
92005+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
92006 rsp->jiffies_resched = j + j1 / 2;
92007 }
92008
92009@@ -1049,7 +1049,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
92010 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92011 return;
92012 }
92013- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92014+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92015 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92016
92017 /*
92018@@ -1126,7 +1126,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
92019
92020 raw_spin_lock_irqsave(&rnp->lock, flags);
92021 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
92022- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
92023+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
92024 3 * rcu_jiffies_till_stall_check() + 3;
92025 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92026
92027@@ -1210,7 +1210,7 @@ void rcu_cpu_stall_reset(void)
92028 struct rcu_state *rsp;
92029
92030 for_each_rcu_flavor(rsp)
92031- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92032+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92033 }
92034
92035 /*
92036@@ -1596,7 +1596,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92037 raw_spin_unlock_irq(&rnp->lock);
92038 return 0;
92039 }
92040- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92041+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92042
92043 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
92044 /*
92045@@ -1637,9 +1637,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
92046 rdp = this_cpu_ptr(rsp->rda);
92047 rcu_preempt_check_blocked_tasks(rnp);
92048 rnp->qsmask = rnp->qsmaskinit;
92049- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
92050+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
92051 WARN_ON_ONCE(rnp->completed != rsp->completed);
92052- ACCESS_ONCE(rnp->completed) = rsp->completed;
92053+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
92054 if (rnp == rdp->mynode)
92055 (void)__note_gp_changes(rsp, rnp, rdp);
92056 rcu_preempt_boost_start_gp(rnp);
92057@@ -1684,7 +1684,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
92058 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
92059 raw_spin_lock_irq(&rnp->lock);
92060 smp_mb__after_unlock_lock();
92061- ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
92062+ ACCESS_ONCE_RW(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
92063 raw_spin_unlock_irq(&rnp->lock);
92064 }
92065 return fqs_state;
92066@@ -1729,7 +1729,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92067 rcu_for_each_node_breadth_first(rsp, rnp) {
92068 raw_spin_lock_irq(&rnp->lock);
92069 smp_mb__after_unlock_lock();
92070- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
92071+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
92072 rdp = this_cpu_ptr(rsp->rda);
92073 if (rnp == rdp->mynode)
92074 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
92075@@ -1744,14 +1744,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92076 rcu_nocb_gp_set(rnp, nocb);
92077
92078 /* Declare grace period done. */
92079- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
92080+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
92081 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
92082 rsp->fqs_state = RCU_GP_IDLE;
92083 rdp = this_cpu_ptr(rsp->rda);
92084 /* Advance CBs to reduce false positives below. */
92085 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
92086 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
92087- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92088+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92089 trace_rcu_grace_period(rsp->name,
92090 ACCESS_ONCE(rsp->gpnum),
92091 TPS("newreq"));
92092@@ -1876,7 +1876,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
92093 */
92094 return false;
92095 }
92096- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92097+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92098 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
92099 TPS("newreq"));
92100
92101@@ -2097,7 +2097,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
92102 rsp->qlen += rdp->qlen;
92103 rdp->n_cbs_orphaned += rdp->qlen;
92104 rdp->qlen_lazy = 0;
92105- ACCESS_ONCE(rdp->qlen) = 0;
92106+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92107 }
92108
92109 /*
92110@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
92111 }
92112 smp_mb(); /* List handling before counting for rcu_barrier(). */
92113 rdp->qlen_lazy -= count_lazy;
92114- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
92115+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
92116 rdp->n_cbs_invoked += count;
92117
92118 /* Reinstate batch limit if we have worked down the excess. */
92119@@ -2505,7 +2505,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
92120 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92121 return; /* Someone beat us to it. */
92122 }
92123- ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
92124+ ACCESS_ONCE_RW(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
92125 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92126 rcu_gp_kthread_wake(rsp);
92127 }
92128@@ -2550,7 +2550,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
92129 /*
92130 * Do RCU core processing for the current CPU.
92131 */
92132-static void rcu_process_callbacks(struct softirq_action *unused)
92133+static void rcu_process_callbacks(void)
92134 {
92135 struct rcu_state *rsp;
92136
92137@@ -2662,7 +2662,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92138 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
92139 if (debug_rcu_head_queue(head)) {
92140 /* Probable double call_rcu(), so leak the callback. */
92141- ACCESS_ONCE(head->func) = rcu_leak_callback;
92142+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
92143 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
92144 return;
92145 }
92146@@ -2690,7 +2690,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92147 local_irq_restore(flags);
92148 return;
92149 }
92150- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
92151+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
92152 if (lazy)
92153 rdp->qlen_lazy++;
92154 else
92155@@ -2965,11 +2965,11 @@ void synchronize_sched_expedited(void)
92156 * counter wrap on a 32-bit system. Quite a few more CPUs would of
92157 * course be required on a 64-bit system.
92158 */
92159- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
92160+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
92161 (ulong)atomic_long_read(&rsp->expedited_done) +
92162 ULONG_MAX / 8)) {
92163 synchronize_sched();
92164- atomic_long_inc(&rsp->expedited_wrap);
92165+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
92166 return;
92167 }
92168
92169@@ -2977,7 +2977,7 @@ void synchronize_sched_expedited(void)
92170 * Take a ticket. Note that atomic_inc_return() implies a
92171 * full memory barrier.
92172 */
92173- snap = atomic_long_inc_return(&rsp->expedited_start);
92174+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
92175 firstsnap = snap;
92176 get_online_cpus();
92177 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
92178@@ -2990,14 +2990,14 @@ void synchronize_sched_expedited(void)
92179 synchronize_sched_expedited_cpu_stop,
92180 NULL) == -EAGAIN) {
92181 put_online_cpus();
92182- atomic_long_inc(&rsp->expedited_tryfail);
92183+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
92184
92185 /* Check to see if someone else did our work for us. */
92186 s = atomic_long_read(&rsp->expedited_done);
92187 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92188 /* ensure test happens before caller kfree */
92189 smp_mb__before_atomic(); /* ^^^ */
92190- atomic_long_inc(&rsp->expedited_workdone1);
92191+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
92192 return;
92193 }
92194
92195@@ -3006,7 +3006,7 @@ void synchronize_sched_expedited(void)
92196 udelay(trycount * num_online_cpus());
92197 } else {
92198 wait_rcu_gp(call_rcu_sched);
92199- atomic_long_inc(&rsp->expedited_normal);
92200+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92201 return;
92202 }
92203
92204@@ -3015,7 +3015,7 @@ void synchronize_sched_expedited(void)
92205 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92206 /* ensure test happens before caller kfree */
92207 smp_mb__before_atomic(); /* ^^^ */
92208- atomic_long_inc(&rsp->expedited_workdone2);
92209+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92210 return;
92211 }
92212
92213@@ -3027,10 +3027,10 @@ void synchronize_sched_expedited(void)
92214 * period works for us.
92215 */
92216 get_online_cpus();
92217- snap = atomic_long_read(&rsp->expedited_start);
92218+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92219 smp_mb(); /* ensure read is before try_stop_cpus(). */
92220 }
92221- atomic_long_inc(&rsp->expedited_stoppedcpus);
92222+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92223
92224 /*
92225 * Everyone up to our most recent fetch is covered by our grace
92226@@ -3039,16 +3039,16 @@ void synchronize_sched_expedited(void)
92227 * than we did already did their update.
92228 */
92229 do {
92230- atomic_long_inc(&rsp->expedited_done_tries);
92231+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92232 s = atomic_long_read(&rsp->expedited_done);
92233 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92234 /* ensure test happens before caller kfree */
92235 smp_mb__before_atomic(); /* ^^^ */
92236- atomic_long_inc(&rsp->expedited_done_lost);
92237+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92238 break;
92239 }
92240 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92241- atomic_long_inc(&rsp->expedited_done_exit);
92242+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92243
92244 put_online_cpus();
92245 }
92246@@ -3254,7 +3254,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92247 * ACCESS_ONCE() to prevent the compiler from speculating
92248 * the increment to precede the early-exit check.
92249 */
92250- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92251+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92252 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92253 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92254 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92255@@ -3304,7 +3304,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92256
92257 /* Increment ->n_barrier_done to prevent duplicate work. */
92258 smp_mb(); /* Keep increment after above mechanism. */
92259- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92260+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92261 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92262 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92263 smp_mb(); /* Keep increment before caller's subsequent code. */
92264@@ -3349,10 +3349,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92265 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92266 init_callback_list(rdp);
92267 rdp->qlen_lazy = 0;
92268- ACCESS_ONCE(rdp->qlen) = 0;
92269+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92270 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92271 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92272- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92273+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92274 rdp->cpu = cpu;
92275 rdp->rsp = rsp;
92276 rcu_boot_init_nocb_percpu_data(rdp);
92277@@ -3385,8 +3385,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92278 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92279 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92280 rcu_sysidle_init_percpu_data(rdp->dynticks);
92281- atomic_set(&rdp->dynticks->dynticks,
92282- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92283+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92284+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92285 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92286
92287 /* Add CPU to rcu_node bitmasks. */
92288diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92289index 6a86eb7..022b506 100644
92290--- a/kernel/rcu/tree.h
92291+++ b/kernel/rcu/tree.h
92292@@ -87,11 +87,11 @@ struct rcu_dynticks {
92293 long long dynticks_nesting; /* Track irq/process nesting level. */
92294 /* Process level is worth LLONG_MAX/2. */
92295 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92296- atomic_t dynticks; /* Even value for idle, else odd. */
92297+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92298 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92299 long long dynticks_idle_nesting;
92300 /* irq/process nesting level from idle. */
92301- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92302+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92303 /* "Idle" excludes userspace execution. */
92304 unsigned long dynticks_idle_jiffies;
92305 /* End of last non-NMI non-idle period. */
92306@@ -461,17 +461,17 @@ struct rcu_state {
92307 /* _rcu_barrier(). */
92308 /* End of fields guarded by barrier_mutex. */
92309
92310- atomic_long_t expedited_start; /* Starting ticket. */
92311- atomic_long_t expedited_done; /* Done ticket. */
92312- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92313- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92314- atomic_long_t expedited_workdone1; /* # done by others #1. */
92315- atomic_long_t expedited_workdone2; /* # done by others #2. */
92316- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92317- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92318- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92319- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92320- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92321+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92322+ atomic_long_t expedited_done; /* Done ticket. */
92323+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92324+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92325+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92326+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92327+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92328+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92329+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92330+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92331+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92332
92333 unsigned long jiffies_force_qs; /* Time at which to invoke */
92334 /* force_quiescent_state(). */
92335diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92336index a7997e2..9787c9e 100644
92337--- a/kernel/rcu/tree_plugin.h
92338+++ b/kernel/rcu/tree_plugin.h
92339@@ -735,7 +735,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92340 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92341 {
92342 return !rcu_preempted_readers_exp(rnp) &&
92343- ACCESS_ONCE(rnp->expmask) == 0;
92344+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92345 }
92346
92347 /*
92348@@ -897,7 +897,7 @@ void synchronize_rcu_expedited(void)
92349
92350 /* Clean up and exit. */
92351 smp_mb(); /* ensure expedited GP seen before counter increment. */
92352- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
92353+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
92354 unlock_mb_ret:
92355 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92356 mb_ret:
92357@@ -1452,7 +1452,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92358 free_cpumask_var(cm);
92359 }
92360
92361-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92362+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92363 .store = &rcu_cpu_kthread_task,
92364 .thread_should_run = rcu_cpu_kthread_should_run,
92365 .thread_fn = rcu_cpu_kthread,
92366@@ -1932,7 +1932,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92367 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92368 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
92369 cpu, ticks_value, ticks_title,
92370- atomic_read(&rdtp->dynticks) & 0xfff,
92371+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92372 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92373 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92374 fast_no_hz);
92375@@ -2076,7 +2076,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92376 return;
92377 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92378 /* Prior xchg orders against prior callback enqueue. */
92379- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92380+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92381 wake_up(&rdp_leader->nocb_wq);
92382 }
92383 }
92384@@ -2101,7 +2101,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92385
92386 /* Enqueue the callback on the nocb list and update counts. */
92387 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92388- ACCESS_ONCE(*old_rhpp) = rhp;
92389+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92390 atomic_long_add(rhcount, &rdp->nocb_q_count);
92391 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92392
92393@@ -2272,7 +2272,7 @@ wait_again:
92394 continue; /* No CBs here, try next follower. */
92395
92396 /* Move callbacks to wait-for-GP list, which is empty. */
92397- ACCESS_ONCE(rdp->nocb_head) = NULL;
92398+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92399 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92400 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
92401 rdp->nocb_gp_count_lazy =
92402@@ -2398,7 +2398,7 @@ static int rcu_nocb_kthread(void *arg)
92403 list = ACCESS_ONCE(rdp->nocb_follower_head);
92404 BUG_ON(!list);
92405 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92406- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92407+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
92408 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
92409 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
92410 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
92411@@ -2428,8 +2428,8 @@ static int rcu_nocb_kthread(void *arg)
92412 list = next;
92413 }
92414 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
92415- ACCESS_ONCE(rdp->nocb_p_count) -= c;
92416- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
92417+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
92418+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
92419 rdp->n_nocbs_invoked += c;
92420 }
92421 return 0;
92422@@ -2446,7 +2446,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
92423 {
92424 if (!rcu_nocb_need_deferred_wakeup(rdp))
92425 return;
92426- ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
92427+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = false;
92428 wake_nocb_leader(rdp, false);
92429 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
92430 }
92431@@ -2510,7 +2510,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
92432 t = kthread_run(rcu_nocb_kthread, rdp,
92433 "rcuo%c/%d", rsp->abbr, cpu);
92434 BUG_ON(IS_ERR(t));
92435- ACCESS_ONCE(rdp->nocb_kthread) = t;
92436+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
92437 }
92438 }
92439
92440@@ -2641,11 +2641,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
92441
92442 /* Record start of fully idle period. */
92443 j = jiffies;
92444- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
92445+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
92446 smp_mb__before_atomic();
92447- atomic_inc(&rdtp->dynticks_idle);
92448+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92449 smp_mb__after_atomic();
92450- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
92451+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
92452 }
92453
92454 /*
92455@@ -2710,9 +2710,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
92456
92457 /* Record end of idle period. */
92458 smp_mb__before_atomic();
92459- atomic_inc(&rdtp->dynticks_idle);
92460+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92461 smp_mb__after_atomic();
92462- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
92463+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
92464
92465 /*
92466 * If we are the timekeeping CPU, we are permitted to be non-idle
92467@@ -2753,7 +2753,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
92468 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
92469
92470 /* Pick up current idle and NMI-nesting counter and check. */
92471- cur = atomic_read(&rdtp->dynticks_idle);
92472+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
92473 if (cur & 0x1) {
92474 *isidle = false; /* We are not idle! */
92475 return;
92476@@ -2802,7 +2802,7 @@ static void rcu_sysidle(unsigned long j)
92477 case RCU_SYSIDLE_NOT:
92478
92479 /* First time all are idle, so note a short idle period. */
92480- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92481+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92482 break;
92483
92484 case RCU_SYSIDLE_SHORT:
92485@@ -2840,7 +2840,7 @@ static void rcu_sysidle_cancel(void)
92486 {
92487 smp_mb();
92488 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
92489- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
92490+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
92491 }
92492
92493 /*
92494@@ -2888,7 +2888,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
92495 smp_mb(); /* grace period precedes setting inuse. */
92496
92497 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
92498- ACCESS_ONCE(rshp->inuse) = 0;
92499+ ACCESS_ONCE_RW(rshp->inuse) = 0;
92500 }
92501
92502 /*
92503diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
92504index 5cdc62e..cc52e88 100644
92505--- a/kernel/rcu/tree_trace.c
92506+++ b/kernel/rcu/tree_trace.c
92507@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
92508 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
92509 rdp->passed_quiesce, rdp->qs_pending);
92510 seq_printf(m, " dt=%d/%llx/%d df=%lu",
92511- atomic_read(&rdp->dynticks->dynticks),
92512+ atomic_read_unchecked(&rdp->dynticks->dynticks),
92513 rdp->dynticks->dynticks_nesting,
92514 rdp->dynticks->dynticks_nmi_nesting,
92515 rdp->dynticks_fqs);
92516@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
92517 struct rcu_state *rsp = (struct rcu_state *)m->private;
92518
92519 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
92520- atomic_long_read(&rsp->expedited_start),
92521+ atomic_long_read_unchecked(&rsp->expedited_start),
92522 atomic_long_read(&rsp->expedited_done),
92523- atomic_long_read(&rsp->expedited_wrap),
92524- atomic_long_read(&rsp->expedited_tryfail),
92525- atomic_long_read(&rsp->expedited_workdone1),
92526- atomic_long_read(&rsp->expedited_workdone2),
92527- atomic_long_read(&rsp->expedited_normal),
92528- atomic_long_read(&rsp->expedited_stoppedcpus),
92529- atomic_long_read(&rsp->expedited_done_tries),
92530- atomic_long_read(&rsp->expedited_done_lost),
92531- atomic_long_read(&rsp->expedited_done_exit));
92532+ atomic_long_read_unchecked(&rsp->expedited_wrap),
92533+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
92534+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
92535+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
92536+ atomic_long_read_unchecked(&rsp->expedited_normal),
92537+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
92538+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
92539+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
92540+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
92541 return 0;
92542 }
92543
92544diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
92545index 4056d79..c11741a 100644
92546--- a/kernel/rcu/update.c
92547+++ b/kernel/rcu/update.c
92548@@ -308,10 +308,10 @@ int rcu_jiffies_till_stall_check(void)
92549 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
92550 */
92551 if (till_stall_check < 3) {
92552- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
92553+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
92554 till_stall_check = 3;
92555 } else if (till_stall_check > 300) {
92556- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
92557+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
92558 till_stall_check = 300;
92559 }
92560 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
92561diff --git a/kernel/resource.c b/kernel/resource.c
92562index 60c5a38..ed77193 100644
92563--- a/kernel/resource.c
92564+++ b/kernel/resource.c
92565@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
92566
92567 static int __init ioresources_init(void)
92568 {
92569+#ifdef CONFIG_GRKERNSEC_PROC_ADD
92570+#ifdef CONFIG_GRKERNSEC_PROC_USER
92571+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
92572+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
92573+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92574+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
92575+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
92576+#endif
92577+#else
92578 proc_create("ioports", 0, NULL, &proc_ioports_operations);
92579 proc_create("iomem", 0, NULL, &proc_iomem_operations);
92580+#endif
92581 return 0;
92582 }
92583 __initcall(ioresources_init);
92584diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
92585index e73efba..c9bfbd4 100644
92586--- a/kernel/sched/auto_group.c
92587+++ b/kernel/sched/auto_group.c
92588@@ -11,7 +11,7 @@
92589
92590 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
92591 static struct autogroup autogroup_default;
92592-static atomic_t autogroup_seq_nr;
92593+static atomic_unchecked_t autogroup_seq_nr;
92594
92595 void __init autogroup_init(struct task_struct *init_task)
92596 {
92597@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
92598
92599 kref_init(&ag->kref);
92600 init_rwsem(&ag->lock);
92601- ag->id = atomic_inc_return(&autogroup_seq_nr);
92602+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
92603 ag->tg = tg;
92604 #ifdef CONFIG_RT_GROUP_SCHED
92605 /*
92606diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
92607index a63f4dc..349bbb0 100644
92608--- a/kernel/sched/completion.c
92609+++ b/kernel/sched/completion.c
92610@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
92611 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92612 * or number of jiffies left till timeout) if completed.
92613 */
92614-long __sched
92615+long __sched __intentional_overflow(-1)
92616 wait_for_completion_interruptible_timeout(struct completion *x,
92617 unsigned long timeout)
92618 {
92619@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
92620 *
92621 * Return: -ERESTARTSYS if interrupted, 0 if completed.
92622 */
92623-int __sched wait_for_completion_killable(struct completion *x)
92624+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
92625 {
92626 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
92627 if (t == -ERESTARTSYS)
92628@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
92629 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92630 * or number of jiffies left till timeout) if completed.
92631 */
92632-long __sched
92633+long __sched __intentional_overflow(-1)
92634 wait_for_completion_killable_timeout(struct completion *x,
92635 unsigned long timeout)
92636 {
92637diff --git a/kernel/sched/core.c b/kernel/sched/core.c
92638index 6d7cb91..420f2d2 100644
92639--- a/kernel/sched/core.c
92640+++ b/kernel/sched/core.c
92641@@ -1857,7 +1857,7 @@ void set_numabalancing_state(bool enabled)
92642 int sysctl_numa_balancing(struct ctl_table *table, int write,
92643 void __user *buffer, size_t *lenp, loff_t *ppos)
92644 {
92645- struct ctl_table t;
92646+ ctl_table_no_const t;
92647 int err;
92648 int state = numabalancing_enabled;
92649
92650@@ -2324,8 +2324,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
92651 next->active_mm = oldmm;
92652 atomic_inc(&oldmm->mm_count);
92653 enter_lazy_tlb(oldmm, next);
92654- } else
92655+ } else {
92656 switch_mm(oldmm, mm, next);
92657+ populate_stack();
92658+ }
92659
92660 if (!prev->mm) {
92661 prev->active_mm = NULL;
92662@@ -3107,6 +3109,8 @@ int can_nice(const struct task_struct *p, const int nice)
92663 /* convert nice value [19,-20] to rlimit style value [1,40] */
92664 int nice_rlim = nice_to_rlimit(nice);
92665
92666+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
92667+
92668 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
92669 capable(CAP_SYS_NICE));
92670 }
92671@@ -3133,7 +3137,8 @@ SYSCALL_DEFINE1(nice, int, increment)
92672 nice = task_nice(current) + increment;
92673
92674 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
92675- if (increment < 0 && !can_nice(current, nice))
92676+ if (increment < 0 && (!can_nice(current, nice) ||
92677+ gr_handle_chroot_nice()))
92678 return -EPERM;
92679
92680 retval = security_task_setnice(current, nice);
92681@@ -3412,6 +3417,7 @@ recheck:
92682 if (policy != p->policy && !rlim_rtprio)
92683 return -EPERM;
92684
92685+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
92686 /* can't increase priority */
92687 if (attr->sched_priority > p->rt_priority &&
92688 attr->sched_priority > rlim_rtprio)
92689@@ -4802,6 +4808,7 @@ void idle_task_exit(void)
92690
92691 if (mm != &init_mm) {
92692 switch_mm(mm, &init_mm, current);
92693+ populate_stack();
92694 finish_arch_post_lock_switch();
92695 }
92696 mmdrop(mm);
92697@@ -4897,7 +4904,7 @@ static void migrate_tasks(unsigned int dead_cpu)
92698
92699 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
92700
92701-static struct ctl_table sd_ctl_dir[] = {
92702+static ctl_table_no_const sd_ctl_dir[] __read_only = {
92703 {
92704 .procname = "sched_domain",
92705 .mode = 0555,
92706@@ -4914,17 +4921,17 @@ static struct ctl_table sd_ctl_root[] = {
92707 {}
92708 };
92709
92710-static struct ctl_table *sd_alloc_ctl_entry(int n)
92711+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
92712 {
92713- struct ctl_table *entry =
92714+ ctl_table_no_const *entry =
92715 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
92716
92717 return entry;
92718 }
92719
92720-static void sd_free_ctl_entry(struct ctl_table **tablep)
92721+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
92722 {
92723- struct ctl_table *entry;
92724+ ctl_table_no_const *entry;
92725
92726 /*
92727 * In the intermediate directories, both the child directory and
92728@@ -4932,22 +4939,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
92729 * will always be set. In the lowest directory the names are
92730 * static strings and all have proc handlers.
92731 */
92732- for (entry = *tablep; entry->mode; entry++) {
92733- if (entry->child)
92734- sd_free_ctl_entry(&entry->child);
92735+ for (entry = tablep; entry->mode; entry++) {
92736+ if (entry->child) {
92737+ sd_free_ctl_entry(entry->child);
92738+ pax_open_kernel();
92739+ entry->child = NULL;
92740+ pax_close_kernel();
92741+ }
92742 if (entry->proc_handler == NULL)
92743 kfree(entry->procname);
92744 }
92745
92746- kfree(*tablep);
92747- *tablep = NULL;
92748+ kfree(tablep);
92749 }
92750
92751 static int min_load_idx = 0;
92752 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
92753
92754 static void
92755-set_table_entry(struct ctl_table *entry,
92756+set_table_entry(ctl_table_no_const *entry,
92757 const char *procname, void *data, int maxlen,
92758 umode_t mode, proc_handler *proc_handler,
92759 bool load_idx)
92760@@ -4967,7 +4977,7 @@ set_table_entry(struct ctl_table *entry,
92761 static struct ctl_table *
92762 sd_alloc_ctl_domain_table(struct sched_domain *sd)
92763 {
92764- struct ctl_table *table = sd_alloc_ctl_entry(14);
92765+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
92766
92767 if (table == NULL)
92768 return NULL;
92769@@ -5005,9 +5015,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
92770 return table;
92771 }
92772
92773-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
92774+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
92775 {
92776- struct ctl_table *entry, *table;
92777+ ctl_table_no_const *entry, *table;
92778 struct sched_domain *sd;
92779 int domain_num = 0, i;
92780 char buf[32];
92781@@ -5034,11 +5044,13 @@ static struct ctl_table_header *sd_sysctl_header;
92782 static void register_sched_domain_sysctl(void)
92783 {
92784 int i, cpu_num = num_possible_cpus();
92785- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
92786+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
92787 char buf[32];
92788
92789 WARN_ON(sd_ctl_dir[0].child);
92790+ pax_open_kernel();
92791 sd_ctl_dir[0].child = entry;
92792+ pax_close_kernel();
92793
92794 if (entry == NULL)
92795 return;
92796@@ -5061,8 +5073,12 @@ static void unregister_sched_domain_sysctl(void)
92797 if (sd_sysctl_header)
92798 unregister_sysctl_table(sd_sysctl_header);
92799 sd_sysctl_header = NULL;
92800- if (sd_ctl_dir[0].child)
92801- sd_free_ctl_entry(&sd_ctl_dir[0].child);
92802+ if (sd_ctl_dir[0].child) {
92803+ sd_free_ctl_entry(sd_ctl_dir[0].child);
92804+ pax_open_kernel();
92805+ sd_ctl_dir[0].child = NULL;
92806+ pax_close_kernel();
92807+ }
92808 }
92809 #else
92810 static void register_sched_domain_sysctl(void)
92811diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
92812index bfa3c86..e58767c 100644
92813--- a/kernel/sched/fair.c
92814+++ b/kernel/sched/fair.c
92815@@ -1873,7 +1873,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
92816
92817 static void reset_ptenuma_scan(struct task_struct *p)
92818 {
92819- ACCESS_ONCE(p->mm->numa_scan_seq)++;
92820+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
92821 p->mm->numa_scan_offset = 0;
92822 }
92823
92824@@ -7339,7 +7339,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
92825 * run_rebalance_domains is triggered when needed from the scheduler tick.
92826 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
92827 */
92828-static void run_rebalance_domains(struct softirq_action *h)
92829+static __latent_entropy void run_rebalance_domains(void)
92830 {
92831 struct rq *this_rq = this_rq();
92832 enum cpu_idle_type idle = this_rq->idle_balance ?
92833diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
92834index 579712f..a338a9d 100644
92835--- a/kernel/sched/sched.h
92836+++ b/kernel/sched/sched.h
92837@@ -1146,7 +1146,7 @@ struct sched_class {
92838 #ifdef CONFIG_FAIR_GROUP_SCHED
92839 void (*task_move_group) (struct task_struct *p, int on_rq);
92840 #endif
92841-};
92842+} __do_const;
92843
92844 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
92845 {
92846diff --git a/kernel/seccomp.c b/kernel/seccomp.c
92847index 44eb005..84922be 100644
92848--- a/kernel/seccomp.c
92849+++ b/kernel/seccomp.c
92850@@ -395,16 +395,15 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
92851 if (!filter)
92852 goto free_prog;
92853
92854- filter->prog = kzalloc(bpf_prog_size(new_len),
92855- GFP_KERNEL|__GFP_NOWARN);
92856+ filter->prog = bpf_prog_alloc(bpf_prog_size(new_len), __GFP_NOWARN);
92857 if (!filter->prog)
92858 goto free_filter;
92859
92860 ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
92861 if (ret)
92862 goto free_filter_prog;
92863- kfree(fp);
92864
92865+ kfree(fp);
92866 atomic_set(&filter->usage, 1);
92867 filter->prog->len = new_len;
92868
92869@@ -413,7 +412,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
92870 return filter;
92871
92872 free_filter_prog:
92873- kfree(filter->prog);
92874+ __bpf_prog_free(filter->prog);
92875 free_filter:
92876 kfree(filter);
92877 free_prog:
92878diff --git a/kernel/signal.c b/kernel/signal.c
92879index 8f0876f..1153a5a 100644
92880--- a/kernel/signal.c
92881+++ b/kernel/signal.c
92882@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
92883
92884 int print_fatal_signals __read_mostly;
92885
92886-static void __user *sig_handler(struct task_struct *t, int sig)
92887+static __sighandler_t sig_handler(struct task_struct *t, int sig)
92888 {
92889 return t->sighand->action[sig - 1].sa.sa_handler;
92890 }
92891
92892-static int sig_handler_ignored(void __user *handler, int sig)
92893+static int sig_handler_ignored(__sighandler_t handler, int sig)
92894 {
92895 /* Is it explicitly or implicitly ignored? */
92896 return handler == SIG_IGN ||
92897@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
92898
92899 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
92900 {
92901- void __user *handler;
92902+ __sighandler_t handler;
92903
92904 handler = sig_handler(t, sig);
92905
92906@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
92907 atomic_inc(&user->sigpending);
92908 rcu_read_unlock();
92909
92910+ if (!override_rlimit)
92911+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
92912+
92913 if (override_rlimit ||
92914 atomic_read(&user->sigpending) <=
92915 task_rlimit(t, RLIMIT_SIGPENDING)) {
92916@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
92917
92918 int unhandled_signal(struct task_struct *tsk, int sig)
92919 {
92920- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
92921+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
92922 if (is_global_init(tsk))
92923 return 1;
92924 if (handler != SIG_IGN && handler != SIG_DFL)
92925@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
92926 }
92927 }
92928
92929+ /* allow glibc communication via tgkill to other threads in our
92930+ thread group */
92931+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
92932+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
92933+ && gr_handle_signal(t, sig))
92934+ return -EPERM;
92935+
92936 return security_task_kill(t, info, sig, 0);
92937 }
92938
92939@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92940 return send_signal(sig, info, p, 1);
92941 }
92942
92943-static int
92944+int
92945 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92946 {
92947 return send_signal(sig, info, t, 0);
92948@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92949 unsigned long int flags;
92950 int ret, blocked, ignored;
92951 struct k_sigaction *action;
92952+ int is_unhandled = 0;
92953
92954 spin_lock_irqsave(&t->sighand->siglock, flags);
92955 action = &t->sighand->action[sig-1];
92956@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92957 }
92958 if (action->sa.sa_handler == SIG_DFL)
92959 t->signal->flags &= ~SIGNAL_UNKILLABLE;
92960+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
92961+ is_unhandled = 1;
92962 ret = specific_send_sig_info(sig, info, t);
92963 spin_unlock_irqrestore(&t->sighand->siglock, flags);
92964
92965+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
92966+ normal operation */
92967+ if (is_unhandled) {
92968+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
92969+ gr_handle_crash(t, sig);
92970+ }
92971+
92972 return ret;
92973 }
92974
92975@@ -1300,8 +1320,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92976 ret = check_kill_permission(sig, info, p);
92977 rcu_read_unlock();
92978
92979- if (!ret && sig)
92980+ if (!ret && sig) {
92981 ret = do_send_sig_info(sig, info, p, true);
92982+ if (!ret)
92983+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
92984+ }
92985
92986 return ret;
92987 }
92988@@ -2903,7 +2926,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
92989 int error = -ESRCH;
92990
92991 rcu_read_lock();
92992- p = find_task_by_vpid(pid);
92993+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
92994+ /* allow glibc communication via tgkill to other threads in our
92995+ thread group */
92996+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
92997+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
92998+ p = find_task_by_vpid_unrestricted(pid);
92999+ else
93000+#endif
93001+ p = find_task_by_vpid(pid);
93002 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
93003 error = check_kill_permission(sig, info, p);
93004 /*
93005@@ -3236,8 +3267,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
93006 }
93007 seg = get_fs();
93008 set_fs(KERNEL_DS);
93009- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
93010- (stack_t __force __user *) &uoss,
93011+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
93012+ (stack_t __force_user *) &uoss,
93013 compat_user_stack_pointer());
93014 set_fs(seg);
93015 if (ret >= 0 && uoss_ptr) {
93016diff --git a/kernel/smpboot.c b/kernel/smpboot.c
93017index eb89e18..a4e6792 100644
93018--- a/kernel/smpboot.c
93019+++ b/kernel/smpboot.c
93020@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
93021 }
93022 smpboot_unpark_thread(plug_thread, cpu);
93023 }
93024- list_add(&plug_thread->list, &hotplug_threads);
93025+ pax_list_add(&plug_thread->list, &hotplug_threads);
93026 out:
93027 mutex_unlock(&smpboot_threads_lock);
93028 return ret;
93029@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
93030 {
93031 get_online_cpus();
93032 mutex_lock(&smpboot_threads_lock);
93033- list_del(&plug_thread->list);
93034+ pax_list_del(&plug_thread->list);
93035 smpboot_destroy_threads(plug_thread);
93036 mutex_unlock(&smpboot_threads_lock);
93037 put_online_cpus();
93038diff --git a/kernel/softirq.c b/kernel/softirq.c
93039index 5918d22..e95d1926 100644
93040--- a/kernel/softirq.c
93041+++ b/kernel/softirq.c
93042@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
93043 EXPORT_SYMBOL(irq_stat);
93044 #endif
93045
93046-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
93047+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
93048
93049 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
93050
93051@@ -266,7 +266,7 @@ restart:
93052 kstat_incr_softirqs_this_cpu(vec_nr);
93053
93054 trace_softirq_entry(vec_nr);
93055- h->action(h);
93056+ h->action();
93057 trace_softirq_exit(vec_nr);
93058 if (unlikely(prev_count != preempt_count())) {
93059 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
93060@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
93061 or_softirq_pending(1UL << nr);
93062 }
93063
93064-void open_softirq(int nr, void (*action)(struct softirq_action *))
93065+void __init open_softirq(int nr, void (*action)(void))
93066 {
93067 softirq_vec[nr].action = action;
93068 }
93069@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
93070 }
93071 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
93072
93073-static void tasklet_action(struct softirq_action *a)
93074+static void tasklet_action(void)
93075 {
93076 struct tasklet_struct *list;
93077
93078@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
93079 }
93080 }
93081
93082-static void tasklet_hi_action(struct softirq_action *a)
93083+static __latent_entropy void tasklet_hi_action(void)
93084 {
93085 struct tasklet_struct *list;
93086
93087@@ -741,7 +741,7 @@ static struct notifier_block cpu_nfb = {
93088 .notifier_call = cpu_callback
93089 };
93090
93091-static struct smp_hotplug_thread softirq_threads = {
93092+static struct smp_hotplug_thread softirq_threads __read_only = {
93093 .store = &ksoftirqd,
93094 .thread_should_run = ksoftirqd_should_run,
93095 .thread_fn = run_ksoftirqd,
93096diff --git a/kernel/sys.c b/kernel/sys.c
93097index ce81291..df2ca85 100644
93098--- a/kernel/sys.c
93099+++ b/kernel/sys.c
93100@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
93101 error = -EACCES;
93102 goto out;
93103 }
93104+
93105+ if (gr_handle_chroot_setpriority(p, niceval)) {
93106+ error = -EACCES;
93107+ goto out;
93108+ }
93109+
93110 no_nice = security_task_setnice(p, niceval);
93111 if (no_nice) {
93112 error = no_nice;
93113@@ -351,6 +357,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
93114 goto error;
93115 }
93116
93117+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
93118+ goto error;
93119+
93120+ if (!gid_eq(new->gid, old->gid)) {
93121+ /* make sure we generate a learn log for what will
93122+ end up being a role transition after a full-learning
93123+ policy is generated
93124+ CAP_SETGID is required to perform a transition
93125+ we may not log a CAP_SETGID check above, e.g.
93126+ in the case where new rgid = old egid
93127+ */
93128+ gr_learn_cap(current, new, CAP_SETGID);
93129+ }
93130+
93131 if (rgid != (gid_t) -1 ||
93132 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93133 new->sgid = new->egid;
93134@@ -386,6 +406,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93135 old = current_cred();
93136
93137 retval = -EPERM;
93138+
93139+ if (gr_check_group_change(kgid, kgid, kgid))
93140+ goto error;
93141+
93142 if (ns_capable(old->user_ns, CAP_SETGID))
93143 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93144 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93145@@ -403,7 +427,7 @@ error:
93146 /*
93147 * change the user struct in a credentials set to match the new UID
93148 */
93149-static int set_user(struct cred *new)
93150+int set_user(struct cred *new)
93151 {
93152 struct user_struct *new_user;
93153
93154@@ -483,7 +507,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93155 goto error;
93156 }
93157
93158+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
93159+ goto error;
93160+
93161 if (!uid_eq(new->uid, old->uid)) {
93162+ /* make sure we generate a learn log for what will
93163+ end up being a role transition after a full-learning
93164+ policy is generated
93165+ CAP_SETUID is required to perform a transition
93166+ we may not log a CAP_SETUID check above, e.g.
93167+ in the case where new ruid = old euid
93168+ */
93169+ gr_learn_cap(current, new, CAP_SETUID);
93170 retval = set_user(new);
93171 if (retval < 0)
93172 goto error;
93173@@ -533,6 +568,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93174 old = current_cred();
93175
93176 retval = -EPERM;
93177+
93178+ if (gr_check_crash_uid(kuid))
93179+ goto error;
93180+ if (gr_check_user_change(kuid, kuid, kuid))
93181+ goto error;
93182+
93183 if (ns_capable(old->user_ns, CAP_SETUID)) {
93184 new->suid = new->uid = kuid;
93185 if (!uid_eq(kuid, old->uid)) {
93186@@ -602,6 +643,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93187 goto error;
93188 }
93189
93190+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93191+ goto error;
93192+
93193 if (ruid != (uid_t) -1) {
93194 new->uid = kruid;
93195 if (!uid_eq(kruid, old->uid)) {
93196@@ -684,6 +728,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93197 goto error;
93198 }
93199
93200+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93201+ goto error;
93202+
93203 if (rgid != (gid_t) -1)
93204 new->gid = krgid;
93205 if (egid != (gid_t) -1)
93206@@ -745,12 +792,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93207 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93208 ns_capable(old->user_ns, CAP_SETUID)) {
93209 if (!uid_eq(kuid, old->fsuid)) {
93210+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93211+ goto error;
93212+
93213 new->fsuid = kuid;
93214 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93215 goto change_okay;
93216 }
93217 }
93218
93219+error:
93220 abort_creds(new);
93221 return old_fsuid;
93222
93223@@ -783,12 +834,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93224 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93225 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93226 ns_capable(old->user_ns, CAP_SETGID)) {
93227+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93228+ goto error;
93229+
93230 if (!gid_eq(kgid, old->fsgid)) {
93231 new->fsgid = kgid;
93232 goto change_okay;
93233 }
93234 }
93235
93236+error:
93237 abort_creds(new);
93238 return old_fsgid;
93239
93240@@ -1167,19 +1222,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93241 return -EFAULT;
93242
93243 down_read(&uts_sem);
93244- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93245+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93246 __OLD_UTS_LEN);
93247 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93248- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93249+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93250 __OLD_UTS_LEN);
93251 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93252- error |= __copy_to_user(&name->release, &utsname()->release,
93253+ error |= __copy_to_user(name->release, &utsname()->release,
93254 __OLD_UTS_LEN);
93255 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93256- error |= __copy_to_user(&name->version, &utsname()->version,
93257+ error |= __copy_to_user(name->version, &utsname()->version,
93258 __OLD_UTS_LEN);
93259 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93260- error |= __copy_to_user(&name->machine, &utsname()->machine,
93261+ error |= __copy_to_user(name->machine, &utsname()->machine,
93262 __OLD_UTS_LEN);
93263 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93264 up_read(&uts_sem);
93265@@ -1381,6 +1436,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93266 */
93267 new_rlim->rlim_cur = 1;
93268 }
93269+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93270+ is changed to a lower value. Since tasks can be created by the same
93271+ user in between this limit change and an execve by this task, force
93272+ a recheck only for this task by setting PF_NPROC_EXCEEDED
93273+ */
93274+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93275+ tsk->flags |= PF_NPROC_EXCEEDED;
93276 }
93277 if (!retval) {
93278 if (old_rlim)
93279diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93280index 75875a7..cd8e838 100644
93281--- a/kernel/sysctl.c
93282+++ b/kernel/sysctl.c
93283@@ -94,7 +94,6 @@
93284
93285
93286 #if defined(CONFIG_SYSCTL)
93287-
93288 /* External variables not in a header file. */
93289 extern int max_threads;
93290 extern int suid_dumpable;
93291@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93292
93293 /* Constants used for minimum and maximum */
93294 #ifdef CONFIG_LOCKUP_DETECTOR
93295-static int sixty = 60;
93296+static int sixty __read_only = 60;
93297 #endif
93298
93299-static int __maybe_unused neg_one = -1;
93300+static int __maybe_unused neg_one __read_only = -1;
93301
93302-static int zero;
93303-static int __maybe_unused one = 1;
93304-static int __maybe_unused two = 2;
93305-static int __maybe_unused four = 4;
93306-static unsigned long one_ul = 1;
93307-static int one_hundred = 100;
93308+static int zero __read_only = 0;
93309+static int __maybe_unused one __read_only = 1;
93310+static int __maybe_unused two __read_only = 2;
93311+static int __maybe_unused three __read_only = 3;
93312+static int __maybe_unused four __read_only = 4;
93313+static unsigned long one_ul __read_only = 1;
93314+static int one_hundred __read_only = 100;
93315 #ifdef CONFIG_PRINTK
93316-static int ten_thousand = 10000;
93317+static int ten_thousand __read_only = 10000;
93318 #endif
93319
93320 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93321@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93322 void __user *buffer, size_t *lenp, loff_t *ppos);
93323 #endif
93324
93325-#ifdef CONFIG_PRINTK
93326 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93327 void __user *buffer, size_t *lenp, loff_t *ppos);
93328-#endif
93329
93330 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93331 void __user *buffer, size_t *lenp, loff_t *ppos);
93332@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93333
93334 #endif
93335
93336+extern struct ctl_table grsecurity_table[];
93337+
93338 static struct ctl_table kern_table[];
93339 static struct ctl_table vm_table[];
93340 static struct ctl_table fs_table[];
93341@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93342 int sysctl_legacy_va_layout;
93343 #endif
93344
93345+#ifdef CONFIG_PAX_SOFTMODE
93346+static struct ctl_table pax_table[] = {
93347+ {
93348+ .procname = "softmode",
93349+ .data = &pax_softmode,
93350+ .maxlen = sizeof(unsigned int),
93351+ .mode = 0600,
93352+ .proc_handler = &proc_dointvec,
93353+ },
93354+
93355+ { }
93356+};
93357+#endif
93358+
93359 /* The default sysctl tables: */
93360
93361 static struct ctl_table sysctl_base_table[] = {
93362@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93363 #endif
93364
93365 static struct ctl_table kern_table[] = {
93366+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93367+ {
93368+ .procname = "grsecurity",
93369+ .mode = 0500,
93370+ .child = grsecurity_table,
93371+ },
93372+#endif
93373+
93374+#ifdef CONFIG_PAX_SOFTMODE
93375+ {
93376+ .procname = "pax",
93377+ .mode = 0500,
93378+ .child = pax_table,
93379+ },
93380+#endif
93381+
93382 {
93383 .procname = "sched_child_runs_first",
93384 .data = &sysctl_sched_child_runs_first,
93385@@ -641,7 +671,7 @@ static struct ctl_table kern_table[] = {
93386 .data = &modprobe_path,
93387 .maxlen = KMOD_PATH_LEN,
93388 .mode = 0644,
93389- .proc_handler = proc_dostring,
93390+ .proc_handler = proc_dostring_modpriv,
93391 },
93392 {
93393 .procname = "modules_disabled",
93394@@ -808,16 +838,20 @@ static struct ctl_table kern_table[] = {
93395 .extra1 = &zero,
93396 .extra2 = &one,
93397 },
93398+#endif
93399 {
93400 .procname = "kptr_restrict",
93401 .data = &kptr_restrict,
93402 .maxlen = sizeof(int),
93403 .mode = 0644,
93404 .proc_handler = proc_dointvec_minmax_sysadmin,
93405+#ifdef CONFIG_GRKERNSEC_HIDESYM
93406+ .extra1 = &two,
93407+#else
93408 .extra1 = &zero,
93409+#endif
93410 .extra2 = &two,
93411 },
93412-#endif
93413 {
93414 .procname = "ngroups_max",
93415 .data = &ngroups_max,
93416@@ -1073,10 +1107,17 @@ static struct ctl_table kern_table[] = {
93417 */
93418 {
93419 .procname = "perf_event_paranoid",
93420- .data = &sysctl_perf_event_paranoid,
93421- .maxlen = sizeof(sysctl_perf_event_paranoid),
93422+ .data = &sysctl_perf_event_legitimately_concerned,
93423+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
93424 .mode = 0644,
93425- .proc_handler = proc_dointvec,
93426+ /* go ahead, be a hero */
93427+ .proc_handler = proc_dointvec_minmax_sysadmin,
93428+ .extra1 = &neg_one,
93429+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
93430+ .extra2 = &three,
93431+#else
93432+ .extra2 = &two,
93433+#endif
93434 },
93435 {
93436 .procname = "perf_event_mlock_kb",
93437@@ -1335,6 +1376,13 @@ static struct ctl_table vm_table[] = {
93438 .proc_handler = proc_dointvec_minmax,
93439 .extra1 = &zero,
93440 },
93441+ {
93442+ .procname = "heap_stack_gap",
93443+ .data = &sysctl_heap_stack_gap,
93444+ .maxlen = sizeof(sysctl_heap_stack_gap),
93445+ .mode = 0644,
93446+ .proc_handler = proc_doulongvec_minmax,
93447+ },
93448 #else
93449 {
93450 .procname = "nr_trim_pages",
93451@@ -1824,6 +1872,16 @@ int proc_dostring(struct ctl_table *table, int write,
93452 (char __user *)buffer, lenp, ppos);
93453 }
93454
93455+int proc_dostring_modpriv(struct ctl_table *table, int write,
93456+ void __user *buffer, size_t *lenp, loff_t *ppos)
93457+{
93458+ if (write && !capable(CAP_SYS_MODULE))
93459+ return -EPERM;
93460+
93461+ return _proc_do_string(table->data, table->maxlen, write,
93462+ buffer, lenp, ppos);
93463+}
93464+
93465 static size_t proc_skip_spaces(char **buf)
93466 {
93467 size_t ret;
93468@@ -1929,6 +1987,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
93469 len = strlen(tmp);
93470 if (len > *size)
93471 len = *size;
93472+ if (len > sizeof(tmp))
93473+ len = sizeof(tmp);
93474 if (copy_to_user(*buf, tmp, len))
93475 return -EFAULT;
93476 *size -= len;
93477@@ -2106,7 +2166,7 @@ int proc_dointvec(struct ctl_table *table, int write,
93478 static int proc_taint(struct ctl_table *table, int write,
93479 void __user *buffer, size_t *lenp, loff_t *ppos)
93480 {
93481- struct ctl_table t;
93482+ ctl_table_no_const t;
93483 unsigned long tmptaint = get_taint();
93484 int err;
93485
93486@@ -2134,7 +2194,6 @@ static int proc_taint(struct ctl_table *table, int write,
93487 return err;
93488 }
93489
93490-#ifdef CONFIG_PRINTK
93491 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93492 void __user *buffer, size_t *lenp, loff_t *ppos)
93493 {
93494@@ -2143,7 +2202,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93495
93496 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
93497 }
93498-#endif
93499
93500 struct do_proc_dointvec_minmax_conv_param {
93501 int *min;
93502@@ -2703,6 +2761,12 @@ int proc_dostring(struct ctl_table *table, int write,
93503 return -ENOSYS;
93504 }
93505
93506+int proc_dostring_modpriv(struct ctl_table *table, int write,
93507+ void __user *buffer, size_t *lenp, loff_t *ppos)
93508+{
93509+ return -ENOSYS;
93510+}
93511+
93512 int proc_dointvec(struct ctl_table *table, int write,
93513 void __user *buffer, size_t *lenp, loff_t *ppos)
93514 {
93515@@ -2759,5 +2823,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
93516 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
93517 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
93518 EXPORT_SYMBOL(proc_dostring);
93519+EXPORT_SYMBOL(proc_dostring_modpriv);
93520 EXPORT_SYMBOL(proc_doulongvec_minmax);
93521 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
93522diff --git a/kernel/taskstats.c b/kernel/taskstats.c
93523index 13d2f7c..c93d0b0 100644
93524--- a/kernel/taskstats.c
93525+++ b/kernel/taskstats.c
93526@@ -28,9 +28,12 @@
93527 #include <linux/fs.h>
93528 #include <linux/file.h>
93529 #include <linux/pid_namespace.h>
93530+#include <linux/grsecurity.h>
93531 #include <net/genetlink.h>
93532 #include <linux/atomic.h>
93533
93534+extern int gr_is_taskstats_denied(int pid);
93535+
93536 /*
93537 * Maximum length of a cpumask that can be specified in
93538 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
93539@@ -576,6 +579,9 @@ err:
93540
93541 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
93542 {
93543+ if (gr_is_taskstats_denied(current->pid))
93544+ return -EACCES;
93545+
93546 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
93547 return cmd_attr_register_cpumask(info);
93548 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
93549diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
93550index a7077d3..dd48a49 100644
93551--- a/kernel/time/alarmtimer.c
93552+++ b/kernel/time/alarmtimer.c
93553@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
93554 struct platform_device *pdev;
93555 int error = 0;
93556 int i;
93557- struct k_clock alarm_clock = {
93558+ static struct k_clock alarm_clock = {
93559 .clock_getres = alarm_clock_getres,
93560 .clock_get = alarm_clock_get,
93561 .timer_create = alarm_timer_create,
93562diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
93563index 1c2fe7d..ce7483d 100644
93564--- a/kernel/time/hrtimer.c
93565+++ b/kernel/time/hrtimer.c
93566@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
93567 local_irq_restore(flags);
93568 }
93569
93570-static void run_hrtimer_softirq(struct softirq_action *h)
93571+static __latent_entropy void run_hrtimer_softirq(void)
93572 {
93573 hrtimer_peek_ahead_timers();
93574 }
93575diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
93576index 3b89464..5e38379 100644
93577--- a/kernel/time/posix-cpu-timers.c
93578+++ b/kernel/time/posix-cpu-timers.c
93579@@ -1464,14 +1464,14 @@ struct k_clock clock_posix_cpu = {
93580
93581 static __init int init_posix_cpu_timers(void)
93582 {
93583- struct k_clock process = {
93584+ static struct k_clock process = {
93585 .clock_getres = process_cpu_clock_getres,
93586 .clock_get = process_cpu_clock_get,
93587 .timer_create = process_cpu_timer_create,
93588 .nsleep = process_cpu_nsleep,
93589 .nsleep_restart = process_cpu_nsleep_restart,
93590 };
93591- struct k_clock thread = {
93592+ static struct k_clock thread = {
93593 .clock_getres = thread_cpu_clock_getres,
93594 .clock_get = thread_cpu_clock_get,
93595 .timer_create = thread_cpu_timer_create,
93596diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
93597index 31ea01f..7fc61ef 100644
93598--- a/kernel/time/posix-timers.c
93599+++ b/kernel/time/posix-timers.c
93600@@ -43,6 +43,7 @@
93601 #include <linux/hash.h>
93602 #include <linux/posix-clock.h>
93603 #include <linux/posix-timers.h>
93604+#include <linux/grsecurity.h>
93605 #include <linux/syscalls.h>
93606 #include <linux/wait.h>
93607 #include <linux/workqueue.h>
93608@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
93609 * which we beg off on and pass to do_sys_settimeofday().
93610 */
93611
93612-static struct k_clock posix_clocks[MAX_CLOCKS];
93613+static struct k_clock *posix_clocks[MAX_CLOCKS];
93614
93615 /*
93616 * These ones are defined below.
93617@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93618 */
93619 static __init int init_posix_timers(void)
93620 {
93621- struct k_clock clock_realtime = {
93622+ static struct k_clock clock_realtime = {
93623 .clock_getres = hrtimer_get_res,
93624 .clock_get = posix_clock_realtime_get,
93625 .clock_set = posix_clock_realtime_set,
93626@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
93627 .timer_get = common_timer_get,
93628 .timer_del = common_timer_del,
93629 };
93630- struct k_clock clock_monotonic = {
93631+ static struct k_clock clock_monotonic = {
93632 .clock_getres = hrtimer_get_res,
93633 .clock_get = posix_ktime_get_ts,
93634 .nsleep = common_nsleep,
93635@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
93636 .timer_get = common_timer_get,
93637 .timer_del = common_timer_del,
93638 };
93639- struct k_clock clock_monotonic_raw = {
93640+ static struct k_clock clock_monotonic_raw = {
93641 .clock_getres = hrtimer_get_res,
93642 .clock_get = posix_get_monotonic_raw,
93643 };
93644- struct k_clock clock_realtime_coarse = {
93645+ static struct k_clock clock_realtime_coarse = {
93646 .clock_getres = posix_get_coarse_res,
93647 .clock_get = posix_get_realtime_coarse,
93648 };
93649- struct k_clock clock_monotonic_coarse = {
93650+ static struct k_clock clock_monotonic_coarse = {
93651 .clock_getres = posix_get_coarse_res,
93652 .clock_get = posix_get_monotonic_coarse,
93653 };
93654- struct k_clock clock_tai = {
93655+ static struct k_clock clock_tai = {
93656 .clock_getres = hrtimer_get_res,
93657 .clock_get = posix_get_tai,
93658 .nsleep = common_nsleep,
93659@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
93660 .timer_get = common_timer_get,
93661 .timer_del = common_timer_del,
93662 };
93663- struct k_clock clock_boottime = {
93664+ static struct k_clock clock_boottime = {
93665 .clock_getres = hrtimer_get_res,
93666 .clock_get = posix_get_boottime,
93667 .nsleep = common_nsleep,
93668@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93669 return;
93670 }
93671
93672- posix_clocks[clock_id] = *new_clock;
93673+ posix_clocks[clock_id] = new_clock;
93674 }
93675 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93676
93677@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93678 return (id & CLOCKFD_MASK) == CLOCKFD ?
93679 &clock_posix_dynamic : &clock_posix_cpu;
93680
93681- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93682+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93683 return NULL;
93684- return &posix_clocks[id];
93685+ return posix_clocks[id];
93686 }
93687
93688 static int common_timer_create(struct k_itimer *new_timer)
93689@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93690 struct k_clock *kc = clockid_to_kclock(which_clock);
93691 struct k_itimer *new_timer;
93692 int error, new_timer_id;
93693- sigevent_t event;
93694+ sigevent_t event = { };
93695 int it_id_set = IT_ID_NOT_SET;
93696
93697 if (!kc)
93698@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93699 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93700 return -EFAULT;
93701
93702+ /* only the CLOCK_REALTIME clock can be set, all other clocks
93703+ have their clock_set fptr set to a nosettime dummy function
93704+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
93705+ call common_clock_set, which calls do_sys_settimeofday, which
93706+ we hook
93707+ */
93708+
93709 return kc->clock_set(which_clock, &new_tp);
93710 }
93711
93712diff --git a/kernel/time/time.c b/kernel/time/time.c
93713index a9ae20f..d3fbde7 100644
93714--- a/kernel/time/time.c
93715+++ b/kernel/time/time.c
93716@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
93717 return error;
93718
93719 if (tz) {
93720+ /* we log in do_settimeofday called below, so don't log twice
93721+ */
93722+ if (!tv)
93723+ gr_log_timechange();
93724+
93725 sys_tz = *tz;
93726 update_vsyscall_tz();
93727 if (firsttime) {
93728diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
93729index ec1791f..6a086cd 100644
93730--- a/kernel/time/timekeeping.c
93731+++ b/kernel/time/timekeeping.c
93732@@ -15,6 +15,7 @@
93733 #include <linux/init.h>
93734 #include <linux/mm.h>
93735 #include <linux/sched.h>
93736+#include <linux/grsecurity.h>
93737 #include <linux/syscore_ops.h>
93738 #include <linux/clocksource.h>
93739 #include <linux/jiffies.h>
93740@@ -717,6 +718,8 @@ int do_settimeofday(const struct timespec *tv)
93741 if (!timespec_valid_strict(tv))
93742 return -EINVAL;
93743
93744+ gr_log_timechange();
93745+
93746 raw_spin_lock_irqsave(&timekeeper_lock, flags);
93747 write_seqcount_begin(&tk_core.seq);
93748
93749diff --git a/kernel/time/timer.c b/kernel/time/timer.c
93750index 9bbb834..3caa8ed 100644
93751--- a/kernel/time/timer.c
93752+++ b/kernel/time/timer.c
93753@@ -1394,7 +1394,7 @@ void update_process_times(int user_tick)
93754 /*
93755 * This function runs timers and the timer-tq in bottom half context.
93756 */
93757-static void run_timer_softirq(struct softirq_action *h)
93758+static __latent_entropy void run_timer_softirq(void)
93759 {
93760 struct tvec_base *base = __this_cpu_read(tvec_bases);
93761
93762@@ -1457,7 +1457,7 @@ static void process_timeout(unsigned long __data)
93763 *
93764 * In all cases the return value is guaranteed to be non-negative.
93765 */
93766-signed long __sched schedule_timeout(signed long timeout)
93767+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
93768 {
93769 struct timer_list timer;
93770 unsigned long expire;
93771diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
93772index 61ed862..3b52c65 100644
93773--- a/kernel/time/timer_list.c
93774+++ b/kernel/time/timer_list.c
93775@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
93776
93777 static void print_name_offset(struct seq_file *m, void *sym)
93778 {
93779+#ifdef CONFIG_GRKERNSEC_HIDESYM
93780+ SEQ_printf(m, "<%p>", NULL);
93781+#else
93782 char symname[KSYM_NAME_LEN];
93783
93784 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
93785 SEQ_printf(m, "<%pK>", sym);
93786 else
93787 SEQ_printf(m, "%s", symname);
93788+#endif
93789 }
93790
93791 static void
93792@@ -119,7 +123,11 @@ next_one:
93793 static void
93794 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
93795 {
93796+#ifdef CONFIG_GRKERNSEC_HIDESYM
93797+ SEQ_printf(m, " .base: %p\n", NULL);
93798+#else
93799 SEQ_printf(m, " .base: %pK\n", base);
93800+#endif
93801 SEQ_printf(m, " .index: %d\n",
93802 base->index);
93803 SEQ_printf(m, " .resolution: %Lu nsecs\n",
93804@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
93805 {
93806 struct proc_dir_entry *pe;
93807
93808+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93809+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
93810+#else
93811 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
93812+#endif
93813 if (!pe)
93814 return -ENOMEM;
93815 return 0;
93816diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
93817index 1fb08f2..ca4bb1e 100644
93818--- a/kernel/time/timer_stats.c
93819+++ b/kernel/time/timer_stats.c
93820@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
93821 static unsigned long nr_entries;
93822 static struct entry entries[MAX_ENTRIES];
93823
93824-static atomic_t overflow_count;
93825+static atomic_unchecked_t overflow_count;
93826
93827 /*
93828 * The entries are in a hash-table, for fast lookup:
93829@@ -140,7 +140,7 @@ static void reset_entries(void)
93830 nr_entries = 0;
93831 memset(entries, 0, sizeof(entries));
93832 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
93833- atomic_set(&overflow_count, 0);
93834+ atomic_set_unchecked(&overflow_count, 0);
93835 }
93836
93837 static struct entry *alloc_entry(void)
93838@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93839 if (likely(entry))
93840 entry->count++;
93841 else
93842- atomic_inc(&overflow_count);
93843+ atomic_inc_unchecked(&overflow_count);
93844
93845 out_unlock:
93846 raw_spin_unlock_irqrestore(lock, flags);
93847@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93848
93849 static void print_name_offset(struct seq_file *m, unsigned long addr)
93850 {
93851+#ifdef CONFIG_GRKERNSEC_HIDESYM
93852+ seq_printf(m, "<%p>", NULL);
93853+#else
93854 char symname[KSYM_NAME_LEN];
93855
93856 if (lookup_symbol_name(addr, symname) < 0)
93857- seq_printf(m, "<%p>", (void *)addr);
93858+ seq_printf(m, "<%pK>", (void *)addr);
93859 else
93860 seq_printf(m, "%s", symname);
93861+#endif
93862 }
93863
93864 static int tstats_show(struct seq_file *m, void *v)
93865@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
93866
93867 seq_puts(m, "Timer Stats Version: v0.3\n");
93868 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
93869- if (atomic_read(&overflow_count))
93870- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
93871+ if (atomic_read_unchecked(&overflow_count))
93872+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
93873 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
93874
93875 for (i = 0; i < nr_entries; i++) {
93876@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
93877 {
93878 struct proc_dir_entry *pe;
93879
93880+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93881+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
93882+#else
93883 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
93884+#endif
93885 if (!pe)
93886 return -ENOMEM;
93887 return 0;
93888diff --git a/kernel/torture.c b/kernel/torture.c
93889index d600af2..27a4e9d 100644
93890--- a/kernel/torture.c
93891+++ b/kernel/torture.c
93892@@ -484,7 +484,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
93893 mutex_lock(&fullstop_mutex);
93894 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
93895 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
93896- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
93897+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
93898 } else {
93899 pr_warn("Concurrent rmmod and shutdown illegal!\n");
93900 }
93901@@ -551,14 +551,14 @@ static int torture_stutter(void *arg)
93902 if (!torture_must_stop()) {
93903 if (stutter > 1) {
93904 schedule_timeout_interruptible(stutter - 1);
93905- ACCESS_ONCE(stutter_pause_test) = 2;
93906+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
93907 }
93908 schedule_timeout_interruptible(1);
93909- ACCESS_ONCE(stutter_pause_test) = 1;
93910+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
93911 }
93912 if (!torture_must_stop())
93913 schedule_timeout_interruptible(stutter);
93914- ACCESS_ONCE(stutter_pause_test) = 0;
93915+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
93916 torture_shutdown_absorb("torture_stutter");
93917 } while (!torture_must_stop());
93918 torture_kthread_stopping("torture_stutter");
93919@@ -645,7 +645,7 @@ bool torture_cleanup(void)
93920 schedule_timeout_uninterruptible(10);
93921 return true;
93922 }
93923- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
93924+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
93925 mutex_unlock(&fullstop_mutex);
93926 torture_shutdown_cleanup();
93927 torture_shuffle_cleanup();
93928diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
93929index c1bd4ad..4b861dc 100644
93930--- a/kernel/trace/blktrace.c
93931+++ b/kernel/trace/blktrace.c
93932@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
93933 struct blk_trace *bt = filp->private_data;
93934 char buf[16];
93935
93936- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
93937+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
93938
93939 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
93940 }
93941@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
93942 return 1;
93943
93944 bt = buf->chan->private_data;
93945- atomic_inc(&bt->dropped);
93946+ atomic_inc_unchecked(&bt->dropped);
93947 return 0;
93948 }
93949
93950@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
93951
93952 bt->dir = dir;
93953 bt->dev = dev;
93954- atomic_set(&bt->dropped, 0);
93955+ atomic_set_unchecked(&bt->dropped, 0);
93956 INIT_LIST_HEAD(&bt->running_list);
93957
93958 ret = -EIO;
93959diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
93960index 5916a8e..5cd3b1f 100644
93961--- a/kernel/trace/ftrace.c
93962+++ b/kernel/trace/ftrace.c
93963@@ -2128,12 +2128,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
93964 if (unlikely(ftrace_disabled))
93965 return 0;
93966
93967+ ret = ftrace_arch_code_modify_prepare();
93968+ FTRACE_WARN_ON(ret);
93969+ if (ret)
93970+ return 0;
93971+
93972 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
93973+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
93974 if (ret) {
93975 ftrace_bug(ret, ip);
93976- return 0;
93977 }
93978- return 1;
93979+ return ret ? 0 : 1;
93980 }
93981
93982 /*
93983@@ -4458,8 +4463,10 @@ static int ftrace_process_locs(struct module *mod,
93984 if (!count)
93985 return 0;
93986
93987+ pax_open_kernel();
93988 sort(start, count, sizeof(*start),
93989 ftrace_cmp_ips, ftrace_swap_ips);
93990+ pax_close_kernel();
93991
93992 start_pg = ftrace_allocate_pages(count);
93993 if (!start_pg)
93994diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
93995index a56e07c..d46f0ba 100644
93996--- a/kernel/trace/ring_buffer.c
93997+++ b/kernel/trace/ring_buffer.c
93998@@ -352,9 +352,9 @@ struct buffer_data_page {
93999 */
94000 struct buffer_page {
94001 struct list_head list; /* list of buffer pages */
94002- local_t write; /* index for next write */
94003+ local_unchecked_t write; /* index for next write */
94004 unsigned read; /* index for next read */
94005- local_t entries; /* entries on this page */
94006+ local_unchecked_t entries; /* entries on this page */
94007 unsigned long real_end; /* real end of data */
94008 struct buffer_data_page *page; /* Actual data page */
94009 };
94010@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
94011 unsigned long last_overrun;
94012 local_t entries_bytes;
94013 local_t entries;
94014- local_t overrun;
94015- local_t commit_overrun;
94016+ local_unchecked_t overrun;
94017+ local_unchecked_t commit_overrun;
94018 local_t dropped_events;
94019 local_t committing;
94020 local_t commits;
94021@@ -1032,8 +1032,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94022 *
94023 * We add a counter to the write field to denote this.
94024 */
94025- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
94026- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
94027+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
94028+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
94029
94030 /*
94031 * Just make sure we have seen our old_write and synchronize
94032@@ -1061,8 +1061,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94033 * cmpxchg to only update if an interrupt did not already
94034 * do it for us. If the cmpxchg fails, we don't care.
94035 */
94036- (void)local_cmpxchg(&next_page->write, old_write, val);
94037- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
94038+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
94039+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
94040
94041 /*
94042 * No need to worry about races with clearing out the commit.
94043@@ -1429,12 +1429,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
94044
94045 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
94046 {
94047- return local_read(&bpage->entries) & RB_WRITE_MASK;
94048+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
94049 }
94050
94051 static inline unsigned long rb_page_write(struct buffer_page *bpage)
94052 {
94053- return local_read(&bpage->write) & RB_WRITE_MASK;
94054+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
94055 }
94056
94057 static int
94058@@ -1529,7 +1529,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
94059 * bytes consumed in ring buffer from here.
94060 * Increment overrun to account for the lost events.
94061 */
94062- local_add(page_entries, &cpu_buffer->overrun);
94063+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
94064 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94065 }
94066
94067@@ -2091,7 +2091,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
94068 * it is our responsibility to update
94069 * the counters.
94070 */
94071- local_add(entries, &cpu_buffer->overrun);
94072+ local_add_unchecked(entries, &cpu_buffer->overrun);
94073 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94074
94075 /*
94076@@ -2241,7 +2241,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94077 if (tail == BUF_PAGE_SIZE)
94078 tail_page->real_end = 0;
94079
94080- local_sub(length, &tail_page->write);
94081+ local_sub_unchecked(length, &tail_page->write);
94082 return;
94083 }
94084
94085@@ -2276,7 +2276,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94086 rb_event_set_padding(event);
94087
94088 /* Set the write back to the previous setting */
94089- local_sub(length, &tail_page->write);
94090+ local_sub_unchecked(length, &tail_page->write);
94091 return;
94092 }
94093
94094@@ -2288,7 +2288,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94095
94096 /* Set write to end of buffer */
94097 length = (tail + length) - BUF_PAGE_SIZE;
94098- local_sub(length, &tail_page->write);
94099+ local_sub_unchecked(length, &tail_page->write);
94100 }
94101
94102 /*
94103@@ -2314,7 +2314,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94104 * about it.
94105 */
94106 if (unlikely(next_page == commit_page)) {
94107- local_inc(&cpu_buffer->commit_overrun);
94108+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94109 goto out_reset;
94110 }
94111
94112@@ -2370,7 +2370,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94113 cpu_buffer->tail_page) &&
94114 (cpu_buffer->commit_page ==
94115 cpu_buffer->reader_page))) {
94116- local_inc(&cpu_buffer->commit_overrun);
94117+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94118 goto out_reset;
94119 }
94120 }
94121@@ -2418,7 +2418,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94122 length += RB_LEN_TIME_EXTEND;
94123
94124 tail_page = cpu_buffer->tail_page;
94125- write = local_add_return(length, &tail_page->write);
94126+ write = local_add_return_unchecked(length, &tail_page->write);
94127
94128 /* set write to only the index of the write */
94129 write &= RB_WRITE_MASK;
94130@@ -2442,7 +2442,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94131 kmemcheck_annotate_bitfield(event, bitfield);
94132 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94133
94134- local_inc(&tail_page->entries);
94135+ local_inc_unchecked(&tail_page->entries);
94136
94137 /*
94138 * If this is the first commit on the page, then update
94139@@ -2475,7 +2475,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94140
94141 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94142 unsigned long write_mask =
94143- local_read(&bpage->write) & ~RB_WRITE_MASK;
94144+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94145 unsigned long event_length = rb_event_length(event);
94146 /*
94147 * This is on the tail page. It is possible that
94148@@ -2485,7 +2485,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94149 */
94150 old_index += write_mask;
94151 new_index += write_mask;
94152- index = local_cmpxchg(&bpage->write, old_index, new_index);
94153+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94154 if (index == old_index) {
94155 /* update counters */
94156 local_sub(event_length, &cpu_buffer->entries_bytes);
94157@@ -2877,7 +2877,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94158
94159 /* Do the likely case first */
94160 if (likely(bpage->page == (void *)addr)) {
94161- local_dec(&bpage->entries);
94162+ local_dec_unchecked(&bpage->entries);
94163 return;
94164 }
94165
94166@@ -2889,7 +2889,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94167 start = bpage;
94168 do {
94169 if (bpage->page == (void *)addr) {
94170- local_dec(&bpage->entries);
94171+ local_dec_unchecked(&bpage->entries);
94172 return;
94173 }
94174 rb_inc_page(cpu_buffer, &bpage);
94175@@ -3173,7 +3173,7 @@ static inline unsigned long
94176 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94177 {
94178 return local_read(&cpu_buffer->entries) -
94179- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94180+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94181 }
94182
94183 /**
94184@@ -3262,7 +3262,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94185 return 0;
94186
94187 cpu_buffer = buffer->buffers[cpu];
94188- ret = local_read(&cpu_buffer->overrun);
94189+ ret = local_read_unchecked(&cpu_buffer->overrun);
94190
94191 return ret;
94192 }
94193@@ -3285,7 +3285,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94194 return 0;
94195
94196 cpu_buffer = buffer->buffers[cpu];
94197- ret = local_read(&cpu_buffer->commit_overrun);
94198+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94199
94200 return ret;
94201 }
94202@@ -3370,7 +3370,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94203 /* if you care about this being correct, lock the buffer */
94204 for_each_buffer_cpu(buffer, cpu) {
94205 cpu_buffer = buffer->buffers[cpu];
94206- overruns += local_read(&cpu_buffer->overrun);
94207+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94208 }
94209
94210 return overruns;
94211@@ -3541,8 +3541,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94212 /*
94213 * Reset the reader page to size zero.
94214 */
94215- local_set(&cpu_buffer->reader_page->write, 0);
94216- local_set(&cpu_buffer->reader_page->entries, 0);
94217+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94218+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94219 local_set(&cpu_buffer->reader_page->page->commit, 0);
94220 cpu_buffer->reader_page->real_end = 0;
94221
94222@@ -3576,7 +3576,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94223 * want to compare with the last_overrun.
94224 */
94225 smp_mb();
94226- overwrite = local_read(&(cpu_buffer->overrun));
94227+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94228
94229 /*
94230 * Here's the tricky part.
94231@@ -4148,8 +4148,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94232
94233 cpu_buffer->head_page
94234 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94235- local_set(&cpu_buffer->head_page->write, 0);
94236- local_set(&cpu_buffer->head_page->entries, 0);
94237+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94238+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94239 local_set(&cpu_buffer->head_page->page->commit, 0);
94240
94241 cpu_buffer->head_page->read = 0;
94242@@ -4159,14 +4159,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94243
94244 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94245 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94246- local_set(&cpu_buffer->reader_page->write, 0);
94247- local_set(&cpu_buffer->reader_page->entries, 0);
94248+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94249+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94250 local_set(&cpu_buffer->reader_page->page->commit, 0);
94251 cpu_buffer->reader_page->read = 0;
94252
94253 local_set(&cpu_buffer->entries_bytes, 0);
94254- local_set(&cpu_buffer->overrun, 0);
94255- local_set(&cpu_buffer->commit_overrun, 0);
94256+ local_set_unchecked(&cpu_buffer->overrun, 0);
94257+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94258 local_set(&cpu_buffer->dropped_events, 0);
94259 local_set(&cpu_buffer->entries, 0);
94260 local_set(&cpu_buffer->committing, 0);
94261@@ -4571,8 +4571,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94262 rb_init_page(bpage);
94263 bpage = reader->page;
94264 reader->page = *data_page;
94265- local_set(&reader->write, 0);
94266- local_set(&reader->entries, 0);
94267+ local_set_unchecked(&reader->write, 0);
94268+ local_set_unchecked(&reader->entries, 0);
94269 reader->read = 0;
94270 *data_page = bpage;
94271
94272diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94273index 1520933..c651ebc 100644
94274--- a/kernel/trace/trace.c
94275+++ b/kernel/trace/trace.c
94276@@ -3488,7 +3488,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94277 return 0;
94278 }
94279
94280-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94281+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94282 {
94283 /* do nothing if flag is already set */
94284 if (!!(trace_flags & mask) == !!enabled)
94285diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94286index 385391f..8d2250f 100644
94287--- a/kernel/trace/trace.h
94288+++ b/kernel/trace/trace.h
94289@@ -1280,7 +1280,7 @@ extern const char *__stop___tracepoint_str[];
94290 void trace_printk_init_buffers(void);
94291 void trace_printk_start_comm(void);
94292 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94293-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94294+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94295
94296 /*
94297 * Normal trace_printk() and friends allocates special buffers
94298diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94299index 57b67b1..66082a9 100644
94300--- a/kernel/trace/trace_clock.c
94301+++ b/kernel/trace/trace_clock.c
94302@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94303 return now;
94304 }
94305
94306-static atomic64_t trace_counter;
94307+static atomic64_unchecked_t trace_counter;
94308
94309 /*
94310 * trace_clock_counter(): simply an atomic counter.
94311@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94312 */
94313 u64 notrace trace_clock_counter(void)
94314 {
94315- return atomic64_add_return(1, &trace_counter);
94316+ return atomic64_inc_return_unchecked(&trace_counter);
94317 }
94318diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94319index ef06ce7..3ea161d 100644
94320--- a/kernel/trace/trace_events.c
94321+++ b/kernel/trace/trace_events.c
94322@@ -1720,7 +1720,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94323 return 0;
94324 }
94325
94326-struct ftrace_module_file_ops;
94327 static void __add_event_to_tracers(struct ftrace_event_call *call);
94328
94329 /* Add an additional event_call dynamically */
94330diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
94331index 0abd9b8..6a663a2 100644
94332--- a/kernel/trace/trace_mmiotrace.c
94333+++ b/kernel/trace/trace_mmiotrace.c
94334@@ -24,7 +24,7 @@ struct header_iter {
94335 static struct trace_array *mmio_trace_array;
94336 static bool overrun_detected;
94337 static unsigned long prev_overruns;
94338-static atomic_t dropped_count;
94339+static atomic_unchecked_t dropped_count;
94340
94341 static void mmio_reset_data(struct trace_array *tr)
94342 {
94343@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
94344
94345 static unsigned long count_overruns(struct trace_iterator *iter)
94346 {
94347- unsigned long cnt = atomic_xchg(&dropped_count, 0);
94348+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
94349 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
94350
94351 if (over > prev_overruns)
94352@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
94353 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
94354 sizeof(*entry), 0, pc);
94355 if (!event) {
94356- atomic_inc(&dropped_count);
94357+ atomic_inc_unchecked(&dropped_count);
94358 return;
94359 }
94360 entry = ring_buffer_event_data(event);
94361@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
94362 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
94363 sizeof(*entry), 0, pc);
94364 if (!event) {
94365- atomic_inc(&dropped_count);
94366+ atomic_inc_unchecked(&dropped_count);
94367 return;
94368 }
94369 entry = ring_buffer_event_data(event);
94370diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
94371index c6977d5..d243785 100644
94372--- a/kernel/trace/trace_output.c
94373+++ b/kernel/trace/trace_output.c
94374@@ -712,14 +712,16 @@ int register_ftrace_event(struct trace_event *event)
94375 goto out;
94376 }
94377
94378+ pax_open_kernel();
94379 if (event->funcs->trace == NULL)
94380- event->funcs->trace = trace_nop_print;
94381+ *(void **)&event->funcs->trace = trace_nop_print;
94382 if (event->funcs->raw == NULL)
94383- event->funcs->raw = trace_nop_print;
94384+ *(void **)&event->funcs->raw = trace_nop_print;
94385 if (event->funcs->hex == NULL)
94386- event->funcs->hex = trace_nop_print;
94387+ *(void **)&event->funcs->hex = trace_nop_print;
94388 if (event->funcs->binary == NULL)
94389- event->funcs->binary = trace_nop_print;
94390+ *(void **)&event->funcs->binary = trace_nop_print;
94391+ pax_close_kernel();
94392
94393 key = event->type & (EVENT_HASHSIZE - 1);
94394
94395diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
94396index 1f24ed9..10407ec 100644
94397--- a/kernel/trace/trace_seq.c
94398+++ b/kernel/trace/trace_seq.c
94399@@ -367,7 +367,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
94400
94401 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
94402 if (!IS_ERR(p)) {
94403- p = mangle_path(s->buffer + s->len, p, "\n");
94404+ p = mangle_path(s->buffer + s->len, p, "\n\\");
94405 if (p) {
94406 s->len = p - s->buffer;
94407 return 1;
94408diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
94409index 8a4e5cb..64f270d 100644
94410--- a/kernel/trace/trace_stack.c
94411+++ b/kernel/trace/trace_stack.c
94412@@ -91,7 +91,7 @@ check_stack(unsigned long ip, unsigned long *stack)
94413 return;
94414
94415 /* we do not handle interrupt stacks yet */
94416- if (!object_is_on_stack(stack))
94417+ if (!object_starts_on_stack(stack))
94418 return;
94419
94420 local_irq_save(flags);
94421diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
94422index 7e3cd7a..5156a5fe 100644
94423--- a/kernel/trace/trace_syscalls.c
94424+++ b/kernel/trace/trace_syscalls.c
94425@@ -602,6 +602,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
94426 int num;
94427
94428 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94429+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94430+ return -EINVAL;
94431
94432 mutex_lock(&syscall_trace_lock);
94433 if (!sys_perf_refcount_enter)
94434@@ -622,6 +624,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
94435 int num;
94436
94437 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94438+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94439+ return;
94440
94441 mutex_lock(&syscall_trace_lock);
94442 sys_perf_refcount_enter--;
94443@@ -674,6 +678,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
94444 int num;
94445
94446 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94447+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94448+ return -EINVAL;
94449
94450 mutex_lock(&syscall_trace_lock);
94451 if (!sys_perf_refcount_exit)
94452@@ -694,6 +700,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
94453 int num;
94454
94455 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94456+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94457+ return;
94458
94459 mutex_lock(&syscall_trace_lock);
94460 sys_perf_refcount_exit--;
94461diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
94462index aa312b0..395f343 100644
94463--- a/kernel/user_namespace.c
94464+++ b/kernel/user_namespace.c
94465@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
94466 !kgid_has_mapping(parent_ns, group))
94467 return -EPERM;
94468
94469+#ifdef CONFIG_GRKERNSEC
94470+ /*
94471+ * This doesn't really inspire confidence:
94472+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
94473+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
94474+ * Increases kernel attack surface in areas developers
94475+ * previously cared little about ("low importance due
94476+ * to requiring "root" capability")
94477+ * To be removed when this code receives *proper* review
94478+ */
94479+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
94480+ !capable(CAP_SETGID))
94481+ return -EPERM;
94482+#endif
94483+
94484 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
94485 if (!ns)
94486 return -ENOMEM;
94487@@ -872,7 +887,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
94488 if (atomic_read(&current->mm->mm_users) > 1)
94489 return -EINVAL;
94490
94491- if (current->fs->users != 1)
94492+ if (atomic_read(&current->fs->users) != 1)
94493 return -EINVAL;
94494
94495 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
94496diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
94497index c8eac43..4b5f08f 100644
94498--- a/kernel/utsname_sysctl.c
94499+++ b/kernel/utsname_sysctl.c
94500@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
94501 static int proc_do_uts_string(struct ctl_table *table, int write,
94502 void __user *buffer, size_t *lenp, loff_t *ppos)
94503 {
94504- struct ctl_table uts_table;
94505+ ctl_table_no_const uts_table;
94506 int r;
94507 memcpy(&uts_table, table, sizeof(uts_table));
94508 uts_table.data = get_uts(table, write);
94509diff --git a/kernel/watchdog.c b/kernel/watchdog.c
94510index a8d6914..8fbdb13 100644
94511--- a/kernel/watchdog.c
94512+++ b/kernel/watchdog.c
94513@@ -521,7 +521,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
94514 static void watchdog_nmi_disable(unsigned int cpu) { return; }
94515 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
94516
94517-static struct smp_hotplug_thread watchdog_threads = {
94518+static struct smp_hotplug_thread watchdog_threads __read_only = {
94519 .store = &softlockup_watchdog,
94520 .thread_should_run = watchdog_should_run,
94521 .thread_fn = watchdog,
94522diff --git a/kernel/workqueue.c b/kernel/workqueue.c
94523index 5dbe22a..872413c 100644
94524--- a/kernel/workqueue.c
94525+++ b/kernel/workqueue.c
94526@@ -4507,7 +4507,7 @@ static void rebind_workers(struct worker_pool *pool)
94527 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
94528 worker_flags |= WORKER_REBOUND;
94529 worker_flags &= ~WORKER_UNBOUND;
94530- ACCESS_ONCE(worker->flags) = worker_flags;
94531+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
94532 }
94533
94534 spin_unlock_irq(&pool->lock);
94535diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
94536index a285900..5e3b26b 100644
94537--- a/lib/Kconfig.debug
94538+++ b/lib/Kconfig.debug
94539@@ -882,7 +882,7 @@ config DEBUG_MUTEXES
94540
94541 config DEBUG_WW_MUTEX_SLOWPATH
94542 bool "Wait/wound mutex debugging: Slowpath testing"
94543- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94544+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94545 select DEBUG_LOCK_ALLOC
94546 select DEBUG_SPINLOCK
94547 select DEBUG_MUTEXES
94548@@ -899,7 +899,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
94549
94550 config DEBUG_LOCK_ALLOC
94551 bool "Lock debugging: detect incorrect freeing of live locks"
94552- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94553+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94554 select DEBUG_SPINLOCK
94555 select DEBUG_MUTEXES
94556 select LOCKDEP
94557@@ -913,7 +913,7 @@ config DEBUG_LOCK_ALLOC
94558
94559 config PROVE_LOCKING
94560 bool "Lock debugging: prove locking correctness"
94561- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94562+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94563 select LOCKDEP
94564 select DEBUG_SPINLOCK
94565 select DEBUG_MUTEXES
94566@@ -964,7 +964,7 @@ config LOCKDEP
94567
94568 config LOCK_STAT
94569 bool "Lock usage statistics"
94570- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94571+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94572 select LOCKDEP
94573 select DEBUG_SPINLOCK
94574 select DEBUG_MUTEXES
94575@@ -1437,6 +1437,7 @@ config LATENCYTOP
94576 depends on DEBUG_KERNEL
94577 depends on STACKTRACE_SUPPORT
94578 depends on PROC_FS
94579+ depends on !GRKERNSEC_HIDESYM
94580 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
94581 select KALLSYMS
94582 select KALLSYMS_ALL
94583@@ -1453,7 +1454,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94584 config DEBUG_STRICT_USER_COPY_CHECKS
94585 bool "Strict user copy size checks"
94586 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94587- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
94588+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
94589 help
94590 Enabling this option turns a certain set of sanity checks for user
94591 copy operations into compile time failures.
94592@@ -1581,7 +1582,7 @@ endmenu # runtime tests
94593
94594 config PROVIDE_OHCI1394_DMA_INIT
94595 bool "Remote debugging over FireWire early on boot"
94596- depends on PCI && X86
94597+ depends on PCI && X86 && !GRKERNSEC
94598 help
94599 If you want to debug problems which hang or crash the kernel early
94600 on boot and the crashing machine has a FireWire port, you can use
94601diff --git a/lib/Makefile b/lib/Makefile
94602index d6b4bc4..a3724eb 100644
94603--- a/lib/Makefile
94604+++ b/lib/Makefile
94605@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
94606 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
94607 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
94608 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
94609-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
94610+obj-y += list_debug.o
94611 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
94612
94613 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
94614diff --git a/lib/average.c b/lib/average.c
94615index 114d1be..ab0350c 100644
94616--- a/lib/average.c
94617+++ b/lib/average.c
94618@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
94619 {
94620 unsigned long internal = ACCESS_ONCE(avg->internal);
94621
94622- ACCESS_ONCE(avg->internal) = internal ?
94623+ ACCESS_ONCE_RW(avg->internal) = internal ?
94624 (((internal << avg->weight) - internal) +
94625 (val << avg->factor)) >> avg->weight :
94626 (val << avg->factor);
94627diff --git a/lib/bitmap.c b/lib/bitmap.c
94628index 33ce011..89e3d6f 100644
94629--- a/lib/bitmap.c
94630+++ b/lib/bitmap.c
94631@@ -433,7 +433,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
94632 {
94633 int c, old_c, totaldigits, ndigits, nchunks, nbits;
94634 u32 chunk;
94635- const char __user __force *ubuf = (const char __user __force *)buf;
94636+ const char __user *ubuf = (const char __force_user *)buf;
94637
94638 bitmap_zero(maskp, nmaskbits);
94639
94640@@ -518,7 +518,7 @@ int bitmap_parse_user(const char __user *ubuf,
94641 {
94642 if (!access_ok(VERIFY_READ, ubuf, ulen))
94643 return -EFAULT;
94644- return __bitmap_parse((const char __force *)ubuf,
94645+ return __bitmap_parse((const char __force_kernel *)ubuf,
94646 ulen, 1, maskp, nmaskbits);
94647
94648 }
94649@@ -609,7 +609,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
94650 {
94651 unsigned a, b;
94652 int c, old_c, totaldigits;
94653- const char __user __force *ubuf = (const char __user __force *)buf;
94654+ const char __user *ubuf = (const char __force_user *)buf;
94655 int exp_digit, in_range;
94656
94657 totaldigits = c = 0;
94658@@ -704,7 +704,7 @@ int bitmap_parselist_user(const char __user *ubuf,
94659 {
94660 if (!access_ok(VERIFY_READ, ubuf, ulen))
94661 return -EFAULT;
94662- return __bitmap_parselist((const char __force *)ubuf,
94663+ return __bitmap_parselist((const char __force_kernel *)ubuf,
94664 ulen, 1, maskp, nmaskbits);
94665 }
94666 EXPORT_SYMBOL(bitmap_parselist_user);
94667diff --git a/lib/bug.c b/lib/bug.c
94668index d1d7c78..b354235 100644
94669--- a/lib/bug.c
94670+++ b/lib/bug.c
94671@@ -137,6 +137,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
94672 return BUG_TRAP_TYPE_NONE;
94673
94674 bug = find_bug(bugaddr);
94675+ if (!bug)
94676+ return BUG_TRAP_TYPE_NONE;
94677
94678 file = NULL;
94679 line = 0;
94680diff --git a/lib/debugobjects.c b/lib/debugobjects.c
94681index 547f7f9..a6d4ba0 100644
94682--- a/lib/debugobjects.c
94683+++ b/lib/debugobjects.c
94684@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
94685 if (limit > 4)
94686 return;
94687
94688- is_on_stack = object_is_on_stack(addr);
94689+ is_on_stack = object_starts_on_stack(addr);
94690 if (is_on_stack == onstack)
94691 return;
94692
94693diff --git a/lib/div64.c b/lib/div64.c
94694index 4382ad7..08aa558 100644
94695--- a/lib/div64.c
94696+++ b/lib/div64.c
94697@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
94698 EXPORT_SYMBOL(__div64_32);
94699
94700 #ifndef div_s64_rem
94701-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94702+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94703 {
94704 u64 quotient;
94705
94706@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
94707 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
94708 */
94709 #ifndef div64_u64
94710-u64 div64_u64(u64 dividend, u64 divisor)
94711+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
94712 {
94713 u32 high = divisor >> 32;
94714 u64 quot;
94715diff --git a/lib/dma-debug.c b/lib/dma-debug.c
94716index 98f2d7e..899da5c 100644
94717--- a/lib/dma-debug.c
94718+++ b/lib/dma-debug.c
94719@@ -971,7 +971,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
94720
94721 void dma_debug_add_bus(struct bus_type *bus)
94722 {
94723- struct notifier_block *nb;
94724+ notifier_block_no_const *nb;
94725
94726 if (global_disable)
94727 return;
94728@@ -1148,7 +1148,7 @@ static void check_unmap(struct dma_debug_entry *ref)
94729
94730 static void check_for_stack(struct device *dev, void *addr)
94731 {
94732- if (object_is_on_stack(addr))
94733+ if (object_starts_on_stack(addr))
94734 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
94735 "stack [addr=%p]\n", addr);
94736 }
94737diff --git a/lib/hash.c b/lib/hash.c
94738index fea973f..386626f 100644
94739--- a/lib/hash.c
94740+++ b/lib/hash.c
94741@@ -14,7 +14,7 @@
94742 #include <linux/hash.h>
94743 #include <linux/cache.h>
94744
94745-static struct fast_hash_ops arch_hash_ops __read_mostly = {
94746+static struct fast_hash_ops arch_hash_ops __read_only = {
94747 .hash = jhash,
94748 .hash2 = jhash2,
94749 };
94750diff --git a/lib/inflate.c b/lib/inflate.c
94751index 013a761..c28f3fc 100644
94752--- a/lib/inflate.c
94753+++ b/lib/inflate.c
94754@@ -269,7 +269,7 @@ static void free(void *where)
94755 malloc_ptr = free_mem_ptr;
94756 }
94757 #else
94758-#define malloc(a) kmalloc(a, GFP_KERNEL)
94759+#define malloc(a) kmalloc((a), GFP_KERNEL)
94760 #define free(a) kfree(a)
94761 #endif
94762
94763diff --git a/lib/ioremap.c b/lib/ioremap.c
94764index 0c9216c..863bd89 100644
94765--- a/lib/ioremap.c
94766+++ b/lib/ioremap.c
94767@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
94768 unsigned long next;
94769
94770 phys_addr -= addr;
94771- pmd = pmd_alloc(&init_mm, pud, addr);
94772+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
94773 if (!pmd)
94774 return -ENOMEM;
94775 do {
94776@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
94777 unsigned long next;
94778
94779 phys_addr -= addr;
94780- pud = pud_alloc(&init_mm, pgd, addr);
94781+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
94782 if (!pud)
94783 return -ENOMEM;
94784 do {
94785diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
94786index bd2bea9..6b3c95e 100644
94787--- a/lib/is_single_threaded.c
94788+++ b/lib/is_single_threaded.c
94789@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
94790 struct task_struct *p, *t;
94791 bool ret;
94792
94793+ if (!mm)
94794+ return true;
94795+
94796 if (atomic_read(&task->signal->live) != 1)
94797 return false;
94798
94799diff --git a/lib/kobject.c b/lib/kobject.c
94800index 58751bb..93a1853 100644
94801--- a/lib/kobject.c
94802+++ b/lib/kobject.c
94803@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
94804
94805
94806 static DEFINE_SPINLOCK(kobj_ns_type_lock);
94807-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
94808+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
94809
94810-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94811+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94812 {
94813 enum kobj_ns_type type = ops->type;
94814 int error;
94815diff --git a/lib/list_debug.c b/lib/list_debug.c
94816index c24c2f7..f0296f4 100644
94817--- a/lib/list_debug.c
94818+++ b/lib/list_debug.c
94819@@ -11,7 +11,9 @@
94820 #include <linux/bug.h>
94821 #include <linux/kernel.h>
94822 #include <linux/rculist.h>
94823+#include <linux/mm.h>
94824
94825+#ifdef CONFIG_DEBUG_LIST
94826 /*
94827 * Insert a new entry between two known consecutive entries.
94828 *
94829@@ -19,21 +21,40 @@
94830 * the prev/next entries already!
94831 */
94832
94833+static bool __list_add_debug(struct list_head *new,
94834+ struct list_head *prev,
94835+ struct list_head *next)
94836+{
94837+ if (unlikely(next->prev != prev)) {
94838+ printk(KERN_ERR "list_add corruption. next->prev should be "
94839+ "prev (%p), but was %p. (next=%p).\n",
94840+ prev, next->prev, next);
94841+ BUG();
94842+ return false;
94843+ }
94844+ if (unlikely(prev->next != next)) {
94845+ printk(KERN_ERR "list_add corruption. prev->next should be "
94846+ "next (%p), but was %p. (prev=%p).\n",
94847+ next, prev->next, prev);
94848+ BUG();
94849+ return false;
94850+ }
94851+ if (unlikely(new == prev || new == next)) {
94852+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
94853+ new, prev, next);
94854+ BUG();
94855+ return false;
94856+ }
94857+ return true;
94858+}
94859+
94860 void __list_add(struct list_head *new,
94861- struct list_head *prev,
94862- struct list_head *next)
94863+ struct list_head *prev,
94864+ struct list_head *next)
94865 {
94866- WARN(next->prev != prev,
94867- "list_add corruption. next->prev should be "
94868- "prev (%p), but was %p. (next=%p).\n",
94869- prev, next->prev, next);
94870- WARN(prev->next != next,
94871- "list_add corruption. prev->next should be "
94872- "next (%p), but was %p. (prev=%p).\n",
94873- next, prev->next, prev);
94874- WARN(new == prev || new == next,
94875- "list_add double add: new=%p, prev=%p, next=%p.\n",
94876- new, prev, next);
94877+ if (!__list_add_debug(new, prev, next))
94878+ return;
94879+
94880 next->prev = new;
94881 new->next = next;
94882 new->prev = prev;
94883@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
94884 }
94885 EXPORT_SYMBOL(__list_add);
94886
94887-void __list_del_entry(struct list_head *entry)
94888+static bool __list_del_entry_debug(struct list_head *entry)
94889 {
94890 struct list_head *prev, *next;
94891
94892 prev = entry->prev;
94893 next = entry->next;
94894
94895- if (WARN(next == LIST_POISON1,
94896- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94897- entry, LIST_POISON1) ||
94898- WARN(prev == LIST_POISON2,
94899- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94900- entry, LIST_POISON2) ||
94901- WARN(prev->next != entry,
94902- "list_del corruption. prev->next should be %p, "
94903- "but was %p\n", entry, prev->next) ||
94904- WARN(next->prev != entry,
94905- "list_del corruption. next->prev should be %p, "
94906- "but was %p\n", entry, next->prev))
94907+ if (unlikely(next == LIST_POISON1)) {
94908+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94909+ entry, LIST_POISON1);
94910+ BUG();
94911+ return false;
94912+ }
94913+ if (unlikely(prev == LIST_POISON2)) {
94914+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94915+ entry, LIST_POISON2);
94916+ BUG();
94917+ return false;
94918+ }
94919+ if (unlikely(entry->prev->next != entry)) {
94920+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
94921+ "but was %p\n", entry, prev->next);
94922+ BUG();
94923+ return false;
94924+ }
94925+ if (unlikely(entry->next->prev != entry)) {
94926+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
94927+ "but was %p\n", entry, next->prev);
94928+ BUG();
94929+ return false;
94930+ }
94931+ return true;
94932+}
94933+
94934+void __list_del_entry(struct list_head *entry)
94935+{
94936+ if (!__list_del_entry_debug(entry))
94937 return;
94938
94939- __list_del(prev, next);
94940+ __list_del(entry->prev, entry->next);
94941 }
94942 EXPORT_SYMBOL(__list_del_entry);
94943
94944@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
94945 void __list_add_rcu(struct list_head *new,
94946 struct list_head *prev, struct list_head *next)
94947 {
94948- WARN(next->prev != prev,
94949- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
94950- prev, next->prev, next);
94951- WARN(prev->next != next,
94952- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
94953- next, prev->next, prev);
94954+ if (!__list_add_debug(new, prev, next))
94955+ return;
94956+
94957 new->next = next;
94958 new->prev = prev;
94959 rcu_assign_pointer(list_next_rcu(prev), new);
94960 next->prev = new;
94961 }
94962 EXPORT_SYMBOL(__list_add_rcu);
94963+#endif
94964+
94965+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
94966+{
94967+#ifdef CONFIG_DEBUG_LIST
94968+ if (!__list_add_debug(new, prev, next))
94969+ return;
94970+#endif
94971+
94972+ pax_open_kernel();
94973+ next->prev = new;
94974+ new->next = next;
94975+ new->prev = prev;
94976+ prev->next = new;
94977+ pax_close_kernel();
94978+}
94979+EXPORT_SYMBOL(__pax_list_add);
94980+
94981+void pax_list_del(struct list_head *entry)
94982+{
94983+#ifdef CONFIG_DEBUG_LIST
94984+ if (!__list_del_entry_debug(entry))
94985+ return;
94986+#endif
94987+
94988+ pax_open_kernel();
94989+ __list_del(entry->prev, entry->next);
94990+ entry->next = LIST_POISON1;
94991+ entry->prev = LIST_POISON2;
94992+ pax_close_kernel();
94993+}
94994+EXPORT_SYMBOL(pax_list_del);
94995+
94996+void pax_list_del_init(struct list_head *entry)
94997+{
94998+ pax_open_kernel();
94999+ __list_del(entry->prev, entry->next);
95000+ INIT_LIST_HEAD(entry);
95001+ pax_close_kernel();
95002+}
95003+EXPORT_SYMBOL(pax_list_del_init);
95004+
95005+void __pax_list_add_rcu(struct list_head *new,
95006+ struct list_head *prev, struct list_head *next)
95007+{
95008+#ifdef CONFIG_DEBUG_LIST
95009+ if (!__list_add_debug(new, prev, next))
95010+ return;
95011+#endif
95012+
95013+ pax_open_kernel();
95014+ new->next = next;
95015+ new->prev = prev;
95016+ rcu_assign_pointer(list_next_rcu(prev), new);
95017+ next->prev = new;
95018+ pax_close_kernel();
95019+}
95020+EXPORT_SYMBOL(__pax_list_add_rcu);
95021+
95022+void pax_list_del_rcu(struct list_head *entry)
95023+{
95024+#ifdef CONFIG_DEBUG_LIST
95025+ if (!__list_del_entry_debug(entry))
95026+ return;
95027+#endif
95028+
95029+ pax_open_kernel();
95030+ __list_del(entry->prev, entry->next);
95031+ entry->next = LIST_POISON1;
95032+ entry->prev = LIST_POISON2;
95033+ pax_close_kernel();
95034+}
95035+EXPORT_SYMBOL(pax_list_del_rcu);
95036diff --git a/lib/lockref.c b/lib/lockref.c
95037index d2233de..fa1a2f6 100644
95038--- a/lib/lockref.c
95039+++ b/lib/lockref.c
95040@@ -48,13 +48,13 @@
95041 void lockref_get(struct lockref *lockref)
95042 {
95043 CMPXCHG_LOOP(
95044- new.count++;
95045+ __lockref_inc(&new);
95046 ,
95047 return;
95048 );
95049
95050 spin_lock(&lockref->lock);
95051- lockref->count++;
95052+ __lockref_inc(lockref);
95053 spin_unlock(&lockref->lock);
95054 }
95055 EXPORT_SYMBOL(lockref_get);
95056@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95057 int retval;
95058
95059 CMPXCHG_LOOP(
95060- new.count++;
95061+ __lockref_inc(&new);
95062 if (!old.count)
95063 return 0;
95064 ,
95065@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95066 spin_lock(&lockref->lock);
95067 retval = 0;
95068 if (lockref->count) {
95069- lockref->count++;
95070+ __lockref_inc(lockref);
95071 retval = 1;
95072 }
95073 spin_unlock(&lockref->lock);
95074@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
95075 int lockref_get_or_lock(struct lockref *lockref)
95076 {
95077 CMPXCHG_LOOP(
95078- new.count++;
95079+ __lockref_inc(&new);
95080 if (!old.count)
95081 break;
95082 ,
95083@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
95084 spin_lock(&lockref->lock);
95085 if (!lockref->count)
95086 return 0;
95087- lockref->count++;
95088+ __lockref_inc(lockref);
95089 spin_unlock(&lockref->lock);
95090 return 1;
95091 }
95092@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95093 int lockref_put_or_lock(struct lockref *lockref)
95094 {
95095 CMPXCHG_LOOP(
95096- new.count--;
95097+ __lockref_dec(&new);
95098 if (old.count <= 1)
95099 break;
95100 ,
95101@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
95102 spin_lock(&lockref->lock);
95103 if (lockref->count <= 1)
95104 return 0;
95105- lockref->count--;
95106+ __lockref_dec(lockref);
95107 spin_unlock(&lockref->lock);
95108 return 1;
95109 }
95110@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95111 int retval;
95112
95113 CMPXCHG_LOOP(
95114- new.count++;
95115+ __lockref_inc(&new);
95116 if ((int)old.count < 0)
95117 return 0;
95118 ,
95119@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95120 spin_lock(&lockref->lock);
95121 retval = 0;
95122 if ((int) lockref->count >= 0) {
95123- lockref->count++;
95124+ __lockref_inc(lockref);
95125 retval = 1;
95126 }
95127 spin_unlock(&lockref->lock);
95128diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95129index a89cf09..1a42c2d 100644
95130--- a/lib/percpu-refcount.c
95131+++ b/lib/percpu-refcount.c
95132@@ -29,7 +29,7 @@
95133 * can't hit 0 before we've added up all the percpu refs.
95134 */
95135
95136-#define PCPU_COUNT_BIAS (1U << 31)
95137+#define PCPU_COUNT_BIAS (1U << 30)
95138
95139 static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
95140 {
95141diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95142index 3291a8e..346a91e 100644
95143--- a/lib/radix-tree.c
95144+++ b/lib/radix-tree.c
95145@@ -67,7 +67,7 @@ struct radix_tree_preload {
95146 int nr;
95147 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95148 };
95149-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95150+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95151
95152 static inline void *ptr_to_indirect(void *ptr)
95153 {
95154diff --git a/lib/random32.c b/lib/random32.c
95155index c9b6bf3..4752c6d4 100644
95156--- a/lib/random32.c
95157+++ b/lib/random32.c
95158@@ -46,7 +46,7 @@ static inline void prandom_state_selftest(void)
95159 }
95160 #endif
95161
95162-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95163+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95164
95165 /**
95166 * prandom_u32_state - seeded pseudo-random number generator.
95167diff --git a/lib/rbtree.c b/lib/rbtree.c
95168index c16c81a..4dcbda1 100644
95169--- a/lib/rbtree.c
95170+++ b/lib/rbtree.c
95171@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95172 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95173
95174 static const struct rb_augment_callbacks dummy_callbacks = {
95175- dummy_propagate, dummy_copy, dummy_rotate
95176+ .propagate = dummy_propagate,
95177+ .copy = dummy_copy,
95178+ .rotate = dummy_rotate
95179 };
95180
95181 void rb_insert_color(struct rb_node *node, struct rb_root *root)
95182diff --git a/lib/show_mem.c b/lib/show_mem.c
95183index 0922579..9d7adb9 100644
95184--- a/lib/show_mem.c
95185+++ b/lib/show_mem.c
95186@@ -44,6 +44,6 @@ void show_mem(unsigned int filter)
95187 quicklist_total_size());
95188 #endif
95189 #ifdef CONFIG_MEMORY_FAILURE
95190- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95191+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95192 #endif
95193 }
95194diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95195index bb2b201..46abaf9 100644
95196--- a/lib/strncpy_from_user.c
95197+++ b/lib/strncpy_from_user.c
95198@@ -21,7 +21,7 @@
95199 */
95200 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95201 {
95202- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95203+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95204 long res = 0;
95205
95206 /*
95207diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95208index a28df52..3d55877 100644
95209--- a/lib/strnlen_user.c
95210+++ b/lib/strnlen_user.c
95211@@ -26,7 +26,7 @@
95212 */
95213 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95214 {
95215- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95216+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95217 long align, res = 0;
95218 unsigned long c;
95219
95220diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95221index 4abda07..b9d3765 100644
95222--- a/lib/swiotlb.c
95223+++ b/lib/swiotlb.c
95224@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95225
95226 void
95227 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95228- dma_addr_t dev_addr)
95229+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95230 {
95231 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95232
95233diff --git a/lib/test_bpf.c b/lib/test_bpf.c
95234index 89e0345..3347efe 100644
95235--- a/lib/test_bpf.c
95236+++ b/lib/test_bpf.c
95237@@ -1798,7 +1798,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
95238 break;
95239
95240 case INTERNAL:
95241- fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
95242+ fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
95243 if (fp == NULL) {
95244 pr_cont("UNEXPECTED_FAIL no memory left\n");
95245 *err = -ENOMEM;
95246diff --git a/lib/usercopy.c b/lib/usercopy.c
95247index 4f5b1dd..7cab418 100644
95248--- a/lib/usercopy.c
95249+++ b/lib/usercopy.c
95250@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95251 WARN(1, "Buffer overflow detected!\n");
95252 }
95253 EXPORT_SYMBOL(copy_from_user_overflow);
95254+
95255+void copy_to_user_overflow(void)
95256+{
95257+ WARN(1, "Buffer overflow detected!\n");
95258+}
95259+EXPORT_SYMBOL(copy_to_user_overflow);
95260diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95261index 6fe2c84..2fe5ec6 100644
95262--- a/lib/vsprintf.c
95263+++ b/lib/vsprintf.c
95264@@ -16,6 +16,9 @@
95265 * - scnprintf and vscnprintf
95266 */
95267
95268+#ifdef CONFIG_GRKERNSEC_HIDESYM
95269+#define __INCLUDED_BY_HIDESYM 1
95270+#endif
95271 #include <stdarg.h>
95272 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
95273 #include <linux/types.h>
95274@@ -624,7 +627,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
95275 #ifdef CONFIG_KALLSYMS
95276 if (*fmt == 'B')
95277 sprint_backtrace(sym, value);
95278- else if (*fmt != 'f' && *fmt != 's')
95279+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
95280 sprint_symbol(sym, value);
95281 else
95282 sprint_symbol_no_offset(sym, value);
95283@@ -1183,7 +1186,11 @@ char *address_val(char *buf, char *end, const void *addr,
95284 return number(buf, end, num, spec);
95285 }
95286
95287+#ifdef CONFIG_GRKERNSEC_HIDESYM
95288+int kptr_restrict __read_mostly = 2;
95289+#else
95290 int kptr_restrict __read_mostly;
95291+#endif
95292
95293 /*
95294 * Show a '%p' thing. A kernel extension is that the '%p' is followed
95295@@ -1194,8 +1201,10 @@ int kptr_restrict __read_mostly;
95296 *
95297 * - 'F' For symbolic function descriptor pointers with offset
95298 * - 'f' For simple symbolic function names without offset
95299+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
95300 * - 'S' For symbolic direct pointers with offset
95301 * - 's' For symbolic direct pointers without offset
95302+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95303 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
95304 * - 'B' For backtraced symbolic direct pointers with offset
95305 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
95306@@ -1263,12 +1272,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95307
95308 if (!ptr && *fmt != 'K') {
95309 /*
95310- * Print (null) with the same width as a pointer so it makes
95311+ * Print (nil) with the same width as a pointer so it makes
95312 * tabular output look nice.
95313 */
95314 if (spec.field_width == -1)
95315 spec.field_width = default_width;
95316- return string(buf, end, "(null)", spec);
95317+ return string(buf, end, "(nil)", spec);
95318 }
95319
95320 switch (*fmt) {
95321@@ -1278,6 +1287,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95322 /* Fallthrough */
95323 case 'S':
95324 case 's':
95325+#ifdef CONFIG_GRKERNSEC_HIDESYM
95326+ break;
95327+#else
95328+ return symbol_string(buf, end, ptr, spec, fmt);
95329+#endif
95330+ case 'X':
95331+ ptr = dereference_function_descriptor(ptr);
95332+ case 'A':
95333 case 'B':
95334 return symbol_string(buf, end, ptr, spec, fmt);
95335 case 'R':
95336@@ -1333,6 +1350,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95337 va_end(va);
95338 return buf;
95339 }
95340+ case 'P':
95341+ break;
95342 case 'K':
95343 /*
95344 * %pK cannot be used in IRQ context because its test
95345@@ -1390,6 +1409,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95346 ((const struct file *)ptr)->f_path.dentry,
95347 spec, fmt);
95348 }
95349+
95350+#ifdef CONFIG_GRKERNSEC_HIDESYM
95351+ /* 'P' = approved pointers to copy to userland,
95352+ as in the /proc/kallsyms case, as we make it display nothing
95353+ for non-root users, and the real contents for root users
95354+ 'X' = approved simple symbols
95355+ Also ignore 'K' pointers, since we force their NULLing for non-root users
95356+ above
95357+ */
95358+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
95359+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
95360+ dump_stack();
95361+ ptr = NULL;
95362+ }
95363+#endif
95364+
95365 spec.flags |= SMALL;
95366 if (spec.field_width == -1) {
95367 spec.field_width = default_width;
95368@@ -2089,11 +2124,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95369 typeof(type) value; \
95370 if (sizeof(type) == 8) { \
95371 args = PTR_ALIGN(args, sizeof(u32)); \
95372- *(u32 *)&value = *(u32 *)args; \
95373- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95374+ *(u32 *)&value = *(const u32 *)args; \
95375+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95376 } else { \
95377 args = PTR_ALIGN(args, sizeof(type)); \
95378- value = *(typeof(type) *)args; \
95379+ value = *(const typeof(type) *)args; \
95380 } \
95381 args += sizeof(type); \
95382 value; \
95383@@ -2156,7 +2191,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95384 case FORMAT_TYPE_STR: {
95385 const char *str_arg = args;
95386 args += strlen(str_arg) + 1;
95387- str = string(str, end, (char *)str_arg, spec);
95388+ str = string(str, end, str_arg, spec);
95389 break;
95390 }
95391
95392diff --git a/localversion-grsec b/localversion-grsec
95393new file mode 100644
95394index 0000000..7cd6065
95395--- /dev/null
95396+++ b/localversion-grsec
95397@@ -0,0 +1 @@
95398+-grsec
95399diff --git a/mm/Kconfig b/mm/Kconfig
95400index 886db21..f514de2 100644
95401--- a/mm/Kconfig
95402+++ b/mm/Kconfig
95403@@ -333,10 +333,11 @@ config KSM
95404 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
95405
95406 config DEFAULT_MMAP_MIN_ADDR
95407- int "Low address space to protect from user allocation"
95408+ int "Low address space to protect from user allocation"
95409 depends on MMU
95410- default 4096
95411- help
95412+ default 32768 if ALPHA || ARM || PARISC || SPARC32
95413+ default 65536
95414+ help
95415 This is the portion of low virtual memory which should be protected
95416 from userspace allocation. Keeping a user from writing to low pages
95417 can help reduce the impact of kernel NULL pointer bugs.
95418@@ -367,7 +368,7 @@ config MEMORY_FAILURE
95419
95420 config HWPOISON_INJECT
95421 tristate "HWPoison pages injector"
95422- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
95423+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
95424 select PROC_PAGE_MONITOR
95425
95426 config NOMMU_INITIAL_TRIM_EXCESS
95427diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95428index 1706cbb..f89dbca 100644
95429--- a/mm/backing-dev.c
95430+++ b/mm/backing-dev.c
95431@@ -12,7 +12,7 @@
95432 #include <linux/device.h>
95433 #include <trace/events/writeback.h>
95434
95435-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
95436+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
95437
95438 struct backing_dev_info default_backing_dev_info = {
95439 .name = "default",
95440@@ -533,7 +533,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
95441 return err;
95442
95443 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
95444- atomic_long_inc_return(&bdi_seq));
95445+ atomic_long_inc_return_unchecked(&bdi_seq));
95446 if (err) {
95447 bdi_destroy(bdi);
95448 return err;
95449diff --git a/mm/filemap.c b/mm/filemap.c
95450index 90effcd..539aa64 100644
95451--- a/mm/filemap.c
95452+++ b/mm/filemap.c
95453@@ -2092,7 +2092,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95454 struct address_space *mapping = file->f_mapping;
95455
95456 if (!mapping->a_ops->readpage)
95457- return -ENOEXEC;
95458+ return -ENODEV;
95459 file_accessed(file);
95460 vma->vm_ops = &generic_file_vm_ops;
95461 return 0;
95462@@ -2270,6 +2270,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95463 *pos = i_size_read(inode);
95464
95465 if (limit != RLIM_INFINITY) {
95466+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95467 if (*pos >= limit) {
95468 send_sig(SIGXFSZ, current, 0);
95469 return -EFBIG;
95470diff --git a/mm/fremap.c b/mm/fremap.c
95471index 72b8fa3..c5b39f1 100644
95472--- a/mm/fremap.c
95473+++ b/mm/fremap.c
95474@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95475 retry:
95476 vma = find_vma(mm, start);
95477
95478+#ifdef CONFIG_PAX_SEGMEXEC
95479+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95480+ goto out;
95481+#endif
95482+
95483 /*
95484 * Make sure the vma is shared, that it supports prefaulting,
95485 * and that the remapped range is valid and fully within
95486diff --git a/mm/gup.c b/mm/gup.c
95487index 91d044b..a58ecf6 100644
95488--- a/mm/gup.c
95489+++ b/mm/gup.c
95490@@ -270,11 +270,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
95491 unsigned int fault_flags = 0;
95492 int ret;
95493
95494- /* For mlock, just skip the stack guard page. */
95495- if ((*flags & FOLL_MLOCK) &&
95496- (stack_guard_page_start(vma, address) ||
95497- stack_guard_page_end(vma, address + PAGE_SIZE)))
95498- return -ENOENT;
95499 if (*flags & FOLL_WRITE)
95500 fault_flags |= FAULT_FLAG_WRITE;
95501 if (nonblocking)
95502@@ -436,14 +431,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95503 if (!(gup_flags & FOLL_FORCE))
95504 gup_flags |= FOLL_NUMA;
95505
95506- do {
95507+ while (nr_pages) {
95508 struct page *page;
95509 unsigned int foll_flags = gup_flags;
95510 unsigned int page_increm;
95511
95512 /* first iteration or cross vma bound */
95513 if (!vma || start >= vma->vm_end) {
95514- vma = find_extend_vma(mm, start);
95515+ vma = find_vma(mm, start);
95516 if (!vma && in_gate_area(mm, start)) {
95517 int ret;
95518 ret = get_gate_page(mm, start & PAGE_MASK,
95519@@ -455,7 +450,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95520 goto next_page;
95521 }
95522
95523- if (!vma || check_vma_flags(vma, gup_flags))
95524+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
95525 return i ? : -EFAULT;
95526 if (is_vm_hugetlb_page(vma)) {
95527 i = follow_hugetlb_page(mm, vma, pages, vmas,
95528@@ -510,7 +505,7 @@ next_page:
95529 i += page_increm;
95530 start += page_increm * PAGE_SIZE;
95531 nr_pages -= page_increm;
95532- } while (nr_pages);
95533+ }
95534 return i;
95535 }
95536 EXPORT_SYMBOL(__get_user_pages);
95537diff --git a/mm/highmem.c b/mm/highmem.c
95538index 123bcd3..0de52ba 100644
95539--- a/mm/highmem.c
95540+++ b/mm/highmem.c
95541@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
95542 * So no dangers, even with speculative execution.
95543 */
95544 page = pte_page(pkmap_page_table[i]);
95545+ pax_open_kernel();
95546 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
95547-
95548+ pax_close_kernel();
95549 set_page_address(page, NULL);
95550 need_flush = 1;
95551 }
95552@@ -259,9 +260,11 @@ start:
95553 }
95554 }
95555 vaddr = PKMAP_ADDR(last_pkmap_nr);
95556+
95557+ pax_open_kernel();
95558 set_pte_at(&init_mm, vaddr,
95559 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95560-
95561+ pax_close_kernel();
95562 pkmap_count[last_pkmap_nr] = 1;
95563 set_page_address(page, (void *)vaddr);
95564
95565diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95566index eeceeeb..a209d58 100644
95567--- a/mm/hugetlb.c
95568+++ b/mm/hugetlb.c
95569@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95570 struct ctl_table *table, int write,
95571 void __user *buffer, size_t *length, loff_t *ppos)
95572 {
95573+ ctl_table_no_const t;
95574 struct hstate *h = &default_hstate;
95575 unsigned long tmp = h->max_huge_pages;
95576 int ret;
95577@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95578 if (!hugepages_supported())
95579 return -ENOTSUPP;
95580
95581- table->data = &tmp;
95582- table->maxlen = sizeof(unsigned long);
95583- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95584+ t = *table;
95585+ t.data = &tmp;
95586+ t.maxlen = sizeof(unsigned long);
95587+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
95588 if (ret)
95589 goto out;
95590
95591@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95592 struct hstate *h = &default_hstate;
95593 unsigned long tmp;
95594 int ret;
95595+ ctl_table_no_const hugetlb_table;
95596
95597 if (!hugepages_supported())
95598 return -ENOTSUPP;
95599@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95600 if (write && hstate_is_gigantic(h))
95601 return -EINVAL;
95602
95603- table->data = &tmp;
95604- table->maxlen = sizeof(unsigned long);
95605- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95606+ hugetlb_table = *table;
95607+ hugetlb_table.data = &tmp;
95608+ hugetlb_table.maxlen = sizeof(unsigned long);
95609+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
95610 if (ret)
95611 goto out;
95612
95613@@ -2792,6 +2796,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95614 mutex_unlock(&mapping->i_mmap_mutex);
95615 }
95616
95617+#ifdef CONFIG_PAX_SEGMEXEC
95618+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95619+{
95620+ struct mm_struct *mm = vma->vm_mm;
95621+ struct vm_area_struct *vma_m;
95622+ unsigned long address_m;
95623+ pte_t *ptep_m;
95624+
95625+ vma_m = pax_find_mirror_vma(vma);
95626+ if (!vma_m)
95627+ return;
95628+
95629+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95630+ address_m = address + SEGMEXEC_TASK_SIZE;
95631+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95632+ get_page(page_m);
95633+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
95634+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95635+}
95636+#endif
95637+
95638 /*
95639 * Hugetlb_cow() should be called with page lock of the original hugepage held.
95640 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
95641@@ -2903,6 +2928,11 @@ retry_avoidcopy:
95642 make_huge_pte(vma, new_page, 1));
95643 page_remove_rmap(old_page);
95644 hugepage_add_new_anon_rmap(new_page, vma, address);
95645+
95646+#ifdef CONFIG_PAX_SEGMEXEC
95647+ pax_mirror_huge_pte(vma, address, new_page);
95648+#endif
95649+
95650 /* Make the old page be freed below */
95651 new_page = old_page;
95652 }
95653@@ -3063,6 +3093,10 @@ retry:
95654 && (vma->vm_flags & VM_SHARED)));
95655 set_huge_pte_at(mm, address, ptep, new_pte);
95656
95657+#ifdef CONFIG_PAX_SEGMEXEC
95658+ pax_mirror_huge_pte(vma, address, page);
95659+#endif
95660+
95661 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95662 /* Optimization, do the COW without a second fault */
95663 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
95664@@ -3129,6 +3163,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95665 struct hstate *h = hstate_vma(vma);
95666 struct address_space *mapping;
95667
95668+#ifdef CONFIG_PAX_SEGMEXEC
95669+ struct vm_area_struct *vma_m;
95670+#endif
95671+
95672 address &= huge_page_mask(h);
95673
95674 ptep = huge_pte_offset(mm, address);
95675@@ -3142,6 +3180,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95676 VM_FAULT_SET_HINDEX(hstate_index(h));
95677 }
95678
95679+#ifdef CONFIG_PAX_SEGMEXEC
95680+ vma_m = pax_find_mirror_vma(vma);
95681+ if (vma_m) {
95682+ unsigned long address_m;
95683+
95684+ if (vma->vm_start > vma_m->vm_start) {
95685+ address_m = address;
95686+ address -= SEGMEXEC_TASK_SIZE;
95687+ vma = vma_m;
95688+ h = hstate_vma(vma);
95689+ } else
95690+ address_m = address + SEGMEXEC_TASK_SIZE;
95691+
95692+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95693+ return VM_FAULT_OOM;
95694+ address_m &= HPAGE_MASK;
95695+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95696+ }
95697+#endif
95698+
95699 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95700 if (!ptep)
95701 return VM_FAULT_OOM;
95702diff --git a/mm/internal.h b/mm/internal.h
95703index 5f2772f..4c3882c 100644
95704--- a/mm/internal.h
95705+++ b/mm/internal.h
95706@@ -134,6 +134,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
95707
95708 extern int __isolate_free_page(struct page *page, unsigned int order);
95709 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95710+extern void free_compound_page(struct page *page);
95711 extern void prep_compound_page(struct page *page, unsigned long order);
95712 #ifdef CONFIG_MEMORY_FAILURE
95713 extern bool is_free_buddy_page(struct page *page);
95714@@ -376,7 +377,7 @@ extern u32 hwpoison_filter_enable;
95715
95716 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
95717 unsigned long, unsigned long,
95718- unsigned long, unsigned long);
95719+ unsigned long, unsigned long) __intentional_overflow(-1);
95720
95721 extern void set_pageblock_order(void);
95722 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
95723diff --git a/mm/iov_iter.c b/mm/iov_iter.c
95724index 141dcf7..7327fd3 100644
95725--- a/mm/iov_iter.c
95726+++ b/mm/iov_iter.c
95727@@ -173,7 +173,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
95728
95729 while (bytes) {
95730 char __user *buf = iov->iov_base + base;
95731- int copy = min(bytes, iov->iov_len - base);
95732+ size_t copy = min(bytes, iov->iov_len - base);
95733
95734 base = 0;
95735 left = __copy_from_user_inatomic(vaddr, buf, copy);
95736@@ -201,7 +201,7 @@ static size_t copy_from_user_atomic_iovec(struct page *page,
95737
95738 kaddr = kmap_atomic(page);
95739 if (likely(i->nr_segs == 1)) {
95740- int left;
95741+ size_t left;
95742 char __user *buf = i->iov->iov_base + i->iov_offset;
95743 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
95744 copied = bytes - left;
95745@@ -231,7 +231,7 @@ static void advance_iovec(struct iov_iter *i, size_t bytes)
95746 * zero-length segments (without overruning the iovec).
95747 */
95748 while (bytes || unlikely(i->count && !iov->iov_len)) {
95749- int copy;
95750+ size_t copy;
95751
95752 copy = min(bytes, iov->iov_len - base);
95753 BUG_ON(!i->count || i->count < copy);
95754diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95755index 3cda50c..032ba634 100644
95756--- a/mm/kmemleak.c
95757+++ b/mm/kmemleak.c
95758@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
95759
95760 for (i = 0; i < object->trace_len; i++) {
95761 void *ptr = (void *)object->trace[i];
95762- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95763+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
95764 }
95765 }
95766
95767@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
95768 return -ENOMEM;
95769 }
95770
95771- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
95772+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
95773 &kmemleak_fops);
95774 if (!dentry)
95775 pr_warning("Failed to create the debugfs kmemleak file\n");
95776diff --git a/mm/maccess.c b/mm/maccess.c
95777index d53adf9..03a24bf 100644
95778--- a/mm/maccess.c
95779+++ b/mm/maccess.c
95780@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
95781 set_fs(KERNEL_DS);
95782 pagefault_disable();
95783 ret = __copy_from_user_inatomic(dst,
95784- (__force const void __user *)src, size);
95785+ (const void __force_user *)src, size);
95786 pagefault_enable();
95787 set_fs(old_fs);
95788
95789@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
95790
95791 set_fs(KERNEL_DS);
95792 pagefault_disable();
95793- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95794+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95795 pagefault_enable();
95796 set_fs(old_fs);
95797
95798diff --git a/mm/madvise.c b/mm/madvise.c
95799index 0938b30..199abe8 100644
95800--- a/mm/madvise.c
95801+++ b/mm/madvise.c
95802@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
95803 pgoff_t pgoff;
95804 unsigned long new_flags = vma->vm_flags;
95805
95806+#ifdef CONFIG_PAX_SEGMEXEC
95807+ struct vm_area_struct *vma_m;
95808+#endif
95809+
95810 switch (behavior) {
95811 case MADV_NORMAL:
95812 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95813@@ -126,6 +130,13 @@ success:
95814 /*
95815 * vm_flags is protected by the mmap_sem held in write mode.
95816 */
95817+
95818+#ifdef CONFIG_PAX_SEGMEXEC
95819+ vma_m = pax_find_mirror_vma(vma);
95820+ if (vma_m)
95821+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95822+#endif
95823+
95824 vma->vm_flags = new_flags;
95825
95826 out:
95827@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95828 struct vm_area_struct **prev,
95829 unsigned long start, unsigned long end)
95830 {
95831+
95832+#ifdef CONFIG_PAX_SEGMEXEC
95833+ struct vm_area_struct *vma_m;
95834+#endif
95835+
95836 *prev = vma;
95837 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95838 return -EINVAL;
95839@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95840 zap_page_range(vma, start, end - start, &details);
95841 } else
95842 zap_page_range(vma, start, end - start, NULL);
95843+
95844+#ifdef CONFIG_PAX_SEGMEXEC
95845+ vma_m = pax_find_mirror_vma(vma);
95846+ if (vma_m) {
95847+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95848+ struct zap_details details = {
95849+ .nonlinear_vma = vma_m,
95850+ .last_index = ULONG_MAX,
95851+ };
95852+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95853+ } else
95854+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95855+ }
95856+#endif
95857+
95858 return 0;
95859 }
95860
95861@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95862 if (end < start)
95863 return error;
95864
95865+#ifdef CONFIG_PAX_SEGMEXEC
95866+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95867+ if (end > SEGMEXEC_TASK_SIZE)
95868+ return error;
95869+ } else
95870+#endif
95871+
95872+ if (end > TASK_SIZE)
95873+ return error;
95874+
95875 error = 0;
95876 if (end == start)
95877 return error;
95878diff --git a/mm/memory-failure.c b/mm/memory-failure.c
95879index 44c6bd2..60369dc3 100644
95880--- a/mm/memory-failure.c
95881+++ b/mm/memory-failure.c
95882@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
95883
95884 int sysctl_memory_failure_recovery __read_mostly = 1;
95885
95886-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95887+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95888
95889 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
95890
95891@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
95892 pfn, t->comm, t->pid);
95893 si.si_signo = SIGBUS;
95894 si.si_errno = 0;
95895- si.si_addr = (void *)addr;
95896+ si.si_addr = (void __user *)addr;
95897 #ifdef __ARCH_SI_TRAPNO
95898 si.si_trapno = trapno;
95899 #endif
95900@@ -791,7 +791,7 @@ static struct page_state {
95901 unsigned long res;
95902 char *msg;
95903 int (*action)(struct page *p, unsigned long pfn);
95904-} error_states[] = {
95905+} __do_const error_states[] = {
95906 { reserved, reserved, "reserved kernel", me_kernel },
95907 /*
95908 * free pages are specially detected outside this table:
95909@@ -1099,7 +1099,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95910 nr_pages = 1 << compound_order(hpage);
95911 else /* normal page or thp */
95912 nr_pages = 1;
95913- atomic_long_add(nr_pages, &num_poisoned_pages);
95914+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
95915
95916 /*
95917 * We need/can do nothing about count=0 pages.
95918@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95919 if (PageHWPoison(hpage)) {
95920 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
95921 || (p != hpage && TestSetPageHWPoison(hpage))) {
95922- atomic_long_sub(nr_pages, &num_poisoned_pages);
95923+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95924 unlock_page(hpage);
95925 return 0;
95926 }
95927@@ -1196,14 +1196,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95928 */
95929 if (!PageHWPoison(p)) {
95930 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
95931- atomic_long_sub(nr_pages, &num_poisoned_pages);
95932+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95933 put_page(hpage);
95934 res = 0;
95935 goto out;
95936 }
95937 if (hwpoison_filter(p)) {
95938 if (TestClearPageHWPoison(p))
95939- atomic_long_sub(nr_pages, &num_poisoned_pages);
95940+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95941 unlock_page(hpage);
95942 put_page(hpage);
95943 return 0;
95944@@ -1433,7 +1433,7 @@ int unpoison_memory(unsigned long pfn)
95945 return 0;
95946 }
95947 if (TestClearPageHWPoison(p))
95948- atomic_long_dec(&num_poisoned_pages);
95949+ atomic_long_dec_unchecked(&num_poisoned_pages);
95950 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
95951 return 0;
95952 }
95953@@ -1447,7 +1447,7 @@ int unpoison_memory(unsigned long pfn)
95954 */
95955 if (TestClearPageHWPoison(page)) {
95956 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
95957- atomic_long_sub(nr_pages, &num_poisoned_pages);
95958+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95959 freeit = 1;
95960 if (PageHuge(page))
95961 clear_page_hwpoison_huge_page(page);
95962@@ -1572,11 +1572,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
95963 if (PageHuge(page)) {
95964 set_page_hwpoison_huge_page(hpage);
95965 dequeue_hwpoisoned_huge_page(hpage);
95966- atomic_long_add(1 << compound_order(hpage),
95967+ atomic_long_add_unchecked(1 << compound_order(hpage),
95968 &num_poisoned_pages);
95969 } else {
95970 SetPageHWPoison(page);
95971- atomic_long_inc(&num_poisoned_pages);
95972+ atomic_long_inc_unchecked(&num_poisoned_pages);
95973 }
95974 }
95975 return ret;
95976@@ -1615,7 +1615,7 @@ static int __soft_offline_page(struct page *page, int flags)
95977 put_page(page);
95978 pr_info("soft_offline: %#lx: invalidated\n", pfn);
95979 SetPageHWPoison(page);
95980- atomic_long_inc(&num_poisoned_pages);
95981+ atomic_long_inc_unchecked(&num_poisoned_pages);
95982 return 0;
95983 }
95984
95985@@ -1666,7 +1666,7 @@ static int __soft_offline_page(struct page *page, int flags)
95986 if (!is_free_buddy_page(page))
95987 pr_info("soft offline: %#lx: page leaked\n",
95988 pfn);
95989- atomic_long_inc(&num_poisoned_pages);
95990+ atomic_long_inc_unchecked(&num_poisoned_pages);
95991 }
95992 } else {
95993 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
95994@@ -1736,11 +1736,11 @@ int soft_offline_page(struct page *page, int flags)
95995 if (PageHuge(page)) {
95996 set_page_hwpoison_huge_page(hpage);
95997 dequeue_hwpoisoned_huge_page(hpage);
95998- atomic_long_add(1 << compound_order(hpage),
95999+ atomic_long_add_unchecked(1 << compound_order(hpage),
96000 &num_poisoned_pages);
96001 } else {
96002 SetPageHWPoison(page);
96003- atomic_long_inc(&num_poisoned_pages);
96004+ atomic_long_inc_unchecked(&num_poisoned_pages);
96005 }
96006 }
96007 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
96008diff --git a/mm/memory.c b/mm/memory.c
96009index 37b80fc..9cdef79 100644
96010--- a/mm/memory.c
96011+++ b/mm/memory.c
96012@@ -415,6 +415,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96013 free_pte_range(tlb, pmd, addr);
96014 } while (pmd++, addr = next, addr != end);
96015
96016+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96017 start &= PUD_MASK;
96018 if (start < floor)
96019 return;
96020@@ -429,6 +430,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96021 pmd = pmd_offset(pud, start);
96022 pud_clear(pud);
96023 pmd_free_tlb(tlb, pmd, start);
96024+#endif
96025+
96026 }
96027
96028 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96029@@ -448,6 +451,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96030 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
96031 } while (pud++, addr = next, addr != end);
96032
96033+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96034 start &= PGDIR_MASK;
96035 if (start < floor)
96036 return;
96037@@ -462,6 +466,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96038 pud = pud_offset(pgd, start);
96039 pgd_clear(pgd);
96040 pud_free_tlb(tlb, pud, start);
96041+#endif
96042+
96043 }
96044
96045 /*
96046@@ -691,10 +697,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
96047 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
96048 */
96049 if (vma->vm_ops)
96050- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
96051+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
96052 vma->vm_ops->fault);
96053 if (vma->vm_file)
96054- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
96055+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
96056 vma->vm_file->f_op->mmap);
96057 dump_stack();
96058 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
96059@@ -815,20 +821,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
96060 if (!pte_file(pte)) {
96061 swp_entry_t entry = pte_to_swp_entry(pte);
96062
96063- if (swap_duplicate(entry) < 0)
96064- return entry.val;
96065+ if (likely(!non_swap_entry(entry))) {
96066+ if (swap_duplicate(entry) < 0)
96067+ return entry.val;
96068
96069- /* make sure dst_mm is on swapoff's mmlist. */
96070- if (unlikely(list_empty(&dst_mm->mmlist))) {
96071- spin_lock(&mmlist_lock);
96072- if (list_empty(&dst_mm->mmlist))
96073- list_add(&dst_mm->mmlist,
96074- &src_mm->mmlist);
96075- spin_unlock(&mmlist_lock);
96076- }
96077- if (likely(!non_swap_entry(entry)))
96078+ /* make sure dst_mm is on swapoff's mmlist. */
96079+ if (unlikely(list_empty(&dst_mm->mmlist))) {
96080+ spin_lock(&mmlist_lock);
96081+ if (list_empty(&dst_mm->mmlist))
96082+ list_add(&dst_mm->mmlist,
96083+ &src_mm->mmlist);
96084+ spin_unlock(&mmlist_lock);
96085+ }
96086 rss[MM_SWAPENTS]++;
96087- else if (is_migration_entry(entry)) {
96088+ } else if (is_migration_entry(entry)) {
96089 page = migration_entry_to_page(entry);
96090
96091 if (PageAnon(page))
96092@@ -1501,6 +1507,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96093 page_add_file_rmap(page);
96094 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96095
96096+#ifdef CONFIG_PAX_SEGMEXEC
96097+ pax_mirror_file_pte(vma, addr, page, ptl);
96098+#endif
96099+
96100 retval = 0;
96101 pte_unmap_unlock(pte, ptl);
96102 return retval;
96103@@ -1545,9 +1555,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96104 if (!page_count(page))
96105 return -EINVAL;
96106 if (!(vma->vm_flags & VM_MIXEDMAP)) {
96107+
96108+#ifdef CONFIG_PAX_SEGMEXEC
96109+ struct vm_area_struct *vma_m;
96110+#endif
96111+
96112 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
96113 BUG_ON(vma->vm_flags & VM_PFNMAP);
96114 vma->vm_flags |= VM_MIXEDMAP;
96115+
96116+#ifdef CONFIG_PAX_SEGMEXEC
96117+ vma_m = pax_find_mirror_vma(vma);
96118+ if (vma_m)
96119+ vma_m->vm_flags |= VM_MIXEDMAP;
96120+#endif
96121+
96122 }
96123 return insert_page(vma, addr, page, vma->vm_page_prot);
96124 }
96125@@ -1630,6 +1652,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96126 unsigned long pfn)
96127 {
96128 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96129+ BUG_ON(vma->vm_mirror);
96130
96131 if (addr < vma->vm_start || addr >= vma->vm_end)
96132 return -EFAULT;
96133@@ -1877,7 +1900,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
96134
96135 BUG_ON(pud_huge(*pud));
96136
96137- pmd = pmd_alloc(mm, pud, addr);
96138+ pmd = (mm == &init_mm) ?
96139+ pmd_alloc_kernel(mm, pud, addr) :
96140+ pmd_alloc(mm, pud, addr);
96141 if (!pmd)
96142 return -ENOMEM;
96143 do {
96144@@ -1897,7 +1922,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
96145 unsigned long next;
96146 int err;
96147
96148- pud = pud_alloc(mm, pgd, addr);
96149+ pud = (mm == &init_mm) ?
96150+ pud_alloc_kernel(mm, pgd, addr) :
96151+ pud_alloc(mm, pgd, addr);
96152 if (!pud)
96153 return -ENOMEM;
96154 do {
96155@@ -2019,6 +2046,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
96156 return ret;
96157 }
96158
96159+#ifdef CONFIG_PAX_SEGMEXEC
96160+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96161+{
96162+ struct mm_struct *mm = vma->vm_mm;
96163+ spinlock_t *ptl;
96164+ pte_t *pte, entry;
96165+
96166+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96167+ entry = *pte;
96168+ if (!pte_present(entry)) {
96169+ if (!pte_none(entry)) {
96170+ BUG_ON(pte_file(entry));
96171+ free_swap_and_cache(pte_to_swp_entry(entry));
96172+ pte_clear_not_present_full(mm, address, pte, 0);
96173+ }
96174+ } else {
96175+ struct page *page;
96176+
96177+ flush_cache_page(vma, address, pte_pfn(entry));
96178+ entry = ptep_clear_flush(vma, address, pte);
96179+ BUG_ON(pte_dirty(entry));
96180+ page = vm_normal_page(vma, address, entry);
96181+ if (page) {
96182+ update_hiwater_rss(mm);
96183+ if (PageAnon(page))
96184+ dec_mm_counter_fast(mm, MM_ANONPAGES);
96185+ else
96186+ dec_mm_counter_fast(mm, MM_FILEPAGES);
96187+ page_remove_rmap(page);
96188+ page_cache_release(page);
96189+ }
96190+ }
96191+ pte_unmap_unlock(pte, ptl);
96192+}
96193+
96194+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96195+ *
96196+ * the ptl of the lower mapped page is held on entry and is not released on exit
96197+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96198+ */
96199+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96200+{
96201+ struct mm_struct *mm = vma->vm_mm;
96202+ unsigned long address_m;
96203+ spinlock_t *ptl_m;
96204+ struct vm_area_struct *vma_m;
96205+ pmd_t *pmd_m;
96206+ pte_t *pte_m, entry_m;
96207+
96208+ BUG_ON(!page_m || !PageAnon(page_m));
96209+
96210+ vma_m = pax_find_mirror_vma(vma);
96211+ if (!vma_m)
96212+ return;
96213+
96214+ BUG_ON(!PageLocked(page_m));
96215+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96216+ address_m = address + SEGMEXEC_TASK_SIZE;
96217+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96218+ pte_m = pte_offset_map(pmd_m, address_m);
96219+ ptl_m = pte_lockptr(mm, pmd_m);
96220+ if (ptl != ptl_m) {
96221+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96222+ if (!pte_none(*pte_m))
96223+ goto out;
96224+ }
96225+
96226+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96227+ page_cache_get(page_m);
96228+ page_add_anon_rmap(page_m, vma_m, address_m);
96229+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96230+ set_pte_at(mm, address_m, pte_m, entry_m);
96231+ update_mmu_cache(vma_m, address_m, pte_m);
96232+out:
96233+ if (ptl != ptl_m)
96234+ spin_unlock(ptl_m);
96235+ pte_unmap(pte_m);
96236+ unlock_page(page_m);
96237+}
96238+
96239+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96240+{
96241+ struct mm_struct *mm = vma->vm_mm;
96242+ unsigned long address_m;
96243+ spinlock_t *ptl_m;
96244+ struct vm_area_struct *vma_m;
96245+ pmd_t *pmd_m;
96246+ pte_t *pte_m, entry_m;
96247+
96248+ BUG_ON(!page_m || PageAnon(page_m));
96249+
96250+ vma_m = pax_find_mirror_vma(vma);
96251+ if (!vma_m)
96252+ return;
96253+
96254+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96255+ address_m = address + SEGMEXEC_TASK_SIZE;
96256+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96257+ pte_m = pte_offset_map(pmd_m, address_m);
96258+ ptl_m = pte_lockptr(mm, pmd_m);
96259+ if (ptl != ptl_m) {
96260+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96261+ if (!pte_none(*pte_m))
96262+ goto out;
96263+ }
96264+
96265+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96266+ page_cache_get(page_m);
96267+ page_add_file_rmap(page_m);
96268+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96269+ set_pte_at(mm, address_m, pte_m, entry_m);
96270+ update_mmu_cache(vma_m, address_m, pte_m);
96271+out:
96272+ if (ptl != ptl_m)
96273+ spin_unlock(ptl_m);
96274+ pte_unmap(pte_m);
96275+}
96276+
96277+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96278+{
96279+ struct mm_struct *mm = vma->vm_mm;
96280+ unsigned long address_m;
96281+ spinlock_t *ptl_m;
96282+ struct vm_area_struct *vma_m;
96283+ pmd_t *pmd_m;
96284+ pte_t *pte_m, entry_m;
96285+
96286+ vma_m = pax_find_mirror_vma(vma);
96287+ if (!vma_m)
96288+ return;
96289+
96290+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96291+ address_m = address + SEGMEXEC_TASK_SIZE;
96292+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96293+ pte_m = pte_offset_map(pmd_m, address_m);
96294+ ptl_m = pte_lockptr(mm, pmd_m);
96295+ if (ptl != ptl_m) {
96296+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96297+ if (!pte_none(*pte_m))
96298+ goto out;
96299+ }
96300+
96301+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96302+ set_pte_at(mm, address_m, pte_m, entry_m);
96303+out:
96304+ if (ptl != ptl_m)
96305+ spin_unlock(ptl_m);
96306+ pte_unmap(pte_m);
96307+}
96308+
96309+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96310+{
96311+ struct page *page_m;
96312+ pte_t entry;
96313+
96314+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96315+ goto out;
96316+
96317+ entry = *pte;
96318+ page_m = vm_normal_page(vma, address, entry);
96319+ if (!page_m)
96320+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96321+ else if (PageAnon(page_m)) {
96322+ if (pax_find_mirror_vma(vma)) {
96323+ pte_unmap_unlock(pte, ptl);
96324+ lock_page(page_m);
96325+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96326+ if (pte_same(entry, *pte))
96327+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96328+ else
96329+ unlock_page(page_m);
96330+ }
96331+ } else
96332+ pax_mirror_file_pte(vma, address, page_m, ptl);
96333+
96334+out:
96335+ pte_unmap_unlock(pte, ptl);
96336+}
96337+#endif
96338+
96339 /*
96340 * This routine handles present pages, when users try to write
96341 * to a shared page. It is done by copying the page to a new address
96342@@ -2217,6 +2424,12 @@ gotten:
96343 */
96344 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96345 if (likely(pte_same(*page_table, orig_pte))) {
96346+
96347+#ifdef CONFIG_PAX_SEGMEXEC
96348+ if (pax_find_mirror_vma(vma))
96349+ BUG_ON(!trylock_page(new_page));
96350+#endif
96351+
96352 if (old_page) {
96353 if (!PageAnon(old_page)) {
96354 dec_mm_counter_fast(mm, MM_FILEPAGES);
96355@@ -2270,6 +2483,10 @@ gotten:
96356 page_remove_rmap(old_page);
96357 }
96358
96359+#ifdef CONFIG_PAX_SEGMEXEC
96360+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96361+#endif
96362+
96363 /* Free the old page.. */
96364 new_page = old_page;
96365 ret |= VM_FAULT_WRITE;
96366@@ -2544,6 +2761,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96367 swap_free(entry);
96368 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96369 try_to_free_swap(page);
96370+
96371+#ifdef CONFIG_PAX_SEGMEXEC
96372+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96373+#endif
96374+
96375 unlock_page(page);
96376 if (page != swapcache) {
96377 /*
96378@@ -2567,6 +2789,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96379
96380 /* No need to invalidate - it was non-present before */
96381 update_mmu_cache(vma, address, page_table);
96382+
96383+#ifdef CONFIG_PAX_SEGMEXEC
96384+ pax_mirror_anon_pte(vma, address, page, ptl);
96385+#endif
96386+
96387 unlock:
96388 pte_unmap_unlock(page_table, ptl);
96389 out:
96390@@ -2586,40 +2813,6 @@ out_release:
96391 }
96392
96393 /*
96394- * This is like a special single-page "expand_{down|up}wards()",
96395- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96396- * doesn't hit another vma.
96397- */
96398-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96399-{
96400- address &= PAGE_MASK;
96401- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96402- struct vm_area_struct *prev = vma->vm_prev;
96403-
96404- /*
96405- * Is there a mapping abutting this one below?
96406- *
96407- * That's only ok if it's the same stack mapping
96408- * that has gotten split..
96409- */
96410- if (prev && prev->vm_end == address)
96411- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96412-
96413- expand_downwards(vma, address - PAGE_SIZE);
96414- }
96415- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96416- struct vm_area_struct *next = vma->vm_next;
96417-
96418- /* As VM_GROWSDOWN but s/below/above/ */
96419- if (next && next->vm_start == address + PAGE_SIZE)
96420- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96421-
96422- expand_upwards(vma, address + PAGE_SIZE);
96423- }
96424- return 0;
96425-}
96426-
96427-/*
96428 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96429 * but allow concurrent faults), and pte mapped but not yet locked.
96430 * We return with mmap_sem still held, but pte unmapped and unlocked.
96431@@ -2629,27 +2822,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96432 unsigned int flags)
96433 {
96434 struct mem_cgroup *memcg;
96435- struct page *page;
96436+ struct page *page = NULL;
96437 spinlock_t *ptl;
96438 pte_t entry;
96439
96440- pte_unmap(page_table);
96441-
96442- /* Check if we need to add a guard page to the stack */
96443- if (check_stack_guard_page(vma, address) < 0)
96444- return VM_FAULT_SIGBUS;
96445-
96446- /* Use the zero-page for reads */
96447 if (!(flags & FAULT_FLAG_WRITE)) {
96448 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96449 vma->vm_page_prot));
96450- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96451+ ptl = pte_lockptr(mm, pmd);
96452+ spin_lock(ptl);
96453 if (!pte_none(*page_table))
96454 goto unlock;
96455 goto setpte;
96456 }
96457
96458 /* Allocate our own private page. */
96459+ pte_unmap(page_table);
96460+
96461 if (unlikely(anon_vma_prepare(vma)))
96462 goto oom;
96463 page = alloc_zeroed_user_highpage_movable(vma, address);
96464@@ -2673,6 +2862,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96465 if (!pte_none(*page_table))
96466 goto release;
96467
96468+#ifdef CONFIG_PAX_SEGMEXEC
96469+ if (pax_find_mirror_vma(vma))
96470+ BUG_ON(!trylock_page(page));
96471+#endif
96472+
96473 inc_mm_counter_fast(mm, MM_ANONPAGES);
96474 page_add_new_anon_rmap(page, vma, address);
96475 mem_cgroup_commit_charge(page, memcg, false);
96476@@ -2682,6 +2876,12 @@ setpte:
96477
96478 /* No need to invalidate - it was non-present before */
96479 update_mmu_cache(vma, address, page_table);
96480+
96481+#ifdef CONFIG_PAX_SEGMEXEC
96482+ if (page)
96483+ pax_mirror_anon_pte(vma, address, page, ptl);
96484+#endif
96485+
96486 unlock:
96487 pte_unmap_unlock(page_table, ptl);
96488 return 0;
96489@@ -2912,6 +3112,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96490 return ret;
96491 }
96492 do_set_pte(vma, address, fault_page, pte, false, false);
96493+
96494+#ifdef CONFIG_PAX_SEGMEXEC
96495+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96496+#endif
96497+
96498 unlock_page(fault_page);
96499 unlock_out:
96500 pte_unmap_unlock(pte, ptl);
96501@@ -2954,7 +3159,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96502 page_cache_release(fault_page);
96503 goto uncharge_out;
96504 }
96505+
96506+#ifdef CONFIG_PAX_SEGMEXEC
96507+ if (pax_find_mirror_vma(vma))
96508+ BUG_ON(!trylock_page(new_page));
96509+#endif
96510+
96511 do_set_pte(vma, address, new_page, pte, true, true);
96512+
96513+#ifdef CONFIG_PAX_SEGMEXEC
96514+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96515+#endif
96516+
96517 mem_cgroup_commit_charge(new_page, memcg, false);
96518 lru_cache_add_active_or_unevictable(new_page, vma);
96519 pte_unmap_unlock(pte, ptl);
96520@@ -3004,6 +3220,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96521 return ret;
96522 }
96523 do_set_pte(vma, address, fault_page, pte, true, false);
96524+
96525+#ifdef CONFIG_PAX_SEGMEXEC
96526+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96527+#endif
96528+
96529 pte_unmap_unlock(pte, ptl);
96530
96531 if (set_page_dirty(fault_page))
96532@@ -3245,6 +3466,12 @@ static int handle_pte_fault(struct mm_struct *mm,
96533 if (flags & FAULT_FLAG_WRITE)
96534 flush_tlb_fix_spurious_fault(vma, address);
96535 }
96536+
96537+#ifdef CONFIG_PAX_SEGMEXEC
96538+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96539+ return 0;
96540+#endif
96541+
96542 unlock:
96543 pte_unmap_unlock(pte, ptl);
96544 return 0;
96545@@ -3264,9 +3491,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96546 pmd_t *pmd;
96547 pte_t *pte;
96548
96549+#ifdef CONFIG_PAX_SEGMEXEC
96550+ struct vm_area_struct *vma_m;
96551+#endif
96552+
96553 if (unlikely(is_vm_hugetlb_page(vma)))
96554 return hugetlb_fault(mm, vma, address, flags);
96555
96556+#ifdef CONFIG_PAX_SEGMEXEC
96557+ vma_m = pax_find_mirror_vma(vma);
96558+ if (vma_m) {
96559+ unsigned long address_m;
96560+ pgd_t *pgd_m;
96561+ pud_t *pud_m;
96562+ pmd_t *pmd_m;
96563+
96564+ if (vma->vm_start > vma_m->vm_start) {
96565+ address_m = address;
96566+ address -= SEGMEXEC_TASK_SIZE;
96567+ vma = vma_m;
96568+ } else
96569+ address_m = address + SEGMEXEC_TASK_SIZE;
96570+
96571+ pgd_m = pgd_offset(mm, address_m);
96572+ pud_m = pud_alloc(mm, pgd_m, address_m);
96573+ if (!pud_m)
96574+ return VM_FAULT_OOM;
96575+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96576+ if (!pmd_m)
96577+ return VM_FAULT_OOM;
96578+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
96579+ return VM_FAULT_OOM;
96580+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96581+ }
96582+#endif
96583+
96584 pgd = pgd_offset(mm, address);
96585 pud = pud_alloc(mm, pgd, address);
96586 if (!pud)
96587@@ -3400,6 +3659,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96588 spin_unlock(&mm->page_table_lock);
96589 return 0;
96590 }
96591+
96592+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96593+{
96594+ pud_t *new = pud_alloc_one(mm, address);
96595+ if (!new)
96596+ return -ENOMEM;
96597+
96598+ smp_wmb(); /* See comment in __pte_alloc */
96599+
96600+ spin_lock(&mm->page_table_lock);
96601+ if (pgd_present(*pgd)) /* Another has populated it */
96602+ pud_free(mm, new);
96603+ else
96604+ pgd_populate_kernel(mm, pgd, new);
96605+ spin_unlock(&mm->page_table_lock);
96606+ return 0;
96607+}
96608 #endif /* __PAGETABLE_PUD_FOLDED */
96609
96610 #ifndef __PAGETABLE_PMD_FOLDED
96611@@ -3430,6 +3706,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
96612 spin_unlock(&mm->page_table_lock);
96613 return 0;
96614 }
96615+
96616+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
96617+{
96618+ pmd_t *new = pmd_alloc_one(mm, address);
96619+ if (!new)
96620+ return -ENOMEM;
96621+
96622+ smp_wmb(); /* See comment in __pte_alloc */
96623+
96624+ spin_lock(&mm->page_table_lock);
96625+#ifndef __ARCH_HAS_4LEVEL_HACK
96626+ if (pud_present(*pud)) /* Another has populated it */
96627+ pmd_free(mm, new);
96628+ else
96629+ pud_populate_kernel(mm, pud, new);
96630+#else
96631+ if (pgd_present(*pud)) /* Another has populated it */
96632+ pmd_free(mm, new);
96633+ else
96634+ pgd_populate_kernel(mm, pud, new);
96635+#endif /* __ARCH_HAS_4LEVEL_HACK */
96636+ spin_unlock(&mm->page_table_lock);
96637+ return 0;
96638+}
96639 #endif /* __PAGETABLE_PMD_FOLDED */
96640
96641 static int __follow_pte(struct mm_struct *mm, unsigned long address,
96642@@ -3539,8 +3839,8 @@ out:
96643 return ret;
96644 }
96645
96646-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96647- void *buf, int len, int write)
96648+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96649+ void *buf, size_t len, int write)
96650 {
96651 resource_size_t phys_addr;
96652 unsigned long prot = 0;
96653@@ -3566,8 +3866,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
96654 * Access another process' address space as given in mm. If non-NULL, use the
96655 * given task for page fault accounting.
96656 */
96657-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96658- unsigned long addr, void *buf, int len, int write)
96659+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96660+ unsigned long addr, void *buf, size_t len, int write)
96661 {
96662 struct vm_area_struct *vma;
96663 void *old_buf = buf;
96664@@ -3575,7 +3875,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96665 down_read(&mm->mmap_sem);
96666 /* ignore errors, just check how much was successfully transferred */
96667 while (len) {
96668- int bytes, ret, offset;
96669+ ssize_t bytes, ret, offset;
96670 void *maddr;
96671 struct page *page = NULL;
96672
96673@@ -3636,8 +3936,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96674 *
96675 * The caller must hold a reference on @mm.
96676 */
96677-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96678- void *buf, int len, int write)
96679+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
96680+ void *buf, size_t len, int write)
96681 {
96682 return __access_remote_vm(NULL, mm, addr, buf, len, write);
96683 }
96684@@ -3647,11 +3947,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96685 * Source/target buffer must be kernel space,
96686 * Do not walk the page table directly, use get_user_pages
96687 */
96688-int access_process_vm(struct task_struct *tsk, unsigned long addr,
96689- void *buf, int len, int write)
96690+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
96691+ void *buf, size_t len, int write)
96692 {
96693 struct mm_struct *mm;
96694- int ret;
96695+ ssize_t ret;
96696
96697 mm = get_task_mm(tsk);
96698 if (!mm)
96699diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96700index 8f5330d..b41914b 100644
96701--- a/mm/mempolicy.c
96702+++ b/mm/mempolicy.c
96703@@ -750,6 +750,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96704 unsigned long vmstart;
96705 unsigned long vmend;
96706
96707+#ifdef CONFIG_PAX_SEGMEXEC
96708+ struct vm_area_struct *vma_m;
96709+#endif
96710+
96711 vma = find_vma(mm, start);
96712 if (!vma || vma->vm_start > start)
96713 return -EFAULT;
96714@@ -793,6 +797,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96715 err = vma_replace_policy(vma, new_pol);
96716 if (err)
96717 goto out;
96718+
96719+#ifdef CONFIG_PAX_SEGMEXEC
96720+ vma_m = pax_find_mirror_vma(vma);
96721+ if (vma_m) {
96722+ err = vma_replace_policy(vma_m, new_pol);
96723+ if (err)
96724+ goto out;
96725+ }
96726+#endif
96727+
96728 }
96729
96730 out:
96731@@ -1225,6 +1239,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96732
96733 if (end < start)
96734 return -EINVAL;
96735+
96736+#ifdef CONFIG_PAX_SEGMEXEC
96737+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96738+ if (end > SEGMEXEC_TASK_SIZE)
96739+ return -EINVAL;
96740+ } else
96741+#endif
96742+
96743+ if (end > TASK_SIZE)
96744+ return -EINVAL;
96745+
96746 if (end == start)
96747 return 0;
96748
96749@@ -1450,8 +1475,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96750 */
96751 tcred = __task_cred(task);
96752 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96753- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96754- !capable(CAP_SYS_NICE)) {
96755+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96756 rcu_read_unlock();
96757 err = -EPERM;
96758 goto out_put;
96759@@ -1482,6 +1506,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96760 goto out;
96761 }
96762
96763+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96764+ if (mm != current->mm &&
96765+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96766+ mmput(mm);
96767+ err = -EPERM;
96768+ goto out;
96769+ }
96770+#endif
96771+
96772 err = do_migrate_pages(mm, old, new,
96773 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
96774
96775diff --git a/mm/migrate.c b/mm/migrate.c
96776index 0143995..b294728 100644
96777--- a/mm/migrate.c
96778+++ b/mm/migrate.c
96779@@ -1495,8 +1495,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96780 */
96781 tcred = __task_cred(task);
96782 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96783- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96784- !capable(CAP_SYS_NICE)) {
96785+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96786 rcu_read_unlock();
96787 err = -EPERM;
96788 goto out;
96789diff --git a/mm/mlock.c b/mm/mlock.c
96790index ce84cb0..6d5a9aa 100644
96791--- a/mm/mlock.c
96792+++ b/mm/mlock.c
96793@@ -14,6 +14,7 @@
96794 #include <linux/pagevec.h>
96795 #include <linux/mempolicy.h>
96796 #include <linux/syscalls.h>
96797+#include <linux/security.h>
96798 #include <linux/sched.h>
96799 #include <linux/export.h>
96800 #include <linux/rmap.h>
96801@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96802 {
96803 unsigned long nstart, end, tmp;
96804 struct vm_area_struct * vma, * prev;
96805- int error;
96806+ int error = 0;
96807
96808 VM_BUG_ON(start & ~PAGE_MASK);
96809 VM_BUG_ON(len != PAGE_ALIGN(len));
96810@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96811 return -EINVAL;
96812 if (end == start)
96813 return 0;
96814+ if (end > TASK_SIZE)
96815+ return -EINVAL;
96816+
96817 vma = find_vma(current->mm, start);
96818 if (!vma || vma->vm_start > start)
96819 return -ENOMEM;
96820@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96821 for (nstart = start ; ; ) {
96822 vm_flags_t newflags;
96823
96824+#ifdef CONFIG_PAX_SEGMEXEC
96825+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96826+ break;
96827+#endif
96828+
96829 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96830
96831 newflags = vma->vm_flags & ~VM_LOCKED;
96832@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96833 locked += current->mm->locked_vm;
96834
96835 /* check against resource limits */
96836+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96837 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96838 error = do_mlock(start, len, 1);
96839
96840@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
96841 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96842 vm_flags_t newflags;
96843
96844+#ifdef CONFIG_PAX_SEGMEXEC
96845+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96846+ break;
96847+#endif
96848+
96849 newflags = vma->vm_flags & ~VM_LOCKED;
96850 if (flags & MCL_CURRENT)
96851 newflags |= VM_LOCKED;
96852@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96853 lock_limit >>= PAGE_SHIFT;
96854
96855 ret = -ENOMEM;
96856+
96857+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96858+
96859 down_write(&current->mm->mmap_sem);
96860-
96861 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96862 capable(CAP_IPC_LOCK))
96863 ret = do_mlockall(flags);
96864diff --git a/mm/mmap.c b/mm/mmap.c
96865index ebc25fa..9135e65 100644
96866--- a/mm/mmap.c
96867+++ b/mm/mmap.c
96868@@ -41,6 +41,7 @@
96869 #include <linux/notifier.h>
96870 #include <linux/memory.h>
96871 #include <linux/printk.h>
96872+#include <linux/random.h>
96873
96874 #include <asm/uaccess.h>
96875 #include <asm/cacheflush.h>
96876@@ -57,6 +58,16 @@
96877 #define arch_rebalance_pgtables(addr, len) (addr)
96878 #endif
96879
96880+static inline void verify_mm_writelocked(struct mm_struct *mm)
96881+{
96882+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96883+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96884+ up_read(&mm->mmap_sem);
96885+ BUG();
96886+ }
96887+#endif
96888+}
96889+
96890 static void unmap_region(struct mm_struct *mm,
96891 struct vm_area_struct *vma, struct vm_area_struct *prev,
96892 unsigned long start, unsigned long end);
96893@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
96894 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96895 *
96896 */
96897-pgprot_t protection_map[16] = {
96898+pgprot_t protection_map[16] __read_only = {
96899 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96900 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96901 };
96902
96903-pgprot_t vm_get_page_prot(unsigned long vm_flags)
96904+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
96905 {
96906- return __pgprot(pgprot_val(protection_map[vm_flags &
96907+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96908 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96909 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96910+
96911+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96912+ if (!(__supported_pte_mask & _PAGE_NX) &&
96913+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96914+ (vm_flags & (VM_READ | VM_WRITE)))
96915+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96916+#endif
96917+
96918+ return prot;
96919 }
96920 EXPORT_SYMBOL(vm_get_page_prot);
96921
96922@@ -95,6 +115,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
96923 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96924 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
96925 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
96926+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96927 /*
96928 * Make sure vm_committed_as in one cacheline and not cacheline shared with
96929 * other variables. It can be updated by several CPUs frequently.
96930@@ -255,6 +276,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96931 struct vm_area_struct *next = vma->vm_next;
96932
96933 might_sleep();
96934+ BUG_ON(vma->vm_mirror);
96935 if (vma->vm_ops && vma->vm_ops->close)
96936 vma->vm_ops->close(vma);
96937 if (vma->vm_file)
96938@@ -299,6 +321,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96939 * not page aligned -Ram Gupta
96940 */
96941 rlim = rlimit(RLIMIT_DATA);
96942+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96943+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
96944+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
96945+ rlim = 4096 * PAGE_SIZE;
96946+#endif
96947+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
96948 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
96949 (mm->end_data - mm->start_data) > rlim)
96950 goto out;
96951@@ -752,8 +780,11 @@ again: remove_next = 1 + (end > next->vm_end);
96952 * shrinking vma had, to cover any anon pages imported.
96953 */
96954 if (exporter && exporter->anon_vma && !importer->anon_vma) {
96955- if (anon_vma_clone(importer, exporter))
96956- return -ENOMEM;
96957+ int error;
96958+
96959+ error = anon_vma_clone(importer, exporter);
96960+ if (error)
96961+ return error;
96962 importer->anon_vma = exporter->anon_vma;
96963 }
96964 }
96965@@ -949,6 +980,12 @@ static int
96966 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96967 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96968 {
96969+
96970+#ifdef CONFIG_PAX_SEGMEXEC
96971+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96972+ return 0;
96973+#endif
96974+
96975 if (is_mergeable_vma(vma, file, vm_flags) &&
96976 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96977 if (vma->vm_pgoff == vm_pgoff)
96978@@ -968,6 +1005,12 @@ static int
96979 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96980 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96981 {
96982+
96983+#ifdef CONFIG_PAX_SEGMEXEC
96984+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96985+ return 0;
96986+#endif
96987+
96988 if (is_mergeable_vma(vma, file, vm_flags) &&
96989 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96990 pgoff_t vm_pglen;
96991@@ -1010,13 +1053,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96992 struct vm_area_struct *vma_merge(struct mm_struct *mm,
96993 struct vm_area_struct *prev, unsigned long addr,
96994 unsigned long end, unsigned long vm_flags,
96995- struct anon_vma *anon_vma, struct file *file,
96996+ struct anon_vma *anon_vma, struct file *file,
96997 pgoff_t pgoff, struct mempolicy *policy)
96998 {
96999 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
97000 struct vm_area_struct *area, *next;
97001 int err;
97002
97003+#ifdef CONFIG_PAX_SEGMEXEC
97004+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
97005+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
97006+
97007+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
97008+#endif
97009+
97010 /*
97011 * We later require that vma->vm_flags == vm_flags,
97012 * so this tests vma->vm_flags & VM_SPECIAL, too.
97013@@ -1032,6 +1082,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97014 if (next && next->vm_end == end) /* cases 6, 7, 8 */
97015 next = next->vm_next;
97016
97017+#ifdef CONFIG_PAX_SEGMEXEC
97018+ if (prev)
97019+ prev_m = pax_find_mirror_vma(prev);
97020+ if (area)
97021+ area_m = pax_find_mirror_vma(area);
97022+ if (next)
97023+ next_m = pax_find_mirror_vma(next);
97024+#endif
97025+
97026 /*
97027 * Can it merge with the predecessor?
97028 */
97029@@ -1051,9 +1110,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97030 /* cases 1, 6 */
97031 err = vma_adjust(prev, prev->vm_start,
97032 next->vm_end, prev->vm_pgoff, NULL);
97033- } else /* cases 2, 5, 7 */
97034+
97035+#ifdef CONFIG_PAX_SEGMEXEC
97036+ if (!err && prev_m)
97037+ err = vma_adjust(prev_m, prev_m->vm_start,
97038+ next_m->vm_end, prev_m->vm_pgoff, NULL);
97039+#endif
97040+
97041+ } else { /* cases 2, 5, 7 */
97042 err = vma_adjust(prev, prev->vm_start,
97043 end, prev->vm_pgoff, NULL);
97044+
97045+#ifdef CONFIG_PAX_SEGMEXEC
97046+ if (!err && prev_m)
97047+ err = vma_adjust(prev_m, prev_m->vm_start,
97048+ end_m, prev_m->vm_pgoff, NULL);
97049+#endif
97050+
97051+ }
97052 if (err)
97053 return NULL;
97054 khugepaged_enter_vma_merge(prev, vm_flags);
97055@@ -1067,12 +1141,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97056 mpol_equal(policy, vma_policy(next)) &&
97057 can_vma_merge_before(next, vm_flags,
97058 anon_vma, file, pgoff+pglen)) {
97059- if (prev && addr < prev->vm_end) /* case 4 */
97060+ if (prev && addr < prev->vm_end) { /* case 4 */
97061 err = vma_adjust(prev, prev->vm_start,
97062 addr, prev->vm_pgoff, NULL);
97063- else /* cases 3, 8 */
97064+
97065+#ifdef CONFIG_PAX_SEGMEXEC
97066+ if (!err && prev_m)
97067+ err = vma_adjust(prev_m, prev_m->vm_start,
97068+ addr_m, prev_m->vm_pgoff, NULL);
97069+#endif
97070+
97071+ } else { /* cases 3, 8 */
97072 err = vma_adjust(area, addr, next->vm_end,
97073 next->vm_pgoff - pglen, NULL);
97074+
97075+#ifdef CONFIG_PAX_SEGMEXEC
97076+ if (!err && area_m)
97077+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
97078+ next_m->vm_pgoff - pglen, NULL);
97079+#endif
97080+
97081+ }
97082 if (err)
97083 return NULL;
97084 khugepaged_enter_vma_merge(area, vm_flags);
97085@@ -1181,8 +1270,10 @@ none:
97086 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97087 struct file *file, long pages)
97088 {
97089- const unsigned long stack_flags
97090- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97091+
97092+#ifdef CONFIG_PAX_RANDMMAP
97093+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97094+#endif
97095
97096 mm->total_vm += pages;
97097
97098@@ -1190,7 +1281,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97099 mm->shared_vm += pages;
97100 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97101 mm->exec_vm += pages;
97102- } else if (flags & stack_flags)
97103+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97104 mm->stack_vm += pages;
97105 }
97106 #endif /* CONFIG_PROC_FS */
97107@@ -1220,6 +1311,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
97108 locked += mm->locked_vm;
97109 lock_limit = rlimit(RLIMIT_MEMLOCK);
97110 lock_limit >>= PAGE_SHIFT;
97111+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97112 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97113 return -EAGAIN;
97114 }
97115@@ -1246,7 +1338,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97116 * (the exception is when the underlying filesystem is noexec
97117 * mounted, in which case we dont add PROT_EXEC.)
97118 */
97119- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97120+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97121 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97122 prot |= PROT_EXEC;
97123
97124@@ -1272,7 +1364,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97125 /* Obtain the address to map to. we verify (or select) it and ensure
97126 * that it represents a valid section of the address space.
97127 */
97128- addr = get_unmapped_area(file, addr, len, pgoff, flags);
97129+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97130 if (addr & ~PAGE_MASK)
97131 return addr;
97132
97133@@ -1283,6 +1375,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97134 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97135 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97136
97137+#ifdef CONFIG_PAX_MPROTECT
97138+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97139+
97140+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
97141+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
97142+ mm->binfmt->handle_mmap)
97143+ mm->binfmt->handle_mmap(file);
97144+#endif
97145+
97146+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97147+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97148+ gr_log_rwxmmap(file);
97149+
97150+#ifdef CONFIG_PAX_EMUPLT
97151+ vm_flags &= ~VM_EXEC;
97152+#else
97153+ return -EPERM;
97154+#endif
97155+
97156+ }
97157+
97158+ if (!(vm_flags & VM_EXEC))
97159+ vm_flags &= ~VM_MAYEXEC;
97160+#else
97161+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97162+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97163+#endif
97164+ else
97165+ vm_flags &= ~VM_MAYWRITE;
97166+ }
97167+#endif
97168+
97169+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97170+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97171+ vm_flags &= ~VM_PAGEEXEC;
97172+#endif
97173+
97174 if (flags & MAP_LOCKED)
97175 if (!can_do_mlock())
97176 return -EPERM;
97177@@ -1370,6 +1499,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97178 vm_flags |= VM_NORESERVE;
97179 }
97180
97181+ if (!gr_acl_handle_mmap(file, prot))
97182+ return -EACCES;
97183+
97184 addr = mmap_region(file, addr, len, vm_flags, pgoff);
97185 if (!IS_ERR_VALUE(addr) &&
97186 ((vm_flags & VM_LOCKED) ||
97187@@ -1463,7 +1595,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
97188 vm_flags_t vm_flags = vma->vm_flags;
97189
97190 /* If it was private or non-writable, the write bit is already clear */
97191- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97192+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97193 return 0;
97194
97195 /* The backer wishes to know when pages are first written to? */
97196@@ -1509,7 +1641,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97197 struct rb_node **rb_link, *rb_parent;
97198 unsigned long charged = 0;
97199
97200+#ifdef CONFIG_PAX_SEGMEXEC
97201+ struct vm_area_struct *vma_m = NULL;
97202+#endif
97203+
97204+ /*
97205+ * mm->mmap_sem is required to protect against another thread
97206+ * changing the mappings in case we sleep.
97207+ */
97208+ verify_mm_writelocked(mm);
97209+
97210 /* Check against address space limit. */
97211+
97212+#ifdef CONFIG_PAX_RANDMMAP
97213+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97214+#endif
97215+
97216 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97217 unsigned long nr_pages;
97218
97219@@ -1528,11 +1675,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97220
97221 /* Clear old maps */
97222 error = -ENOMEM;
97223-munmap_back:
97224 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97225 if (do_munmap(mm, addr, len))
97226 return -ENOMEM;
97227- goto munmap_back;
97228+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97229 }
97230
97231 /*
97232@@ -1563,6 +1709,16 @@ munmap_back:
97233 goto unacct_error;
97234 }
97235
97236+#ifdef CONFIG_PAX_SEGMEXEC
97237+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97238+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97239+ if (!vma_m) {
97240+ error = -ENOMEM;
97241+ goto free_vma;
97242+ }
97243+ }
97244+#endif
97245+
97246 vma->vm_mm = mm;
97247 vma->vm_start = addr;
97248 vma->vm_end = addr + len;
97249@@ -1593,6 +1749,13 @@ munmap_back:
97250 if (error)
97251 goto unmap_and_free_vma;
97252
97253+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97254+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97255+ vma->vm_flags |= VM_PAGEEXEC;
97256+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97257+ }
97258+#endif
97259+
97260 /* Can addr have changed??
97261 *
97262 * Answer: Yes, several device drivers can do it in their
97263@@ -1626,6 +1789,12 @@ munmap_back:
97264 }
97265
97266 vma_link(mm, vma, prev, rb_link, rb_parent);
97267+
97268+#ifdef CONFIG_PAX_SEGMEXEC
97269+ if (vma_m)
97270+ BUG_ON(pax_mirror_vma(vma_m, vma));
97271+#endif
97272+
97273 /* Once vma denies write, undo our temporary denial count */
97274 if (file) {
97275 if (vm_flags & VM_SHARED)
97276@@ -1638,6 +1807,7 @@ out:
97277 perf_event_mmap(vma);
97278
97279 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97280+ track_exec_limit(mm, addr, addr + len, vm_flags);
97281 if (vm_flags & VM_LOCKED) {
97282 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97283 vma == get_gate_vma(current->mm)))
97284@@ -1673,6 +1843,12 @@ allow_write_and_free_vma:
97285 if (vm_flags & VM_DENYWRITE)
97286 allow_write_access(file);
97287 free_vma:
97288+
97289+#ifdef CONFIG_PAX_SEGMEXEC
97290+ if (vma_m)
97291+ kmem_cache_free(vm_area_cachep, vma_m);
97292+#endif
97293+
97294 kmem_cache_free(vm_area_cachep, vma);
97295 unacct_error:
97296 if (charged)
97297@@ -1680,7 +1856,63 @@ unacct_error:
97298 return error;
97299 }
97300
97301-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97302+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97303+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97304+{
97305+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97306+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97307+
97308+ return 0;
97309+}
97310+#endif
97311+
97312+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97313+{
97314+ if (!vma) {
97315+#ifdef CONFIG_STACK_GROWSUP
97316+ if (addr > sysctl_heap_stack_gap)
97317+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97318+ else
97319+ vma = find_vma(current->mm, 0);
97320+ if (vma && (vma->vm_flags & VM_GROWSUP))
97321+ return false;
97322+#endif
97323+ return true;
97324+ }
97325+
97326+ if (addr + len > vma->vm_start)
97327+ return false;
97328+
97329+ if (vma->vm_flags & VM_GROWSDOWN)
97330+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97331+#ifdef CONFIG_STACK_GROWSUP
97332+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97333+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97334+#endif
97335+ else if (offset)
97336+ return offset <= vma->vm_start - addr - len;
97337+
97338+ return true;
97339+}
97340+
97341+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
97342+{
97343+ if (vma->vm_start < len)
97344+ return -ENOMEM;
97345+
97346+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
97347+ if (offset <= vma->vm_start - len)
97348+ return vma->vm_start - len - offset;
97349+ else
97350+ return -ENOMEM;
97351+ }
97352+
97353+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97354+ return vma->vm_start - len - sysctl_heap_stack_gap;
97355+ return -ENOMEM;
97356+}
97357+
97358+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
97359 {
97360 /*
97361 * We implement the search by looking for an rbtree node that
97362@@ -1728,11 +1960,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97363 }
97364 }
97365
97366- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
97367+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
97368 check_current:
97369 /* Check if current node has a suitable gap */
97370 if (gap_start > high_limit)
97371 return -ENOMEM;
97372+
97373+ if (gap_end - gap_start > info->threadstack_offset)
97374+ gap_start += info->threadstack_offset;
97375+ else
97376+ gap_start = gap_end;
97377+
97378+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97379+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97380+ gap_start += sysctl_heap_stack_gap;
97381+ else
97382+ gap_start = gap_end;
97383+ }
97384+ if (vma->vm_flags & VM_GROWSDOWN) {
97385+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97386+ gap_end -= sysctl_heap_stack_gap;
97387+ else
97388+ gap_end = gap_start;
97389+ }
97390 if (gap_end >= low_limit && gap_end - gap_start >= length)
97391 goto found;
97392
97393@@ -1782,7 +2032,7 @@ found:
97394 return gap_start;
97395 }
97396
97397-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
97398+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
97399 {
97400 struct mm_struct *mm = current->mm;
97401 struct vm_area_struct *vma;
97402@@ -1836,6 +2086,24 @@ check_current:
97403 gap_end = vma->vm_start;
97404 if (gap_end < low_limit)
97405 return -ENOMEM;
97406+
97407+ if (gap_end - gap_start > info->threadstack_offset)
97408+ gap_end -= info->threadstack_offset;
97409+ else
97410+ gap_end = gap_start;
97411+
97412+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97413+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97414+ gap_start += sysctl_heap_stack_gap;
97415+ else
97416+ gap_start = gap_end;
97417+ }
97418+ if (vma->vm_flags & VM_GROWSDOWN) {
97419+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97420+ gap_end -= sysctl_heap_stack_gap;
97421+ else
97422+ gap_end = gap_start;
97423+ }
97424 if (gap_start <= high_limit && gap_end - gap_start >= length)
97425 goto found;
97426
97427@@ -1899,6 +2167,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97428 struct mm_struct *mm = current->mm;
97429 struct vm_area_struct *vma;
97430 struct vm_unmapped_area_info info;
97431+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97432
97433 if (len > TASK_SIZE - mmap_min_addr)
97434 return -ENOMEM;
97435@@ -1906,11 +2175,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97436 if (flags & MAP_FIXED)
97437 return addr;
97438
97439+#ifdef CONFIG_PAX_RANDMMAP
97440+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97441+#endif
97442+
97443 if (addr) {
97444 addr = PAGE_ALIGN(addr);
97445 vma = find_vma(mm, addr);
97446 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97447- (!vma || addr + len <= vma->vm_start))
97448+ check_heap_stack_gap(vma, addr, len, offset))
97449 return addr;
97450 }
97451
97452@@ -1919,6 +2192,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97453 info.low_limit = mm->mmap_base;
97454 info.high_limit = TASK_SIZE;
97455 info.align_mask = 0;
97456+ info.threadstack_offset = offset;
97457 return vm_unmapped_area(&info);
97458 }
97459 #endif
97460@@ -1937,6 +2211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97461 struct mm_struct *mm = current->mm;
97462 unsigned long addr = addr0;
97463 struct vm_unmapped_area_info info;
97464+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97465
97466 /* requested length too big for entire address space */
97467 if (len > TASK_SIZE - mmap_min_addr)
97468@@ -1945,12 +2220,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97469 if (flags & MAP_FIXED)
97470 return addr;
97471
97472+#ifdef CONFIG_PAX_RANDMMAP
97473+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97474+#endif
97475+
97476 /* requesting a specific address */
97477 if (addr) {
97478 addr = PAGE_ALIGN(addr);
97479 vma = find_vma(mm, addr);
97480 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97481- (!vma || addr + len <= vma->vm_start))
97482+ check_heap_stack_gap(vma, addr, len, offset))
97483 return addr;
97484 }
97485
97486@@ -1959,6 +2238,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97487 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
97488 info.high_limit = mm->mmap_base;
97489 info.align_mask = 0;
97490+ info.threadstack_offset = offset;
97491 addr = vm_unmapped_area(&info);
97492
97493 /*
97494@@ -1971,6 +2251,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97495 VM_BUG_ON(addr != -ENOMEM);
97496 info.flags = 0;
97497 info.low_limit = TASK_UNMAPPED_BASE;
97498+
97499+#ifdef CONFIG_PAX_RANDMMAP
97500+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97501+ info.low_limit += mm->delta_mmap;
97502+#endif
97503+
97504 info.high_limit = TASK_SIZE;
97505 addr = vm_unmapped_area(&info);
97506 }
97507@@ -2071,6 +2357,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
97508 return vma;
97509 }
97510
97511+#ifdef CONFIG_PAX_SEGMEXEC
97512+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97513+{
97514+ struct vm_area_struct *vma_m;
97515+
97516+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97517+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97518+ BUG_ON(vma->vm_mirror);
97519+ return NULL;
97520+ }
97521+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97522+ vma_m = vma->vm_mirror;
97523+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97524+ BUG_ON(vma->vm_file != vma_m->vm_file);
97525+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97526+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
97527+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
97528+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
97529+ return vma_m;
97530+}
97531+#endif
97532+
97533 /*
97534 * Verify that the stack growth is acceptable and
97535 * update accounting. This is shared with both the
97536@@ -2087,6 +2395,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97537 return -ENOMEM;
97538
97539 /* Stack limit test */
97540+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
97541 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
97542 return -ENOMEM;
97543
97544@@ -2097,6 +2406,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97545 locked = mm->locked_vm + grow;
97546 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
97547 limit >>= PAGE_SHIFT;
97548+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97549 if (locked > limit && !capable(CAP_IPC_LOCK))
97550 return -ENOMEM;
97551 }
97552@@ -2126,37 +2436,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97553 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97554 * vma is the last one with address > vma->vm_end. Have to extend vma.
97555 */
97556+#ifndef CONFIG_IA64
97557+static
97558+#endif
97559 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97560 {
97561 int error;
97562+ bool locknext;
97563
97564 if (!(vma->vm_flags & VM_GROWSUP))
97565 return -EFAULT;
97566
97567+ /* Also guard against wrapping around to address 0. */
97568+ if (address < PAGE_ALIGN(address+1))
97569+ address = PAGE_ALIGN(address+1);
97570+ else
97571+ return -ENOMEM;
97572+
97573 /*
97574 * We must make sure the anon_vma is allocated
97575 * so that the anon_vma locking is not a noop.
97576 */
97577 if (unlikely(anon_vma_prepare(vma)))
97578 return -ENOMEM;
97579+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97580+ if (locknext && anon_vma_prepare(vma->vm_next))
97581+ return -ENOMEM;
97582 vma_lock_anon_vma(vma);
97583+ if (locknext)
97584+ vma_lock_anon_vma(vma->vm_next);
97585
97586 /*
97587 * vma->vm_start/vm_end cannot change under us because the caller
97588 * is required to hold the mmap_sem in read mode. We need the
97589- * anon_vma lock to serialize against concurrent expand_stacks.
97590- * Also guard against wrapping around to address 0.
97591+ * anon_vma locks to serialize against concurrent expand_stacks
97592+ * and expand_upwards.
97593 */
97594- if (address < PAGE_ALIGN(address+4))
97595- address = PAGE_ALIGN(address+4);
97596- else {
97597- vma_unlock_anon_vma(vma);
97598- return -ENOMEM;
97599- }
97600 error = 0;
97601
97602 /* Somebody else might have raced and expanded it already */
97603- if (address > vma->vm_end) {
97604+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97605+ error = -ENOMEM;
97606+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97607 unsigned long size, grow;
97608
97609 size = address - vma->vm_start;
97610@@ -2191,6 +2512,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97611 }
97612 }
97613 }
97614+ if (locknext)
97615+ vma_unlock_anon_vma(vma->vm_next);
97616 vma_unlock_anon_vma(vma);
97617 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97618 validate_mm(vma->vm_mm);
97619@@ -2205,6 +2528,8 @@ int expand_downwards(struct vm_area_struct *vma,
97620 unsigned long address)
97621 {
97622 int error;
97623+ bool lockprev = false;
97624+ struct vm_area_struct *prev;
97625
97626 /*
97627 * We must make sure the anon_vma is allocated
97628@@ -2218,6 +2543,15 @@ int expand_downwards(struct vm_area_struct *vma,
97629 if (error)
97630 return error;
97631
97632+ prev = vma->vm_prev;
97633+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97634+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97635+#endif
97636+ if (lockprev && anon_vma_prepare(prev))
97637+ return -ENOMEM;
97638+ if (lockprev)
97639+ vma_lock_anon_vma(prev);
97640+
97641 vma_lock_anon_vma(vma);
97642
97643 /*
97644@@ -2227,9 +2561,17 @@ int expand_downwards(struct vm_area_struct *vma,
97645 */
97646
97647 /* Somebody else might have raced and expanded it already */
97648- if (address < vma->vm_start) {
97649+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97650+ error = -ENOMEM;
97651+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97652 unsigned long size, grow;
97653
97654+#ifdef CONFIG_PAX_SEGMEXEC
97655+ struct vm_area_struct *vma_m;
97656+
97657+ vma_m = pax_find_mirror_vma(vma);
97658+#endif
97659+
97660 size = vma->vm_end - address;
97661 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97662
97663@@ -2254,13 +2596,27 @@ int expand_downwards(struct vm_area_struct *vma,
97664 vma->vm_pgoff -= grow;
97665 anon_vma_interval_tree_post_update_vma(vma);
97666 vma_gap_update(vma);
97667+
97668+#ifdef CONFIG_PAX_SEGMEXEC
97669+ if (vma_m) {
97670+ anon_vma_interval_tree_pre_update_vma(vma_m);
97671+ vma_m->vm_start -= grow << PAGE_SHIFT;
97672+ vma_m->vm_pgoff -= grow;
97673+ anon_vma_interval_tree_post_update_vma(vma_m);
97674+ vma_gap_update(vma_m);
97675+ }
97676+#endif
97677+
97678 spin_unlock(&vma->vm_mm->page_table_lock);
97679
97680+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97681 perf_event_mmap(vma);
97682 }
97683 }
97684 }
97685 vma_unlock_anon_vma(vma);
97686+ if (lockprev)
97687+ vma_unlock_anon_vma(prev);
97688 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97689 validate_mm(vma->vm_mm);
97690 return error;
97691@@ -2358,6 +2714,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97692 do {
97693 long nrpages = vma_pages(vma);
97694
97695+#ifdef CONFIG_PAX_SEGMEXEC
97696+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97697+ vma = remove_vma(vma);
97698+ continue;
97699+ }
97700+#endif
97701+
97702 if (vma->vm_flags & VM_ACCOUNT)
97703 nr_accounted += nrpages;
97704 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97705@@ -2402,6 +2765,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97706 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97707 vma->vm_prev = NULL;
97708 do {
97709+
97710+#ifdef CONFIG_PAX_SEGMEXEC
97711+ if (vma->vm_mirror) {
97712+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97713+ vma->vm_mirror->vm_mirror = NULL;
97714+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97715+ vma->vm_mirror = NULL;
97716+ }
97717+#endif
97718+
97719 vma_rb_erase(vma, &mm->mm_rb);
97720 mm->map_count--;
97721 tail_vma = vma;
97722@@ -2429,14 +2802,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97723 struct vm_area_struct *new;
97724 int err = -ENOMEM;
97725
97726+#ifdef CONFIG_PAX_SEGMEXEC
97727+ struct vm_area_struct *vma_m, *new_m = NULL;
97728+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97729+#endif
97730+
97731 if (is_vm_hugetlb_page(vma) && (addr &
97732 ~(huge_page_mask(hstate_vma(vma)))))
97733 return -EINVAL;
97734
97735+#ifdef CONFIG_PAX_SEGMEXEC
97736+ vma_m = pax_find_mirror_vma(vma);
97737+#endif
97738+
97739 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97740 if (!new)
97741 goto out_err;
97742
97743+#ifdef CONFIG_PAX_SEGMEXEC
97744+ if (vma_m) {
97745+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97746+ if (!new_m) {
97747+ kmem_cache_free(vm_area_cachep, new);
97748+ goto out_err;
97749+ }
97750+ }
97751+#endif
97752+
97753 /* most fields are the same, copy all, and then fixup */
97754 *new = *vma;
97755
97756@@ -2449,11 +2841,28 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97757 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97758 }
97759
97760+#ifdef CONFIG_PAX_SEGMEXEC
97761+ if (vma_m) {
97762+ *new_m = *vma_m;
97763+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
97764+ new_m->vm_mirror = new;
97765+ new->vm_mirror = new_m;
97766+
97767+ if (new_below)
97768+ new_m->vm_end = addr_m;
97769+ else {
97770+ new_m->vm_start = addr_m;
97771+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97772+ }
97773+ }
97774+#endif
97775+
97776 err = vma_dup_policy(vma, new);
97777 if (err)
97778 goto out_free_vma;
97779
97780- if (anon_vma_clone(new, vma))
97781+ err = anon_vma_clone(new, vma);
97782+ if (err)
97783 goto out_free_mpol;
97784
97785 if (new->vm_file)
97786@@ -2468,6 +2877,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97787 else
97788 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97789
97790+#ifdef CONFIG_PAX_SEGMEXEC
97791+ if (!err && vma_m) {
97792+ struct mempolicy *pol = vma_policy(new);
97793+
97794+ if (anon_vma_clone(new_m, vma_m))
97795+ goto out_free_mpol;
97796+
97797+ mpol_get(pol);
97798+ set_vma_policy(new_m, pol);
97799+
97800+ if (new_m->vm_file)
97801+ get_file(new_m->vm_file);
97802+
97803+ if (new_m->vm_ops && new_m->vm_ops->open)
97804+ new_m->vm_ops->open(new_m);
97805+
97806+ if (new_below)
97807+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97808+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97809+ else
97810+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97811+
97812+ if (err) {
97813+ if (new_m->vm_ops && new_m->vm_ops->close)
97814+ new_m->vm_ops->close(new_m);
97815+ if (new_m->vm_file)
97816+ fput(new_m->vm_file);
97817+ mpol_put(pol);
97818+ }
97819+ }
97820+#endif
97821+
97822 /* Success. */
97823 if (!err)
97824 return 0;
97825@@ -2477,10 +2918,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97826 new->vm_ops->close(new);
97827 if (new->vm_file)
97828 fput(new->vm_file);
97829- unlink_anon_vmas(new);
97830 out_free_mpol:
97831 mpol_put(vma_policy(new));
97832 out_free_vma:
97833+
97834+#ifdef CONFIG_PAX_SEGMEXEC
97835+ if (new_m) {
97836+ unlink_anon_vmas(new_m);
97837+ kmem_cache_free(vm_area_cachep, new_m);
97838+ }
97839+#endif
97840+
97841+ unlink_anon_vmas(new);
97842 kmem_cache_free(vm_area_cachep, new);
97843 out_err:
97844 return err;
97845@@ -2493,6 +2942,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97846 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97847 unsigned long addr, int new_below)
97848 {
97849+
97850+#ifdef CONFIG_PAX_SEGMEXEC
97851+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97852+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97853+ if (mm->map_count >= sysctl_max_map_count-1)
97854+ return -ENOMEM;
97855+ } else
97856+#endif
97857+
97858 if (mm->map_count >= sysctl_max_map_count)
97859 return -ENOMEM;
97860
97861@@ -2504,11 +2962,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97862 * work. This now handles partial unmappings.
97863 * Jeremy Fitzhardinge <jeremy@goop.org>
97864 */
97865+#ifdef CONFIG_PAX_SEGMEXEC
97866 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97867 {
97868+ int ret = __do_munmap(mm, start, len);
97869+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97870+ return ret;
97871+
97872+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97873+}
97874+
97875+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97876+#else
97877+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97878+#endif
97879+{
97880 unsigned long end;
97881 struct vm_area_struct *vma, *prev, *last;
97882
97883+ /*
97884+ * mm->mmap_sem is required to protect against another thread
97885+ * changing the mappings in case we sleep.
97886+ */
97887+ verify_mm_writelocked(mm);
97888+
97889 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97890 return -EINVAL;
97891
97892@@ -2583,6 +3060,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97893 /* Fix up all other VM information */
97894 remove_vma_list(mm, vma);
97895
97896+ track_exec_limit(mm, start, end, 0UL);
97897+
97898 return 0;
97899 }
97900
97901@@ -2591,6 +3070,13 @@ int vm_munmap(unsigned long start, size_t len)
97902 int ret;
97903 struct mm_struct *mm = current->mm;
97904
97905+
97906+#ifdef CONFIG_PAX_SEGMEXEC
97907+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97908+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
97909+ return -EINVAL;
97910+#endif
97911+
97912 down_write(&mm->mmap_sem);
97913 ret = do_munmap(mm, start, len);
97914 up_write(&mm->mmap_sem);
97915@@ -2604,16 +3090,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97916 return vm_munmap(addr, len);
97917 }
97918
97919-static inline void verify_mm_writelocked(struct mm_struct *mm)
97920-{
97921-#ifdef CONFIG_DEBUG_VM
97922- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97923- WARN_ON(1);
97924- up_read(&mm->mmap_sem);
97925- }
97926-#endif
97927-}
97928-
97929 /*
97930 * this is really a simplified "do_mmap". it only handles
97931 * anonymous maps. eventually we may be able to do some
97932@@ -2627,6 +3103,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97933 struct rb_node ** rb_link, * rb_parent;
97934 pgoff_t pgoff = addr >> PAGE_SHIFT;
97935 int error;
97936+ unsigned long charged;
97937
97938 len = PAGE_ALIGN(len);
97939 if (!len)
97940@@ -2634,10 +3111,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97941
97942 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97943
97944+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97945+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97946+ flags &= ~VM_EXEC;
97947+
97948+#ifdef CONFIG_PAX_MPROTECT
97949+ if (mm->pax_flags & MF_PAX_MPROTECT)
97950+ flags &= ~VM_MAYEXEC;
97951+#endif
97952+
97953+ }
97954+#endif
97955+
97956 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97957 if (error & ~PAGE_MASK)
97958 return error;
97959
97960+ charged = len >> PAGE_SHIFT;
97961+
97962 error = mlock_future_check(mm, mm->def_flags, len);
97963 if (error)
97964 return error;
97965@@ -2651,21 +3142,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97966 /*
97967 * Clear old maps. this also does some error checking for us
97968 */
97969- munmap_back:
97970 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97971 if (do_munmap(mm, addr, len))
97972 return -ENOMEM;
97973- goto munmap_back;
97974+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97975 }
97976
97977 /* Check against address space limits *after* clearing old maps... */
97978- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97979+ if (!may_expand_vm(mm, charged))
97980 return -ENOMEM;
97981
97982 if (mm->map_count > sysctl_max_map_count)
97983 return -ENOMEM;
97984
97985- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
97986+ if (security_vm_enough_memory_mm(mm, charged))
97987 return -ENOMEM;
97988
97989 /* Can we just expand an old private anonymous mapping? */
97990@@ -2679,7 +3169,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97991 */
97992 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97993 if (!vma) {
97994- vm_unacct_memory(len >> PAGE_SHIFT);
97995+ vm_unacct_memory(charged);
97996 return -ENOMEM;
97997 }
97998
97999@@ -2693,10 +3183,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98000 vma_link(mm, vma, prev, rb_link, rb_parent);
98001 out:
98002 perf_event_mmap(vma);
98003- mm->total_vm += len >> PAGE_SHIFT;
98004+ mm->total_vm += charged;
98005 if (flags & VM_LOCKED)
98006- mm->locked_vm += (len >> PAGE_SHIFT);
98007+ mm->locked_vm += charged;
98008 vma->vm_flags |= VM_SOFTDIRTY;
98009+ track_exec_limit(mm, addr, addr + len, flags);
98010 return addr;
98011 }
98012
98013@@ -2758,6 +3249,7 @@ void exit_mmap(struct mm_struct *mm)
98014 while (vma) {
98015 if (vma->vm_flags & VM_ACCOUNT)
98016 nr_accounted += vma_pages(vma);
98017+ vma->vm_mirror = NULL;
98018 vma = remove_vma(vma);
98019 }
98020 vm_unacct_memory(nr_accounted);
98021@@ -2775,6 +3267,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98022 struct vm_area_struct *prev;
98023 struct rb_node **rb_link, *rb_parent;
98024
98025+#ifdef CONFIG_PAX_SEGMEXEC
98026+ struct vm_area_struct *vma_m = NULL;
98027+#endif
98028+
98029+ if (security_mmap_addr(vma->vm_start))
98030+ return -EPERM;
98031+
98032 /*
98033 * The vm_pgoff of a purely anonymous vma should be irrelevant
98034 * until its first write fault, when page's anon_vma and index
98035@@ -2798,7 +3297,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98036 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98037 return -ENOMEM;
98038
98039+#ifdef CONFIG_PAX_SEGMEXEC
98040+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98041+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98042+ if (!vma_m)
98043+ return -ENOMEM;
98044+ }
98045+#endif
98046+
98047 vma_link(mm, vma, prev, rb_link, rb_parent);
98048+
98049+#ifdef CONFIG_PAX_SEGMEXEC
98050+ if (vma_m)
98051+ BUG_ON(pax_mirror_vma(vma_m, vma));
98052+#endif
98053+
98054 return 0;
98055 }
98056
98057@@ -2817,6 +3330,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98058 struct rb_node **rb_link, *rb_parent;
98059 bool faulted_in_anon_vma = true;
98060
98061+ BUG_ON(vma->vm_mirror);
98062+
98063 /*
98064 * If anonymous vma has not yet been faulted, update new pgoff
98065 * to match new location, to increase its chance of merging.
98066@@ -2881,6 +3396,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98067 return NULL;
98068 }
98069
98070+#ifdef CONFIG_PAX_SEGMEXEC
98071+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98072+{
98073+ struct vm_area_struct *prev_m;
98074+ struct rb_node **rb_link_m, *rb_parent_m;
98075+ struct mempolicy *pol_m;
98076+
98077+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98078+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98079+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98080+ *vma_m = *vma;
98081+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
98082+ if (anon_vma_clone(vma_m, vma))
98083+ return -ENOMEM;
98084+ pol_m = vma_policy(vma_m);
98085+ mpol_get(pol_m);
98086+ set_vma_policy(vma_m, pol_m);
98087+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98088+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98089+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98090+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98091+ if (vma_m->vm_file)
98092+ get_file(vma_m->vm_file);
98093+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98094+ vma_m->vm_ops->open(vma_m);
98095+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
98096+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98097+ vma_m->vm_mirror = vma;
98098+ vma->vm_mirror = vma_m;
98099+ return 0;
98100+}
98101+#endif
98102+
98103 /*
98104 * Return true if the calling process may expand its vm space by the passed
98105 * number of pages
98106@@ -2892,6 +3440,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98107
98108 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
98109
98110+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98111 if (cur + npages > lim)
98112 return 0;
98113 return 1;
98114@@ -2974,6 +3523,22 @@ static struct vm_area_struct *__install_special_mapping(
98115 vma->vm_start = addr;
98116 vma->vm_end = addr + len;
98117
98118+#ifdef CONFIG_PAX_MPROTECT
98119+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98120+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98121+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98122+ return ERR_PTR(-EPERM);
98123+ if (!(vm_flags & VM_EXEC))
98124+ vm_flags &= ~VM_MAYEXEC;
98125+#else
98126+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98127+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98128+#endif
98129+ else
98130+ vm_flags &= ~VM_MAYWRITE;
98131+ }
98132+#endif
98133+
98134 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
98135 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98136
98137diff --git a/mm/mprotect.c b/mm/mprotect.c
98138index c43d557..0b7ccd2 100644
98139--- a/mm/mprotect.c
98140+++ b/mm/mprotect.c
98141@@ -24,10 +24,18 @@
98142 #include <linux/migrate.h>
98143 #include <linux/perf_event.h>
98144 #include <linux/ksm.h>
98145+#include <linux/sched/sysctl.h>
98146+
98147+#ifdef CONFIG_PAX_MPROTECT
98148+#include <linux/elf.h>
98149+#include <linux/binfmts.h>
98150+#endif
98151+
98152 #include <asm/uaccess.h>
98153 #include <asm/pgtable.h>
98154 #include <asm/cacheflush.h>
98155 #include <asm/tlbflush.h>
98156+#include <asm/mmu_context.h>
98157
98158 #ifndef pgprot_modify
98159 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
98160@@ -256,6 +264,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
98161 return pages;
98162 }
98163
98164+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98165+/* called while holding the mmap semaphor for writing except stack expansion */
98166+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98167+{
98168+ unsigned long oldlimit, newlimit = 0UL;
98169+
98170+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
98171+ return;
98172+
98173+ spin_lock(&mm->page_table_lock);
98174+ oldlimit = mm->context.user_cs_limit;
98175+ if ((prot & VM_EXEC) && oldlimit < end)
98176+ /* USER_CS limit moved up */
98177+ newlimit = end;
98178+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98179+ /* USER_CS limit moved down */
98180+ newlimit = start;
98181+
98182+ if (newlimit) {
98183+ mm->context.user_cs_limit = newlimit;
98184+
98185+#ifdef CONFIG_SMP
98186+ wmb();
98187+ cpus_clear(mm->context.cpu_user_cs_mask);
98188+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98189+#endif
98190+
98191+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98192+ }
98193+ spin_unlock(&mm->page_table_lock);
98194+ if (newlimit == end) {
98195+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98196+
98197+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98198+ if (is_vm_hugetlb_page(vma))
98199+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98200+ else
98201+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
98202+ }
98203+}
98204+#endif
98205+
98206 int
98207 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98208 unsigned long start, unsigned long end, unsigned long newflags)
98209@@ -268,11 +318,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98210 int error;
98211 int dirty_accountable = 0;
98212
98213+#ifdef CONFIG_PAX_SEGMEXEC
98214+ struct vm_area_struct *vma_m = NULL;
98215+ unsigned long start_m, end_m;
98216+
98217+ start_m = start + SEGMEXEC_TASK_SIZE;
98218+ end_m = end + SEGMEXEC_TASK_SIZE;
98219+#endif
98220+
98221 if (newflags == oldflags) {
98222 *pprev = vma;
98223 return 0;
98224 }
98225
98226+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98227+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98228+
98229+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98230+ return -ENOMEM;
98231+
98232+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98233+ return -ENOMEM;
98234+ }
98235+
98236 /*
98237 * If we make a private mapping writable we increase our commit;
98238 * but (without finer accounting) cannot reduce our commit if we
98239@@ -289,6 +357,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98240 }
98241 }
98242
98243+#ifdef CONFIG_PAX_SEGMEXEC
98244+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98245+ if (start != vma->vm_start) {
98246+ error = split_vma(mm, vma, start, 1);
98247+ if (error)
98248+ goto fail;
98249+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98250+ *pprev = (*pprev)->vm_next;
98251+ }
98252+
98253+ if (end != vma->vm_end) {
98254+ error = split_vma(mm, vma, end, 0);
98255+ if (error)
98256+ goto fail;
98257+ }
98258+
98259+ if (pax_find_mirror_vma(vma)) {
98260+ error = __do_munmap(mm, start_m, end_m - start_m);
98261+ if (error)
98262+ goto fail;
98263+ } else {
98264+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98265+ if (!vma_m) {
98266+ error = -ENOMEM;
98267+ goto fail;
98268+ }
98269+ vma->vm_flags = newflags;
98270+ error = pax_mirror_vma(vma_m, vma);
98271+ if (error) {
98272+ vma->vm_flags = oldflags;
98273+ goto fail;
98274+ }
98275+ }
98276+ }
98277+#endif
98278+
98279 /*
98280 * First try to merge with previous and/or next vma.
98281 */
98282@@ -319,9 +423,21 @@ success:
98283 * vm_flags and vm_page_prot are protected by the mmap_sem
98284 * held in write mode.
98285 */
98286+
98287+#ifdef CONFIG_PAX_SEGMEXEC
98288+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98289+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98290+#endif
98291+
98292 vma->vm_flags = newflags;
98293+
98294+#ifdef CONFIG_PAX_MPROTECT
98295+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98296+ mm->binfmt->handle_mprotect(vma, newflags);
98297+#endif
98298+
98299 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
98300- vm_get_page_prot(newflags));
98301+ vm_get_page_prot(vma->vm_flags));
98302
98303 if (vma_wants_writenotify(vma)) {
98304 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
98305@@ -360,6 +476,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98306 end = start + len;
98307 if (end <= start)
98308 return -ENOMEM;
98309+
98310+#ifdef CONFIG_PAX_SEGMEXEC
98311+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98312+ if (end > SEGMEXEC_TASK_SIZE)
98313+ return -EINVAL;
98314+ } else
98315+#endif
98316+
98317+ if (end > TASK_SIZE)
98318+ return -EINVAL;
98319+
98320 if (!arch_validate_prot(prot))
98321 return -EINVAL;
98322
98323@@ -367,7 +494,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98324 /*
98325 * Does the application expect PROT_READ to imply PROT_EXEC:
98326 */
98327- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98328+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98329 prot |= PROT_EXEC;
98330
98331 vm_flags = calc_vm_prot_bits(prot);
98332@@ -399,6 +526,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98333 if (start > vma->vm_start)
98334 prev = vma;
98335
98336+#ifdef CONFIG_PAX_MPROTECT
98337+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98338+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98339+#endif
98340+
98341 for (nstart = start ; ; ) {
98342 unsigned long newflags;
98343
98344@@ -409,6 +541,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98345
98346 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98347 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98348+ if (prot & (PROT_WRITE | PROT_EXEC))
98349+ gr_log_rwxmprotect(vma);
98350+
98351+ error = -EACCES;
98352+ goto out;
98353+ }
98354+
98355+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98356 error = -EACCES;
98357 goto out;
98358 }
98359@@ -423,6 +563,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98360 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98361 if (error)
98362 goto out;
98363+
98364+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98365+
98366 nstart = tmp;
98367
98368 if (nstart < prev->vm_end)
98369diff --git a/mm/mremap.c b/mm/mremap.c
98370index 05f1180..c3cde48 100644
98371--- a/mm/mremap.c
98372+++ b/mm/mremap.c
98373@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98374 continue;
98375 pte = ptep_get_and_clear(mm, old_addr, old_pte);
98376 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98377+
98378+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98379+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98380+ pte = pte_exprotect(pte);
98381+#endif
98382+
98383 pte = move_soft_dirty_pte(pte);
98384 set_pte_at(mm, new_addr, new_pte, pte);
98385 }
98386@@ -344,6 +350,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98387 if (is_vm_hugetlb_page(vma))
98388 goto Einval;
98389
98390+#ifdef CONFIG_PAX_SEGMEXEC
98391+ if (pax_find_mirror_vma(vma))
98392+ goto Einval;
98393+#endif
98394+
98395 /* We can't remap across vm area boundaries */
98396 if (old_len > vma->vm_end - addr)
98397 goto Efault;
98398@@ -399,20 +410,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
98399 unsigned long ret = -EINVAL;
98400 unsigned long charged = 0;
98401 unsigned long map_flags;
98402+ unsigned long pax_task_size = TASK_SIZE;
98403
98404 if (new_addr & ~PAGE_MASK)
98405 goto out;
98406
98407- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98408+#ifdef CONFIG_PAX_SEGMEXEC
98409+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98410+ pax_task_size = SEGMEXEC_TASK_SIZE;
98411+#endif
98412+
98413+ pax_task_size -= PAGE_SIZE;
98414+
98415+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98416 goto out;
98417
98418 /* Check if the location we're moving into overlaps the
98419 * old location at all, and fail if it does.
98420 */
98421- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98422- goto out;
98423-
98424- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98425+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98426 goto out;
98427
98428 ret = do_munmap(mm, new_addr, new_len);
98429@@ -481,6 +497,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98430 unsigned long ret = -EINVAL;
98431 unsigned long charged = 0;
98432 bool locked = false;
98433+ unsigned long pax_task_size = TASK_SIZE;
98434
98435 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98436 return ret;
98437@@ -502,6 +519,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98438 if (!new_len)
98439 return ret;
98440
98441+#ifdef CONFIG_PAX_SEGMEXEC
98442+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98443+ pax_task_size = SEGMEXEC_TASK_SIZE;
98444+#endif
98445+
98446+ pax_task_size -= PAGE_SIZE;
98447+
98448+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98449+ old_len > pax_task_size || addr > pax_task_size-old_len)
98450+ return ret;
98451+
98452 down_write(&current->mm->mmap_sem);
98453
98454 if (flags & MREMAP_FIXED) {
98455@@ -552,6 +580,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98456 new_addr = addr;
98457 }
98458 ret = addr;
98459+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98460 goto out;
98461 }
98462 }
98463@@ -575,7 +604,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98464 goto out;
98465 }
98466
98467+ map_flags = vma->vm_flags;
98468 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
98469+ if (!(ret & ~PAGE_MASK)) {
98470+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98471+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98472+ }
98473 }
98474 out:
98475 if (ret & ~PAGE_MASK)
98476diff --git a/mm/nommu.c b/mm/nommu.c
98477index a881d96..e5932cd 100644
98478--- a/mm/nommu.c
98479+++ b/mm/nommu.c
98480@@ -70,7 +70,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98481 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98482 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98483 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98484-int heap_stack_gap = 0;
98485
98486 atomic_long_t mmap_pages_allocated;
98487
98488@@ -857,15 +856,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98489 EXPORT_SYMBOL(find_vma);
98490
98491 /*
98492- * find a VMA
98493- * - we don't extend stack VMAs under NOMMU conditions
98494- */
98495-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98496-{
98497- return find_vma(mm, addr);
98498-}
98499-
98500-/*
98501 * expand a stack to a given address
98502 * - not supported under NOMMU conditions
98503 */
98504@@ -1572,6 +1562,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98505
98506 /* most fields are the same, copy all, and then fixup */
98507 *new = *vma;
98508+ INIT_LIST_HEAD(&new->anon_vma_chain);
98509 *region = *vma->vm_region;
98510 new->vm_region = region;
98511
98512@@ -2002,8 +1993,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
98513 }
98514 EXPORT_SYMBOL(generic_file_remap_pages);
98515
98516-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98517- unsigned long addr, void *buf, int len, int write)
98518+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98519+ unsigned long addr, void *buf, size_t len, int write)
98520 {
98521 struct vm_area_struct *vma;
98522
98523@@ -2044,8 +2035,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98524 *
98525 * The caller must hold a reference on @mm.
98526 */
98527-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98528- void *buf, int len, int write)
98529+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98530+ void *buf, size_t len, int write)
98531 {
98532 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98533 }
98534@@ -2054,7 +2045,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98535 * Access another process' address space.
98536 * - source/target buffer must be kernel space
98537 */
98538-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
98539+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
98540 {
98541 struct mm_struct *mm;
98542
98543diff --git a/mm/page-writeback.c b/mm/page-writeback.c
98544index ba5fd97..5a95869 100644
98545--- a/mm/page-writeback.c
98546+++ b/mm/page-writeback.c
98547@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
98548 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
98549 * - the bdi dirty thresh drops quickly due to change of JBOD workload
98550 */
98551-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
98552+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
98553 unsigned long thresh,
98554 unsigned long bg_thresh,
98555 unsigned long dirty,
98556diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98557index c5fe124..2cf7f17 100644
98558--- a/mm/page_alloc.c
98559+++ b/mm/page_alloc.c
98560@@ -61,6 +61,7 @@
98561 #include <linux/page-debug-flags.h>
98562 #include <linux/hugetlb.h>
98563 #include <linux/sched/rt.h>
98564+#include <linux/random.h>
98565
98566 #include <asm/sections.h>
98567 #include <asm/tlbflush.h>
98568@@ -357,7 +358,7 @@ out:
98569 * This usage means that zero-order pages may not be compound.
98570 */
98571
98572-static void free_compound_page(struct page *page)
98573+void free_compound_page(struct page *page)
98574 {
98575 __free_pages_ok(page, compound_order(page));
98576 }
98577@@ -740,6 +741,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98578 int i;
98579 int bad = 0;
98580
98581+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98582+ unsigned long index = 1UL << order;
98583+#endif
98584+
98585 trace_mm_page_free(page, order);
98586 kmemcheck_free_shadow(page, order);
98587
98588@@ -756,6 +761,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98589 debug_check_no_obj_freed(page_address(page),
98590 PAGE_SIZE << order);
98591 }
98592+
98593+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98594+ for (; index; --index)
98595+ sanitize_highpage(page + index - 1);
98596+#endif
98597+
98598 arch_free_page(page, order);
98599 kernel_map_pages(page, 1 << order, 0);
98600
98601@@ -779,6 +790,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98602 local_irq_restore(flags);
98603 }
98604
98605+#ifdef CONFIG_PAX_LATENT_ENTROPY
98606+bool __meminitdata extra_latent_entropy;
98607+
98608+static int __init setup_pax_extra_latent_entropy(char *str)
98609+{
98610+ extra_latent_entropy = true;
98611+ return 0;
98612+}
98613+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
98614+
98615+volatile u64 latent_entropy __latent_entropy;
98616+EXPORT_SYMBOL(latent_entropy);
98617+#endif
98618+
98619 void __init __free_pages_bootmem(struct page *page, unsigned int order)
98620 {
98621 unsigned int nr_pages = 1 << order;
98622@@ -794,6 +819,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
98623 __ClearPageReserved(p);
98624 set_page_count(p, 0);
98625
98626+#ifdef CONFIG_PAX_LATENT_ENTROPY
98627+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
98628+ u64 hash = 0;
98629+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
98630+ const u64 *data = lowmem_page_address(page);
98631+
98632+ for (index = 0; index < end; index++)
98633+ hash ^= hash + data[index];
98634+ latent_entropy ^= hash;
98635+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
98636+ }
98637+#endif
98638+
98639 page_zone(page)->managed_pages += nr_pages;
98640 set_page_refcounted(page);
98641 __free_pages(page, order);
98642@@ -922,8 +960,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
98643 arch_alloc_page(page, order);
98644 kernel_map_pages(page, 1 << order, 1);
98645
98646+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98647 if (gfp_flags & __GFP_ZERO)
98648 prep_zero_page(page, order, gfp_flags);
98649+#endif
98650
98651 if (order && (gfp_flags & __GFP_COMP))
98652 prep_compound_page(page, order);
98653@@ -1601,7 +1641,7 @@ again:
98654 }
98655
98656 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
98657- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98658+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98659 !zone_is_fair_depleted(zone))
98660 zone_set_flag(zone, ZONE_FAIR_DEPLETED);
98661
98662@@ -1922,7 +1962,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
98663 do {
98664 mod_zone_page_state(zone, NR_ALLOC_BATCH,
98665 high_wmark_pages(zone) - low_wmark_pages(zone) -
98666- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98667+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98668 zone_clear_flag(zone, ZONE_FAIR_DEPLETED);
98669 } while (zone++ != preferred_zone);
98670 }
98671@@ -5699,7 +5739,7 @@ static void __setup_per_zone_wmarks(void)
98672
98673 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
98674 high_wmark_pages(zone) - low_wmark_pages(zone) -
98675- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98676+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98677
98678 setup_zone_migrate_reserve(zone);
98679 spin_unlock_irqrestore(&zone->lock, flags);
98680diff --git a/mm/percpu.c b/mm/percpu.c
98681index 2139e30..1d45bce 100644
98682--- a/mm/percpu.c
98683+++ b/mm/percpu.c
98684@@ -123,7 +123,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98685 static unsigned int pcpu_high_unit_cpu __read_mostly;
98686
98687 /* the address of the first chunk which starts with the kernel static area */
98688-void *pcpu_base_addr __read_mostly;
98689+void *pcpu_base_addr __read_only;
98690 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98691
98692 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98693diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
98694index 5077afc..846c9ef 100644
98695--- a/mm/process_vm_access.c
98696+++ b/mm/process_vm_access.c
98697@@ -13,6 +13,7 @@
98698 #include <linux/uio.h>
98699 #include <linux/sched.h>
98700 #include <linux/highmem.h>
98701+#include <linux/security.h>
98702 #include <linux/ptrace.h>
98703 #include <linux/slab.h>
98704 #include <linux/syscalls.h>
98705@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98706 ssize_t iov_len;
98707 size_t total_len = iov_iter_count(iter);
98708
98709+ return -ENOSYS; // PaX: until properly audited
98710+
98711 /*
98712 * Work out how many pages of struct pages we're going to need
98713 * when eventually calling get_user_pages
98714 */
98715 for (i = 0; i < riovcnt; i++) {
98716 iov_len = rvec[i].iov_len;
98717- if (iov_len > 0) {
98718- nr_pages_iov = ((unsigned long)rvec[i].iov_base
98719- + iov_len)
98720- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
98721- / PAGE_SIZE + 1;
98722- nr_pages = max(nr_pages, nr_pages_iov);
98723- }
98724+ if (iov_len <= 0)
98725+ continue;
98726+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
98727+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
98728+ nr_pages = max(nr_pages, nr_pages_iov);
98729 }
98730
98731 if (nr_pages == 0)
98732@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98733 goto free_proc_pages;
98734 }
98735
98736+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
98737+ rc = -EPERM;
98738+ goto put_task_struct;
98739+ }
98740+
98741 mm = mm_access(task, PTRACE_MODE_ATTACH);
98742 if (!mm || IS_ERR(mm)) {
98743 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
98744diff --git a/mm/rmap.c b/mm/rmap.c
98745index e01318d..7a532bd 100644
98746--- a/mm/rmap.c
98747+++ b/mm/rmap.c
98748@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98749 struct anon_vma *anon_vma = vma->anon_vma;
98750 struct anon_vma_chain *avc;
98751
98752+#ifdef CONFIG_PAX_SEGMEXEC
98753+ struct anon_vma_chain *avc_m = NULL;
98754+#endif
98755+
98756 might_sleep();
98757 if (unlikely(!anon_vma)) {
98758 struct mm_struct *mm = vma->vm_mm;
98759@@ -173,6 +177,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98760 if (!avc)
98761 goto out_enomem;
98762
98763+#ifdef CONFIG_PAX_SEGMEXEC
98764+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
98765+ if (!avc_m)
98766+ goto out_enomem_free_avc;
98767+#endif
98768+
98769 anon_vma = find_mergeable_anon_vma(vma);
98770 allocated = NULL;
98771 if (!anon_vma) {
98772@@ -186,6 +196,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98773 /* page_table_lock to protect against threads */
98774 spin_lock(&mm->page_table_lock);
98775 if (likely(!vma->anon_vma)) {
98776+
98777+#ifdef CONFIG_PAX_SEGMEXEC
98778+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98779+
98780+ if (vma_m) {
98781+ BUG_ON(vma_m->anon_vma);
98782+ vma_m->anon_vma = anon_vma;
98783+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
98784+ avc_m = NULL;
98785+ }
98786+#endif
98787+
98788 vma->anon_vma = anon_vma;
98789 anon_vma_chain_link(vma, avc, anon_vma);
98790 allocated = NULL;
98791@@ -196,12 +218,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98792
98793 if (unlikely(allocated))
98794 put_anon_vma(allocated);
98795+
98796+#ifdef CONFIG_PAX_SEGMEXEC
98797+ if (unlikely(avc_m))
98798+ anon_vma_chain_free(avc_m);
98799+#endif
98800+
98801 if (unlikely(avc))
98802 anon_vma_chain_free(avc);
98803 }
98804 return 0;
98805
98806 out_enomem_free_avc:
98807+
98808+#ifdef CONFIG_PAX_SEGMEXEC
98809+ if (avc_m)
98810+ anon_vma_chain_free(avc_m);
98811+#endif
98812+
98813 anon_vma_chain_free(avc);
98814 out_enomem:
98815 return -ENOMEM;
98816@@ -237,7 +271,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
98817 * Attach the anon_vmas from src to dst.
98818 * Returns 0 on success, -ENOMEM on failure.
98819 */
98820-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98821+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
98822 {
98823 struct anon_vma_chain *avc, *pavc;
98824 struct anon_vma *root = NULL;
98825@@ -270,10 +304,11 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98826 * the corresponding VMA in the parent process is attached to.
98827 * Returns 0 on success, non-zero on failure.
98828 */
98829-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
98830+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
98831 {
98832 struct anon_vma_chain *avc;
98833 struct anon_vma *anon_vma;
98834+ int error;
98835
98836 /* Don't bother if the parent process has no anon_vma here. */
98837 if (!pvma->anon_vma)
98838@@ -283,8 +318,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
98839 * First, attach the new VMA to the parent VMA's anon_vmas,
98840 * so rmap can find non-COWed pages in child processes.
98841 */
98842- if (anon_vma_clone(vma, pvma))
98843- return -ENOMEM;
98844+ error = anon_vma_clone(vma, pvma);
98845+ if (error)
98846+ return error;
98847
98848 /* Then add our own anon_vma. */
98849 anon_vma = anon_vma_alloc();
98850@@ -374,8 +410,10 @@ static void anon_vma_ctor(void *data)
98851 void __init anon_vma_init(void)
98852 {
98853 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
98854- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
98855- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
98856+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
98857+ anon_vma_ctor);
98858+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
98859+ SLAB_PANIC|SLAB_NO_SANITIZE);
98860 }
98861
98862 /*
98863diff --git a/mm/shmem.c b/mm/shmem.c
98864index 469f90d..34a09ee 100644
98865--- a/mm/shmem.c
98866+++ b/mm/shmem.c
98867@@ -33,7 +33,7 @@
98868 #include <linux/swap.h>
98869 #include <linux/aio.h>
98870
98871-static struct vfsmount *shm_mnt;
98872+struct vfsmount *shm_mnt;
98873
98874 #ifdef CONFIG_SHMEM
98875 /*
98876@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
98877 #define BOGO_DIRENT_SIZE 20
98878
98879 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98880-#define SHORT_SYMLINK_LEN 128
98881+#define SHORT_SYMLINK_LEN 64
98882
98883 /*
98884 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
98885@@ -2524,6 +2524,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
98886 static int shmem_xattr_validate(const char *name)
98887 {
98888 struct { const char *prefix; size_t len; } arr[] = {
98889+
98890+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98891+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
98892+#endif
98893+
98894 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
98895 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
98896 };
98897@@ -2579,6 +2584,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
98898 if (err)
98899 return err;
98900
98901+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98902+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
98903+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
98904+ return -EOPNOTSUPP;
98905+ if (size > 8)
98906+ return -EINVAL;
98907+ }
98908+#endif
98909+
98910 return simple_xattr_set(&info->xattrs, name, value, size, flags);
98911 }
98912
98913@@ -2962,8 +2976,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98914 int err = -ENOMEM;
98915
98916 /* Round up to L1_CACHE_BYTES to resist false sharing */
98917- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98918- L1_CACHE_BYTES), GFP_KERNEL);
98919+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98920 if (!sbinfo)
98921 return -ENOMEM;
98922
98923diff --git a/mm/slab.c b/mm/slab.c
98924index 7c52b38..3ccc17e 100644
98925--- a/mm/slab.c
98926+++ b/mm/slab.c
98927@@ -316,10 +316,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98928 if ((x)->max_freeable < i) \
98929 (x)->max_freeable = i; \
98930 } while (0)
98931-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98932-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98933-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98934-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98935+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98936+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98937+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98938+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98939+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
98940+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
98941 #else
98942 #define STATS_INC_ACTIVE(x) do { } while (0)
98943 #define STATS_DEC_ACTIVE(x) do { } while (0)
98944@@ -336,6 +338,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98945 #define STATS_INC_ALLOCMISS(x) do { } while (0)
98946 #define STATS_INC_FREEHIT(x) do { } while (0)
98947 #define STATS_INC_FREEMISS(x) do { } while (0)
98948+#define STATS_INC_SANITIZED(x) do { } while (0)
98949+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
98950 #endif
98951
98952 #if DEBUG
98953@@ -452,7 +456,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
98954 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98955 */
98956 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98957- const struct page *page, void *obj)
98958+ const struct page *page, const void *obj)
98959 {
98960 u32 offset = (obj - page->s_mem);
98961 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98962@@ -1462,12 +1466,12 @@ void __init kmem_cache_init(void)
98963 */
98964
98965 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
98966- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
98967+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
98968
98969 if (INDEX_AC != INDEX_NODE)
98970 kmalloc_caches[INDEX_NODE] =
98971 create_kmalloc_cache("kmalloc-node",
98972- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
98973+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
98974
98975 slab_early_init = 0;
98976
98977@@ -3384,6 +3388,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
98978 struct array_cache *ac = cpu_cache_get(cachep);
98979
98980 check_irq_off();
98981+
98982+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98983+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
98984+ STATS_INC_NOT_SANITIZED(cachep);
98985+ else {
98986+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
98987+
98988+ if (cachep->ctor)
98989+ cachep->ctor(objp);
98990+
98991+ STATS_INC_SANITIZED(cachep);
98992+ }
98993+#endif
98994+
98995 kmemleak_free_recursive(objp, cachep->flags);
98996 objp = cache_free_debugcheck(cachep, objp, caller);
98997
98998@@ -3607,6 +3625,7 @@ void kfree(const void *objp)
98999
99000 if (unlikely(ZERO_OR_NULL_PTR(objp)))
99001 return;
99002+ VM_BUG_ON(!virt_addr_valid(objp));
99003 local_irq_save(flags);
99004 kfree_debugcheck(objp);
99005 c = virt_to_cache(objp);
99006@@ -4056,14 +4075,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
99007 }
99008 /* cpu stats */
99009 {
99010- unsigned long allochit = atomic_read(&cachep->allochit);
99011- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
99012- unsigned long freehit = atomic_read(&cachep->freehit);
99013- unsigned long freemiss = atomic_read(&cachep->freemiss);
99014+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
99015+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
99016+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
99017+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
99018
99019 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
99020 allochit, allocmiss, freehit, freemiss);
99021 }
99022+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99023+ {
99024+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
99025+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
99026+
99027+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
99028+ }
99029+#endif
99030 #endif
99031 }
99032
99033@@ -4281,13 +4308,69 @@ static const struct file_operations proc_slabstats_operations = {
99034 static int __init slab_proc_init(void)
99035 {
99036 #ifdef CONFIG_DEBUG_SLAB_LEAK
99037- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
99038+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
99039 #endif
99040 return 0;
99041 }
99042 module_init(slab_proc_init);
99043 #endif
99044
99045+bool is_usercopy_object(const void *ptr)
99046+{
99047+ struct page *page;
99048+ struct kmem_cache *cachep;
99049+
99050+ if (ZERO_OR_NULL_PTR(ptr))
99051+ return false;
99052+
99053+ if (!slab_is_available())
99054+ return false;
99055+
99056+ if (!virt_addr_valid(ptr))
99057+ return false;
99058+
99059+ page = virt_to_head_page(ptr);
99060+
99061+ if (!PageSlab(page))
99062+ return false;
99063+
99064+ cachep = page->slab_cache;
99065+ return cachep->flags & SLAB_USERCOPY;
99066+}
99067+
99068+#ifdef CONFIG_PAX_USERCOPY
99069+const char *check_heap_object(const void *ptr, unsigned long n)
99070+{
99071+ struct page *page;
99072+ struct kmem_cache *cachep;
99073+ unsigned int objnr;
99074+ unsigned long offset;
99075+
99076+ if (ZERO_OR_NULL_PTR(ptr))
99077+ return "<null>";
99078+
99079+ if (!virt_addr_valid(ptr))
99080+ return NULL;
99081+
99082+ page = virt_to_head_page(ptr);
99083+
99084+ if (!PageSlab(page))
99085+ return NULL;
99086+
99087+ cachep = page->slab_cache;
99088+ if (!(cachep->flags & SLAB_USERCOPY))
99089+ return cachep->name;
99090+
99091+ objnr = obj_to_index(cachep, page, ptr);
99092+ BUG_ON(objnr >= cachep->num);
99093+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
99094+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
99095+ return NULL;
99096+
99097+ return cachep->name;
99098+}
99099+#endif
99100+
99101 /**
99102 * ksize - get the actual amount of memory allocated for a given object
99103 * @objp: Pointer to the object
99104diff --git a/mm/slab.h b/mm/slab.h
99105index 0e0fdd3..d0fd761 100644
99106--- a/mm/slab.h
99107+++ b/mm/slab.h
99108@@ -32,6 +32,20 @@ extern struct list_head slab_caches;
99109 /* The slab cache that manages slab cache information */
99110 extern struct kmem_cache *kmem_cache;
99111
99112+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99113+#ifdef CONFIG_X86_64
99114+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
99115+#else
99116+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
99117+#endif
99118+enum pax_sanitize_mode {
99119+ PAX_SANITIZE_SLAB_OFF = 0,
99120+ PAX_SANITIZE_SLAB_FAST,
99121+ PAX_SANITIZE_SLAB_FULL,
99122+};
99123+extern enum pax_sanitize_mode pax_sanitize_slab;
99124+#endif
99125+
99126 unsigned long calculate_alignment(unsigned long flags,
99127 unsigned long align, unsigned long size);
99128
99129@@ -67,7 +81,8 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99130
99131 /* Legal flag mask for kmem_cache_create(), for various configurations */
99132 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
99133- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
99134+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
99135+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
99136
99137 #if defined(CONFIG_DEBUG_SLAB)
99138 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
99139@@ -251,6 +266,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
99140 return s;
99141
99142 page = virt_to_head_page(x);
99143+
99144+ BUG_ON(!PageSlab(page));
99145+
99146 cachep = page->slab_cache;
99147 if (slab_equal_or_root(cachep, s))
99148 return cachep;
99149diff --git a/mm/slab_common.c b/mm/slab_common.c
99150index d319502..da7714e 100644
99151--- a/mm/slab_common.c
99152+++ b/mm/slab_common.c
99153@@ -25,11 +25,35 @@
99154
99155 #include "slab.h"
99156
99157-enum slab_state slab_state;
99158+enum slab_state slab_state __read_only;
99159 LIST_HEAD(slab_caches);
99160 DEFINE_MUTEX(slab_mutex);
99161 struct kmem_cache *kmem_cache;
99162
99163+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99164+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
99165+static int __init pax_sanitize_slab_setup(char *str)
99166+{
99167+ if (!str)
99168+ return 0;
99169+
99170+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
99171+ pr_info("PaX slab sanitization: %s\n", "disabled");
99172+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
99173+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
99174+ pr_info("PaX slab sanitization: %s\n", "fast");
99175+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
99176+ } else if (!strcmp(str, "full")) {
99177+ pr_info("PaX slab sanitization: %s\n", "full");
99178+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99179+ } else
99180+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99181+
99182+ return 0;
99183+}
99184+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99185+#endif
99186+
99187 #ifdef CONFIG_DEBUG_VM
99188 static int kmem_cache_sanity_check(const char *name, size_t size)
99189 {
99190@@ -160,7 +184,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
99191 if (err)
99192 goto out_free_cache;
99193
99194- s->refcount = 1;
99195+ atomic_set(&s->refcount, 1);
99196 list_add(&s->list, &slab_caches);
99197 out:
99198 if (err)
99199@@ -222,6 +246,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99200 */
99201 flags &= CACHE_CREATE_MASK;
99202
99203+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99204+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99205+ flags |= SLAB_NO_SANITIZE;
99206+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99207+ flags &= ~SLAB_NO_SANITIZE;
99208+#endif
99209+
99210 s = __kmem_cache_alias(name, size, align, flags, ctor);
99211 if (s)
99212 goto out_unlock;
99213@@ -341,8 +372,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99214
99215 mutex_lock(&slab_mutex);
99216
99217- s->refcount--;
99218- if (s->refcount)
99219+ if (!atomic_dec_and_test(&s->refcount))
99220 goto out_unlock;
99221
99222 if (memcg_cleanup_cache_params(s) != 0)
99223@@ -362,7 +392,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99224 rcu_barrier();
99225
99226 memcg_free_cache_params(s);
99227-#ifdef SLAB_SUPPORTS_SYSFS
99228+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99229 sysfs_slab_remove(s);
99230 #else
99231 slab_kmem_cache_release(s);
99232@@ -418,7 +448,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99233 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99234 name, size, err);
99235
99236- s->refcount = -1; /* Exempt from merging for now */
99237+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99238 }
99239
99240 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99241@@ -431,7 +461,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99242
99243 create_boot_cache(s, name, size, flags);
99244 list_add(&s->list, &slab_caches);
99245- s->refcount = 1;
99246+ atomic_set(&s->refcount, 1);
99247 return s;
99248 }
99249
99250@@ -443,6 +473,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99251 EXPORT_SYMBOL(kmalloc_dma_caches);
99252 #endif
99253
99254+#ifdef CONFIG_PAX_USERCOPY_SLABS
99255+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99256+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99257+#endif
99258+
99259 /*
99260 * Conversion table for small slabs sizes / 8 to the index in the
99261 * kmalloc array. This is necessary for slabs < 192 since we have non power
99262@@ -507,6 +542,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99263 return kmalloc_dma_caches[index];
99264
99265 #endif
99266+
99267+#ifdef CONFIG_PAX_USERCOPY_SLABS
99268+ if (unlikely((flags & GFP_USERCOPY)))
99269+ return kmalloc_usercopy_caches[index];
99270+
99271+#endif
99272+
99273 return kmalloc_caches[index];
99274 }
99275
99276@@ -563,7 +605,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99277 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99278 if (!kmalloc_caches[i]) {
99279 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99280- 1 << i, flags);
99281+ 1 << i, SLAB_USERCOPY | flags);
99282 }
99283
99284 /*
99285@@ -572,10 +614,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99286 * earlier power of two caches
99287 */
99288 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99289- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99290+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99291
99292 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
99293- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
99294+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
99295 }
99296
99297 /* Kmalloc array is now usable */
99298@@ -608,6 +650,23 @@ void __init create_kmalloc_caches(unsigned long flags)
99299 }
99300 }
99301 #endif
99302+
99303+#ifdef CONFIG_PAX_USERCOPY_SLABS
99304+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
99305+ struct kmem_cache *s = kmalloc_caches[i];
99306+
99307+ if (s) {
99308+ int size = kmalloc_size(i);
99309+ char *n = kasprintf(GFP_NOWAIT,
99310+ "usercopy-kmalloc-%d", size);
99311+
99312+ BUG_ON(!n);
99313+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
99314+ size, SLAB_USERCOPY | flags);
99315+ }
99316+ }
99317+#endif
99318+
99319 }
99320 #endif /* !CONFIG_SLOB */
99321
99322@@ -666,6 +725,9 @@ void print_slabinfo_header(struct seq_file *m)
99323 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
99324 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
99325 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
99326+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99327+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
99328+#endif
99329 #endif
99330 seq_putc(m, '\n');
99331 }
99332diff --git a/mm/slob.c b/mm/slob.c
99333index 21980e0..975f1bf 100644
99334--- a/mm/slob.c
99335+++ b/mm/slob.c
99336@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
99337 /*
99338 * Return the size of a slob block.
99339 */
99340-static slobidx_t slob_units(slob_t *s)
99341+static slobidx_t slob_units(const slob_t *s)
99342 {
99343 if (s->units > 0)
99344 return s->units;
99345@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
99346 /*
99347 * Return the next free slob block pointer after this one.
99348 */
99349-static slob_t *slob_next(slob_t *s)
99350+static slob_t *slob_next(const slob_t *s)
99351 {
99352 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
99353 slobidx_t next;
99354@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
99355 /*
99356 * Returns true if s is the last free block in its page.
99357 */
99358-static int slob_last(slob_t *s)
99359+static int slob_last(const slob_t *s)
99360 {
99361 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
99362 }
99363
99364-static void *slob_new_pages(gfp_t gfp, int order, int node)
99365+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
99366 {
99367- void *page;
99368+ struct page *page;
99369
99370 #ifdef CONFIG_NUMA
99371 if (node != NUMA_NO_NODE)
99372@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
99373 if (!page)
99374 return NULL;
99375
99376- return page_address(page);
99377+ __SetPageSlab(page);
99378+ return page;
99379 }
99380
99381-static void slob_free_pages(void *b, int order)
99382+static void slob_free_pages(struct page *sp, int order)
99383 {
99384 if (current->reclaim_state)
99385 current->reclaim_state->reclaimed_slab += 1 << order;
99386- free_pages((unsigned long)b, order);
99387+ __ClearPageSlab(sp);
99388+ page_mapcount_reset(sp);
99389+ sp->private = 0;
99390+ __free_pages(sp, order);
99391 }
99392
99393 /*
99394@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99395
99396 /* Not enough space: must allocate a new page */
99397 if (!b) {
99398- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99399- if (!b)
99400+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99401+ if (!sp)
99402 return NULL;
99403- sp = virt_to_page(b);
99404- __SetPageSlab(sp);
99405+ b = page_address(sp);
99406
99407 spin_lock_irqsave(&slob_lock, flags);
99408 sp->units = SLOB_UNITS(PAGE_SIZE);
99409 sp->freelist = b;
99410+ sp->private = 0;
99411 INIT_LIST_HEAD(&sp->lru);
99412 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
99413 set_slob_page_free(sp, slob_list);
99414@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99415 /*
99416 * slob_free: entry point into the slob allocator.
99417 */
99418-static void slob_free(void *block, int size)
99419+static void slob_free(struct kmem_cache *c, void *block, int size)
99420 {
99421 struct page *sp;
99422 slob_t *prev, *next, *b = (slob_t *)block;
99423@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
99424 if (slob_page_free(sp))
99425 clear_slob_page_free(sp);
99426 spin_unlock_irqrestore(&slob_lock, flags);
99427- __ClearPageSlab(sp);
99428- page_mapcount_reset(sp);
99429- slob_free_pages(b, 0);
99430+ slob_free_pages(sp, 0);
99431 return;
99432 }
99433
99434+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99435+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
99436+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
99437+#endif
99438+
99439 if (!slob_page_free(sp)) {
99440 /* This slob page is about to become partially free. Easy! */
99441 sp->units = units;
99442@@ -424,11 +431,10 @@ out:
99443 */
99444
99445 static __always_inline void *
99446-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99447+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
99448 {
99449- unsigned int *m;
99450- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99451- void *ret;
99452+ slob_t *m;
99453+ void *ret = NULL;
99454
99455 gfp &= gfp_allowed_mask;
99456
99457@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99458
99459 if (!m)
99460 return NULL;
99461- *m = size;
99462+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
99463+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
99464+ m[0].units = size;
99465+ m[1].units = align;
99466 ret = (void *)m + align;
99467
99468 trace_kmalloc_node(caller, ret,
99469 size, size + align, gfp, node);
99470 } else {
99471 unsigned int order = get_order(size);
99472+ struct page *page;
99473
99474 if (likely(order))
99475 gfp |= __GFP_COMP;
99476- ret = slob_new_pages(gfp, order, node);
99477+ page = slob_new_pages(gfp, order, node);
99478+ if (page) {
99479+ ret = page_address(page);
99480+ page->private = size;
99481+ }
99482
99483 trace_kmalloc_node(caller, ret,
99484 size, PAGE_SIZE << order, gfp, node);
99485 }
99486
99487- kmemleak_alloc(ret, size, 1, gfp);
99488+ return ret;
99489+}
99490+
99491+static __always_inline void *
99492+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99493+{
99494+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99495+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
99496+
99497+ if (!ZERO_OR_NULL_PTR(ret))
99498+ kmemleak_alloc(ret, size, 1, gfp);
99499 return ret;
99500 }
99501
99502@@ -493,34 +517,112 @@ void kfree(const void *block)
99503 return;
99504 kmemleak_free(block);
99505
99506+ VM_BUG_ON(!virt_addr_valid(block));
99507 sp = virt_to_page(block);
99508- if (PageSlab(sp)) {
99509+ VM_BUG_ON(!PageSlab(sp));
99510+ if (!sp->private) {
99511 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99512- unsigned int *m = (unsigned int *)(block - align);
99513- slob_free(m, *m + align);
99514- } else
99515+ slob_t *m = (slob_t *)(block - align);
99516+ slob_free(NULL, m, m[0].units + align);
99517+ } else {
99518+ __ClearPageSlab(sp);
99519+ page_mapcount_reset(sp);
99520+ sp->private = 0;
99521 __free_pages(sp, compound_order(sp));
99522+ }
99523 }
99524 EXPORT_SYMBOL(kfree);
99525
99526+bool is_usercopy_object(const void *ptr)
99527+{
99528+ if (!slab_is_available())
99529+ return false;
99530+
99531+ // PAX: TODO
99532+
99533+ return false;
99534+}
99535+
99536+#ifdef CONFIG_PAX_USERCOPY
99537+const char *check_heap_object(const void *ptr, unsigned long n)
99538+{
99539+ struct page *page;
99540+ const slob_t *free;
99541+ const void *base;
99542+ unsigned long flags;
99543+
99544+ if (ZERO_OR_NULL_PTR(ptr))
99545+ return "<null>";
99546+
99547+ if (!virt_addr_valid(ptr))
99548+ return NULL;
99549+
99550+ page = virt_to_head_page(ptr);
99551+ if (!PageSlab(page))
99552+ return NULL;
99553+
99554+ if (page->private) {
99555+ base = page;
99556+ if (base <= ptr && n <= page->private - (ptr - base))
99557+ return NULL;
99558+ return "<slob>";
99559+ }
99560+
99561+ /* some tricky double walking to find the chunk */
99562+ spin_lock_irqsave(&slob_lock, flags);
99563+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99564+ free = page->freelist;
99565+
99566+ while (!slob_last(free) && (void *)free <= ptr) {
99567+ base = free + slob_units(free);
99568+ free = slob_next(free);
99569+ }
99570+
99571+ while (base < (void *)free) {
99572+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99573+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99574+ int offset;
99575+
99576+ if (ptr < base + align)
99577+ break;
99578+
99579+ offset = ptr - base - align;
99580+ if (offset >= m) {
99581+ base += size;
99582+ continue;
99583+ }
99584+
99585+ if (n > m - offset)
99586+ break;
99587+
99588+ spin_unlock_irqrestore(&slob_lock, flags);
99589+ return NULL;
99590+ }
99591+
99592+ spin_unlock_irqrestore(&slob_lock, flags);
99593+ return "<slob>";
99594+}
99595+#endif
99596+
99597 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99598 size_t ksize(const void *block)
99599 {
99600 struct page *sp;
99601 int align;
99602- unsigned int *m;
99603+ slob_t *m;
99604
99605 BUG_ON(!block);
99606 if (unlikely(block == ZERO_SIZE_PTR))
99607 return 0;
99608
99609 sp = virt_to_page(block);
99610- if (unlikely(!PageSlab(sp)))
99611- return PAGE_SIZE << compound_order(sp);
99612+ VM_BUG_ON(!PageSlab(sp));
99613+ if (sp->private)
99614+ return sp->private;
99615
99616 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99617- m = (unsigned int *)(block - align);
99618- return SLOB_UNITS(*m) * SLOB_UNIT;
99619+ m = (slob_t *)(block - align);
99620+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99621 }
99622 EXPORT_SYMBOL(ksize);
99623
99624@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
99625
99626 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99627 {
99628- void *b;
99629+ void *b = NULL;
99630
99631 flags &= gfp_allowed_mask;
99632
99633 lockdep_trace_alloc(flags);
99634
99635+#ifdef CONFIG_PAX_USERCOPY_SLABS
99636+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
99637+#else
99638 if (c->size < PAGE_SIZE) {
99639 b = slob_alloc(c->size, flags, c->align, node);
99640 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99641 SLOB_UNITS(c->size) * SLOB_UNIT,
99642 flags, node);
99643 } else {
99644- b = slob_new_pages(flags, get_order(c->size), node);
99645+ struct page *sp;
99646+
99647+ sp = slob_new_pages(flags, get_order(c->size), node);
99648+ if (sp) {
99649+ b = page_address(sp);
99650+ sp->private = c->size;
99651+ }
99652 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99653 PAGE_SIZE << get_order(c->size),
99654 flags, node);
99655 }
99656+#endif
99657
99658 if (b && c->ctor)
99659 c->ctor(b);
99660@@ -582,12 +694,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
99661 EXPORT_SYMBOL(kmem_cache_alloc_node);
99662 #endif
99663
99664-static void __kmem_cache_free(void *b, int size)
99665+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
99666 {
99667- if (size < PAGE_SIZE)
99668- slob_free(b, size);
99669+ struct page *sp;
99670+
99671+ sp = virt_to_page(b);
99672+ BUG_ON(!PageSlab(sp));
99673+ if (!sp->private)
99674+ slob_free(c, b, size);
99675 else
99676- slob_free_pages(b, get_order(size));
99677+ slob_free_pages(sp, get_order(size));
99678 }
99679
99680 static void kmem_rcu_free(struct rcu_head *head)
99681@@ -595,22 +711,36 @@ static void kmem_rcu_free(struct rcu_head *head)
99682 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
99683 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
99684
99685- __kmem_cache_free(b, slob_rcu->size);
99686+ __kmem_cache_free(NULL, b, slob_rcu->size);
99687 }
99688
99689 void kmem_cache_free(struct kmem_cache *c, void *b)
99690 {
99691+ int size = c->size;
99692+
99693+#ifdef CONFIG_PAX_USERCOPY_SLABS
99694+ if (size + c->align < PAGE_SIZE) {
99695+ size += c->align;
99696+ b -= c->align;
99697+ }
99698+#endif
99699+
99700 kmemleak_free_recursive(b, c->flags);
99701 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99702 struct slob_rcu *slob_rcu;
99703- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99704- slob_rcu->size = c->size;
99705+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99706+ slob_rcu->size = size;
99707 call_rcu(&slob_rcu->head, kmem_rcu_free);
99708 } else {
99709- __kmem_cache_free(b, c->size);
99710+ __kmem_cache_free(c, b, size);
99711 }
99712
99713+#ifdef CONFIG_PAX_USERCOPY_SLABS
99714+ trace_kfree(_RET_IP_, b);
99715+#else
99716 trace_kmem_cache_free(_RET_IP_, b);
99717+#endif
99718+
99719 }
99720 EXPORT_SYMBOL(kmem_cache_free);
99721
99722diff --git a/mm/slub.c b/mm/slub.c
99723index 3e8afcc..d6e2c89 100644
99724--- a/mm/slub.c
99725+++ b/mm/slub.c
99726@@ -207,7 +207,7 @@ struct track {
99727
99728 enum track_item { TRACK_ALLOC, TRACK_FREE };
99729
99730-#ifdef CONFIG_SYSFS
99731+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99732 static int sysfs_slab_add(struct kmem_cache *);
99733 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99734 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
99735@@ -545,7 +545,7 @@ static void print_track(const char *s, struct track *t)
99736 if (!t->addr)
99737 return;
99738
99739- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99740+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99741 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99742 #ifdef CONFIG_STACKTRACE
99743 {
99744@@ -2643,6 +2643,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
99745
99746 slab_free_hook(s, x);
99747
99748+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99749+ if (!(s->flags & SLAB_NO_SANITIZE)) {
99750+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
99751+ if (s->ctor)
99752+ s->ctor(x);
99753+ }
99754+#endif
99755+
99756 redo:
99757 /*
99758 * Determine the currently cpus per cpu slab.
99759@@ -2710,7 +2718,7 @@ static int slub_min_objects;
99760 * Merge control. If this is set then no merging of slab caches will occur.
99761 * (Could be removed. This was introduced to pacify the merge skeptics.)
99762 */
99763-static int slub_nomerge;
99764+static int slub_nomerge = 1;
99765
99766 /*
99767 * Calculate the order of allocation given an slab object size.
99768@@ -2986,6 +2994,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
99769 s->inuse = size;
99770
99771 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
99772+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99773+ (!(flags & SLAB_NO_SANITIZE)) ||
99774+#endif
99775 s->ctor)) {
99776 /*
99777 * Relocate free pointer after the object if it is not
99778@@ -3313,6 +3324,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99779 EXPORT_SYMBOL(__kmalloc_node);
99780 #endif
99781
99782+bool is_usercopy_object(const void *ptr)
99783+{
99784+ struct page *page;
99785+ struct kmem_cache *s;
99786+
99787+ if (ZERO_OR_NULL_PTR(ptr))
99788+ return false;
99789+
99790+ if (!slab_is_available())
99791+ return false;
99792+
99793+ if (!virt_addr_valid(ptr))
99794+ return false;
99795+
99796+ page = virt_to_head_page(ptr);
99797+
99798+ if (!PageSlab(page))
99799+ return false;
99800+
99801+ s = page->slab_cache;
99802+ return s->flags & SLAB_USERCOPY;
99803+}
99804+
99805+#ifdef CONFIG_PAX_USERCOPY
99806+const char *check_heap_object(const void *ptr, unsigned long n)
99807+{
99808+ struct page *page;
99809+ struct kmem_cache *s;
99810+ unsigned long offset;
99811+
99812+ if (ZERO_OR_NULL_PTR(ptr))
99813+ return "<null>";
99814+
99815+ if (!virt_addr_valid(ptr))
99816+ return NULL;
99817+
99818+ page = virt_to_head_page(ptr);
99819+
99820+ if (!PageSlab(page))
99821+ return NULL;
99822+
99823+ s = page->slab_cache;
99824+ if (!(s->flags & SLAB_USERCOPY))
99825+ return s->name;
99826+
99827+ offset = (ptr - page_address(page)) % s->size;
99828+ if (offset <= s->object_size && n <= s->object_size - offset)
99829+ return NULL;
99830+
99831+ return s->name;
99832+}
99833+#endif
99834+
99835 size_t ksize(const void *object)
99836 {
99837 struct page *page;
99838@@ -3341,6 +3405,7 @@ void kfree(const void *x)
99839 if (unlikely(ZERO_OR_NULL_PTR(x)))
99840 return;
99841
99842+ VM_BUG_ON(!virt_addr_valid(x));
99843 page = virt_to_head_page(x);
99844 if (unlikely(!PageSlab(page))) {
99845 BUG_ON(!PageCompound(page));
99846@@ -3642,7 +3707,7 @@ static int slab_unmergeable(struct kmem_cache *s)
99847 /*
99848 * We may have set a slab to be unmergeable during bootstrap.
99849 */
99850- if (s->refcount < 0)
99851+ if (atomic_read(&s->refcount) < 0)
99852 return 1;
99853
99854 return 0;
99855@@ -3699,7 +3764,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99856 int i;
99857 struct kmem_cache *c;
99858
99859- s->refcount++;
99860+ atomic_inc(&s->refcount);
99861
99862 /*
99863 * Adjust the object sizes so that we clear
99864@@ -3718,7 +3783,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99865 }
99866
99867 if (sysfs_slab_alias(s, name)) {
99868- s->refcount--;
99869+ atomic_dec(&s->refcount);
99870 s = NULL;
99871 }
99872 }
99873@@ -3835,7 +3900,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
99874 }
99875 #endif
99876
99877-#ifdef CONFIG_SYSFS
99878+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99879 static int count_inuse(struct page *page)
99880 {
99881 return page->inuse;
99882@@ -4116,7 +4181,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
99883 len += sprintf(buf + len, "%7ld ", l->count);
99884
99885 if (l->addr)
99886+#ifdef CONFIG_GRKERNSEC_HIDESYM
99887+ len += sprintf(buf + len, "%pS", NULL);
99888+#else
99889 len += sprintf(buf + len, "%pS", (void *)l->addr);
99890+#endif
99891 else
99892 len += sprintf(buf + len, "<not-available>");
99893
99894@@ -4218,12 +4287,12 @@ static void __init resiliency_test(void)
99895 validate_slab_cache(kmalloc_caches[9]);
99896 }
99897 #else
99898-#ifdef CONFIG_SYSFS
99899+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99900 static void resiliency_test(void) {};
99901 #endif
99902 #endif
99903
99904-#ifdef CONFIG_SYSFS
99905+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99906 enum slab_stat_type {
99907 SL_ALL, /* All slabs */
99908 SL_PARTIAL, /* Only partially allocated slabs */
99909@@ -4460,13 +4529,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
99910 {
99911 if (!s->ctor)
99912 return 0;
99913+#ifdef CONFIG_GRKERNSEC_HIDESYM
99914+ return sprintf(buf, "%pS\n", NULL);
99915+#else
99916 return sprintf(buf, "%pS\n", s->ctor);
99917+#endif
99918 }
99919 SLAB_ATTR_RO(ctor);
99920
99921 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99922 {
99923- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
99924+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
99925 }
99926 SLAB_ATTR_RO(aliases);
99927
99928@@ -4554,6 +4627,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
99929 SLAB_ATTR_RO(cache_dma);
99930 #endif
99931
99932+#ifdef CONFIG_PAX_USERCOPY_SLABS
99933+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
99934+{
99935+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
99936+}
99937+SLAB_ATTR_RO(usercopy);
99938+#endif
99939+
99940+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99941+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
99942+{
99943+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
99944+}
99945+SLAB_ATTR_RO(sanitize);
99946+#endif
99947+
99948 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
99949 {
99950 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
99951@@ -4888,6 +4977,12 @@ static struct attribute *slab_attrs[] = {
99952 #ifdef CONFIG_ZONE_DMA
99953 &cache_dma_attr.attr,
99954 #endif
99955+#ifdef CONFIG_PAX_USERCOPY_SLABS
99956+ &usercopy_attr.attr,
99957+#endif
99958+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99959+ &sanitize_attr.attr,
99960+#endif
99961 #ifdef CONFIG_NUMA
99962 &remote_node_defrag_ratio_attr.attr,
99963 #endif
99964@@ -5132,6 +5227,7 @@ static char *create_unique_id(struct kmem_cache *s)
99965 return name;
99966 }
99967
99968+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99969 static int sysfs_slab_add(struct kmem_cache *s)
99970 {
99971 int err;
99972@@ -5205,6 +5301,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
99973 kobject_del(&s->kobj);
99974 kobject_put(&s->kobj);
99975 }
99976+#endif
99977
99978 /*
99979 * Need to buffer aliases during bootup until sysfs becomes
99980@@ -5218,6 +5315,7 @@ struct saved_alias {
99981
99982 static struct saved_alias *alias_list;
99983
99984+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99985 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99986 {
99987 struct saved_alias *al;
99988@@ -5240,6 +5338,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99989 alias_list = al;
99990 return 0;
99991 }
99992+#endif
99993
99994 static int __init slab_sysfs_init(void)
99995 {
99996diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
99997index 4cba9c2..b4f9fcc 100644
99998--- a/mm/sparse-vmemmap.c
99999+++ b/mm/sparse-vmemmap.c
100000@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
100001 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100002 if (!p)
100003 return NULL;
100004- pud_populate(&init_mm, pud, p);
100005+ pud_populate_kernel(&init_mm, pud, p);
100006 }
100007 return pud;
100008 }
100009@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
100010 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100011 if (!p)
100012 return NULL;
100013- pgd_populate(&init_mm, pgd, p);
100014+ pgd_populate_kernel(&init_mm, pgd, p);
100015 }
100016 return pgd;
100017 }
100018diff --git a/mm/sparse.c b/mm/sparse.c
100019index d1b48b6..6e8590e 100644
100020--- a/mm/sparse.c
100021+++ b/mm/sparse.c
100022@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
100023
100024 for (i = 0; i < PAGES_PER_SECTION; i++) {
100025 if (PageHWPoison(&memmap[i])) {
100026- atomic_long_sub(1, &num_poisoned_pages);
100027+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
100028 ClearPageHWPoison(&memmap[i]);
100029 }
100030 }
100031diff --git a/mm/swap.c b/mm/swap.c
100032index 6b2dc38..46b79ba 100644
100033--- a/mm/swap.c
100034+++ b/mm/swap.c
100035@@ -31,6 +31,7 @@
100036 #include <linux/memcontrol.h>
100037 #include <linux/gfp.h>
100038 #include <linux/uio.h>
100039+#include <linux/hugetlb.h>
100040
100041 #include "internal.h"
100042
100043@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
100044
100045 __page_cache_release(page);
100046 dtor = get_compound_page_dtor(page);
100047+ if (!PageHuge(page))
100048+ BUG_ON(dtor != free_compound_page);
100049 (*dtor)(page);
100050 }
100051
100052diff --git a/mm/swapfile.c b/mm/swapfile.c
100053index 8798b2e..348f9dd 100644
100054--- a/mm/swapfile.c
100055+++ b/mm/swapfile.c
100056@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
100057
100058 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
100059 /* Activity counter to indicate that a swapon or swapoff has occurred */
100060-static atomic_t proc_poll_event = ATOMIC_INIT(0);
100061+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
100062
100063 static inline unsigned char swap_count(unsigned char ent)
100064 {
100065@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
100066 spin_unlock(&swap_lock);
100067
100068 err = 0;
100069- atomic_inc(&proc_poll_event);
100070+ atomic_inc_unchecked(&proc_poll_event);
100071 wake_up_interruptible(&proc_poll_wait);
100072
100073 out_dput:
100074@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
100075
100076 poll_wait(file, &proc_poll_wait, wait);
100077
100078- if (seq->poll_event != atomic_read(&proc_poll_event)) {
100079- seq->poll_event = atomic_read(&proc_poll_event);
100080+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
100081+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100082 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
100083 }
100084
100085@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
100086 return ret;
100087
100088 seq = file->private_data;
100089- seq->poll_event = atomic_read(&proc_poll_event);
100090+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100091 return 0;
100092 }
100093
100094@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100095 (frontswap_map) ? "FS" : "");
100096
100097 mutex_unlock(&swapon_mutex);
100098- atomic_inc(&proc_poll_event);
100099+ atomic_inc_unchecked(&proc_poll_event);
100100 wake_up_interruptible(&proc_poll_wait);
100101
100102 if (S_ISREG(inode->i_mode))
100103diff --git a/mm/util.c b/mm/util.c
100104index 093c973..b70a268 100644
100105--- a/mm/util.c
100106+++ b/mm/util.c
100107@@ -202,6 +202,12 @@ done:
100108 void arch_pick_mmap_layout(struct mm_struct *mm)
100109 {
100110 mm->mmap_base = TASK_UNMAPPED_BASE;
100111+
100112+#ifdef CONFIG_PAX_RANDMMAP
100113+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100114+ mm->mmap_base += mm->delta_mmap;
100115+#endif
100116+
100117 mm->get_unmapped_area = arch_get_unmapped_area;
100118 }
100119 #endif
100120@@ -378,6 +384,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100121 if (!mm->arg_end)
100122 goto out_mm; /* Shh! No looking before we're done */
100123
100124+ if (gr_acl_handle_procpidmem(task))
100125+ goto out_mm;
100126+
100127 len = mm->arg_end - mm->arg_start;
100128
100129 if (len > buflen)
100130diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100131index 2b0aa54..b451f74 100644
100132--- a/mm/vmalloc.c
100133+++ b/mm/vmalloc.c
100134@@ -40,6 +40,21 @@ struct vfree_deferred {
100135 };
100136 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100137
100138+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100139+struct stack_deferred_llist {
100140+ struct llist_head list;
100141+ void *stack;
100142+ void *lowmem_stack;
100143+};
100144+
100145+struct stack_deferred {
100146+ struct stack_deferred_llist list;
100147+ struct work_struct wq;
100148+};
100149+
100150+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100151+#endif
100152+
100153 static void __vunmap(const void *, int);
100154
100155 static void free_work(struct work_struct *w)
100156@@ -47,12 +62,30 @@ static void free_work(struct work_struct *w)
100157 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100158 struct llist_node *llnode = llist_del_all(&p->list);
100159 while (llnode) {
100160- void *p = llnode;
100161+ void *x = llnode;
100162 llnode = llist_next(llnode);
100163- __vunmap(p, 1);
100164+ __vunmap(x, 1);
100165 }
100166 }
100167
100168+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100169+static void unmap_work(struct work_struct *w)
100170+{
100171+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100172+ struct llist_node *llnode = llist_del_all(&p->list.list);
100173+ while (llnode) {
100174+ struct stack_deferred_llist *x =
100175+ llist_entry((struct llist_head *)llnode,
100176+ struct stack_deferred_llist, list);
100177+ void *stack = ACCESS_ONCE(x->stack);
100178+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100179+ llnode = llist_next(llnode);
100180+ __vunmap(stack, 0);
100181+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100182+ }
100183+}
100184+#endif
100185+
100186 /*** Page table manipulation functions ***/
100187
100188 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100189@@ -61,8 +94,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100190
100191 pte = pte_offset_kernel(pmd, addr);
100192 do {
100193- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100194- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100195+
100196+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100197+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100198+ BUG_ON(!pte_exec(*pte));
100199+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100200+ continue;
100201+ }
100202+#endif
100203+
100204+ {
100205+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100206+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100207+ }
100208 } while (pte++, addr += PAGE_SIZE, addr != end);
100209 }
100210
100211@@ -122,16 +166,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100212 pte = pte_alloc_kernel(pmd, addr);
100213 if (!pte)
100214 return -ENOMEM;
100215+
100216+ pax_open_kernel();
100217 do {
100218 struct page *page = pages[*nr];
100219
100220- if (WARN_ON(!pte_none(*pte)))
100221+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100222+ if (pgprot_val(prot) & _PAGE_NX)
100223+#endif
100224+
100225+ if (!pte_none(*pte)) {
100226+ pax_close_kernel();
100227+ WARN_ON(1);
100228 return -EBUSY;
100229- if (WARN_ON(!page))
100230+ }
100231+ if (!page) {
100232+ pax_close_kernel();
100233+ WARN_ON(1);
100234 return -ENOMEM;
100235+ }
100236 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100237 (*nr)++;
100238 } while (pte++, addr += PAGE_SIZE, addr != end);
100239+ pax_close_kernel();
100240 return 0;
100241 }
100242
100243@@ -141,7 +198,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
100244 pmd_t *pmd;
100245 unsigned long next;
100246
100247- pmd = pmd_alloc(&init_mm, pud, addr);
100248+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
100249 if (!pmd)
100250 return -ENOMEM;
100251 do {
100252@@ -158,7 +215,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
100253 pud_t *pud;
100254 unsigned long next;
100255
100256- pud = pud_alloc(&init_mm, pgd, addr);
100257+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
100258 if (!pud)
100259 return -ENOMEM;
100260 do {
100261@@ -218,6 +275,12 @@ int is_vmalloc_or_module_addr(const void *x)
100262 if (addr >= MODULES_VADDR && addr < MODULES_END)
100263 return 1;
100264 #endif
100265+
100266+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100267+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
100268+ return 1;
100269+#endif
100270+
100271 return is_vmalloc_addr(x);
100272 }
100273
100274@@ -238,8 +301,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
100275
100276 if (!pgd_none(*pgd)) {
100277 pud_t *pud = pud_offset(pgd, addr);
100278+#ifdef CONFIG_X86
100279+ if (!pud_large(*pud))
100280+#endif
100281 if (!pud_none(*pud)) {
100282 pmd_t *pmd = pmd_offset(pud, addr);
100283+#ifdef CONFIG_X86
100284+ if (!pmd_large(*pmd))
100285+#endif
100286 if (!pmd_none(*pmd)) {
100287 pte_t *ptep, pte;
100288
100289@@ -1183,13 +1252,23 @@ void __init vmalloc_init(void)
100290 for_each_possible_cpu(i) {
100291 struct vmap_block_queue *vbq;
100292 struct vfree_deferred *p;
100293+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100294+ struct stack_deferred *p2;
100295+#endif
100296
100297 vbq = &per_cpu(vmap_block_queue, i);
100298 spin_lock_init(&vbq->lock);
100299 INIT_LIST_HEAD(&vbq->free);
100300+
100301 p = &per_cpu(vfree_deferred, i);
100302 init_llist_head(&p->list);
100303 INIT_WORK(&p->wq, free_work);
100304+
100305+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100306+ p2 = &per_cpu(stack_deferred, i);
100307+ init_llist_head(&p2->list.list);
100308+ INIT_WORK(&p2->wq, unmap_work);
100309+#endif
100310 }
100311
100312 /* Import existing vmlist entries. */
100313@@ -1314,6 +1393,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
100314 struct vm_struct *area;
100315
100316 BUG_ON(in_interrupt());
100317+
100318+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100319+ if (flags & VM_KERNEXEC) {
100320+ if (start != VMALLOC_START || end != VMALLOC_END)
100321+ return NULL;
100322+ start = (unsigned long)MODULES_EXEC_VADDR;
100323+ end = (unsigned long)MODULES_EXEC_END;
100324+ }
100325+#endif
100326+
100327 if (flags & VM_IOREMAP)
100328 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
100329
100330@@ -1519,6 +1608,23 @@ void vunmap(const void *addr)
100331 }
100332 EXPORT_SYMBOL(vunmap);
100333
100334+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100335+void unmap_process_stacks(struct task_struct *task)
100336+{
100337+ if (unlikely(in_interrupt())) {
100338+ struct stack_deferred *p = &__get_cpu_var(stack_deferred);
100339+ struct stack_deferred_llist *list = task->stack;
100340+ list->stack = task->stack;
100341+ list->lowmem_stack = task->lowmem_stack;
100342+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
100343+ schedule_work(&p->wq);
100344+ } else {
100345+ __vunmap(task->stack, 0);
100346+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
100347+ }
100348+}
100349+#endif
100350+
100351 /**
100352 * vmap - map an array of pages into virtually contiguous space
100353 * @pages: array of page pointers
100354@@ -1539,6 +1645,11 @@ void *vmap(struct page **pages, unsigned int count,
100355 if (count > totalram_pages)
100356 return NULL;
100357
100358+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100359+ if (!(pgprot_val(prot) & _PAGE_NX))
100360+ flags |= VM_KERNEXEC;
100361+#endif
100362+
100363 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
100364 __builtin_return_address(0));
100365 if (!area)
100366@@ -1641,6 +1752,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
100367 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
100368 goto fail;
100369
100370+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100371+ if (!(pgprot_val(prot) & _PAGE_NX))
100372+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
100373+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
100374+ else
100375+#endif
100376+
100377 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
100378 start, end, node, gfp_mask, caller);
100379 if (!area)
100380@@ -1817,10 +1935,9 @@ EXPORT_SYMBOL(vzalloc_node);
100381 * For tight control over page level allocator and protection flags
100382 * use __vmalloc() instead.
100383 */
100384-
100385 void *vmalloc_exec(unsigned long size)
100386 {
100387- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
100388+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
100389 NUMA_NO_NODE, __builtin_return_address(0));
100390 }
100391
100392@@ -2127,6 +2244,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
100393 {
100394 struct vm_struct *area;
100395
100396+ BUG_ON(vma->vm_mirror);
100397+
100398 size = PAGE_ALIGN(size);
100399
100400 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
100401@@ -2609,7 +2728,11 @@ static int s_show(struct seq_file *m, void *p)
100402 v->addr, v->addr + v->size, v->size);
100403
100404 if (v->caller)
100405+#ifdef CONFIG_GRKERNSEC_HIDESYM
100406+ seq_printf(m, " %pK", v->caller);
100407+#else
100408 seq_printf(m, " %pS", v->caller);
100409+#endif
100410
100411 if (v->nr_pages)
100412 seq_printf(m, " pages=%d", v->nr_pages);
100413diff --git a/mm/vmpressure.c b/mm/vmpressure.c
100414index d4042e7..c5afd57 100644
100415--- a/mm/vmpressure.c
100416+++ b/mm/vmpressure.c
100417@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
100418 unsigned long scanned;
100419 unsigned long reclaimed;
100420
100421+ spin_lock(&vmpr->sr_lock);
100422 /*
100423 * Several contexts might be calling vmpressure(), so it is
100424 * possible that the work was rescheduled again before the old
100425@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
100426 * here. No need for any locks here since we don't care if
100427 * vmpr->reclaimed is in sync.
100428 */
100429- if (!vmpr->scanned)
100430+ scanned = vmpr->scanned;
100431+ if (!scanned) {
100432+ spin_unlock(&vmpr->sr_lock);
100433 return;
100434+ }
100435
100436- spin_lock(&vmpr->sr_lock);
100437- scanned = vmpr->scanned;
100438 reclaimed = vmpr->reclaimed;
100439 vmpr->scanned = 0;
100440 vmpr->reclaimed = 0;
100441diff --git a/mm/vmstat.c b/mm/vmstat.c
100442index e9ab104..de275bd 100644
100443--- a/mm/vmstat.c
100444+++ b/mm/vmstat.c
100445@@ -20,6 +20,7 @@
100446 #include <linux/writeback.h>
100447 #include <linux/compaction.h>
100448 #include <linux/mm_inline.h>
100449+#include <linux/grsecurity.h>
100450
100451 #include "internal.h"
100452
100453@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
100454 *
100455 * vm_stat contains the global counters
100456 */
100457-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100458+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100459 EXPORT_SYMBOL(vm_stat);
100460
100461 #ifdef CONFIG_SMP
100462@@ -425,7 +426,7 @@ static inline void fold_diff(int *diff)
100463
100464 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100465 if (diff[i])
100466- atomic_long_add(diff[i], &vm_stat[i]);
100467+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
100468 }
100469
100470 /*
100471@@ -457,7 +458,7 @@ static void refresh_cpu_vm_stats(void)
100472 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
100473 if (v) {
100474
100475- atomic_long_add(v, &zone->vm_stat[i]);
100476+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100477 global_diff[i] += v;
100478 #ifdef CONFIG_NUMA
100479 /* 3 seconds idle till flush */
100480@@ -519,7 +520,7 @@ void cpu_vm_stats_fold(int cpu)
100481
100482 v = p->vm_stat_diff[i];
100483 p->vm_stat_diff[i] = 0;
100484- atomic_long_add(v, &zone->vm_stat[i]);
100485+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100486 global_diff[i] += v;
100487 }
100488 }
100489@@ -539,8 +540,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
100490 if (pset->vm_stat_diff[i]) {
100491 int v = pset->vm_stat_diff[i];
100492 pset->vm_stat_diff[i] = 0;
100493- atomic_long_add(v, &zone->vm_stat[i]);
100494- atomic_long_add(v, &vm_stat[i]);
100495+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100496+ atomic_long_add_unchecked(v, &vm_stat[i]);
100497 }
100498 }
100499 #endif
100500@@ -1163,10 +1164,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
100501 stat_items_size += sizeof(struct vm_event_state);
100502 #endif
100503
100504- v = kmalloc(stat_items_size, GFP_KERNEL);
100505+ v = kzalloc(stat_items_size, GFP_KERNEL);
100506 m->private = v;
100507 if (!v)
100508 return ERR_PTR(-ENOMEM);
100509+
100510+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100511+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
100512+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
100513+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
100514+ && !in_group_p(grsec_proc_gid)
100515+#endif
100516+ )
100517+ return (unsigned long *)m->private + *pos;
100518+#endif
100519+#endif
100520+
100521 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100522 v[i] = global_page_state(i);
100523 v += NR_VM_ZONE_STAT_ITEMS;
100524@@ -1315,10 +1328,16 @@ static int __init setup_vmstat(void)
100525 cpu_notifier_register_done();
100526 #endif
100527 #ifdef CONFIG_PROC_FS
100528- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
100529- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
100530- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100531- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
100532+ {
100533+ mode_t gr_mode = S_IRUGO;
100534+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100535+ gr_mode = S_IRUSR;
100536+#endif
100537+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
100538+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
100539+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100540+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
100541+ }
100542 #endif
100543 return 0;
100544 }
100545diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
100546index 64c6bed..b79a5de 100644
100547--- a/net/8021q/vlan.c
100548+++ b/net/8021q/vlan.c
100549@@ -481,7 +481,7 @@ out:
100550 return NOTIFY_DONE;
100551 }
100552
100553-static struct notifier_block vlan_notifier_block __read_mostly = {
100554+static struct notifier_block vlan_notifier_block = {
100555 .notifier_call = vlan_device_event,
100556 };
100557
100558@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
100559 err = -EPERM;
100560 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
100561 break;
100562- if ((args.u.name_type >= 0) &&
100563- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
100564+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
100565 struct vlan_net *vn;
100566
100567 vn = net_generic(net, vlan_net_id);
100568diff --git a/net/9p/client.c b/net/9p/client.c
100569index e86a9bea..e91f70e 100644
100570--- a/net/9p/client.c
100571+++ b/net/9p/client.c
100572@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
100573 len - inline_len);
100574 } else {
100575 err = copy_from_user(ename + inline_len,
100576- uidata, len - inline_len);
100577+ (char __force_user *)uidata, len - inline_len);
100578 if (err) {
100579 err = -EFAULT;
100580 goto out_err;
100581@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
100582 kernel_buf = 1;
100583 indata = data;
100584 } else
100585- indata = (__force char *)udata;
100586+ indata = (__force_kernel char *)udata;
100587 /*
100588 * response header len is 11
100589 * PDU Header(7) + IO Size (4)
100590@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
100591 kernel_buf = 1;
100592 odata = data;
100593 } else
100594- odata = (char *)udata;
100595+ odata = (char __force_kernel *)udata;
100596 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
100597 P9_ZC_HDR_SZ, kernel_buf, "dqd",
100598 fid->fid, offset, rsize);
100599diff --git a/net/9p/mod.c b/net/9p/mod.c
100600index 6ab36ae..6f1841b 100644
100601--- a/net/9p/mod.c
100602+++ b/net/9p/mod.c
100603@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
100604 void v9fs_register_trans(struct p9_trans_module *m)
100605 {
100606 spin_lock(&v9fs_trans_lock);
100607- list_add_tail(&m->list, &v9fs_trans_list);
100608+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
100609 spin_unlock(&v9fs_trans_lock);
100610 }
100611 EXPORT_SYMBOL(v9fs_register_trans);
100612@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
100613 void v9fs_unregister_trans(struct p9_trans_module *m)
100614 {
100615 spin_lock(&v9fs_trans_lock);
100616- list_del_init(&m->list);
100617+ pax_list_del_init((struct list_head *)&m->list);
100618 spin_unlock(&v9fs_trans_lock);
100619 }
100620 EXPORT_SYMBOL(v9fs_unregister_trans);
100621diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
100622index 80d08f6..de63fd1 100644
100623--- a/net/9p/trans_fd.c
100624+++ b/net/9p/trans_fd.c
100625@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
100626 oldfs = get_fs();
100627 set_fs(get_ds());
100628 /* The cast to a user pointer is valid due to the set_fs() */
100629- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
100630+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
100631 set_fs(oldfs);
100632
100633 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
100634diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
100635index af46bc4..f9adfcd 100644
100636--- a/net/appletalk/atalk_proc.c
100637+++ b/net/appletalk/atalk_proc.c
100638@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
100639 struct proc_dir_entry *p;
100640 int rc = -ENOMEM;
100641
100642- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
100643+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
100644 if (!atalk_proc_dir)
100645 goto out;
100646
100647diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
100648index 876fbe8..8bbea9f 100644
100649--- a/net/atm/atm_misc.c
100650+++ b/net/atm/atm_misc.c
100651@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
100652 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
100653 return 1;
100654 atm_return(vcc, truesize);
100655- atomic_inc(&vcc->stats->rx_drop);
100656+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100657 return 0;
100658 }
100659 EXPORT_SYMBOL(atm_charge);
100660@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
100661 }
100662 }
100663 atm_return(vcc, guess);
100664- atomic_inc(&vcc->stats->rx_drop);
100665+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100666 return NULL;
100667 }
100668 EXPORT_SYMBOL(atm_alloc_charge);
100669@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
100670
100671 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100672 {
100673-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100674+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100675 __SONET_ITEMS
100676 #undef __HANDLE_ITEM
100677 }
100678@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
100679
100680 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100681 {
100682-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100683+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
100684 __SONET_ITEMS
100685 #undef __HANDLE_ITEM
100686 }
100687diff --git a/net/atm/lec.c b/net/atm/lec.c
100688index 4b98f89..5a2f6cb 100644
100689--- a/net/atm/lec.c
100690+++ b/net/atm/lec.c
100691@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
100692 }
100693
100694 static struct lane2_ops lane2_ops = {
100695- lane2_resolve, /* resolve, spec 3.1.3 */
100696- lane2_associate_req, /* associate_req, spec 3.1.4 */
100697- NULL /* associate indicator, spec 3.1.5 */
100698+ .resolve = lane2_resolve,
100699+ .associate_req = lane2_associate_req,
100700+ .associate_indicator = NULL
100701 };
100702
100703 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
100704diff --git a/net/atm/lec.h b/net/atm/lec.h
100705index 4149db1..f2ab682 100644
100706--- a/net/atm/lec.h
100707+++ b/net/atm/lec.h
100708@@ -48,7 +48,7 @@ struct lane2_ops {
100709 const u8 *tlvs, u32 sizeoftlvs);
100710 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
100711 const u8 *tlvs, u32 sizeoftlvs);
100712-};
100713+} __no_const;
100714
100715 /*
100716 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
100717diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
100718index d1b2d9a..d549f7f 100644
100719--- a/net/atm/mpoa_caches.c
100720+++ b/net/atm/mpoa_caches.c
100721@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
100722
100723
100724 static struct in_cache_ops ingress_ops = {
100725- in_cache_add_entry, /* add_entry */
100726- in_cache_get, /* get */
100727- in_cache_get_with_mask, /* get_with_mask */
100728- in_cache_get_by_vcc, /* get_by_vcc */
100729- in_cache_put, /* put */
100730- in_cache_remove_entry, /* remove_entry */
100731- cache_hit, /* cache_hit */
100732- clear_count_and_expired, /* clear_count */
100733- check_resolving_entries, /* check_resolving */
100734- refresh_entries, /* refresh */
100735- in_destroy_cache /* destroy_cache */
100736+ .add_entry = in_cache_add_entry,
100737+ .get = in_cache_get,
100738+ .get_with_mask = in_cache_get_with_mask,
100739+ .get_by_vcc = in_cache_get_by_vcc,
100740+ .put = in_cache_put,
100741+ .remove_entry = in_cache_remove_entry,
100742+ .cache_hit = cache_hit,
100743+ .clear_count = clear_count_and_expired,
100744+ .check_resolving = check_resolving_entries,
100745+ .refresh = refresh_entries,
100746+ .destroy_cache = in_destroy_cache
100747 };
100748
100749 static struct eg_cache_ops egress_ops = {
100750- eg_cache_add_entry, /* add_entry */
100751- eg_cache_get_by_cache_id, /* get_by_cache_id */
100752- eg_cache_get_by_tag, /* get_by_tag */
100753- eg_cache_get_by_vcc, /* get_by_vcc */
100754- eg_cache_get_by_src_ip, /* get_by_src_ip */
100755- eg_cache_put, /* put */
100756- eg_cache_remove_entry, /* remove_entry */
100757- update_eg_cache_entry, /* update */
100758- clear_expired, /* clear_expired */
100759- eg_destroy_cache /* destroy_cache */
100760+ .add_entry = eg_cache_add_entry,
100761+ .get_by_cache_id = eg_cache_get_by_cache_id,
100762+ .get_by_tag = eg_cache_get_by_tag,
100763+ .get_by_vcc = eg_cache_get_by_vcc,
100764+ .get_by_src_ip = eg_cache_get_by_src_ip,
100765+ .put = eg_cache_put,
100766+ .remove_entry = eg_cache_remove_entry,
100767+ .update = update_eg_cache_entry,
100768+ .clear_expired = clear_expired,
100769+ .destroy_cache = eg_destroy_cache
100770 };
100771
100772
100773diff --git a/net/atm/proc.c b/net/atm/proc.c
100774index bbb6461..cf04016 100644
100775--- a/net/atm/proc.c
100776+++ b/net/atm/proc.c
100777@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
100778 const struct k_atm_aal_stats *stats)
100779 {
100780 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
100781- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
100782- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
100783- atomic_read(&stats->rx_drop));
100784+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
100785+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
100786+ atomic_read_unchecked(&stats->rx_drop));
100787 }
100788
100789 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
100790diff --git a/net/atm/resources.c b/net/atm/resources.c
100791index 0447d5d..3cf4728 100644
100792--- a/net/atm/resources.c
100793+++ b/net/atm/resources.c
100794@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
100795 static void copy_aal_stats(struct k_atm_aal_stats *from,
100796 struct atm_aal_stats *to)
100797 {
100798-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100799+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100800 __AAL_STAT_ITEMS
100801 #undef __HANDLE_ITEM
100802 }
100803@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
100804 static void subtract_aal_stats(struct k_atm_aal_stats *from,
100805 struct atm_aal_stats *to)
100806 {
100807-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100808+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
100809 __AAL_STAT_ITEMS
100810 #undef __HANDLE_ITEM
100811 }
100812diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
100813index 919a5ce..cc6b444 100644
100814--- a/net/ax25/sysctl_net_ax25.c
100815+++ b/net/ax25/sysctl_net_ax25.c
100816@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
100817 {
100818 char path[sizeof("net/ax25/") + IFNAMSIZ];
100819 int k;
100820- struct ctl_table *table;
100821+ ctl_table_no_const *table;
100822
100823 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
100824 if (!table)
100825diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
100826index 1e80539..676c37a 100644
100827--- a/net/batman-adv/bat_iv_ogm.c
100828+++ b/net/batman-adv/bat_iv_ogm.c
100829@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
100830
100831 /* randomize initial seqno to avoid collision */
100832 get_random_bytes(&random_seqno, sizeof(random_seqno));
100833- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100834+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100835
100836 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
100837 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
100838@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
100839 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
100840
100841 /* change sequence number to network order */
100842- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
100843+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
100844 batadv_ogm_packet->seqno = htonl(seqno);
100845- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
100846+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
100847
100848 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
100849
100850@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
100851 return;
100852
100853 /* could be changed by schedule_own_packet() */
100854- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
100855+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
100856
100857 if (ogm_packet->flags & BATADV_DIRECTLINK)
100858 has_directlink_flag = true;
100859diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
100860index fc1835c..eead856 100644
100861--- a/net/batman-adv/fragmentation.c
100862+++ b/net/batman-adv/fragmentation.c
100863@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
100864 frag_header.packet_type = BATADV_UNICAST_FRAG;
100865 frag_header.version = BATADV_COMPAT_VERSION;
100866 frag_header.ttl = BATADV_TTL;
100867- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
100868+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
100869 frag_header.reserved = 0;
100870 frag_header.no = 0;
100871 frag_header.total_size = htons(skb->len);
100872diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
100873index 5467955..30cc771 100644
100874--- a/net/batman-adv/soft-interface.c
100875+++ b/net/batman-adv/soft-interface.c
100876@@ -296,7 +296,7 @@ send:
100877 primary_if->net_dev->dev_addr);
100878
100879 /* set broadcast sequence number */
100880- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
100881+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
100882 bcast_packet->seqno = htonl(seqno);
100883
100884 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
100885@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100886 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
100887
100888 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
100889- atomic_set(&bat_priv->bcast_seqno, 1);
100890+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
100891 atomic_set(&bat_priv->tt.vn, 0);
100892 atomic_set(&bat_priv->tt.local_changes, 0);
100893 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
100894@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100895
100896 /* randomize initial seqno to avoid collision */
100897 get_random_bytes(&random_seqno, sizeof(random_seqno));
100898- atomic_set(&bat_priv->frag_seqno, random_seqno);
100899+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
100900
100901 bat_priv->primary_if = NULL;
100902 bat_priv->num_ifaces = 0;
100903diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
100904index 8854c05..ee5d5497 100644
100905--- a/net/batman-adv/types.h
100906+++ b/net/batman-adv/types.h
100907@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
100908 struct batadv_hard_iface_bat_iv {
100909 unsigned char *ogm_buff;
100910 int ogm_buff_len;
100911- atomic_t ogm_seqno;
100912+ atomic_unchecked_t ogm_seqno;
100913 };
100914
100915 /**
100916@@ -768,7 +768,7 @@ struct batadv_priv {
100917 atomic_t bonding;
100918 atomic_t fragmentation;
100919 atomic_t packet_size_max;
100920- atomic_t frag_seqno;
100921+ atomic_unchecked_t frag_seqno;
100922 #ifdef CONFIG_BATMAN_ADV_BLA
100923 atomic_t bridge_loop_avoidance;
100924 #endif
100925@@ -787,7 +787,7 @@ struct batadv_priv {
100926 #endif
100927 uint32_t isolation_mark;
100928 uint32_t isolation_mark_mask;
100929- atomic_t bcast_seqno;
100930+ atomic_unchecked_t bcast_seqno;
100931 atomic_t bcast_queue_left;
100932 atomic_t batman_queue_left;
100933 char num_ifaces;
100934diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
100935index 115f149..f0ba286 100644
100936--- a/net/bluetooth/hci_sock.c
100937+++ b/net/bluetooth/hci_sock.c
100938@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
100939 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
100940 }
100941
100942- len = min_t(unsigned int, len, sizeof(uf));
100943+ len = min((size_t)len, sizeof(uf));
100944 if (copy_from_user(&uf, optval, len)) {
100945 err = -EFAULT;
100946 break;
100947diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
100948index 14ca8ae..262d49a 100644
100949--- a/net/bluetooth/l2cap_core.c
100950+++ b/net/bluetooth/l2cap_core.c
100951@@ -3565,8 +3565,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
100952 break;
100953
100954 case L2CAP_CONF_RFC:
100955- if (olen == sizeof(rfc))
100956- memcpy(&rfc, (void *)val, olen);
100957+ if (olen != sizeof(rfc))
100958+ break;
100959+
100960+ memcpy(&rfc, (void *)val, olen);
100961
100962 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
100963 rfc.mode != chan->mode)
100964diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
100965index 1884f72..b3b71f9 100644
100966--- a/net/bluetooth/l2cap_sock.c
100967+++ b/net/bluetooth/l2cap_sock.c
100968@@ -629,7 +629,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100969 struct sock *sk = sock->sk;
100970 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
100971 struct l2cap_options opts;
100972- int len, err = 0;
100973+ int err = 0;
100974+ size_t len = optlen;
100975 u32 opt;
100976
100977 BT_DBG("sk %p", sk);
100978@@ -656,7 +657,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100979 opts.max_tx = chan->max_tx;
100980 opts.txwin_size = chan->tx_win;
100981
100982- len = min_t(unsigned int, sizeof(opts), optlen);
100983+ len = min(sizeof(opts), len);
100984 if (copy_from_user((char *) &opts, optval, len)) {
100985 err = -EFAULT;
100986 break;
100987@@ -743,7 +744,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100988 struct bt_security sec;
100989 struct bt_power pwr;
100990 struct l2cap_conn *conn;
100991- int len, err = 0;
100992+ int err = 0;
100993+ size_t len = optlen;
100994 u32 opt;
100995
100996 BT_DBG("sk %p", sk);
100997@@ -767,7 +769,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100998
100999 sec.level = BT_SECURITY_LOW;
101000
101001- len = min_t(unsigned int, sizeof(sec), optlen);
101002+ len = min(sizeof(sec), len);
101003 if (copy_from_user((char *) &sec, optval, len)) {
101004 err = -EFAULT;
101005 break;
101006@@ -862,7 +864,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101007
101008 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
101009
101010- len = min_t(unsigned int, sizeof(pwr), optlen);
101011+ len = min(sizeof(pwr), len);
101012 if (copy_from_user((char *) &pwr, optval, len)) {
101013 err = -EFAULT;
101014 break;
101015diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
101016index 8bbbb5e..6fc0950 100644
101017--- a/net/bluetooth/rfcomm/sock.c
101018+++ b/net/bluetooth/rfcomm/sock.c
101019@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101020 struct sock *sk = sock->sk;
101021 struct bt_security sec;
101022 int err = 0;
101023- size_t len;
101024+ size_t len = optlen;
101025 u32 opt;
101026
101027 BT_DBG("sk %p", sk);
101028@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101029
101030 sec.level = BT_SECURITY_LOW;
101031
101032- len = min_t(unsigned int, sizeof(sec), optlen);
101033+ len = min(sizeof(sec), len);
101034 if (copy_from_user((char *) &sec, optval, len)) {
101035 err = -EFAULT;
101036 break;
101037diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
101038index 8e385a0..a5bdd8e 100644
101039--- a/net/bluetooth/rfcomm/tty.c
101040+++ b/net/bluetooth/rfcomm/tty.c
101041@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
101042 BT_DBG("tty %p id %d", tty, tty->index);
101043
101044 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
101045- dev->channel, dev->port.count);
101046+ dev->channel, atomic_read(&dev->port.count));
101047
101048 err = tty_port_open(&dev->port, tty, filp);
101049 if (err)
101050@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
101051 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
101052
101053 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
101054- dev->port.count);
101055+ atomic_read(&dev->port.count));
101056
101057 tty_port_close(&dev->port, tty, filp);
101058 }
101059diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101060index 6d69631..b8fdc85 100644
101061--- a/net/bridge/netfilter/ebtables.c
101062+++ b/net/bridge/netfilter/ebtables.c
101063@@ -1518,7 +1518,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101064 tmp.valid_hooks = t->table->valid_hooks;
101065 }
101066 mutex_unlock(&ebt_mutex);
101067- if (copy_to_user(user, &tmp, *len) != 0) {
101068+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101069 BUGPRINT("c2u Didn't work\n");
101070 ret = -EFAULT;
101071 break;
101072@@ -2324,7 +2324,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101073 goto out;
101074 tmp.valid_hooks = t->valid_hooks;
101075
101076- if (copy_to_user(user, &tmp, *len) != 0) {
101077+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101078 ret = -EFAULT;
101079 break;
101080 }
101081@@ -2335,7 +2335,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101082 tmp.entries_size = t->table->entries_size;
101083 tmp.valid_hooks = t->table->valid_hooks;
101084
101085- if (copy_to_user(user, &tmp, *len) != 0) {
101086+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101087 ret = -EFAULT;
101088 break;
101089 }
101090diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101091index f5afda1..dcf770a 100644
101092--- a/net/caif/cfctrl.c
101093+++ b/net/caif/cfctrl.c
101094@@ -10,6 +10,7 @@
101095 #include <linux/spinlock.h>
101096 #include <linux/slab.h>
101097 #include <linux/pkt_sched.h>
101098+#include <linux/sched.h>
101099 #include <net/caif/caif_layer.h>
101100 #include <net/caif/cfpkt.h>
101101 #include <net/caif/cfctrl.h>
101102@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101103 memset(&dev_info, 0, sizeof(dev_info));
101104 dev_info.id = 0xff;
101105 cfsrvl_init(&this->serv, 0, &dev_info, false);
101106- atomic_set(&this->req_seq_no, 1);
101107- atomic_set(&this->rsp_seq_no, 1);
101108+ atomic_set_unchecked(&this->req_seq_no, 1);
101109+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101110 this->serv.layer.receive = cfctrl_recv;
101111 sprintf(this->serv.layer.name, "ctrl");
101112 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101113@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101114 struct cfctrl_request_info *req)
101115 {
101116 spin_lock_bh(&ctrl->info_list_lock);
101117- atomic_inc(&ctrl->req_seq_no);
101118- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101119+ atomic_inc_unchecked(&ctrl->req_seq_no);
101120+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101121 list_add_tail(&req->list, &ctrl->list);
101122 spin_unlock_bh(&ctrl->info_list_lock);
101123 }
101124@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101125 if (p != first)
101126 pr_warn("Requests are not received in order\n");
101127
101128- atomic_set(&ctrl->rsp_seq_no,
101129+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101130 p->sequence_no);
101131 list_del(&p->list);
101132 goto out;
101133diff --git a/net/can/af_can.c b/net/can/af_can.c
101134index ce82337..5d17b4d 100644
101135--- a/net/can/af_can.c
101136+++ b/net/can/af_can.c
101137@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101138 };
101139
101140 /* notifier block for netdevice event */
101141-static struct notifier_block can_netdev_notifier __read_mostly = {
101142+static struct notifier_block can_netdev_notifier = {
101143 .notifier_call = can_notifier,
101144 };
101145
101146diff --git a/net/can/bcm.c b/net/can/bcm.c
101147index dcb75c0..24b1b43 100644
101148--- a/net/can/bcm.c
101149+++ b/net/can/bcm.c
101150@@ -1624,7 +1624,7 @@ static int __init bcm_module_init(void)
101151 }
101152
101153 /* create /proc/net/can-bcm directory */
101154- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101155+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101156 return 0;
101157 }
101158
101159diff --git a/net/can/gw.c b/net/can/gw.c
101160index 050a211..bb9fe33 100644
101161--- a/net/can/gw.c
101162+++ b/net/can/gw.c
101163@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101164 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101165
101166 static HLIST_HEAD(cgw_list);
101167-static struct notifier_block notifier;
101168
101169 static struct kmem_cache *cgw_cache __read_mostly;
101170
101171@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101172 return err;
101173 }
101174
101175+static struct notifier_block notifier = {
101176+ .notifier_call = cgw_notifier
101177+};
101178+
101179 static __init int cgw_module_init(void)
101180 {
101181 /* sanitize given module parameter */
101182@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
101183 return -ENOMEM;
101184
101185 /* set notifier */
101186- notifier.notifier_call = cgw_notifier;
101187 register_netdevice_notifier(&notifier);
101188
101189 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
101190diff --git a/net/can/proc.c b/net/can/proc.c
101191index 1a19b98..df2b4ec 100644
101192--- a/net/can/proc.c
101193+++ b/net/can/proc.c
101194@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101195 void can_init_proc(void)
101196 {
101197 /* create /proc/net/can directory */
101198- can_dir = proc_mkdir("can", init_net.proc_net);
101199+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101200
101201 if (!can_dir) {
101202 printk(KERN_INFO "can: failed to create /proc/net/can . "
101203diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
101204index 9f02369..e6160e9 100644
101205--- a/net/ceph/messenger.c
101206+++ b/net/ceph/messenger.c
101207@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
101208 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
101209
101210 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
101211-static atomic_t addr_str_seq = ATOMIC_INIT(0);
101212+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
101213
101214 static struct page *zero_page; /* used in certain error cases */
101215
101216@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
101217 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
101218 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
101219
101220- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101221+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101222 s = addr_str[i];
101223
101224 switch (ss->ss_family) {
101225diff --git a/net/compat.c b/net/compat.c
101226index bc8aeef..f9c070c 100644
101227--- a/net/compat.c
101228+++ b/net/compat.c
101229@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
101230 return -EFAULT;
101231 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
101232 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
101233- kmsg->msg_name = compat_ptr(tmp1);
101234- kmsg->msg_iov = compat_ptr(tmp2);
101235- kmsg->msg_control = compat_ptr(tmp3);
101236+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
101237+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
101238+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
101239 return 0;
101240 }
101241
101242@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
101243
101244 if (kern_msg->msg_name && kern_msg->msg_namelen) {
101245 if (mode == VERIFY_READ) {
101246- int err = move_addr_to_kernel(kern_msg->msg_name,
101247+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
101248 kern_msg->msg_namelen,
101249 kern_address);
101250 if (err < 0)
101251@@ -100,7 +100,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
101252 }
101253
101254 tot_len = iov_from_user_compat_to_kern(kern_iov,
101255- (struct compat_iovec __user *)kern_msg->msg_iov,
101256+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
101257 kern_msg->msg_iovlen);
101258 if (tot_len >= 0)
101259 kern_msg->msg_iov = kern_iov;
101260@@ -120,20 +120,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
101261
101262 #define CMSG_COMPAT_FIRSTHDR(msg) \
101263 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
101264- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
101265+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
101266 (struct compat_cmsghdr __user *)NULL)
101267
101268 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
101269 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
101270 (ucmlen) <= (unsigned long) \
101271 ((mhdr)->msg_controllen - \
101272- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
101273+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
101274
101275 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
101276 struct compat_cmsghdr __user *cmsg, int cmsg_len)
101277 {
101278 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
101279- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
101280+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
101281 msg->msg_controllen)
101282 return NULL;
101283 return (struct compat_cmsghdr __user *)ptr;
101284@@ -223,7 +223,7 @@ Efault:
101285
101286 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
101287 {
101288- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101289+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101290 struct compat_cmsghdr cmhdr;
101291 struct compat_timeval ctv;
101292 struct compat_timespec cts[3];
101293@@ -279,7 +279,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
101294
101295 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
101296 {
101297- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101298+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101299 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
101300 int fdnum = scm->fp->count;
101301 struct file **fp = scm->fp->fp;
101302@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
101303 return -EFAULT;
101304 old_fs = get_fs();
101305 set_fs(KERNEL_DS);
101306- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
101307+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
101308 set_fs(old_fs);
101309
101310 return err;
101311@@ -428,7 +428,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
101312 len = sizeof(ktime);
101313 old_fs = get_fs();
101314 set_fs(KERNEL_DS);
101315- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
101316+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
101317 set_fs(old_fs);
101318
101319 if (!err) {
101320@@ -571,7 +571,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101321 case MCAST_JOIN_GROUP:
101322 case MCAST_LEAVE_GROUP:
101323 {
101324- struct compat_group_req __user *gr32 = (void *)optval;
101325+ struct compat_group_req __user *gr32 = (void __user *)optval;
101326 struct group_req __user *kgr =
101327 compat_alloc_user_space(sizeof(struct group_req));
101328 u32 interface;
101329@@ -592,7 +592,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101330 case MCAST_BLOCK_SOURCE:
101331 case MCAST_UNBLOCK_SOURCE:
101332 {
101333- struct compat_group_source_req __user *gsr32 = (void *)optval;
101334+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
101335 struct group_source_req __user *kgsr = compat_alloc_user_space(
101336 sizeof(struct group_source_req));
101337 u32 interface;
101338@@ -613,7 +613,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101339 }
101340 case MCAST_MSFILTER:
101341 {
101342- struct compat_group_filter __user *gf32 = (void *)optval;
101343+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101344 struct group_filter __user *kgf;
101345 u32 interface, fmode, numsrc;
101346
101347@@ -651,7 +651,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
101348 char __user *optval, int __user *optlen,
101349 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
101350 {
101351- struct compat_group_filter __user *gf32 = (void *)optval;
101352+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101353 struct group_filter __user *kgf;
101354 int __user *koptlen;
101355 u32 interface, fmode, numsrc;
101356@@ -804,7 +804,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
101357
101358 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
101359 return -EINVAL;
101360- if (copy_from_user(a, args, nas[call]))
101361+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
101362 return -EFAULT;
101363 a0 = a[0];
101364 a1 = a[1];
101365diff --git a/net/core/datagram.c b/net/core/datagram.c
101366index fdbc9a8..cd6972c 100644
101367--- a/net/core/datagram.c
101368+++ b/net/core/datagram.c
101369@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
101370 }
101371
101372 kfree_skb(skb);
101373- atomic_inc(&sk->sk_drops);
101374+ atomic_inc_unchecked(&sk->sk_drops);
101375 sk_mem_reclaim_partial(sk);
101376
101377 return err;
101378diff --git a/net/core/dev.c b/net/core/dev.c
101379index cf8a95f..2837211 100644
101380--- a/net/core/dev.c
101381+++ b/net/core/dev.c
101382@@ -1683,14 +1683,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
101383 {
101384 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
101385 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
101386- atomic_long_inc(&dev->rx_dropped);
101387+ atomic_long_inc_unchecked(&dev->rx_dropped);
101388 kfree_skb(skb);
101389 return NET_RX_DROP;
101390 }
101391 }
101392
101393 if (unlikely(!is_skb_forwardable(dev, skb))) {
101394- atomic_long_inc(&dev->rx_dropped);
101395+ atomic_long_inc_unchecked(&dev->rx_dropped);
101396 kfree_skb(skb);
101397 return NET_RX_DROP;
101398 }
101399@@ -2487,7 +2487,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
101400
101401 struct dev_gso_cb {
101402 void (*destructor)(struct sk_buff *skb);
101403-};
101404+} __no_const;
101405
101406 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
101407
101408@@ -2952,7 +2952,7 @@ recursion_alert:
101409 rc = -ENETDOWN;
101410 rcu_read_unlock_bh();
101411
101412- atomic_long_inc(&dev->tx_dropped);
101413+ atomic_long_inc_unchecked(&dev->tx_dropped);
101414 kfree_skb(skb);
101415 return rc;
101416 out:
101417@@ -3296,7 +3296,7 @@ enqueue:
101418
101419 local_irq_restore(flags);
101420
101421- atomic_long_inc(&skb->dev->rx_dropped);
101422+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101423 kfree_skb(skb);
101424 return NET_RX_DROP;
101425 }
101426@@ -3373,7 +3373,7 @@ int netif_rx_ni(struct sk_buff *skb)
101427 }
101428 EXPORT_SYMBOL(netif_rx_ni);
101429
101430-static void net_tx_action(struct softirq_action *h)
101431+static __latent_entropy void net_tx_action(void)
101432 {
101433 struct softnet_data *sd = &__get_cpu_var(softnet_data);
101434
101435@@ -3706,7 +3706,7 @@ ncls:
101436 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
101437 } else {
101438 drop:
101439- atomic_long_inc(&skb->dev->rx_dropped);
101440+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101441 kfree_skb(skb);
101442 /* Jamal, now you will not able to escape explaining
101443 * me how you were going to use this. :-)
101444@@ -4426,7 +4426,7 @@ void netif_napi_del(struct napi_struct *napi)
101445 }
101446 EXPORT_SYMBOL(netif_napi_del);
101447
101448-static void net_rx_action(struct softirq_action *h)
101449+static __latent_entropy void net_rx_action(void)
101450 {
101451 struct softnet_data *sd = &__get_cpu_var(softnet_data);
101452 unsigned long time_limit = jiffies + 2;
101453@@ -6480,8 +6480,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
101454 } else {
101455 netdev_stats_to_stats64(storage, &dev->stats);
101456 }
101457- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
101458- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
101459+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
101460+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
101461 return storage;
101462 }
101463 EXPORT_SYMBOL(dev_get_stats);
101464diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
101465index cf999e0..c59a9754 100644
101466--- a/net/core/dev_ioctl.c
101467+++ b/net/core/dev_ioctl.c
101468@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name)
101469 if (no_module && capable(CAP_NET_ADMIN))
101470 no_module = request_module("netdev-%s", name);
101471 if (no_module && capable(CAP_SYS_MODULE)) {
101472+#ifdef CONFIG_GRKERNSEC_MODHARDEN
101473+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
101474+#else
101475 if (!request_module("%s", name))
101476 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
101477 name);
101478+#endif
101479 }
101480 }
101481 EXPORT_SYMBOL(dev_load);
101482diff --git a/net/core/filter.c b/net/core/filter.c
101483index d814b8a..b5ab778 100644
101484--- a/net/core/filter.c
101485+++ b/net/core/filter.c
101486@@ -559,7 +559,11 @@ do_pass:
101487
101488 /* Unkown instruction. */
101489 default:
101490- goto err;
101491+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
101492+ fp->code, fp->jt, fp->jf, fp->k);
101493+ kfree(addrs);
101494+ BUG();
101495+ return -EINVAL;
101496 }
101497
101498 insn++;
101499@@ -606,7 +610,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
101500 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
101501 int pc, ret = 0;
101502
101503- BUILD_BUG_ON(BPF_MEMWORDS > 16);
101504+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
101505
101506 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
101507 if (!masks)
101508@@ -933,7 +937,7 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
101509
101510 /* Expand fp for appending the new filter representation. */
101511 old_fp = fp;
101512- fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL);
101513+ fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
101514 if (!fp) {
101515 /* The old_fp is still around in case we couldn't
101516 * allocate new memory, so uncharge on that one.
101517@@ -1013,11 +1017,11 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
101518 if (fprog->filter == NULL)
101519 return -EINVAL;
101520
101521- fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL);
101522+ fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
101523 if (!fp)
101524 return -ENOMEM;
101525
101526- memcpy(fp->insns, fprog->filter, fsize);
101527+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
101528
101529 fp->len = fprog->len;
101530 /* Since unattached filters are not copied back to user
101531@@ -1069,12 +1073,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
101532 if (fprog->filter == NULL)
101533 return -EINVAL;
101534
101535- prog = kmalloc(bpf_fsize, GFP_KERNEL);
101536+ prog = bpf_prog_alloc(bpf_fsize, 0);
101537 if (!prog)
101538 return -ENOMEM;
101539
101540 if (copy_from_user(prog->insns, fprog->filter, fsize)) {
101541- kfree(prog);
101542+ __bpf_prog_free(prog);
101543 return -EFAULT;
101544 }
101545
101546@@ -1082,7 +1086,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
101547
101548 err = bpf_prog_store_orig_filter(prog, fprog);
101549 if (err) {
101550- kfree(prog);
101551+ __bpf_prog_free(prog);
101552 return -ENOMEM;
101553 }
101554
101555diff --git a/net/core/flow.c b/net/core/flow.c
101556index a0348fd..6951c76 100644
101557--- a/net/core/flow.c
101558+++ b/net/core/flow.c
101559@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
101560 static int flow_entry_valid(struct flow_cache_entry *fle,
101561 struct netns_xfrm *xfrm)
101562 {
101563- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
101564+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
101565 return 0;
101566 if (fle->object && !fle->object->ops->check(fle->object))
101567 return 0;
101568@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
101569 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
101570 fcp->hash_count++;
101571 }
101572- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
101573+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
101574 flo = fle->object;
101575 if (!flo)
101576 goto ret_object;
101577@@ -263,7 +263,7 @@ nocache:
101578 }
101579 flo = resolver(net, key, family, dir, flo, ctx);
101580 if (fle) {
101581- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
101582+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
101583 if (!IS_ERR(flo))
101584 fle->object = flo;
101585 else
101586diff --git a/net/core/iovec.c b/net/core/iovec.c
101587index e1ec45a..e5c6f16 100644
101588--- a/net/core/iovec.c
101589+++ b/net/core/iovec.c
101590@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
101591 if (m->msg_name && m->msg_namelen) {
101592 if (mode == VERIFY_READ) {
101593 void __user *namep;
101594- namep = (void __user __force *) m->msg_name;
101595+ namep = (void __force_user *) m->msg_name;
101596 err = move_addr_to_kernel(namep, m->msg_namelen,
101597 address);
101598 if (err < 0)
101599@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
101600 }
101601
101602 size = m->msg_iovlen * sizeof(struct iovec);
101603- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
101604+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
101605 return -EFAULT;
101606
101607 m->msg_iov = iov;
101608diff --git a/net/core/neighbour.c b/net/core/neighbour.c
101609index ef31fef..8be66d9 100644
101610--- a/net/core/neighbour.c
101611+++ b/net/core/neighbour.c
101612@@ -2825,7 +2825,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
101613 void __user *buffer, size_t *lenp, loff_t *ppos)
101614 {
101615 int size, ret;
101616- struct ctl_table tmp = *ctl;
101617+ ctl_table_no_const tmp = *ctl;
101618
101619 tmp.extra1 = &zero;
101620 tmp.extra2 = &unres_qlen_max;
101621@@ -2887,7 +2887,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
101622 void __user *buffer,
101623 size_t *lenp, loff_t *ppos)
101624 {
101625- struct ctl_table tmp = *ctl;
101626+ ctl_table_no_const tmp = *ctl;
101627 int ret;
101628
101629 tmp.extra1 = &zero;
101630diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
101631index 2bf8329..2eb1423 100644
101632--- a/net/core/net-procfs.c
101633+++ b/net/core/net-procfs.c
101634@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
101635 struct rtnl_link_stats64 temp;
101636 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
101637
101638- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101639+ if (gr_proc_is_restricted())
101640+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101641+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101642+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
101643+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
101644+ else
101645+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101646 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101647 dev->name, stats->rx_bytes, stats->rx_packets,
101648 stats->rx_errors,
101649@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
101650 return 0;
101651 }
101652
101653-static const struct seq_operations dev_seq_ops = {
101654+const struct seq_operations dev_seq_ops = {
101655 .start = dev_seq_start,
101656 .next = dev_seq_next,
101657 .stop = dev_seq_stop,
101658@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
101659
101660 static int softnet_seq_open(struct inode *inode, struct file *file)
101661 {
101662- return seq_open(file, &softnet_seq_ops);
101663+ return seq_open_restrict(file, &softnet_seq_ops);
101664 }
101665
101666 static const struct file_operations softnet_seq_fops = {
101667@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
101668 else
101669 seq_printf(seq, "%04x", ntohs(pt->type));
101670
101671+#ifdef CONFIG_GRKERNSEC_HIDESYM
101672+ seq_printf(seq, " %-8s %pf\n",
101673+ pt->dev ? pt->dev->name : "", NULL);
101674+#else
101675 seq_printf(seq, " %-8s %pf\n",
101676 pt->dev ? pt->dev->name : "", pt->func);
101677+#endif
101678 }
101679
101680 return 0;
101681diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
101682index 9dd0669..c52fb1b 100644
101683--- a/net/core/net-sysfs.c
101684+++ b/net/core/net-sysfs.c
101685@@ -278,7 +278,7 @@ static ssize_t carrier_changes_show(struct device *dev,
101686 {
101687 struct net_device *netdev = to_net_dev(dev);
101688 return sprintf(buf, fmt_dec,
101689- atomic_read(&netdev->carrier_changes));
101690+ atomic_read_unchecked(&netdev->carrier_changes));
101691 }
101692 static DEVICE_ATTR_RO(carrier_changes);
101693
101694diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
101695index 7c6b51a..e9dd57f 100644
101696--- a/net/core/net_namespace.c
101697+++ b/net/core/net_namespace.c
101698@@ -445,7 +445,7 @@ static int __register_pernet_operations(struct list_head *list,
101699 int error;
101700 LIST_HEAD(net_exit_list);
101701
101702- list_add_tail(&ops->list, list);
101703+ pax_list_add_tail((struct list_head *)&ops->list, list);
101704 if (ops->init || (ops->id && ops->size)) {
101705 for_each_net(net) {
101706 error = ops_init(ops, net);
101707@@ -458,7 +458,7 @@ static int __register_pernet_operations(struct list_head *list,
101708
101709 out_undo:
101710 /* If I have an error cleanup all namespaces I initialized */
101711- list_del(&ops->list);
101712+ pax_list_del((struct list_head *)&ops->list);
101713 ops_exit_list(ops, &net_exit_list);
101714 ops_free_list(ops, &net_exit_list);
101715 return error;
101716@@ -469,7 +469,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
101717 struct net *net;
101718 LIST_HEAD(net_exit_list);
101719
101720- list_del(&ops->list);
101721+ pax_list_del((struct list_head *)&ops->list);
101722 for_each_net(net)
101723 list_add_tail(&net->exit_list, &net_exit_list);
101724 ops_exit_list(ops, &net_exit_list);
101725@@ -603,7 +603,7 @@ int register_pernet_device(struct pernet_operations *ops)
101726 mutex_lock(&net_mutex);
101727 error = register_pernet_operations(&pernet_list, ops);
101728 if (!error && (first_device == &pernet_list))
101729- first_device = &ops->list;
101730+ first_device = (struct list_head *)&ops->list;
101731 mutex_unlock(&net_mutex);
101732 return error;
101733 }
101734diff --git a/net/core/netpoll.c b/net/core/netpoll.c
101735index 907fb5e..8260f040b 100644
101736--- a/net/core/netpoll.c
101737+++ b/net/core/netpoll.c
101738@@ -382,7 +382,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101739 struct udphdr *udph;
101740 struct iphdr *iph;
101741 struct ethhdr *eth;
101742- static atomic_t ip_ident;
101743+ static atomic_unchecked_t ip_ident;
101744 struct ipv6hdr *ip6h;
101745
101746 udp_len = len + sizeof(*udph);
101747@@ -453,7 +453,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101748 put_unaligned(0x45, (unsigned char *)iph);
101749 iph->tos = 0;
101750 put_unaligned(htons(ip_len), &(iph->tot_len));
101751- iph->id = htons(atomic_inc_return(&ip_ident));
101752+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
101753 iph->frag_off = 0;
101754 iph->ttl = 64;
101755 iph->protocol = IPPROTO_UDP;
101756diff --git a/net/core/pktgen.c b/net/core/pktgen.c
101757index 8b849dd..cd88bfc 100644
101758--- a/net/core/pktgen.c
101759+++ b/net/core/pktgen.c
101760@@ -3723,7 +3723,7 @@ static int __net_init pg_net_init(struct net *net)
101761 pn->net = net;
101762 INIT_LIST_HEAD(&pn->pktgen_threads);
101763 pn->pktgen_exiting = false;
101764- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
101765+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
101766 if (!pn->proc_dir) {
101767 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
101768 return -ENODEV;
101769diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
101770index f0493e3..c3ffd7f 100644
101771--- a/net/core/rtnetlink.c
101772+++ b/net/core/rtnetlink.c
101773@@ -58,7 +58,7 @@ struct rtnl_link {
101774 rtnl_doit_func doit;
101775 rtnl_dumpit_func dumpit;
101776 rtnl_calcit_func calcit;
101777-};
101778+} __no_const;
101779
101780 static DEFINE_MUTEX(rtnl_mutex);
101781
101782@@ -304,10 +304,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
101783 * to use the ops for creating device. So do not
101784 * fill up dellink as well. That disables rtnl_dellink.
101785 */
101786- if (ops->setup && !ops->dellink)
101787- ops->dellink = unregister_netdevice_queue;
101788+ if (ops->setup && !ops->dellink) {
101789+ pax_open_kernel();
101790+ *(void **)&ops->dellink = unregister_netdevice_queue;
101791+ pax_close_kernel();
101792+ }
101793
101794- list_add_tail(&ops->list, &link_ops);
101795+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
101796 return 0;
101797 }
101798 EXPORT_SYMBOL_GPL(__rtnl_link_register);
101799@@ -354,7 +357,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
101800 for_each_net(net) {
101801 __rtnl_kill_links(net, ops);
101802 }
101803- list_del(&ops->list);
101804+ pax_list_del((struct list_head *)&ops->list);
101805 }
101806 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
101807
101808@@ -1014,7 +1017,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
101809 (dev->ifalias &&
101810 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
101811 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
101812- atomic_read(&dev->carrier_changes)))
101813+ atomic_read_unchecked(&dev->carrier_changes)))
101814 goto nla_put_failure;
101815
101816 if (1) {
101817@@ -2780,6 +2783,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
101818 if (br_spec) {
101819 nla_for_each_nested(attr, br_spec, rem) {
101820 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
101821+ if (nla_len(attr) < sizeof(flags))
101822+ return -EINVAL;
101823+
101824 have_flags = true;
101825 flags = nla_get_u16(attr);
101826 break;
101827@@ -2850,6 +2856,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
101828 if (br_spec) {
101829 nla_for_each_nested(attr, br_spec, rem) {
101830 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
101831+ if (nla_len(attr) < sizeof(flags))
101832+ return -EINVAL;
101833+
101834 have_flags = true;
101835 flags = nla_get_u16(attr);
101836 break;
101837diff --git a/net/core/scm.c b/net/core/scm.c
101838index b442e7e..6f5b5a2 100644
101839--- a/net/core/scm.c
101840+++ b/net/core/scm.c
101841@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
101842 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101843 {
101844 struct cmsghdr __user *cm
101845- = (__force struct cmsghdr __user *)msg->msg_control;
101846+ = (struct cmsghdr __force_user *)msg->msg_control;
101847 struct cmsghdr cmhdr;
101848 int cmlen = CMSG_LEN(len);
101849 int err;
101850@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101851 err = -EFAULT;
101852 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
101853 goto out;
101854- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
101855+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
101856 goto out;
101857 cmlen = CMSG_SPACE(len);
101858 if (msg->msg_controllen < cmlen)
101859@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
101860 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101861 {
101862 struct cmsghdr __user *cm
101863- = (__force struct cmsghdr __user*)msg->msg_control;
101864+ = (struct cmsghdr __force_user *)msg->msg_control;
101865
101866 int fdmax = 0;
101867 int fdnum = scm->fp->count;
101868@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101869 if (fdnum < fdmax)
101870 fdmax = fdnum;
101871
101872- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
101873+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
101874 i++, cmfptr++)
101875 {
101876 struct socket *sock;
101877diff --git a/net/core/skbuff.c b/net/core/skbuff.c
101878index 8d28969..4d36260 100644
101879--- a/net/core/skbuff.c
101880+++ b/net/core/skbuff.c
101881@@ -360,18 +360,29 @@ refill:
101882 goto end;
101883 }
101884 nc->frag.size = PAGE_SIZE << order;
101885-recycle:
101886- atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
101887+ /* Even if we own the page, we do not use atomic_set().
101888+ * This would break get_page_unless_zero() users.
101889+ */
101890+ atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
101891+ &nc->frag.page->_count);
101892 nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
101893 nc->frag.offset = 0;
101894 }
101895
101896 if (nc->frag.offset + fragsz > nc->frag.size) {
101897- /* avoid unnecessary locked operations if possible */
101898- if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
101899- atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
101900- goto recycle;
101901- goto refill;
101902+ if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
101903+ if (!atomic_sub_and_test(nc->pagecnt_bias,
101904+ &nc->frag.page->_count))
101905+ goto refill;
101906+ /* OK, page count is 0, we can safely set it */
101907+ atomic_set(&nc->frag.page->_count,
101908+ NETDEV_PAGECNT_MAX_BIAS);
101909+ } else {
101910+ atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
101911+ &nc->frag.page->_count);
101912+ }
101913+ nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
101914+ nc->frag.offset = 0;
101915 }
101916
101917 data = page_address(nc->frag.page) + nc->frag.offset;
101918@@ -2011,7 +2022,7 @@ EXPORT_SYMBOL(__skb_checksum);
101919 __wsum skb_checksum(const struct sk_buff *skb, int offset,
101920 int len, __wsum csum)
101921 {
101922- const struct skb_checksum_ops ops = {
101923+ static const struct skb_checksum_ops ops = {
101924 .update = csum_partial_ext,
101925 .combine = csum_block_add_ext,
101926 };
101927@@ -3237,13 +3248,15 @@ void __init skb_init(void)
101928 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
101929 sizeof(struct sk_buff),
101930 0,
101931- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101932+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101933+ SLAB_NO_SANITIZE,
101934 NULL);
101935 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
101936 (2*sizeof(struct sk_buff)) +
101937 sizeof(atomic_t),
101938 0,
101939- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101940+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101941+ SLAB_NO_SANITIZE,
101942 NULL);
101943 }
101944
101945diff --git a/net/core/sock.c b/net/core/sock.c
101946index 9c3f823..bd8c884 100644
101947--- a/net/core/sock.c
101948+++ b/net/core/sock.c
101949@@ -442,7 +442,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101950 struct sk_buff_head *list = &sk->sk_receive_queue;
101951
101952 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
101953- atomic_inc(&sk->sk_drops);
101954+ atomic_inc_unchecked(&sk->sk_drops);
101955 trace_sock_rcvqueue_full(sk, skb);
101956 return -ENOMEM;
101957 }
101958@@ -452,7 +452,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101959 return err;
101960
101961 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
101962- atomic_inc(&sk->sk_drops);
101963+ atomic_inc_unchecked(&sk->sk_drops);
101964 return -ENOBUFS;
101965 }
101966
101967@@ -472,7 +472,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101968 skb_dst_force(skb);
101969
101970 spin_lock_irqsave(&list->lock, flags);
101971- skb->dropcount = atomic_read(&sk->sk_drops);
101972+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
101973 __skb_queue_tail(list, skb);
101974 spin_unlock_irqrestore(&list->lock, flags);
101975
101976@@ -492,7 +492,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101977 skb->dev = NULL;
101978
101979 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
101980- atomic_inc(&sk->sk_drops);
101981+ atomic_inc_unchecked(&sk->sk_drops);
101982 goto discard_and_relse;
101983 }
101984 if (nested)
101985@@ -510,7 +510,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101986 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
101987 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
101988 bh_unlock_sock(sk);
101989- atomic_inc(&sk->sk_drops);
101990+ atomic_inc_unchecked(&sk->sk_drops);
101991 goto discard_and_relse;
101992 }
101993
101994@@ -999,12 +999,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
101995 struct timeval tm;
101996 } v;
101997
101998- int lv = sizeof(int);
101999- int len;
102000+ unsigned int lv = sizeof(int);
102001+ unsigned int len;
102002
102003 if (get_user(len, optlen))
102004 return -EFAULT;
102005- if (len < 0)
102006+ if (len > INT_MAX)
102007 return -EINVAL;
102008
102009 memset(&v, 0, sizeof(v));
102010@@ -1142,11 +1142,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102011
102012 case SO_PEERNAME:
102013 {
102014- char address[128];
102015+ char address[_K_SS_MAXSIZE];
102016
102017 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
102018 return -ENOTCONN;
102019- if (lv < len)
102020+ if (lv < len || sizeof address < len)
102021 return -EINVAL;
102022 if (copy_to_user(optval, address, len))
102023 return -EFAULT;
102024@@ -1227,7 +1227,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102025
102026 if (len > lv)
102027 len = lv;
102028- if (copy_to_user(optval, &v, len))
102029+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
102030 return -EFAULT;
102031 lenout:
102032 if (put_user(len, optlen))
102033@@ -1723,6 +1723,8 @@ EXPORT_SYMBOL(sock_kmalloc);
102034 */
102035 void sock_kfree_s(struct sock *sk, void *mem, int size)
102036 {
102037+ if (WARN_ON_ONCE(!mem))
102038+ return;
102039 kfree(mem);
102040 atomic_sub(size, &sk->sk_omem_alloc);
102041 }
102042@@ -2369,7 +2371,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
102043 */
102044 smp_wmb();
102045 atomic_set(&sk->sk_refcnt, 1);
102046- atomic_set(&sk->sk_drops, 0);
102047+ atomic_set_unchecked(&sk->sk_drops, 0);
102048 }
102049 EXPORT_SYMBOL(sock_init_data);
102050
102051@@ -2497,6 +2499,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
102052 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102053 int level, int type)
102054 {
102055+ struct sock_extended_err ee;
102056 struct sock_exterr_skb *serr;
102057 struct sk_buff *skb, *skb2;
102058 int copied, err;
102059@@ -2518,7 +2521,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102060 sock_recv_timestamp(msg, sk, skb);
102061
102062 serr = SKB_EXT_ERR(skb);
102063- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
102064+ ee = serr->ee;
102065+ put_cmsg(msg, level, type, sizeof ee, &ee);
102066
102067 msg->msg_flags |= MSG_ERRQUEUE;
102068 err = copied;
102069diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
102070index ad704c7..ca48aff 100644
102071--- a/net/core/sock_diag.c
102072+++ b/net/core/sock_diag.c
102073@@ -9,26 +9,33 @@
102074 #include <linux/inet_diag.h>
102075 #include <linux/sock_diag.h>
102076
102077-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
102078+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
102079 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
102080 static DEFINE_MUTEX(sock_diag_table_mutex);
102081
102082 int sock_diag_check_cookie(void *sk, __u32 *cookie)
102083 {
102084+#ifndef CONFIG_GRKERNSEC_HIDESYM
102085 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
102086 cookie[1] != INET_DIAG_NOCOOKIE) &&
102087 ((u32)(unsigned long)sk != cookie[0] ||
102088 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
102089 return -ESTALE;
102090 else
102091+#endif
102092 return 0;
102093 }
102094 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
102095
102096 void sock_diag_save_cookie(void *sk, __u32 *cookie)
102097 {
102098+#ifdef CONFIG_GRKERNSEC_HIDESYM
102099+ cookie[0] = 0;
102100+ cookie[1] = 0;
102101+#else
102102 cookie[0] = (u32)(unsigned long)sk;
102103 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
102104+#endif
102105 }
102106 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
102107
102108@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
102109 mutex_lock(&sock_diag_table_mutex);
102110 if (sock_diag_handlers[hndl->family])
102111 err = -EBUSY;
102112- else
102113+ else {
102114+ pax_open_kernel();
102115 sock_diag_handlers[hndl->family] = hndl;
102116+ pax_close_kernel();
102117+ }
102118 mutex_unlock(&sock_diag_table_mutex);
102119
102120 return err;
102121@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
102122
102123 mutex_lock(&sock_diag_table_mutex);
102124 BUG_ON(sock_diag_handlers[family] != hnld);
102125+ pax_open_kernel();
102126 sock_diag_handlers[family] = NULL;
102127+ pax_close_kernel();
102128 mutex_unlock(&sock_diag_table_mutex);
102129 }
102130 EXPORT_SYMBOL_GPL(sock_diag_unregister);
102131diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
102132index cf9cd13..50683950 100644
102133--- a/net/core/sysctl_net_core.c
102134+++ b/net/core/sysctl_net_core.c
102135@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
102136 {
102137 unsigned int orig_size, size;
102138 int ret, i;
102139- struct ctl_table tmp = {
102140+ ctl_table_no_const tmp = {
102141 .data = &size,
102142 .maxlen = sizeof(size),
102143 .mode = table->mode
102144@@ -200,7 +200,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102145 void __user *buffer, size_t *lenp, loff_t *ppos)
102146 {
102147 char id[IFNAMSIZ];
102148- struct ctl_table tbl = {
102149+ ctl_table_no_const tbl = {
102150 .data = id,
102151 .maxlen = IFNAMSIZ,
102152 };
102153@@ -263,7 +263,7 @@ static struct ctl_table net_core_table[] = {
102154 .mode = 0644,
102155 .proc_handler = proc_dointvec
102156 },
102157-#ifdef CONFIG_BPF_JIT
102158+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102159 {
102160 .procname = "bpf_jit_enable",
102161 .data = &bpf_jit_enable,
102162@@ -379,13 +379,12 @@ static struct ctl_table netns_core_table[] = {
102163
102164 static __net_init int sysctl_core_net_init(struct net *net)
102165 {
102166- struct ctl_table *tbl;
102167+ ctl_table_no_const *tbl = NULL;
102168
102169 net->core.sysctl_somaxconn = SOMAXCONN;
102170
102171- tbl = netns_core_table;
102172 if (!net_eq(net, &init_net)) {
102173- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102174+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102175 if (tbl == NULL)
102176 goto err_dup;
102177
102178@@ -395,17 +394,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102179 if (net->user_ns != &init_user_ns) {
102180 tbl[0].procname = NULL;
102181 }
102182- }
102183-
102184- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102185+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102186+ } else
102187+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102188 if (net->core.sysctl_hdr == NULL)
102189 goto err_reg;
102190
102191 return 0;
102192
102193 err_reg:
102194- if (tbl != netns_core_table)
102195- kfree(tbl);
102196+ kfree(tbl);
102197 err_dup:
102198 return -ENOMEM;
102199 }
102200@@ -420,7 +418,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102201 kfree(tbl);
102202 }
102203
102204-static __net_initdata struct pernet_operations sysctl_core_ops = {
102205+static __net_initconst struct pernet_operations sysctl_core_ops = {
102206 .init = sysctl_core_net_init,
102207 .exit = sysctl_core_net_exit,
102208 };
102209diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102210index ae011b4..d2d18bf 100644
102211--- a/net/decnet/af_decnet.c
102212+++ b/net/decnet/af_decnet.c
102213@@ -465,6 +465,7 @@ static struct proto dn_proto = {
102214 .sysctl_rmem = sysctl_decnet_rmem,
102215 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102216 .obj_size = sizeof(struct dn_sock),
102217+ .slab_flags = SLAB_USERCOPY,
102218 };
102219
102220 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102221diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102222index 3b726f3..1af6368 100644
102223--- a/net/decnet/dn_dev.c
102224+++ b/net/decnet/dn_dev.c
102225@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
102226 .extra1 = &min_t3,
102227 .extra2 = &max_t3
102228 },
102229- {0}
102230+ { }
102231 },
102232 };
102233
102234diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102235index 5325b54..a0d4d69 100644
102236--- a/net/decnet/sysctl_net_decnet.c
102237+++ b/net/decnet/sysctl_net_decnet.c
102238@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102239
102240 if (len > *lenp) len = *lenp;
102241
102242- if (copy_to_user(buffer, addr, len))
102243+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102244 return -EFAULT;
102245
102246 *lenp = len;
102247@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102248
102249 if (len > *lenp) len = *lenp;
102250
102251- if (copy_to_user(buffer, devname, len))
102252+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102253 return -EFAULT;
102254
102255 *lenp = len;
102256diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
102257index 32755cb..236d827 100644
102258--- a/net/ieee802154/reassembly.c
102259+++ b/net/ieee802154/reassembly.c
102260@@ -433,14 +433,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102261
102262 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102263 {
102264- struct ctl_table *table;
102265+ ctl_table_no_const *table = NULL;
102266 struct ctl_table_header *hdr;
102267 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102268 net_ieee802154_lowpan(net);
102269
102270- table = lowpan_frags_ns_ctl_table;
102271 if (!net_eq(net, &init_net)) {
102272- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102273+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102274 GFP_KERNEL);
102275 if (table == NULL)
102276 goto err_alloc;
102277@@ -455,9 +454,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102278 /* Don't export sysctls to unprivileged users */
102279 if (net->user_ns != &init_user_ns)
102280 table[0].procname = NULL;
102281- }
102282-
102283- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102284+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102285+ } else
102286+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102287 if (hdr == NULL)
102288 goto err_reg;
102289
102290@@ -465,8 +464,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102291 return 0;
102292
102293 err_reg:
102294- if (!net_eq(net, &init_net))
102295- kfree(table);
102296+ kfree(table);
102297 err_alloc:
102298 return -ENOMEM;
102299 }
102300diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
102301index 214882e..ec032f6 100644
102302--- a/net/ipv4/devinet.c
102303+++ b/net/ipv4/devinet.c
102304@@ -69,7 +69,8 @@
102305
102306 static struct ipv4_devconf ipv4_devconf = {
102307 .data = {
102308- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102309+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102310+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102311 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102312 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102313 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102314@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
102315
102316 static struct ipv4_devconf ipv4_devconf_dflt = {
102317 .data = {
102318- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102319+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102320+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102321 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102322 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102323 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102324@@ -1548,7 +1550,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
102325 idx = 0;
102326 head = &net->dev_index_head[h];
102327 rcu_read_lock();
102328- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102329+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102330 net->dev_base_seq;
102331 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102332 if (idx < s_idx)
102333@@ -1866,7 +1868,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
102334 idx = 0;
102335 head = &net->dev_index_head[h];
102336 rcu_read_lock();
102337- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102338+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102339 net->dev_base_seq;
102340 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102341 if (idx < s_idx)
102342@@ -2101,7 +2103,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
102343 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
102344 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
102345
102346-static struct devinet_sysctl_table {
102347+static const struct devinet_sysctl_table {
102348 struct ctl_table_header *sysctl_header;
102349 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
102350 } devinet_sysctl = {
102351@@ -2233,7 +2235,7 @@ static __net_init int devinet_init_net(struct net *net)
102352 int err;
102353 struct ipv4_devconf *all, *dflt;
102354 #ifdef CONFIG_SYSCTL
102355- struct ctl_table *tbl = ctl_forward_entry;
102356+ ctl_table_no_const *tbl = NULL;
102357 struct ctl_table_header *forw_hdr;
102358 #endif
102359
102360@@ -2251,7 +2253,7 @@ static __net_init int devinet_init_net(struct net *net)
102361 goto err_alloc_dflt;
102362
102363 #ifdef CONFIG_SYSCTL
102364- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
102365+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
102366 if (tbl == NULL)
102367 goto err_alloc_ctl;
102368
102369@@ -2271,7 +2273,10 @@ static __net_init int devinet_init_net(struct net *net)
102370 goto err_reg_dflt;
102371
102372 err = -ENOMEM;
102373- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102374+ if (!net_eq(net, &init_net))
102375+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102376+ else
102377+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
102378 if (forw_hdr == NULL)
102379 goto err_reg_ctl;
102380 net->ipv4.forw_hdr = forw_hdr;
102381@@ -2287,8 +2292,7 @@ err_reg_ctl:
102382 err_reg_dflt:
102383 __devinet_sysctl_unregister(all);
102384 err_reg_all:
102385- if (tbl != ctl_forward_entry)
102386- kfree(tbl);
102387+ kfree(tbl);
102388 err_alloc_ctl:
102389 #endif
102390 if (dflt != &ipv4_devconf_dflt)
102391diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
102392index 255aa99..45c78f8 100644
102393--- a/net/ipv4/fib_frontend.c
102394+++ b/net/ipv4/fib_frontend.c
102395@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
102396 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102397 fib_sync_up(dev);
102398 #endif
102399- atomic_inc(&net->ipv4.dev_addr_genid);
102400+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102401 rt_cache_flush(dev_net(dev));
102402 break;
102403 case NETDEV_DOWN:
102404 fib_del_ifaddr(ifa, NULL);
102405- atomic_inc(&net->ipv4.dev_addr_genid);
102406+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102407 if (ifa->ifa_dev->ifa_list == NULL) {
102408 /* Last address was deleted from this interface.
102409 * Disable IP.
102410@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
102411 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102412 fib_sync_up(dev);
102413 #endif
102414- atomic_inc(&net->ipv4.dev_addr_genid);
102415+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102416 rt_cache_flush(net);
102417 break;
102418 case NETDEV_DOWN:
102419diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
102420index 4a74ea8..32335a7 100644
102421--- a/net/ipv4/fib_semantics.c
102422+++ b/net/ipv4/fib_semantics.c
102423@@ -768,7 +768,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
102424 nh->nh_saddr = inet_select_addr(nh->nh_dev,
102425 nh->nh_gw,
102426 nh->nh_parent->fib_scope);
102427- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
102428+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
102429
102430 return nh->nh_saddr;
102431 }
102432diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
102433index dd73bea..a2eec02 100644
102434--- a/net/ipv4/gre_offload.c
102435+++ b/net/ipv4/gre_offload.c
102436@@ -59,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
102437 if (csum)
102438 skb->encap_hdr_csum = 1;
102439
102440- if (unlikely(!pskb_may_pull(skb, ghl)))
102441- goto out;
102442-
102443 /* setup inner skb. */
102444 skb->protocol = greh->protocol;
102445 skb->encapsulation = 0;
102446
102447+ if (unlikely(!pskb_may_pull(skb, ghl)))
102448+ goto out;
102449+
102450 __skb_pull(skb, ghl);
102451 skb_reset_mac_header(skb);
102452 skb_set_network_header(skb, skb_inner_network_offset(skb));
102453diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
102454index 43116e8..ba0916a8 100644
102455--- a/net/ipv4/inet_hashtables.c
102456+++ b/net/ipv4/inet_hashtables.c
102457@@ -18,6 +18,7 @@
102458 #include <linux/sched.h>
102459 #include <linux/slab.h>
102460 #include <linux/wait.h>
102461+#include <linux/security.h>
102462
102463 #include <net/inet_connection_sock.h>
102464 #include <net/inet_hashtables.h>
102465@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
102466 return inet_ehashfn(net, laddr, lport, faddr, fport);
102467 }
102468
102469+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
102470+
102471 /*
102472 * Allocate and initialize a new local port bind bucket.
102473 * The bindhash mutex for snum's hash chain must be held here.
102474@@ -554,6 +557,8 @@ ok:
102475 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
102476 spin_unlock(&head->lock);
102477
102478+ gr_update_task_in_ip_table(inet_sk(sk));
102479+
102480 if (tw) {
102481 inet_twsk_deschedule(tw, death_row);
102482 while (twrefcnt) {
102483diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
102484index bd5f592..e80e605 100644
102485--- a/net/ipv4/inetpeer.c
102486+++ b/net/ipv4/inetpeer.c
102487@@ -482,7 +482,7 @@ relookup:
102488 if (p) {
102489 p->daddr = *daddr;
102490 atomic_set(&p->refcnt, 1);
102491- atomic_set(&p->rid, 0);
102492+ atomic_set_unchecked(&p->rid, 0);
102493 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
102494 p->rate_tokens = 0;
102495 /* 60*HZ is arbitrary, but chosen enough high so that the first
102496diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
102497index 15f0e2b..8cf8177 100644
102498--- a/net/ipv4/ip_fragment.c
102499+++ b/net/ipv4/ip_fragment.c
102500@@ -268,7 +268,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
102501 return 0;
102502
102503 start = qp->rid;
102504- end = atomic_inc_return(&peer->rid);
102505+ end = atomic_inc_return_unchecked(&peer->rid);
102506 qp->rid = end;
102507
102508 rc = qp->q.fragments && (end - start) > max;
102509@@ -746,12 +746,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
102510
102511 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102512 {
102513- struct ctl_table *table;
102514+ ctl_table_no_const *table = NULL;
102515 struct ctl_table_header *hdr;
102516
102517- table = ip4_frags_ns_ctl_table;
102518 if (!net_eq(net, &init_net)) {
102519- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102520+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102521 if (table == NULL)
102522 goto err_alloc;
102523
102524@@ -765,9 +764,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102525 /* Don't export sysctls to unprivileged users */
102526 if (net->user_ns != &init_user_ns)
102527 table[0].procname = NULL;
102528- }
102529+ hdr = register_net_sysctl(net, "net/ipv4", table);
102530+ } else
102531+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
102532
102533- hdr = register_net_sysctl(net, "net/ipv4", table);
102534 if (hdr == NULL)
102535 goto err_reg;
102536
102537@@ -775,8 +775,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102538 return 0;
102539
102540 err_reg:
102541- if (!net_eq(net, &init_net))
102542- kfree(table);
102543+ kfree(table);
102544 err_alloc:
102545 return -ENOMEM;
102546 }
102547diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
102548index 9b84254..c776611 100644
102549--- a/net/ipv4/ip_gre.c
102550+++ b/net/ipv4/ip_gre.c
102551@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
102552 module_param(log_ecn_error, bool, 0644);
102553 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102554
102555-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
102556+static struct rtnl_link_ops ipgre_link_ops;
102557 static int ipgre_tunnel_init(struct net_device *dev);
102558
102559 static int ipgre_net_id __read_mostly;
102560@@ -733,7 +733,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
102561 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
102562 };
102563
102564-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102565+static struct rtnl_link_ops ipgre_link_ops = {
102566 .kind = "gre",
102567 .maxtype = IFLA_GRE_MAX,
102568 .policy = ipgre_policy,
102569@@ -747,7 +747,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102570 .fill_info = ipgre_fill_info,
102571 };
102572
102573-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
102574+static struct rtnl_link_ops ipgre_tap_ops = {
102575 .kind = "gretap",
102576 .maxtype = IFLA_GRE_MAX,
102577 .policy = ipgre_policy,
102578diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
102579index 3d4da2c..40f9c29 100644
102580--- a/net/ipv4/ip_input.c
102581+++ b/net/ipv4/ip_input.c
102582@@ -147,6 +147,10 @@
102583 #include <linux/mroute.h>
102584 #include <linux/netlink.h>
102585
102586+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102587+extern int grsec_enable_blackhole;
102588+#endif
102589+
102590 /*
102591 * Process Router Attention IP option (RFC 2113)
102592 */
102593@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
102594 if (!raw) {
102595 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
102596 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
102597+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102598+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
102599+#endif
102600 icmp_send(skb, ICMP_DEST_UNREACH,
102601 ICMP_PROT_UNREACH, 0);
102602 }
102603diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
102604index c43a1e2..73cbbe1 100644
102605--- a/net/ipv4/ip_output.c
102606+++ b/net/ipv4/ip_output.c
102607@@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
102608 */
102609 features = netif_skb_features(skb);
102610 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
102611- if (IS_ERR(segs)) {
102612+ if (IS_ERR_OR_NULL(segs)) {
102613 kfree_skb(skb);
102614 return -ENOMEM;
102615 }
102616diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
102617index 2407e5d..edc2f1a 100644
102618--- a/net/ipv4/ip_sockglue.c
102619+++ b/net/ipv4/ip_sockglue.c
102620@@ -1188,7 +1188,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102621 len = min_t(unsigned int, len, opt->optlen);
102622 if (put_user(len, optlen))
102623 return -EFAULT;
102624- if (copy_to_user(optval, opt->__data, len))
102625+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
102626+ copy_to_user(optval, opt->__data, len))
102627 return -EFAULT;
102628 return 0;
102629 }
102630@@ -1319,7 +1320,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102631 if (sk->sk_type != SOCK_STREAM)
102632 return -ENOPROTOOPT;
102633
102634- msg.msg_control = (__force void *) optval;
102635+ msg.msg_control = (__force_kernel void *) optval;
102636 msg.msg_controllen = len;
102637 msg.msg_flags = flags;
102638
102639diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
102640index e453cb7..3c8d952 100644
102641--- a/net/ipv4/ip_vti.c
102642+++ b/net/ipv4/ip_vti.c
102643@@ -45,7 +45,7 @@
102644 #include <net/net_namespace.h>
102645 #include <net/netns/generic.h>
102646
102647-static struct rtnl_link_ops vti_link_ops __read_mostly;
102648+static struct rtnl_link_ops vti_link_ops;
102649
102650 static int vti_net_id __read_mostly;
102651 static int vti_tunnel_init(struct net_device *dev);
102652@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
102653 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
102654 };
102655
102656-static struct rtnl_link_ops vti_link_ops __read_mostly = {
102657+static struct rtnl_link_ops vti_link_ops = {
102658 .kind = "vti",
102659 .maxtype = IFLA_VTI_MAX,
102660 .policy = vti_policy,
102661diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
102662index 5bbef4f..5bc4fb6 100644
102663--- a/net/ipv4/ipconfig.c
102664+++ b/net/ipv4/ipconfig.c
102665@@ -332,7 +332,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
102666
102667 mm_segment_t oldfs = get_fs();
102668 set_fs(get_ds());
102669- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102670+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102671 set_fs(oldfs);
102672 return res;
102673 }
102674@@ -343,7 +343,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
102675
102676 mm_segment_t oldfs = get_fs();
102677 set_fs(get_ds());
102678- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102679+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102680 set_fs(oldfs);
102681 return res;
102682 }
102683@@ -354,7 +354,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
102684
102685 mm_segment_t oldfs = get_fs();
102686 set_fs(get_ds());
102687- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
102688+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
102689 set_fs(oldfs);
102690 return res;
102691 }
102692diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
102693index 62eaa00..29b2dc2 100644
102694--- a/net/ipv4/ipip.c
102695+++ b/net/ipv4/ipip.c
102696@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102697 static int ipip_net_id __read_mostly;
102698
102699 static int ipip_tunnel_init(struct net_device *dev);
102700-static struct rtnl_link_ops ipip_link_ops __read_mostly;
102701+static struct rtnl_link_ops ipip_link_ops;
102702
102703 static int ipip_err(struct sk_buff *skb, u32 info)
102704 {
102705@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
102706 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
102707 };
102708
102709-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
102710+static struct rtnl_link_ops ipip_link_ops = {
102711 .kind = "ipip",
102712 .maxtype = IFLA_IPTUN_MAX,
102713 .policy = ipip_policy,
102714diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
102715index f95b6f9..2ee2097 100644
102716--- a/net/ipv4/netfilter/arp_tables.c
102717+++ b/net/ipv4/netfilter/arp_tables.c
102718@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
102719 #endif
102720
102721 static int get_info(struct net *net, void __user *user,
102722- const int *len, int compat)
102723+ int len, int compat)
102724 {
102725 char name[XT_TABLE_MAXNAMELEN];
102726 struct xt_table *t;
102727 int ret;
102728
102729- if (*len != sizeof(struct arpt_getinfo)) {
102730- duprintf("length %u != %Zu\n", *len,
102731+ if (len != sizeof(struct arpt_getinfo)) {
102732+ duprintf("length %u != %Zu\n", len,
102733 sizeof(struct arpt_getinfo));
102734 return -EINVAL;
102735 }
102736@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
102737 info.size = private->size;
102738 strcpy(info.name, name);
102739
102740- if (copy_to_user(user, &info, *len) != 0)
102741+ if (copy_to_user(user, &info, len) != 0)
102742 ret = -EFAULT;
102743 else
102744 ret = 0;
102745@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
102746
102747 switch (cmd) {
102748 case ARPT_SO_GET_INFO:
102749- ret = get_info(sock_net(sk), user, len, 1);
102750+ ret = get_info(sock_net(sk), user, *len, 1);
102751 break;
102752 case ARPT_SO_GET_ENTRIES:
102753 ret = compat_get_entries(sock_net(sk), user, len);
102754@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
102755
102756 switch (cmd) {
102757 case ARPT_SO_GET_INFO:
102758- ret = get_info(sock_net(sk), user, len, 0);
102759+ ret = get_info(sock_net(sk), user, *len, 0);
102760 break;
102761
102762 case ARPT_SO_GET_ENTRIES:
102763diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
102764index 99e810f..3711b81 100644
102765--- a/net/ipv4/netfilter/ip_tables.c
102766+++ b/net/ipv4/netfilter/ip_tables.c
102767@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
102768 #endif
102769
102770 static int get_info(struct net *net, void __user *user,
102771- const int *len, int compat)
102772+ int len, int compat)
102773 {
102774 char name[XT_TABLE_MAXNAMELEN];
102775 struct xt_table *t;
102776 int ret;
102777
102778- if (*len != sizeof(struct ipt_getinfo)) {
102779- duprintf("length %u != %zu\n", *len,
102780+ if (len != sizeof(struct ipt_getinfo)) {
102781+ duprintf("length %u != %zu\n", len,
102782 sizeof(struct ipt_getinfo));
102783 return -EINVAL;
102784 }
102785@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
102786 info.size = private->size;
102787 strcpy(info.name, name);
102788
102789- if (copy_to_user(user, &info, *len) != 0)
102790+ if (copy_to_user(user, &info, len) != 0)
102791 ret = -EFAULT;
102792 else
102793 ret = 0;
102794@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102795
102796 switch (cmd) {
102797 case IPT_SO_GET_INFO:
102798- ret = get_info(sock_net(sk), user, len, 1);
102799+ ret = get_info(sock_net(sk), user, *len, 1);
102800 break;
102801 case IPT_SO_GET_ENTRIES:
102802 ret = compat_get_entries(sock_net(sk), user, len);
102803@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102804
102805 switch (cmd) {
102806 case IPT_SO_GET_INFO:
102807- ret = get_info(sock_net(sk), user, len, 0);
102808+ ret = get_info(sock_net(sk), user, *len, 0);
102809 break;
102810
102811 case IPT_SO_GET_ENTRIES:
102812diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102813index 2510c02..cfb34fa 100644
102814--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
102815+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102816@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
102817 spin_lock_init(&cn->lock);
102818
102819 #ifdef CONFIG_PROC_FS
102820- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
102821+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
102822 if (!cn->procdir) {
102823 pr_err("Unable to proc dir entry\n");
102824 return -ENOMEM;
102825diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
102826index 3524762..2e88bfd 100644
102827--- a/net/ipv4/ping.c
102828+++ b/net/ipv4/ping.c
102829@@ -59,7 +59,7 @@ struct ping_table {
102830 };
102831
102832 static struct ping_table ping_table;
102833-struct pingv6_ops pingv6_ops;
102834+struct pingv6_ops *pingv6_ops;
102835 EXPORT_SYMBOL_GPL(pingv6_ops);
102836
102837 static u16 ping_port_rover;
102838@@ -350,7 +350,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102839 return -ENODEV;
102840 }
102841 }
102842- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
102843+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
102844 scoped);
102845 rcu_read_unlock();
102846
102847@@ -558,7 +558,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102848 }
102849 #if IS_ENABLED(CONFIG_IPV6)
102850 } else if (skb->protocol == htons(ETH_P_IPV6)) {
102851- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
102852+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
102853 #endif
102854 }
102855
102856@@ -576,7 +576,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102857 info, (u8 *)icmph);
102858 #if IS_ENABLED(CONFIG_IPV6)
102859 } else if (family == AF_INET6) {
102860- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
102861+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
102862 info, (u8 *)icmph);
102863 #endif
102864 }
102865@@ -860,7 +860,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102866 return ip_recv_error(sk, msg, len, addr_len);
102867 #if IS_ENABLED(CONFIG_IPV6)
102868 } else if (family == AF_INET6) {
102869- return pingv6_ops.ipv6_recv_error(sk, msg, len,
102870+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
102871 addr_len);
102872 #endif
102873 }
102874@@ -918,10 +918,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102875 }
102876
102877 if (inet6_sk(sk)->rxopt.all)
102878- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
102879+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
102880 if (skb->protocol == htons(ETH_P_IPV6) &&
102881 inet6_sk(sk)->rxopt.all)
102882- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
102883+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
102884 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
102885 ip_cmsg_recv(msg, skb);
102886 #endif
102887@@ -1113,7 +1113,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
102888 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
102889 0, sock_i_ino(sp),
102890 atomic_read(&sp->sk_refcnt), sp,
102891- atomic_read(&sp->sk_drops));
102892+ atomic_read_unchecked(&sp->sk_drops));
102893 }
102894
102895 static int ping_v4_seq_show(struct seq_file *seq, void *v)
102896diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
102897index 739db31..74f0210 100644
102898--- a/net/ipv4/raw.c
102899+++ b/net/ipv4/raw.c
102900@@ -314,7 +314,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
102901 int raw_rcv(struct sock *sk, struct sk_buff *skb)
102902 {
102903 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
102904- atomic_inc(&sk->sk_drops);
102905+ atomic_inc_unchecked(&sk->sk_drops);
102906 kfree_skb(skb);
102907 return NET_RX_DROP;
102908 }
102909@@ -755,16 +755,20 @@ static int raw_init(struct sock *sk)
102910
102911 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
102912 {
102913+ struct icmp_filter filter;
102914+
102915 if (optlen > sizeof(struct icmp_filter))
102916 optlen = sizeof(struct icmp_filter);
102917- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
102918+ if (copy_from_user(&filter, optval, optlen))
102919 return -EFAULT;
102920+ raw_sk(sk)->filter = filter;
102921 return 0;
102922 }
102923
102924 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
102925 {
102926 int len, ret = -EFAULT;
102927+ struct icmp_filter filter;
102928
102929 if (get_user(len, optlen))
102930 goto out;
102931@@ -774,8 +778,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
102932 if (len > sizeof(struct icmp_filter))
102933 len = sizeof(struct icmp_filter);
102934 ret = -EFAULT;
102935- if (put_user(len, optlen) ||
102936- copy_to_user(optval, &raw_sk(sk)->filter, len))
102937+ filter = raw_sk(sk)->filter;
102938+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
102939 goto out;
102940 ret = 0;
102941 out: return ret;
102942@@ -1004,7 +1008,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
102943 0, 0L, 0,
102944 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
102945 0, sock_i_ino(sp),
102946- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
102947+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
102948 }
102949
102950 static int raw_seq_show(struct seq_file *seq, void *v)
102951diff --git a/net/ipv4/route.c b/net/ipv4/route.c
102952index 29836f8..bd1e2ba 100644
102953--- a/net/ipv4/route.c
102954+++ b/net/ipv4/route.c
102955@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
102956
102957 static int rt_cache_seq_open(struct inode *inode, struct file *file)
102958 {
102959- return seq_open(file, &rt_cache_seq_ops);
102960+ return seq_open_restrict(file, &rt_cache_seq_ops);
102961 }
102962
102963 static const struct file_operations rt_cache_seq_fops = {
102964@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
102965
102966 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
102967 {
102968- return seq_open(file, &rt_cpu_seq_ops);
102969+ return seq_open_restrict(file, &rt_cpu_seq_ops);
102970 }
102971
102972 static const struct file_operations rt_cpu_seq_fops = {
102973@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
102974
102975 static int rt_acct_proc_open(struct inode *inode, struct file *file)
102976 {
102977- return single_open(file, rt_acct_proc_show, NULL);
102978+ return single_open_restrict(file, rt_acct_proc_show, NULL);
102979 }
102980
102981 static const struct file_operations rt_acct_proc_fops = {
102982@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
102983
102984 #define IP_IDENTS_SZ 2048u
102985 struct ip_ident_bucket {
102986- atomic_t id;
102987+ atomic_unchecked_t id;
102988 u32 stamp32;
102989 };
102990
102991-static struct ip_ident_bucket *ip_idents __read_mostly;
102992+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
102993
102994 /* In order to protect privacy, we add a perturbation to identifiers
102995 * if one generator is seldom used. This makes hard for an attacker
102996@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
102997 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
102998 delta = prandom_u32_max(now - old);
102999
103000- return atomic_add_return(segs + delta, &bucket->id) - segs;
103001+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
103002 }
103003 EXPORT_SYMBOL(ip_idents_reserve);
103004
103005@@ -2624,34 +2624,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
103006 .maxlen = sizeof(int),
103007 .mode = 0200,
103008 .proc_handler = ipv4_sysctl_rtcache_flush,
103009+ .extra1 = &init_net,
103010 },
103011 { },
103012 };
103013
103014 static __net_init int sysctl_route_net_init(struct net *net)
103015 {
103016- struct ctl_table *tbl;
103017+ ctl_table_no_const *tbl = NULL;
103018
103019- tbl = ipv4_route_flush_table;
103020 if (!net_eq(net, &init_net)) {
103021- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103022+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103023 if (tbl == NULL)
103024 goto err_dup;
103025
103026 /* Don't export sysctls to unprivileged users */
103027 if (net->user_ns != &init_user_ns)
103028 tbl[0].procname = NULL;
103029- }
103030- tbl[0].extra1 = net;
103031+ tbl[0].extra1 = net;
103032+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103033+ } else
103034+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
103035
103036- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103037 if (net->ipv4.route_hdr == NULL)
103038 goto err_reg;
103039 return 0;
103040
103041 err_reg:
103042- if (tbl != ipv4_route_flush_table)
103043- kfree(tbl);
103044+ kfree(tbl);
103045 err_dup:
103046 return -ENOMEM;
103047 }
103048@@ -2674,8 +2674,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
103049
103050 static __net_init int rt_genid_init(struct net *net)
103051 {
103052- atomic_set(&net->ipv4.rt_genid, 0);
103053- atomic_set(&net->fnhe_genid, 0);
103054+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
103055+ atomic_set_unchecked(&net->fnhe_genid, 0);
103056 get_random_bytes(&net->ipv4.dev_addr_genid,
103057 sizeof(net->ipv4.dev_addr_genid));
103058 return 0;
103059@@ -2718,11 +2718,7 @@ int __init ip_rt_init(void)
103060 {
103061 int rc = 0;
103062
103063- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
103064- if (!ip_idents)
103065- panic("IP: failed to allocate ip_idents\n");
103066-
103067- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
103068+ prandom_bytes(ip_idents, sizeof(ip_idents));
103069
103070 #ifdef CONFIG_IP_ROUTE_CLASSID
103071 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
103072diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
103073index 79a007c..5023029 100644
103074--- a/net/ipv4/sysctl_net_ipv4.c
103075+++ b/net/ipv4/sysctl_net_ipv4.c
103076@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
103077 container_of(table->data, struct net, ipv4.ip_local_ports.range);
103078 int ret;
103079 int range[2];
103080- struct ctl_table tmp = {
103081+ ctl_table_no_const tmp = {
103082 .data = &range,
103083 .maxlen = sizeof(range),
103084 .mode = table->mode,
103085@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103086 int ret;
103087 gid_t urange[2];
103088 kgid_t low, high;
103089- struct ctl_table tmp = {
103090+ ctl_table_no_const tmp = {
103091 .data = &urange,
103092 .maxlen = sizeof(urange),
103093 .mode = table->mode,
103094@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103095 void __user *buffer, size_t *lenp, loff_t *ppos)
103096 {
103097 char val[TCP_CA_NAME_MAX];
103098- struct ctl_table tbl = {
103099+ ctl_table_no_const tbl = {
103100 .data = val,
103101 .maxlen = TCP_CA_NAME_MAX,
103102 };
103103@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103104 void __user *buffer, size_t *lenp,
103105 loff_t *ppos)
103106 {
103107- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103108+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103109 int ret;
103110
103111 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103112@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103113 void __user *buffer, size_t *lenp,
103114 loff_t *ppos)
103115 {
103116- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103117+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103118 int ret;
103119
103120 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103121@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103122 void __user *buffer, size_t *lenp,
103123 loff_t *ppos)
103124 {
103125- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103126+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103127 struct tcp_fastopen_context *ctxt;
103128 int ret;
103129 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103130@@ -857,13 +857,12 @@ static struct ctl_table ipv4_net_table[] = {
103131
103132 static __net_init int ipv4_sysctl_init_net(struct net *net)
103133 {
103134- struct ctl_table *table;
103135+ ctl_table_no_const *table = NULL;
103136
103137- table = ipv4_net_table;
103138 if (!net_eq(net, &init_net)) {
103139 int i;
103140
103141- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
103142+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
103143 if (table == NULL)
103144 goto err_alloc;
103145
103146@@ -872,7 +871,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
103147 table[i].data += (void *)net - (void *)&init_net;
103148 }
103149
103150- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103151+ if (!net_eq(net, &init_net))
103152+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103153+ else
103154+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103155 if (net->ipv4.ipv4_hdr == NULL)
103156 goto err_reg;
103157
103158diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103159index a906e02..f3b6a0f 100644
103160--- a/net/ipv4/tcp_input.c
103161+++ b/net/ipv4/tcp_input.c
103162@@ -755,7 +755,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103163 * without any lock. We want to make sure compiler wont store
103164 * intermediate values in this location.
103165 */
103166- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103167+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103168 sk->sk_max_pacing_rate);
103169 }
103170
103171@@ -4488,7 +4488,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103172 * simplifies code)
103173 */
103174 static void
103175-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103176+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103177 struct sk_buff *head, struct sk_buff *tail,
103178 u32 start, u32 end)
103179 {
103180@@ -5546,6 +5546,7 @@ discard:
103181 tcp_paws_reject(&tp->rx_opt, 0))
103182 goto discard_and_undo;
103183
103184+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103185 if (th->syn) {
103186 /* We see SYN without ACK. It is attempt of
103187 * simultaneous connect with crossed SYNs.
103188@@ -5596,6 +5597,7 @@ discard:
103189 goto discard;
103190 #endif
103191 }
103192+#endif
103193 /* "fifth, if neither of the SYN or RST bits is set then
103194 * drop the segment and return."
103195 */
103196@@ -5642,7 +5644,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103197 goto discard;
103198
103199 if (th->syn) {
103200- if (th->fin)
103201+ if (th->fin || th->urg || th->psh)
103202 goto discard;
103203 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103204 return 1;
103205diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103206index 3f49eae..bde687a 100644
103207--- a/net/ipv4/tcp_ipv4.c
103208+++ b/net/ipv4/tcp_ipv4.c
103209@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
103210 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103211
103212
103213+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103214+extern int grsec_enable_blackhole;
103215+#endif
103216+
103217 #ifdef CONFIG_TCP_MD5SIG
103218 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103219 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103220@@ -1487,6 +1491,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103221 return 0;
103222
103223 reset:
103224+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103225+ if (!grsec_enable_blackhole)
103226+#endif
103227 tcp_v4_send_reset(rsk, skb);
103228 discard:
103229 kfree_skb(skb);
103230@@ -1633,12 +1640,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103231 TCP_SKB_CB(skb)->sacked = 0;
103232
103233 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103234- if (!sk)
103235+ if (!sk) {
103236+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103237+ ret = 1;
103238+#endif
103239 goto no_tcp_socket;
103240-
103241+ }
103242 process:
103243- if (sk->sk_state == TCP_TIME_WAIT)
103244+ if (sk->sk_state == TCP_TIME_WAIT) {
103245+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103246+ ret = 2;
103247+#endif
103248 goto do_time_wait;
103249+ }
103250
103251 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103252 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103253@@ -1704,6 +1718,10 @@ csum_error:
103254 bad_packet:
103255 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103256 } else {
103257+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103258+ if (!grsec_enable_blackhole || (ret == 1 &&
103259+ (skb->dev->flags & IFF_LOOPBACK)))
103260+#endif
103261 tcp_v4_send_reset(NULL, skb);
103262 }
103263
103264diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103265index 1649988..6251843 100644
103266--- a/net/ipv4/tcp_minisocks.c
103267+++ b/net/ipv4/tcp_minisocks.c
103268@@ -27,6 +27,10 @@
103269 #include <net/inet_common.h>
103270 #include <net/xfrm.h>
103271
103272+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103273+extern int grsec_enable_blackhole;
103274+#endif
103275+
103276 int sysctl_tcp_syncookies __read_mostly = 1;
103277 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103278
103279@@ -740,7 +744,10 @@ embryonic_reset:
103280 * avoid becoming vulnerable to outside attack aiming at
103281 * resetting legit local connections.
103282 */
103283- req->rsk_ops->send_reset(sk, skb);
103284+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103285+ if (!grsec_enable_blackhole)
103286+#endif
103287+ req->rsk_ops->send_reset(sk, skb);
103288 } else if (fastopen) { /* received a valid RST pkt */
103289 reqsk_fastopen_remove(sk, req, true);
103290 tcp_reset(sk);
103291diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
103292index 3b66610..bfbe23a 100644
103293--- a/net/ipv4/tcp_probe.c
103294+++ b/net/ipv4/tcp_probe.c
103295@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
103296 if (cnt + width >= len)
103297 break;
103298
103299- if (copy_to_user(buf + cnt, tbuf, width))
103300+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
103301 return -EFAULT;
103302 cnt += width;
103303 }
103304diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
103305index df90cd1..9ab2c9b 100644
103306--- a/net/ipv4/tcp_timer.c
103307+++ b/net/ipv4/tcp_timer.c
103308@@ -22,6 +22,10 @@
103309 #include <linux/gfp.h>
103310 #include <net/tcp.h>
103311
103312+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103313+extern int grsec_lastack_retries;
103314+#endif
103315+
103316 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
103317 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
103318 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
103319@@ -192,6 +196,13 @@ static int tcp_write_timeout(struct sock *sk)
103320 }
103321 }
103322
103323+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103324+ if ((sk->sk_state == TCP_LAST_ACK) &&
103325+ (grsec_lastack_retries > 0) &&
103326+ (grsec_lastack_retries < retry_until))
103327+ retry_until = grsec_lastack_retries;
103328+#endif
103329+
103330 if (retransmits_timed_out(sk, retry_until,
103331 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
103332 /* Has it gone just too far? */
103333diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
103334index f57c0e4..cf24bd0 100644
103335--- a/net/ipv4/udp.c
103336+++ b/net/ipv4/udp.c
103337@@ -87,6 +87,7 @@
103338 #include <linux/types.h>
103339 #include <linux/fcntl.h>
103340 #include <linux/module.h>
103341+#include <linux/security.h>
103342 #include <linux/socket.h>
103343 #include <linux/sockios.h>
103344 #include <linux/igmp.h>
103345@@ -113,6 +114,10 @@
103346 #include <net/busy_poll.h>
103347 #include "udp_impl.h"
103348
103349+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103350+extern int grsec_enable_blackhole;
103351+#endif
103352+
103353 struct udp_table udp_table __read_mostly;
103354 EXPORT_SYMBOL(udp_table);
103355
103356@@ -594,6 +599,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
103357 return true;
103358 }
103359
103360+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
103361+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
103362+
103363 /*
103364 * This routine is called by the ICMP module when it gets some
103365 * sort of error condition. If err < 0 then the socket should
103366@@ -931,9 +939,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103367 dport = usin->sin_port;
103368 if (dport == 0)
103369 return -EINVAL;
103370+
103371+ err = gr_search_udp_sendmsg(sk, usin);
103372+ if (err)
103373+ return err;
103374 } else {
103375 if (sk->sk_state != TCP_ESTABLISHED)
103376 return -EDESTADDRREQ;
103377+
103378+ err = gr_search_udp_sendmsg(sk, NULL);
103379+ if (err)
103380+ return err;
103381+
103382 daddr = inet->inet_daddr;
103383 dport = inet->inet_dport;
103384 /* Open fast path for connected socket.
103385@@ -1181,7 +1198,7 @@ static unsigned int first_packet_length(struct sock *sk)
103386 IS_UDPLITE(sk));
103387 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103388 IS_UDPLITE(sk));
103389- atomic_inc(&sk->sk_drops);
103390+ atomic_inc_unchecked(&sk->sk_drops);
103391 __skb_unlink(skb, rcvq);
103392 __skb_queue_tail(&list_kill, skb);
103393 }
103394@@ -1261,6 +1278,10 @@ try_again:
103395 if (!skb)
103396 goto out;
103397
103398+ err = gr_search_udp_recvmsg(sk, skb);
103399+ if (err)
103400+ goto out_free;
103401+
103402 ulen = skb->len - sizeof(struct udphdr);
103403 copied = len;
103404 if (copied > ulen)
103405@@ -1294,7 +1315,7 @@ try_again:
103406 if (unlikely(err)) {
103407 trace_kfree_skb(skb, udp_recvmsg);
103408 if (!peeked) {
103409- atomic_inc(&sk->sk_drops);
103410+ atomic_inc_unchecked(&sk->sk_drops);
103411 UDP_INC_STATS_USER(sock_net(sk),
103412 UDP_MIB_INERRORS, is_udplite);
103413 }
103414@@ -1591,7 +1612,7 @@ csum_error:
103415 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
103416 drop:
103417 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
103418- atomic_inc(&sk->sk_drops);
103419+ atomic_inc_unchecked(&sk->sk_drops);
103420 kfree_skb(skb);
103421 return -1;
103422 }
103423@@ -1610,7 +1631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
103424 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
103425
103426 if (!skb1) {
103427- atomic_inc(&sk->sk_drops);
103428+ atomic_inc_unchecked(&sk->sk_drops);
103429 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
103430 IS_UDPLITE(sk));
103431 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103432@@ -1807,6 +1828,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
103433 goto csum_error;
103434
103435 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
103436+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103437+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103438+#endif
103439 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
103440
103441 /*
103442@@ -2393,7 +2417,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
103443 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103444 0, sock_i_ino(sp),
103445 atomic_read(&sp->sk_refcnt), sp,
103446- atomic_read(&sp->sk_drops));
103447+ atomic_read_unchecked(&sp->sk_drops));
103448 }
103449
103450 int udp4_seq_show(struct seq_file *seq, void *v)
103451diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
103452index 6156f68..d6ab46d 100644
103453--- a/net/ipv4/xfrm4_policy.c
103454+++ b/net/ipv4/xfrm4_policy.c
103455@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
103456 fl4->flowi4_tos = iph->tos;
103457 }
103458
103459-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
103460+static int xfrm4_garbage_collect(struct dst_ops *ops)
103461 {
103462 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
103463
103464- xfrm4_policy_afinfo.garbage_collect(net);
103465+ xfrm_garbage_collect_deferred(net);
103466 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
103467 }
103468
103469@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
103470
103471 static int __net_init xfrm4_net_init(struct net *net)
103472 {
103473- struct ctl_table *table;
103474+ ctl_table_no_const *table = NULL;
103475 struct ctl_table_header *hdr;
103476
103477- table = xfrm4_policy_table;
103478 if (!net_eq(net, &init_net)) {
103479- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103480+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103481 if (!table)
103482 goto err_alloc;
103483
103484 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
103485- }
103486-
103487- hdr = register_net_sysctl(net, "net/ipv4", table);
103488+ hdr = register_net_sysctl(net, "net/ipv4", table);
103489+ } else
103490+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
103491 if (!hdr)
103492 goto err_reg;
103493
103494@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
103495 return 0;
103496
103497 err_reg:
103498- if (!net_eq(net, &init_net))
103499- kfree(table);
103500+ kfree(table);
103501 err_alloc:
103502 return -ENOMEM;
103503 }
103504diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
103505index 3e118df..288a0d1 100644
103506--- a/net/ipv6/addrconf.c
103507+++ b/net/ipv6/addrconf.c
103508@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
103509 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103510 .mtu6 = IPV6_MIN_MTU,
103511 .accept_ra = 1,
103512- .accept_redirects = 1,
103513+ .accept_redirects = 0,
103514 .autoconf = 1,
103515 .force_mld_version = 0,
103516 .mldv1_unsolicited_report_interval = 10 * HZ,
103517@@ -208,7 +208,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
103518 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103519 .mtu6 = IPV6_MIN_MTU,
103520 .accept_ra = 1,
103521- .accept_redirects = 1,
103522+ .accept_redirects = 0,
103523 .autoconf = 1,
103524 .force_mld_version = 0,
103525 .mldv1_unsolicited_report_interval = 10 * HZ,
103526@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
103527 idx = 0;
103528 head = &net->dev_index_head[h];
103529 rcu_read_lock();
103530- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
103531+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
103532 net->dev_base_seq;
103533 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103534 if (idx < s_idx)
103535@@ -2396,7 +2396,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
103536 p.iph.ihl = 5;
103537 p.iph.protocol = IPPROTO_IPV6;
103538 p.iph.ttl = 64;
103539- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
103540+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
103541
103542 if (ops->ndo_do_ioctl) {
103543 mm_segment_t oldfs = get_fs();
103544@@ -3531,16 +3531,23 @@ static const struct file_operations if6_fops = {
103545 .release = seq_release_net,
103546 };
103547
103548+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
103549+extern void unregister_ipv6_seq_ops_addr(void);
103550+
103551 static int __net_init if6_proc_net_init(struct net *net)
103552 {
103553- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
103554+ register_ipv6_seq_ops_addr(&if6_seq_ops);
103555+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
103556+ unregister_ipv6_seq_ops_addr();
103557 return -ENOMEM;
103558+ }
103559 return 0;
103560 }
103561
103562 static void __net_exit if6_proc_net_exit(struct net *net)
103563 {
103564 remove_proc_entry("if_inet6", net->proc_net);
103565+ unregister_ipv6_seq_ops_addr();
103566 }
103567
103568 static struct pernet_operations if6_proc_net_ops = {
103569@@ -4156,7 +4163,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
103570 s_ip_idx = ip_idx = cb->args[2];
103571
103572 rcu_read_lock();
103573- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103574+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103575 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
103576 idx = 0;
103577 head = &net->dev_index_head[h];
103578@@ -4784,7 +4791,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103579 rt_genid_bump_ipv6(net);
103580 break;
103581 }
103582- atomic_inc(&net->ipv6.dev_addr_genid);
103583+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
103584 }
103585
103586 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103587@@ -4804,7 +4811,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
103588 int *valp = ctl->data;
103589 int val = *valp;
103590 loff_t pos = *ppos;
103591- struct ctl_table lctl;
103592+ ctl_table_no_const lctl;
103593 int ret;
103594
103595 /*
103596@@ -4889,7 +4896,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
103597 int *valp = ctl->data;
103598 int val = *valp;
103599 loff_t pos = *ppos;
103600- struct ctl_table lctl;
103601+ ctl_table_no_const lctl;
103602 int ret;
103603
103604 /*
103605diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
103606index 2daa3a1..341066c 100644
103607--- a/net/ipv6/af_inet6.c
103608+++ b/net/ipv6/af_inet6.c
103609@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
103610 net->ipv6.sysctl.icmpv6_time = 1*HZ;
103611 net->ipv6.sysctl.flowlabel_consistency = 1;
103612 net->ipv6.sysctl.auto_flowlabels = 0;
103613- atomic_set(&net->ipv6.rt_genid, 0);
103614+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
103615
103616 err = ipv6_init_mibs(net);
103617 if (err)
103618diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
103619index 2753319..b7e625c 100644
103620--- a/net/ipv6/datagram.c
103621+++ b/net/ipv6/datagram.c
103622@@ -939,5 +939,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
103623 0,
103624 sock_i_ino(sp),
103625 atomic_read(&sp->sk_refcnt), sp,
103626- atomic_read(&sp->sk_drops));
103627+ atomic_read_unchecked(&sp->sk_drops));
103628 }
103629diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
103630index 06ba3e5..5c08d38 100644
103631--- a/net/ipv6/icmp.c
103632+++ b/net/ipv6/icmp.c
103633@@ -993,7 +993,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
103634
103635 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
103636 {
103637- struct ctl_table *table;
103638+ ctl_table_no_const *table;
103639
103640 table = kmemdup(ipv6_icmp_table_template,
103641 sizeof(ipv6_icmp_table_template),
103642diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
103643index cacb493..3cae894 100644
103644--- a/net/ipv6/ip6_gre.c
103645+++ b/net/ipv6/ip6_gre.c
103646@@ -71,8 +71,8 @@ struct ip6gre_net {
103647 struct net_device *fb_tunnel_dev;
103648 };
103649
103650-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
103651-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
103652+static struct rtnl_link_ops ip6gre_link_ops;
103653+static struct rtnl_link_ops ip6gre_tap_ops;
103654 static int ip6gre_tunnel_init(struct net_device *dev);
103655 static void ip6gre_tunnel_setup(struct net_device *dev);
103656 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
103657@@ -1285,7 +1285,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
103658 }
103659
103660
103661-static struct inet6_protocol ip6gre_protocol __read_mostly = {
103662+static struct inet6_protocol ip6gre_protocol = {
103663 .handler = ip6gre_rcv,
103664 .err_handler = ip6gre_err,
103665 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
103666@@ -1646,7 +1646,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
103667 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
103668 };
103669
103670-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103671+static struct rtnl_link_ops ip6gre_link_ops = {
103672 .kind = "ip6gre",
103673 .maxtype = IFLA_GRE_MAX,
103674 .policy = ip6gre_policy,
103675@@ -1660,7 +1660,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103676 .fill_info = ip6gre_fill_info,
103677 };
103678
103679-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
103680+static struct rtnl_link_ops ip6gre_tap_ops = {
103681 .kind = "ip6gretap",
103682 .maxtype = IFLA_GRE_MAX,
103683 .policy = ip6gre_policy,
103684diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
103685index 65eda2a..620a102 100644
103686--- a/net/ipv6/ip6_offload.c
103687+++ b/net/ipv6/ip6_offload.c
103688@@ -46,6 +46,7 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
103689 if (unlikely(!pskb_may_pull(skb, len)))
103690 break;
103691
103692+ opth = (void *)skb->data;
103693 proto = opth->nexthdr;
103694 __skb_pull(skb, len);
103695 }
103696diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
103697index d2eeb3b..c186e9a 100644
103698--- a/net/ipv6/ip6_tunnel.c
103699+++ b/net/ipv6/ip6_tunnel.c
103700@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103701
103702 static int ip6_tnl_dev_init(struct net_device *dev);
103703 static void ip6_tnl_dev_setup(struct net_device *dev);
103704-static struct rtnl_link_ops ip6_link_ops __read_mostly;
103705+static struct rtnl_link_ops ip6_link_ops;
103706
103707 static int ip6_tnl_net_id __read_mostly;
103708 struct ip6_tnl_net {
103709@@ -1706,7 +1706,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
103710 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
103711 };
103712
103713-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
103714+static struct rtnl_link_ops ip6_link_ops = {
103715 .kind = "ip6tnl",
103716 .maxtype = IFLA_IPTUN_MAX,
103717 .policy = ip6_tnl_policy,
103718diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
103719index 99c9487..63f4d92 100644
103720--- a/net/ipv6/ip6_vti.c
103721+++ b/net/ipv6/ip6_vti.c
103722@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103723
103724 static int vti6_dev_init(struct net_device *dev);
103725 static void vti6_dev_setup(struct net_device *dev);
103726-static struct rtnl_link_ops vti6_link_ops __read_mostly;
103727+static struct rtnl_link_ops vti6_link_ops;
103728
103729 static int vti6_net_id __read_mostly;
103730 struct vti6_net {
103731@@ -972,7 +972,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
103732 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
103733 };
103734
103735-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
103736+static struct rtnl_link_ops vti6_link_ops = {
103737 .kind = "vti6",
103738 .maxtype = IFLA_VTI_MAX,
103739 .policy = vti6_policy,
103740diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
103741index 0c28998..d0a2ecd 100644
103742--- a/net/ipv6/ipv6_sockglue.c
103743+++ b/net/ipv6/ipv6_sockglue.c
103744@@ -995,7 +995,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
103745 if (sk->sk_type != SOCK_STREAM)
103746 return -ENOPROTOOPT;
103747
103748- msg.msg_control = optval;
103749+ msg.msg_control = (void __force_kernel *)optval;
103750 msg.msg_controllen = len;
103751 msg.msg_flags = flags;
103752
103753diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
103754index e080fbb..412b3cf 100644
103755--- a/net/ipv6/netfilter/ip6_tables.c
103756+++ b/net/ipv6/netfilter/ip6_tables.c
103757@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
103758 #endif
103759
103760 static int get_info(struct net *net, void __user *user,
103761- const int *len, int compat)
103762+ int len, int compat)
103763 {
103764 char name[XT_TABLE_MAXNAMELEN];
103765 struct xt_table *t;
103766 int ret;
103767
103768- if (*len != sizeof(struct ip6t_getinfo)) {
103769- duprintf("length %u != %zu\n", *len,
103770+ if (len != sizeof(struct ip6t_getinfo)) {
103771+ duprintf("length %u != %zu\n", len,
103772 sizeof(struct ip6t_getinfo));
103773 return -EINVAL;
103774 }
103775@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
103776 info.size = private->size;
103777 strcpy(info.name, name);
103778
103779- if (copy_to_user(user, &info, *len) != 0)
103780+ if (copy_to_user(user, &info, len) != 0)
103781 ret = -EFAULT;
103782 else
103783 ret = 0;
103784@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103785
103786 switch (cmd) {
103787 case IP6T_SO_GET_INFO:
103788- ret = get_info(sock_net(sk), user, len, 1);
103789+ ret = get_info(sock_net(sk), user, *len, 1);
103790 break;
103791 case IP6T_SO_GET_ENTRIES:
103792 ret = compat_get_entries(sock_net(sk), user, len);
103793@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103794
103795 switch (cmd) {
103796 case IP6T_SO_GET_INFO:
103797- ret = get_info(sock_net(sk), user, len, 0);
103798+ ret = get_info(sock_net(sk), user, *len, 0);
103799 break;
103800
103801 case IP6T_SO_GET_ENTRIES:
103802diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
103803index 6f187c8..34b367f 100644
103804--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
103805+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
103806@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
103807
103808 static int nf_ct_frag6_sysctl_register(struct net *net)
103809 {
103810- struct ctl_table *table;
103811+ ctl_table_no_const *table = NULL;
103812 struct ctl_table_header *hdr;
103813
103814- table = nf_ct_frag6_sysctl_table;
103815 if (!net_eq(net, &init_net)) {
103816- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
103817+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
103818 GFP_KERNEL);
103819 if (table == NULL)
103820 goto err_alloc;
103821@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103822 table[2].data = &net->nf_frag.frags.high_thresh;
103823 table[2].extra1 = &net->nf_frag.frags.low_thresh;
103824 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
103825- }
103826-
103827- hdr = register_net_sysctl(net, "net/netfilter", table);
103828+ hdr = register_net_sysctl(net, "net/netfilter", table);
103829+ } else
103830+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
103831 if (hdr == NULL)
103832 goto err_reg;
103833
103834@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103835 return 0;
103836
103837 err_reg:
103838- if (!net_eq(net, &init_net))
103839- kfree(table);
103840+ kfree(table);
103841 err_alloc:
103842 return -ENOMEM;
103843 }
103844diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
103845index 5b7a1ed..d9da205 100644
103846--- a/net/ipv6/ping.c
103847+++ b/net/ipv6/ping.c
103848@@ -240,6 +240,24 @@ static struct pernet_operations ping_v6_net_ops = {
103849 };
103850 #endif
103851
103852+static struct pingv6_ops real_pingv6_ops = {
103853+ .ipv6_recv_error = ipv6_recv_error,
103854+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
103855+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
103856+ .icmpv6_err_convert = icmpv6_err_convert,
103857+ .ipv6_icmp_error = ipv6_icmp_error,
103858+ .ipv6_chk_addr = ipv6_chk_addr,
103859+};
103860+
103861+static struct pingv6_ops dummy_pingv6_ops = {
103862+ .ipv6_recv_error = dummy_ipv6_recv_error,
103863+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
103864+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
103865+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
103866+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
103867+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
103868+};
103869+
103870 int __init pingv6_init(void)
103871 {
103872 #ifdef CONFIG_PROC_FS
103873@@ -247,13 +265,7 @@ int __init pingv6_init(void)
103874 if (ret)
103875 return ret;
103876 #endif
103877- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
103878- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
103879- pingv6_ops.ip6_datagram_recv_specific_ctl =
103880- ip6_datagram_recv_specific_ctl;
103881- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
103882- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
103883- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
103884+ pingv6_ops = &real_pingv6_ops;
103885 return inet6_register_protosw(&pingv6_protosw);
103886 }
103887
103888@@ -262,14 +274,9 @@ int __init pingv6_init(void)
103889 */
103890 void pingv6_exit(void)
103891 {
103892- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
103893- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
103894- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
103895- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
103896- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
103897- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
103898 #ifdef CONFIG_PROC_FS
103899 unregister_pernet_subsys(&ping_v6_net_ops);
103900 #endif
103901+ pingv6_ops = &dummy_pingv6_ops;
103902 inet6_unregister_protosw(&pingv6_protosw);
103903 }
103904diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
103905index 2d6f860..b0165f5 100644
103906--- a/net/ipv6/proc.c
103907+++ b/net/ipv6/proc.c
103908@@ -309,7 +309,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
103909 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
103910 goto proc_snmp6_fail;
103911
103912- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
103913+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
103914 if (!net->mib.proc_net_devsnmp6)
103915 goto proc_dev_snmp6_fail;
103916 return 0;
103917diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
103918index 39d4422..b0979547 100644
103919--- a/net/ipv6/raw.c
103920+++ b/net/ipv6/raw.c
103921@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
103922 {
103923 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
103924 skb_checksum_complete(skb)) {
103925- atomic_inc(&sk->sk_drops);
103926+ atomic_inc_unchecked(&sk->sk_drops);
103927 kfree_skb(skb);
103928 return NET_RX_DROP;
103929 }
103930@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103931 struct raw6_sock *rp = raw6_sk(sk);
103932
103933 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
103934- atomic_inc(&sk->sk_drops);
103935+ atomic_inc_unchecked(&sk->sk_drops);
103936 kfree_skb(skb);
103937 return NET_RX_DROP;
103938 }
103939@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103940
103941 if (inet->hdrincl) {
103942 if (skb_checksum_complete(skb)) {
103943- atomic_inc(&sk->sk_drops);
103944+ atomic_inc_unchecked(&sk->sk_drops);
103945 kfree_skb(skb);
103946 return NET_RX_DROP;
103947 }
103948@@ -608,7 +608,7 @@ out:
103949 return err;
103950 }
103951
103952-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
103953+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
103954 struct flowi6 *fl6, struct dst_entry **dstp,
103955 unsigned int flags)
103956 {
103957@@ -914,12 +914,15 @@ do_confirm:
103958 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
103959 char __user *optval, int optlen)
103960 {
103961+ struct icmp6_filter filter;
103962+
103963 switch (optname) {
103964 case ICMPV6_FILTER:
103965 if (optlen > sizeof(struct icmp6_filter))
103966 optlen = sizeof(struct icmp6_filter);
103967- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
103968+ if (copy_from_user(&filter, optval, optlen))
103969 return -EFAULT;
103970+ raw6_sk(sk)->filter = filter;
103971 return 0;
103972 default:
103973 return -ENOPROTOOPT;
103974@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103975 char __user *optval, int __user *optlen)
103976 {
103977 int len;
103978+ struct icmp6_filter filter;
103979
103980 switch (optname) {
103981 case ICMPV6_FILTER:
103982@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103983 len = sizeof(struct icmp6_filter);
103984 if (put_user(len, optlen))
103985 return -EFAULT;
103986- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
103987+ filter = raw6_sk(sk)->filter;
103988+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
103989 return -EFAULT;
103990 return 0;
103991 default:
103992diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
103993index c6557d9..173e728 100644
103994--- a/net/ipv6/reassembly.c
103995+++ b/net/ipv6/reassembly.c
103996@@ -627,12 +627,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
103997
103998 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
103999 {
104000- struct ctl_table *table;
104001+ ctl_table_no_const *table = NULL;
104002 struct ctl_table_header *hdr;
104003
104004- table = ip6_frags_ns_ctl_table;
104005 if (!net_eq(net, &init_net)) {
104006- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104007+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104008 if (table == NULL)
104009 goto err_alloc;
104010
104011@@ -646,9 +645,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104012 /* Don't export sysctls to unprivileged users */
104013 if (net->user_ns != &init_user_ns)
104014 table[0].procname = NULL;
104015- }
104016+ hdr = register_net_sysctl(net, "net/ipv6", table);
104017+ } else
104018+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
104019
104020- hdr = register_net_sysctl(net, "net/ipv6", table);
104021 if (hdr == NULL)
104022 goto err_reg;
104023
104024@@ -656,8 +656,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104025 return 0;
104026
104027 err_reg:
104028- if (!net_eq(net, &init_net))
104029- kfree(table);
104030+ kfree(table);
104031 err_alloc:
104032 return -ENOMEM;
104033 }
104034diff --git a/net/ipv6/route.c b/net/ipv6/route.c
104035index bafde82..af2c91f 100644
104036--- a/net/ipv6/route.c
104037+++ b/net/ipv6/route.c
104038@@ -2967,7 +2967,7 @@ struct ctl_table ipv6_route_table_template[] = {
104039
104040 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
104041 {
104042- struct ctl_table *table;
104043+ ctl_table_no_const *table;
104044
104045 table = kmemdup(ipv6_route_table_template,
104046 sizeof(ipv6_route_table_template),
104047diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
104048index ca1c7c4..37fba59 100644
104049--- a/net/ipv6/sit.c
104050+++ b/net/ipv6/sit.c
104051@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
104052 static void ipip6_dev_free(struct net_device *dev);
104053 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
104054 __be32 *v4dst);
104055-static struct rtnl_link_ops sit_link_ops __read_mostly;
104056+static struct rtnl_link_ops sit_link_ops;
104057
104058 static int sit_net_id __read_mostly;
104059 struct sit_net {
104060@@ -484,11 +484,11 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
104061 */
104062 static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
104063 {
104064- const struct iphdr *iph = (const struct iphdr *) skb->data;
104065+ int ihl = ((const struct iphdr *)skb->data)->ihl*4;
104066 struct rt6_info *rt;
104067 struct sk_buff *skb2;
104068
104069- if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8))
104070+ if (!pskb_may_pull(skb, ihl + sizeof(struct ipv6hdr) + 8))
104071 return 1;
104072
104073 skb2 = skb_clone(skb, GFP_ATOMIC);
104074@@ -497,7 +497,7 @@ static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
104075 return 1;
104076
104077 skb_dst_drop(skb2);
104078- skb_pull(skb2, iph->ihl * 4);
104079+ skb_pull(skb2, ihl);
104080 skb_reset_network_header(skb2);
104081
104082 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
104083@@ -1659,7 +1659,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
104084 unregister_netdevice_queue(dev, head);
104085 }
104086
104087-static struct rtnl_link_ops sit_link_ops __read_mostly = {
104088+static struct rtnl_link_ops sit_link_ops = {
104089 .kind = "sit",
104090 .maxtype = IFLA_IPTUN_MAX,
104091 .policy = ipip6_policy,
104092diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
104093index 0c56c93..ece50df 100644
104094--- a/net/ipv6/sysctl_net_ipv6.c
104095+++ b/net/ipv6/sysctl_net_ipv6.c
104096@@ -68,7 +68,7 @@ static struct ctl_table ipv6_rotable[] = {
104097
104098 static int __net_init ipv6_sysctl_net_init(struct net *net)
104099 {
104100- struct ctl_table *ipv6_table;
104101+ ctl_table_no_const *ipv6_table;
104102 struct ctl_table *ipv6_route_table;
104103 struct ctl_table *ipv6_icmp_table;
104104 int err;
104105diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104106index 264c0f2..b6512c6 100644
104107--- a/net/ipv6/tcp_ipv6.c
104108+++ b/net/ipv6/tcp_ipv6.c
104109@@ -102,6 +102,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104110 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
104111 }
104112
104113+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104114+extern int grsec_enable_blackhole;
104115+#endif
104116+
104117 static void tcp_v6_hash(struct sock *sk)
104118 {
104119 if (sk->sk_state != TCP_CLOSE) {
104120@@ -1333,6 +1337,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104121 return 0;
104122
104123 reset:
104124+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104125+ if (!grsec_enable_blackhole)
104126+#endif
104127 tcp_v6_send_reset(sk, skb);
104128 discard:
104129 if (opt_skb)
104130@@ -1417,12 +1424,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104131 TCP_SKB_CB(skb)->sacked = 0;
104132
104133 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
104134- if (!sk)
104135+ if (!sk) {
104136+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104137+ ret = 1;
104138+#endif
104139 goto no_tcp_socket;
104140+ }
104141
104142 process:
104143- if (sk->sk_state == TCP_TIME_WAIT)
104144+ if (sk->sk_state == TCP_TIME_WAIT) {
104145+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104146+ ret = 2;
104147+#endif
104148 goto do_time_wait;
104149+ }
104150
104151 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104152 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104153@@ -1479,6 +1494,10 @@ csum_error:
104154 bad_packet:
104155 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104156 } else {
104157+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104158+ if (!grsec_enable_blackhole || (ret == 1 &&
104159+ (skb->dev->flags & IFF_LOOPBACK)))
104160+#endif
104161 tcp_v6_send_reset(NULL, skb);
104162 }
104163
104164diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
104165index 4836af8..0e52bbd 100644
104166--- a/net/ipv6/udp.c
104167+++ b/net/ipv6/udp.c
104168@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
104169 udp_ipv6_hash_secret + net_hash_mix(net));
104170 }
104171
104172+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104173+extern int grsec_enable_blackhole;
104174+#endif
104175+
104176 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104177 {
104178 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104179@@ -434,7 +438,7 @@ try_again:
104180 if (unlikely(err)) {
104181 trace_kfree_skb(skb, udpv6_recvmsg);
104182 if (!peeked) {
104183- atomic_inc(&sk->sk_drops);
104184+ atomic_inc_unchecked(&sk->sk_drops);
104185 if (is_udp4)
104186 UDP_INC_STATS_USER(sock_net(sk),
104187 UDP_MIB_INERRORS,
104188@@ -701,7 +705,7 @@ csum_error:
104189 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104190 drop:
104191 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104192- atomic_inc(&sk->sk_drops);
104193+ atomic_inc_unchecked(&sk->sk_drops);
104194 kfree_skb(skb);
104195 return -1;
104196 }
104197@@ -740,7 +744,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104198 if (likely(skb1 == NULL))
104199 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104200 if (!skb1) {
104201- atomic_inc(&sk->sk_drops);
104202+ atomic_inc_unchecked(&sk->sk_drops);
104203 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104204 IS_UDPLITE(sk));
104205 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104206@@ -915,6 +919,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104207 goto csum_error;
104208
104209 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104210+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104211+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104212+#endif
104213 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104214
104215 kfree_skb(skb);
104216diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104217index 2a0bbda..fcd5396 100644
104218--- a/net/ipv6/xfrm6_policy.c
104219+++ b/net/ipv6/xfrm6_policy.c
104220@@ -130,8 +130,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104221 {
104222 struct flowi6 *fl6 = &fl->u.ip6;
104223 int onlyproto = 0;
104224- u16 offset = skb_network_header_len(skb);
104225 const struct ipv6hdr *hdr = ipv6_hdr(skb);
104226+ u16 offset = sizeof(*hdr);
104227 struct ipv6_opt_hdr *exthdr;
104228 const unsigned char *nh = skb_network_header(skb);
104229 u8 nexthdr = nh[IP6CB(skb)->nhoff];
104230@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104231 case IPPROTO_DCCP:
104232 if (!onlyproto && (nh + offset + 4 < skb->data ||
104233 pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
104234- __be16 *ports = (__be16 *)exthdr;
104235+ __be16 *ports;
104236
104237+ nh = skb_network_header(skb);
104238+ ports = (__be16 *)(nh + offset);
104239 fl6->fl6_sport = ports[!!reverse];
104240 fl6->fl6_dport = ports[!reverse];
104241 }
104242@@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104243
104244 case IPPROTO_ICMPV6:
104245 if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
104246- u8 *icmp = (u8 *)exthdr;
104247+ u8 *icmp;
104248
104249+ nh = skb_network_header(skb);
104250+ icmp = (u8 *)(nh + offset);
104251 fl6->fl6_icmp_type = icmp[0];
104252 fl6->fl6_icmp_code = icmp[1];
104253 }
104254@@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104255 case IPPROTO_MH:
104256 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
104257 struct ip6_mh *mh;
104258- mh = (struct ip6_mh *)exthdr;
104259
104260+ nh = skb_network_header(skb);
104261+ mh = (struct ip6_mh *)(nh + offset);
104262 fl6->fl6_mh_type = mh->ip6mh_type;
104263 }
104264 fl6->flowi6_proto = nexthdr;
104265@@ -212,11 +217,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104266 }
104267 }
104268
104269-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104270+static int xfrm6_garbage_collect(struct dst_ops *ops)
104271 {
104272 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104273
104274- xfrm6_policy_afinfo.garbage_collect(net);
104275+ xfrm_garbage_collect_deferred(net);
104276 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104277 }
104278
104279@@ -329,19 +334,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104280
104281 static int __net_init xfrm6_net_init(struct net *net)
104282 {
104283- struct ctl_table *table;
104284+ ctl_table_no_const *table = NULL;
104285 struct ctl_table_header *hdr;
104286
104287- table = xfrm6_policy_table;
104288 if (!net_eq(net, &init_net)) {
104289- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104290+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104291 if (!table)
104292 goto err_alloc;
104293
104294 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104295- }
104296+ hdr = register_net_sysctl(net, "net/ipv6", table);
104297+ } else
104298+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104299
104300- hdr = register_net_sysctl(net, "net/ipv6", table);
104301 if (!hdr)
104302 goto err_reg;
104303
104304@@ -349,8 +354,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104305 return 0;
104306
104307 err_reg:
104308- if (!net_eq(net, &init_net))
104309- kfree(table);
104310+ kfree(table);
104311 err_alloc:
104312 return -ENOMEM;
104313 }
104314diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104315index e15c16a..7cf07aa 100644
104316--- a/net/ipx/ipx_proc.c
104317+++ b/net/ipx/ipx_proc.c
104318@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104319 struct proc_dir_entry *p;
104320 int rc = -ENOMEM;
104321
104322- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104323+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104324
104325 if (!ipx_proc_dir)
104326 goto out;
104327diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104328index 61ceb4c..e788eb8 100644
104329--- a/net/irda/ircomm/ircomm_tty.c
104330+++ b/net/irda/ircomm/ircomm_tty.c
104331@@ -317,10 +317,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104332 add_wait_queue(&port->open_wait, &wait);
104333
104334 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
104335- __FILE__, __LINE__, tty->driver->name, port->count);
104336+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104337
104338 spin_lock_irqsave(&port->lock, flags);
104339- port->count--;
104340+ atomic_dec(&port->count);
104341 port->blocked_open++;
104342 spin_unlock_irqrestore(&port->lock, flags);
104343
104344@@ -355,7 +355,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104345 }
104346
104347 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
104348- __FILE__, __LINE__, tty->driver->name, port->count);
104349+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104350
104351 schedule();
104352 }
104353@@ -365,12 +365,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104354
104355 spin_lock_irqsave(&port->lock, flags);
104356 if (!tty_hung_up_p(filp))
104357- port->count++;
104358+ atomic_inc(&port->count);
104359 port->blocked_open--;
104360 spin_unlock_irqrestore(&port->lock, flags);
104361
104362 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
104363- __FILE__, __LINE__, tty->driver->name, port->count);
104364+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104365
104366 if (!retval)
104367 port->flags |= ASYNC_NORMAL_ACTIVE;
104368@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
104369
104370 /* ++ is not atomic, so this should be protected - Jean II */
104371 spin_lock_irqsave(&self->port.lock, flags);
104372- self->port.count++;
104373+ atomic_inc(&self->port.count);
104374 spin_unlock_irqrestore(&self->port.lock, flags);
104375 tty_port_tty_set(&self->port, tty);
104376
104377 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
104378- self->line, self->port.count);
104379+ self->line, atomic_read(&self->port.count));
104380
104381 /* Not really used by us, but lets do it anyway */
104382 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
104383@@ -985,7 +985,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
104384 tty_kref_put(port->tty);
104385 }
104386 port->tty = NULL;
104387- port->count = 0;
104388+ atomic_set(&port->count, 0);
104389 spin_unlock_irqrestore(&port->lock, flags);
104390
104391 wake_up_interruptible(&port->open_wait);
104392@@ -1342,7 +1342,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
104393 seq_putc(m, '\n');
104394
104395 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
104396- seq_printf(m, "Open count: %d\n", self->port.count);
104397+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
104398 seq_printf(m, "Max data size: %d\n", self->max_data_size);
104399 seq_printf(m, "Max header size: %d\n", self->max_header_size);
104400
104401diff --git a/net/irda/irproc.c b/net/irda/irproc.c
104402index b9ac598..f88cc56 100644
104403--- a/net/irda/irproc.c
104404+++ b/net/irda/irproc.c
104405@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
104406 {
104407 int i;
104408
104409- proc_irda = proc_mkdir("irda", init_net.proc_net);
104410+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
104411 if (proc_irda == NULL)
104412 return;
104413
104414diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
104415index a089b6b..3ca3b60 100644
104416--- a/net/iucv/af_iucv.c
104417+++ b/net/iucv/af_iucv.c
104418@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
104419 {
104420 char name[12];
104421
104422- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
104423+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104424 while (__iucv_get_sock_by_name(name)) {
104425 sprintf(name, "%08x",
104426- atomic_inc_return(&iucv_sk_list.autobind_name));
104427+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104428 }
104429 memcpy(iucv->src_name, name, 8);
104430 }
104431diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
104432index da78793..bdd78cf 100644
104433--- a/net/iucv/iucv.c
104434+++ b/net/iucv/iucv.c
104435@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
104436 return NOTIFY_OK;
104437 }
104438
104439-static struct notifier_block __refdata iucv_cpu_notifier = {
104440+static struct notifier_block iucv_cpu_notifier = {
104441 .notifier_call = iucv_cpu_notify,
104442 };
104443
104444diff --git a/net/key/af_key.c b/net/key/af_key.c
104445index 1847ec4..26ef732 100644
104446--- a/net/key/af_key.c
104447+++ b/net/key/af_key.c
104448@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
104449 static u32 get_acqseq(void)
104450 {
104451 u32 res;
104452- static atomic_t acqseq;
104453+ static atomic_unchecked_t acqseq;
104454
104455 do {
104456- res = atomic_inc_return(&acqseq);
104457+ res = atomic_inc_return_unchecked(&acqseq);
104458 } while (!res);
104459 return res;
104460 }
104461diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
104462index edb78e6..8dc654a 100644
104463--- a/net/l2tp/l2tp_eth.c
104464+++ b/net/l2tp/l2tp_eth.c
104465@@ -42,12 +42,12 @@ struct l2tp_eth {
104466 struct sock *tunnel_sock;
104467 struct l2tp_session *session;
104468 struct list_head list;
104469- atomic_long_t tx_bytes;
104470- atomic_long_t tx_packets;
104471- atomic_long_t tx_dropped;
104472- atomic_long_t rx_bytes;
104473- atomic_long_t rx_packets;
104474- atomic_long_t rx_errors;
104475+ atomic_long_unchecked_t tx_bytes;
104476+ atomic_long_unchecked_t tx_packets;
104477+ atomic_long_unchecked_t tx_dropped;
104478+ atomic_long_unchecked_t rx_bytes;
104479+ atomic_long_unchecked_t rx_packets;
104480+ atomic_long_unchecked_t rx_errors;
104481 };
104482
104483 /* via l2tp_session_priv() */
104484@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
104485 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
104486
104487 if (likely(ret == NET_XMIT_SUCCESS)) {
104488- atomic_long_add(len, &priv->tx_bytes);
104489- atomic_long_inc(&priv->tx_packets);
104490+ atomic_long_add_unchecked(len, &priv->tx_bytes);
104491+ atomic_long_inc_unchecked(&priv->tx_packets);
104492 } else {
104493- atomic_long_inc(&priv->tx_dropped);
104494+ atomic_long_inc_unchecked(&priv->tx_dropped);
104495 }
104496 return NETDEV_TX_OK;
104497 }
104498@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
104499 {
104500 struct l2tp_eth *priv = netdev_priv(dev);
104501
104502- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
104503- stats->tx_packets = atomic_long_read(&priv->tx_packets);
104504- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
104505- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
104506- stats->rx_packets = atomic_long_read(&priv->rx_packets);
104507- stats->rx_errors = atomic_long_read(&priv->rx_errors);
104508+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
104509+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
104510+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
104511+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
104512+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
104513+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
104514 return stats;
104515 }
104516
104517@@ -166,15 +166,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
104518 nf_reset(skb);
104519
104520 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
104521- atomic_long_inc(&priv->rx_packets);
104522- atomic_long_add(data_len, &priv->rx_bytes);
104523+ atomic_long_inc_unchecked(&priv->rx_packets);
104524+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
104525 } else {
104526- atomic_long_inc(&priv->rx_errors);
104527+ atomic_long_inc_unchecked(&priv->rx_errors);
104528 }
104529 return;
104530
104531 error:
104532- atomic_long_inc(&priv->rx_errors);
104533+ atomic_long_inc_unchecked(&priv->rx_errors);
104534 kfree_skb(skb);
104535 }
104536
104537diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
104538index 1a3c7e0..80f8b0c 100644
104539--- a/net/llc/llc_proc.c
104540+++ b/net/llc/llc_proc.c
104541@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
104542 int rc = -ENOMEM;
104543 struct proc_dir_entry *p;
104544
104545- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
104546+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
104547 if (!llc_proc_dir)
104548 goto out;
104549
104550diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
104551index 927b4ea..88a30e2 100644
104552--- a/net/mac80211/cfg.c
104553+++ b/net/mac80211/cfg.c
104554@@ -540,7 +540,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
104555 ret = ieee80211_vif_use_channel(sdata, chandef,
104556 IEEE80211_CHANCTX_EXCLUSIVE);
104557 }
104558- } else if (local->open_count == local->monitors) {
104559+ } else if (local_read(&local->open_count) == local->monitors) {
104560 local->_oper_chandef = *chandef;
104561 ieee80211_hw_config(local, 0);
104562 }
104563@@ -3286,7 +3286,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
104564 else
104565 local->probe_req_reg--;
104566
104567- if (!local->open_count)
104568+ if (!local_read(&local->open_count))
104569 break;
104570
104571 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
104572@@ -3420,8 +3420,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
104573 if (chanctx_conf) {
104574 *chandef = chanctx_conf->def;
104575 ret = 0;
104576- } else if (local->open_count > 0 &&
104577- local->open_count == local->monitors &&
104578+ } else if (local_read(&local->open_count) > 0 &&
104579+ local_read(&local->open_count) == local->monitors &&
104580 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
104581 if (local->use_chanctx)
104582 *chandef = local->monitor_chandef;
104583diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
104584index 5d102b5..6199fca 100644
104585--- a/net/mac80211/ieee80211_i.h
104586+++ b/net/mac80211/ieee80211_i.h
104587@@ -28,6 +28,7 @@
104588 #include <net/ieee80211_radiotap.h>
104589 #include <net/cfg80211.h>
104590 #include <net/mac80211.h>
104591+#include <asm/local.h>
104592 #include "key.h"
104593 #include "sta_info.h"
104594 #include "debug.h"
104595@@ -1055,7 +1056,7 @@ struct ieee80211_local {
104596 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
104597 spinlock_t queue_stop_reason_lock;
104598
104599- int open_count;
104600+ local_t open_count;
104601 int monitors, cooked_mntrs;
104602 /* number of interfaces with corresponding FIF_ flags */
104603 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
104604diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
104605index 3538e5e..0aa7879 100644
104606--- a/net/mac80211/iface.c
104607+++ b/net/mac80211/iface.c
104608@@ -531,7 +531,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104609 break;
104610 }
104611
104612- if (local->open_count == 0) {
104613+ if (local_read(&local->open_count) == 0) {
104614 res = drv_start(local);
104615 if (res)
104616 goto err_del_bss;
104617@@ -578,7 +578,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104618 res = drv_add_interface(local, sdata);
104619 if (res)
104620 goto err_stop;
104621- } else if (local->monitors == 0 && local->open_count == 0) {
104622+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
104623 res = ieee80211_add_virtual_monitor(local);
104624 if (res)
104625 goto err_stop;
104626@@ -687,7 +687,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104627 atomic_inc(&local->iff_promiscs);
104628
104629 if (coming_up)
104630- local->open_count++;
104631+ local_inc(&local->open_count);
104632
104633 if (hw_reconf_flags)
104634 ieee80211_hw_config(local, hw_reconf_flags);
104635@@ -725,7 +725,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104636 err_del_interface:
104637 drv_remove_interface(local, sdata);
104638 err_stop:
104639- if (!local->open_count)
104640+ if (!local_read(&local->open_count))
104641 drv_stop(local);
104642 err_del_bss:
104643 sdata->bss = NULL;
104644@@ -891,7 +891,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104645 }
104646
104647 if (going_down)
104648- local->open_count--;
104649+ local_dec(&local->open_count);
104650
104651 switch (sdata->vif.type) {
104652 case NL80211_IFTYPE_AP_VLAN:
104653@@ -952,7 +952,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104654 }
104655 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
104656
104657- if (local->open_count == 0)
104658+ if (local_read(&local->open_count) == 0)
104659 ieee80211_clear_tx_pending(local);
104660
104661 /*
104662@@ -995,7 +995,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104663 if (cancel_scan)
104664 flush_delayed_work(&local->scan_work);
104665
104666- if (local->open_count == 0) {
104667+ if (local_read(&local->open_count) == 0) {
104668 ieee80211_stop_device(local);
104669
104670 /* no reconfiguring after stop! */
104671@@ -1006,7 +1006,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104672 ieee80211_configure_filter(local);
104673 ieee80211_hw_config(local, hw_reconf_flags);
104674
104675- if (local->monitors == local->open_count)
104676+ if (local->monitors == local_read(&local->open_count))
104677 ieee80211_add_virtual_monitor(local);
104678 }
104679
104680diff --git a/net/mac80211/main.c b/net/mac80211/main.c
104681index e0ab432..36b7b94 100644
104682--- a/net/mac80211/main.c
104683+++ b/net/mac80211/main.c
104684@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
104685 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
104686 IEEE80211_CONF_CHANGE_POWER);
104687
104688- if (changed && local->open_count) {
104689+ if (changed && local_read(&local->open_count)) {
104690 ret = drv_config(local, changed);
104691 /*
104692 * Goal:
104693diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
104694index 4c5192e..04cc0d8 100644
104695--- a/net/mac80211/pm.c
104696+++ b/net/mac80211/pm.c
104697@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104698 struct ieee80211_sub_if_data *sdata;
104699 struct sta_info *sta;
104700
104701- if (!local->open_count)
104702+ if (!local_read(&local->open_count))
104703 goto suspend;
104704
104705 ieee80211_scan_cancel(local);
104706@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104707 cancel_work_sync(&local->dynamic_ps_enable_work);
104708 del_timer_sync(&local->dynamic_ps_timer);
104709
104710- local->wowlan = wowlan && local->open_count;
104711+ local->wowlan = wowlan && local_read(&local->open_count);
104712 if (local->wowlan) {
104713 int err = drv_suspend(local, wowlan);
104714 if (err < 0) {
104715@@ -125,7 +125,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104716 WARN_ON(!list_empty(&local->chanctx_list));
104717
104718 /* stop hardware - this must stop RX */
104719- if (local->open_count)
104720+ if (local_read(&local->open_count))
104721 ieee80211_stop_device(local);
104722
104723 suspend:
104724diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
104725index 6081329..ab23834 100644
104726--- a/net/mac80211/rate.c
104727+++ b/net/mac80211/rate.c
104728@@ -720,7 +720,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
104729
104730 ASSERT_RTNL();
104731
104732- if (local->open_count)
104733+ if (local_read(&local->open_count))
104734 return -EBUSY;
104735
104736 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
104737diff --git a/net/mac80211/util.c b/net/mac80211/util.c
104738index 725af7a..a21a20a 100644
104739--- a/net/mac80211/util.c
104740+++ b/net/mac80211/util.c
104741@@ -1643,7 +1643,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104742 }
104743 #endif
104744 /* everything else happens only if HW was up & running */
104745- if (!local->open_count)
104746+ if (!local_read(&local->open_count))
104747 goto wake_up;
104748
104749 /*
104750@@ -1869,7 +1869,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104751 local->in_reconfig = false;
104752 barrier();
104753
104754- if (local->monitors == local->open_count && local->monitors > 0)
104755+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
104756 ieee80211_add_virtual_monitor(local);
104757
104758 /*
104759diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
104760index 6d77cce..36e2fc3 100644
104761--- a/net/netfilter/Kconfig
104762+++ b/net/netfilter/Kconfig
104763@@ -1096,6 +1096,16 @@ config NETFILTER_XT_MATCH_ESP
104764
104765 To compile it as a module, choose M here. If unsure, say N.
104766
104767+config NETFILTER_XT_MATCH_GRADM
104768+ tristate '"gradm" match support'
104769+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
104770+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
104771+ ---help---
104772+ The gradm match allows to match on grsecurity RBAC being enabled.
104773+ It is useful when iptables rules are applied early on bootup to
104774+ prevent connections to the machine (except from a trusted host)
104775+ while the RBAC system is disabled.
104776+
104777 config NETFILTER_XT_MATCH_HASHLIMIT
104778 tristate '"hashlimit" match support'
104779 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
104780diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
104781index fad5fdb..ba3672a 100644
104782--- a/net/netfilter/Makefile
104783+++ b/net/netfilter/Makefile
104784@@ -136,6 +136,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
104785 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
104786 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
104787 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
104788+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
104789 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
104790 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
104791 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
104792diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
104793index 6582dce..a911da7 100644
104794--- a/net/netfilter/ipset/ip_set_core.c
104795+++ b/net/netfilter/ipset/ip_set_core.c
104796@@ -1921,7 +1921,7 @@ done:
104797 return ret;
104798 }
104799
104800-static struct nf_sockopt_ops so_set __read_mostly = {
104801+static struct nf_sockopt_ops so_set = {
104802 .pf = PF_INET,
104803 .get_optmin = SO_IP_SET,
104804 .get_optmax = SO_IP_SET + 1,
104805diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
104806index 610e19c..08d0c3f 100644
104807--- a/net/netfilter/ipvs/ip_vs_conn.c
104808+++ b/net/netfilter/ipvs/ip_vs_conn.c
104809@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
104810 /* Increase the refcnt counter of the dest */
104811 ip_vs_dest_hold(dest);
104812
104813- conn_flags = atomic_read(&dest->conn_flags);
104814+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
104815 if (cp->protocol != IPPROTO_UDP)
104816 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
104817 flags = cp->flags;
104818@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
104819
104820 cp->control = NULL;
104821 atomic_set(&cp->n_control, 0);
104822- atomic_set(&cp->in_pkts, 0);
104823+ atomic_set_unchecked(&cp->in_pkts, 0);
104824
104825 cp->packet_xmit = NULL;
104826 cp->app = NULL;
104827@@ -1187,7 +1187,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
104828
104829 /* Don't drop the entry if its number of incoming packets is not
104830 located in [0, 8] */
104831- i = atomic_read(&cp->in_pkts);
104832+ i = atomic_read_unchecked(&cp->in_pkts);
104833 if (i > 8 || i < 0) return 0;
104834
104835 if (!todrop_rate[i]) return 0;
104836diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
104837index 5c34e8d..0d8eb7f 100644
104838--- a/net/netfilter/ipvs/ip_vs_core.c
104839+++ b/net/netfilter/ipvs/ip_vs_core.c
104840@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
104841 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
104842 /* do not touch skb anymore */
104843
104844- atomic_inc(&cp->in_pkts);
104845+ atomic_inc_unchecked(&cp->in_pkts);
104846 ip_vs_conn_put(cp);
104847 return ret;
104848 }
104849@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
104850 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
104851 pkts = sysctl_sync_threshold(ipvs);
104852 else
104853- pkts = atomic_add_return(1, &cp->in_pkts);
104854+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104855
104856 if (ipvs->sync_state & IP_VS_STATE_MASTER)
104857 ip_vs_sync_conn(net, cp, pkts);
104858diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
104859index fd3f444..ab28fa24 100644
104860--- a/net/netfilter/ipvs/ip_vs_ctl.c
104861+++ b/net/netfilter/ipvs/ip_vs_ctl.c
104862@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
104863 */
104864 ip_vs_rs_hash(ipvs, dest);
104865 }
104866- atomic_set(&dest->conn_flags, conn_flags);
104867+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
104868
104869 /* bind the service */
104870 old_svc = rcu_dereference_protected(dest->svc, 1);
104871@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
104872 * align with netns init in ip_vs_control_net_init()
104873 */
104874
104875-static struct ctl_table vs_vars[] = {
104876+static ctl_table_no_const vs_vars[] __read_only = {
104877 {
104878 .procname = "amemthresh",
104879 .maxlen = sizeof(int),
104880@@ -1989,7 +1989,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104881 " %-7s %-6d %-10d %-10d\n",
104882 &dest->addr.in6,
104883 ntohs(dest->port),
104884- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104885+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104886 atomic_read(&dest->weight),
104887 atomic_read(&dest->activeconns),
104888 atomic_read(&dest->inactconns));
104889@@ -2000,7 +2000,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104890 "%-7s %-6d %-10d %-10d\n",
104891 ntohl(dest->addr.ip),
104892 ntohs(dest->port),
104893- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104894+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104895 atomic_read(&dest->weight),
104896 atomic_read(&dest->activeconns),
104897 atomic_read(&dest->inactconns));
104898@@ -2471,7 +2471,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
104899
104900 entry.addr = dest->addr.ip;
104901 entry.port = dest->port;
104902- entry.conn_flags = atomic_read(&dest->conn_flags);
104903+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
104904 entry.weight = atomic_read(&dest->weight);
104905 entry.u_threshold = dest->u_threshold;
104906 entry.l_threshold = dest->l_threshold;
104907@@ -3010,7 +3010,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
104908 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
104909 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
104910 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
104911- (atomic_read(&dest->conn_flags) &
104912+ (atomic_read_unchecked(&dest->conn_flags) &
104913 IP_VS_CONN_F_FWD_MASK)) ||
104914 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
104915 atomic_read(&dest->weight)) ||
104916@@ -3600,7 +3600,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
104917 {
104918 int idx;
104919 struct netns_ipvs *ipvs = net_ipvs(net);
104920- struct ctl_table *tbl;
104921+ ctl_table_no_const *tbl;
104922
104923 atomic_set(&ipvs->dropentry, 0);
104924 spin_lock_init(&ipvs->dropentry_lock);
104925diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
104926index 547ff33..c8c8117 100644
104927--- a/net/netfilter/ipvs/ip_vs_lblc.c
104928+++ b/net/netfilter/ipvs/ip_vs_lblc.c
104929@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
104930 * IPVS LBLC sysctl table
104931 */
104932 #ifdef CONFIG_SYSCTL
104933-static struct ctl_table vs_vars_table[] = {
104934+static ctl_table_no_const vs_vars_table[] __read_only = {
104935 {
104936 .procname = "lblc_expiration",
104937 .data = NULL,
104938diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
104939index 3f21a2f..a112e85 100644
104940--- a/net/netfilter/ipvs/ip_vs_lblcr.c
104941+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
104942@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
104943 * IPVS LBLCR sysctl table
104944 */
104945
104946-static struct ctl_table vs_vars_table[] = {
104947+static ctl_table_no_const vs_vars_table[] __read_only = {
104948 {
104949 .procname = "lblcr_expiration",
104950 .data = NULL,
104951diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
104952index eadffb2..c2feeae 100644
104953--- a/net/netfilter/ipvs/ip_vs_sync.c
104954+++ b/net/netfilter/ipvs/ip_vs_sync.c
104955@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
104956 cp = cp->control;
104957 if (cp) {
104958 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104959- pkts = atomic_add_return(1, &cp->in_pkts);
104960+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104961 else
104962 pkts = sysctl_sync_threshold(ipvs);
104963 ip_vs_sync_conn(net, cp->control, pkts);
104964@@ -771,7 +771,7 @@ control:
104965 if (!cp)
104966 return;
104967 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104968- pkts = atomic_add_return(1, &cp->in_pkts);
104969+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104970 else
104971 pkts = sysctl_sync_threshold(ipvs);
104972 goto sloop;
104973@@ -894,7 +894,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
104974
104975 if (opt)
104976 memcpy(&cp->in_seq, opt, sizeof(*opt));
104977- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104978+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104979 cp->state = state;
104980 cp->old_state = cp->state;
104981 /*
104982diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
104983index 56896a4..dfe3806 100644
104984--- a/net/netfilter/ipvs/ip_vs_xmit.c
104985+++ b/net/netfilter/ipvs/ip_vs_xmit.c
104986@@ -1114,7 +1114,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
104987 else
104988 rc = NF_ACCEPT;
104989 /* do not touch skb anymore */
104990- atomic_inc(&cp->in_pkts);
104991+ atomic_inc_unchecked(&cp->in_pkts);
104992 goto out;
104993 }
104994
104995@@ -1206,7 +1206,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
104996 else
104997 rc = NF_ACCEPT;
104998 /* do not touch skb anymore */
104999- atomic_inc(&cp->in_pkts);
105000+ atomic_inc_unchecked(&cp->in_pkts);
105001 goto out;
105002 }
105003
105004diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
105005index a4b5e2a..13b1de3 100644
105006--- a/net/netfilter/nf_conntrack_acct.c
105007+++ b/net/netfilter/nf_conntrack_acct.c
105008@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
105009 #ifdef CONFIG_SYSCTL
105010 static int nf_conntrack_acct_init_sysctl(struct net *net)
105011 {
105012- struct ctl_table *table;
105013+ ctl_table_no_const *table;
105014
105015 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
105016 GFP_KERNEL);
105017diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
105018index ec94ba9..52f8f3d 100644
105019--- a/net/netfilter/nf_conntrack_core.c
105020+++ b/net/netfilter/nf_conntrack_core.c
105021@@ -1741,6 +1741,10 @@ void nf_conntrack_init_end(void)
105022 #define DYING_NULLS_VAL ((1<<30)+1)
105023 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
105024
105025+#ifdef CONFIG_GRKERNSEC_HIDESYM
105026+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
105027+#endif
105028+
105029 int nf_conntrack_init_net(struct net *net)
105030 {
105031 int ret = -ENOMEM;
105032@@ -1766,7 +1770,11 @@ int nf_conntrack_init_net(struct net *net)
105033 if (!net->ct.stat)
105034 goto err_pcpu_lists;
105035
105036+#ifdef CONFIG_GRKERNSEC_HIDESYM
105037+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
105038+#else
105039 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
105040+#endif
105041 if (!net->ct.slabname)
105042 goto err_slabname;
105043
105044diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
105045index 4e78c57..ec8fb74 100644
105046--- a/net/netfilter/nf_conntrack_ecache.c
105047+++ b/net/netfilter/nf_conntrack_ecache.c
105048@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
105049 #ifdef CONFIG_SYSCTL
105050 static int nf_conntrack_event_init_sysctl(struct net *net)
105051 {
105052- struct ctl_table *table;
105053+ ctl_table_no_const *table;
105054
105055 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
105056 GFP_KERNEL);
105057diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
105058index 5b3eae7..dd4b8fe 100644
105059--- a/net/netfilter/nf_conntrack_helper.c
105060+++ b/net/netfilter/nf_conntrack_helper.c
105061@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
105062
105063 static int nf_conntrack_helper_init_sysctl(struct net *net)
105064 {
105065- struct ctl_table *table;
105066+ ctl_table_no_const *table;
105067
105068 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
105069 GFP_KERNEL);
105070diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
105071index b65d586..beec902 100644
105072--- a/net/netfilter/nf_conntrack_proto.c
105073+++ b/net/netfilter/nf_conntrack_proto.c
105074@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
105075
105076 static void
105077 nf_ct_unregister_sysctl(struct ctl_table_header **header,
105078- struct ctl_table **table,
105079+ ctl_table_no_const **table,
105080 unsigned int users)
105081 {
105082 if (users > 0)
105083diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
105084index f641751..d3c5b51 100644
105085--- a/net/netfilter/nf_conntrack_standalone.c
105086+++ b/net/netfilter/nf_conntrack_standalone.c
105087@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
105088
105089 static int nf_conntrack_standalone_init_sysctl(struct net *net)
105090 {
105091- struct ctl_table *table;
105092+ ctl_table_no_const *table;
105093
105094 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
105095 GFP_KERNEL);
105096diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
105097index 7a394df..bd91a8a 100644
105098--- a/net/netfilter/nf_conntrack_timestamp.c
105099+++ b/net/netfilter/nf_conntrack_timestamp.c
105100@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
105101 #ifdef CONFIG_SYSCTL
105102 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
105103 {
105104- struct ctl_table *table;
105105+ ctl_table_no_const *table;
105106
105107 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
105108 GFP_KERNEL);
105109diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
105110index daad602..384be13 100644
105111--- a/net/netfilter/nf_log.c
105112+++ b/net/netfilter/nf_log.c
105113@@ -353,7 +353,7 @@ static const struct file_operations nflog_file_ops = {
105114
105115 #ifdef CONFIG_SYSCTL
105116 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
105117-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
105118+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
105119
105120 static int nf_log_proc_dostring(struct ctl_table *table, int write,
105121 void __user *buffer, size_t *lenp, loff_t *ppos)
105122@@ -384,14 +384,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
105123 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
105124 mutex_unlock(&nf_log_mutex);
105125 } else {
105126+ ctl_table_no_const nf_log_table = *table;
105127+
105128 mutex_lock(&nf_log_mutex);
105129 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
105130 lockdep_is_held(&nf_log_mutex));
105131 if (!logger)
105132- table->data = "NONE";
105133+ nf_log_table.data = "NONE";
105134 else
105135- table->data = logger->name;
105136- r = proc_dostring(table, write, buffer, lenp, ppos);
105137+ nf_log_table.data = logger->name;
105138+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
105139 mutex_unlock(&nf_log_mutex);
105140 }
105141
105142diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
105143index c68c1e5..8b5d670 100644
105144--- a/net/netfilter/nf_sockopt.c
105145+++ b/net/netfilter/nf_sockopt.c
105146@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
105147 }
105148 }
105149
105150- list_add(&reg->list, &nf_sockopts);
105151+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105152 out:
105153 mutex_unlock(&nf_sockopt_mutex);
105154 return ret;
105155@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105156 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105157 {
105158 mutex_lock(&nf_sockopt_mutex);
105159- list_del(&reg->list);
105160+ pax_list_del((struct list_head *)&reg->list);
105161 mutex_unlock(&nf_sockopt_mutex);
105162 }
105163 EXPORT_SYMBOL(nf_unregister_sockopt);
105164diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
105165index 3250735..1fac969 100644
105166--- a/net/netfilter/nfnetlink_log.c
105167+++ b/net/netfilter/nfnetlink_log.c
105168@@ -80,7 +80,7 @@ static int nfnl_log_net_id __read_mostly;
105169 struct nfnl_log_net {
105170 spinlock_t instances_lock;
105171 struct hlist_head instance_table[INSTANCE_BUCKETS];
105172- atomic_t global_seq;
105173+ atomic_unchecked_t global_seq;
105174 };
105175
105176 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
105177@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
105178 /* global sequence number */
105179 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
105180 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
105181- htonl(atomic_inc_return(&log->global_seq))))
105182+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
105183 goto nla_put_failure;
105184
105185 if (data_len) {
105186diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
105187index 108120f..5b169db 100644
105188--- a/net/netfilter/nfnetlink_queue_core.c
105189+++ b/net/netfilter/nfnetlink_queue_core.c
105190@@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
105191 * returned by nf_queue. For instance, callers rely on -ECANCELED to
105192 * mean 'ignore this hook'.
105193 */
105194- if (IS_ERR(segs))
105195+ if (IS_ERR_OR_NULL(segs))
105196 goto out_err;
105197 queued = 0;
105198 err = 0;
105199diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
105200index 5b5ab9e..fc1015c 100644
105201--- a/net/netfilter/nft_compat.c
105202+++ b/net/netfilter/nft_compat.c
105203@@ -225,7 +225,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
105204 /* We want to reuse existing compat_to_user */
105205 old_fs = get_fs();
105206 set_fs(KERNEL_DS);
105207- t->compat_to_user(out, in);
105208+ t->compat_to_user((void __force_user *)out, in);
105209 set_fs(old_fs);
105210 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
105211 kfree(out);
105212@@ -421,7 +421,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
105213 /* We want to reuse existing compat_to_user */
105214 old_fs = get_fs();
105215 set_fs(KERNEL_DS);
105216- m->compat_to_user(out, in);
105217+ m->compat_to_user((void __force_user *)out, in);
105218 set_fs(old_fs);
105219 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
105220 kfree(out);
105221diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
105222new file mode 100644
105223index 0000000..c566332
105224--- /dev/null
105225+++ b/net/netfilter/xt_gradm.c
105226@@ -0,0 +1,51 @@
105227+/*
105228+ * gradm match for netfilter
105229