]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-3.0-3.17.6-201412071639.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.0-3.17.6-201412071639.patch
CommitLineData
36402851
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index 764f599..c600e2f 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -642,7 +643,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but the usage
267+ shall be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed by
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -665,7 +688,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -693,7 +716,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -724,7 +747,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 1edd5fd..107ff46 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1155,6 +1155,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+
327 hashdist= [KNL,NUMA] Large hashes allocated during boot
328 are distributed across NUMA nodes. Defaults on
329 for 64-bit NUMA, off otherwise.
330@@ -2175,6 +2179,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
331 noexec=on: enable non-executable mappings (default)
332 noexec=off: disable non-executable mappings
333
334+ nopcid [X86-64]
335+ Disable PCID (Process-Context IDentifier) even if it
336+ is supported by the processor.
337+
338 nosmap [X86]
339 Disable SMAP (Supervisor Mode Access Prevention)
340 even if it is supported by processor.
341@@ -2467,6 +2475,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
342 the specified number of seconds. This is to be used if
343 your oopses keep scrolling off the screen.
344
345+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
346+ virtualization environments that don't cope well with the
347+ expand down segment used by UDEREF on X86-32 or the frequent
348+ page table updates on X86-64.
349+
350+ pax_sanitize_slab=
351+ Format: { 0 | 1 | off | fast | full }
352+ Options '0' and '1' are only provided for backward
353+ compatibility, 'off' or 'fast' should be used instead.
354+ 0|off : disable slab object sanitization
355+ 1|fast: enable slab object sanitization excluding
356+ whitelisted slabs (default)
357+ full : sanitize all slabs, even the whitelisted ones
358+
359+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
360+
361+ pax_extra_latent_entropy
362+ Enable a very simple form of latent entropy extraction
363+ from the first 4GB of memory as the bootmem allocator
364+ passes the memory pages to the buddy allocator.
365+
366+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
367+ when the processor supports PCID.
368+
369 pcbit= [HW,ISDN]
370
371 pcd. [PARIDE]
372diff --git a/Makefile b/Makefile
373index bb43e9e..9dfc034 100644
374--- a/Makefile
375+++ b/Makefile
376@@ -303,8 +303,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
377
378 HOSTCC = gcc
379 HOSTCXX = g++
380-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
381-HOSTCXXFLAGS = -O2
382+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
383+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
384+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
385
386 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
387 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
388@@ -450,8 +451,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
389 # Rules shared between *config targets and build targets
390
391 # Basic helpers built in scripts/
392-PHONY += scripts_basic
393-scripts_basic:
394+PHONY += scripts_basic gcc-plugins
395+scripts_basic: gcc-plugins
396 $(Q)$(MAKE) $(build)=scripts/basic
397 $(Q)rm -f .tmp_quiet_recordmcount
398
399@@ -625,6 +626,72 @@ endif
400 # Tell gcc to never replace conditional load with a non-conditional one
401 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
402
403+ifndef DISABLE_PAX_PLUGINS
404+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
405+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
406+else
407+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
408+endif
409+ifneq ($(PLUGINCC),)
410+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
411+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
412+endif
413+ifdef CONFIG_PAX_MEMORY_STACKLEAK
414+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
415+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
416+endif
417+ifdef CONFIG_KALLOCSTAT_PLUGIN
418+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
419+endif
420+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
421+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
422+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
423+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
424+endif
425+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
426+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
427+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
428+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
429+endif
430+endif
431+ifdef CONFIG_CHECKER_PLUGIN
432+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
433+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
434+endif
435+endif
436+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
437+ifdef CONFIG_PAX_SIZE_OVERFLOW
438+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
439+endif
440+ifdef CONFIG_PAX_LATENT_ENTROPY
441+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
442+endif
443+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
444+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
445+endif
446+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
447+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
448+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
449+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
450+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
451+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
452+ifeq ($(KBUILD_EXTMOD),)
453+gcc-plugins:
454+ $(Q)$(MAKE) $(build)=tools/gcc
455+else
456+gcc-plugins: ;
457+endif
458+else
459+gcc-plugins:
460+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
461+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
462+else
463+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
464+endif
465+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
466+endif
467+endif
468+
469 ifdef CONFIG_READABLE_ASM
470 # Disable optimizations that make assembler listings hard to read.
471 # reorder blocks reorders the control in the function
472@@ -717,7 +784,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
473 else
474 KBUILD_CFLAGS += -g
475 endif
476-KBUILD_AFLAGS += -Wa,-gdwarf-2
477+KBUILD_AFLAGS += -Wa,--gdwarf-2
478 endif
479 ifdef CONFIG_DEBUG_INFO_DWARF4
480 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
481@@ -867,7 +934,7 @@ export mod_sign_cmd
482
483
484 ifeq ($(KBUILD_EXTMOD),)
485-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
486+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
487
488 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
489 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
490@@ -916,6 +983,8 @@ endif
491
492 # The actual objects are generated when descending,
493 # make sure no implicit rule kicks in
494+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
495+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
496 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
497
498 # Handle descending into subdirectories listed in $(vmlinux-dirs)
499@@ -925,7 +994,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
500 # Error messages still appears in the original language
501
502 PHONY += $(vmlinux-dirs)
503-$(vmlinux-dirs): prepare scripts
504+$(vmlinux-dirs): gcc-plugins prepare scripts
505 $(Q)$(MAKE) $(build)=$@
506
507 define filechk_kernel.release
508@@ -968,10 +1037,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
509
510 archprepare: archheaders archscripts prepare1 scripts_basic
511
512+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
513+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
514 prepare0: archprepare FORCE
515 $(Q)$(MAKE) $(build)=.
516
517 # All the preparing..
518+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
519 prepare: prepare0
520
521 # Generate some files
522@@ -1086,6 +1158,8 @@ all: modules
523 # using awk while concatenating to the final file.
524
525 PHONY += modules
526+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
527+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
528 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
529 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
530 @$(kecho) ' Building modules, stage 2.';
531@@ -1101,7 +1175,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
532
533 # Target to prepare building external modules
534 PHONY += modules_prepare
535-modules_prepare: prepare scripts
536+modules_prepare: gcc-plugins prepare scripts
537
538 # Target to install modules
539 PHONY += modules_install
540@@ -1167,7 +1241,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
541 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
542 signing_key.priv signing_key.x509 x509.genkey \
543 extra_certificates signing_key.x509.keyid \
544- signing_key.x509.signer include/linux/version.h
545+ signing_key.x509.signer include/linux/version.h \
546+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
547+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
548+ tools/gcc/randomize_layout_seed.h
549
550 # clean - Delete most, but leave enough to build external modules
551 #
552@@ -1206,7 +1283,7 @@ distclean: mrproper
553 @find $(srctree) $(RCS_FIND_IGNORE) \
554 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
555 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
556- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
557+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
558 -type f -print | xargs rm -f
559
560
561@@ -1372,6 +1449,8 @@ PHONY += $(module-dirs) modules
562 $(module-dirs): crmodverdir $(objtree)/Module.symvers
563 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
564
565+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
566+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
567 modules: $(module-dirs)
568 @$(kecho) ' Building modules, stage 2.';
569 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
570@@ -1512,17 +1591,21 @@ else
571 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
572 endif
573
574-%.s: %.c prepare scripts FORCE
575+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
576+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
577+%.s: %.c gcc-plugins prepare scripts FORCE
578 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
579 %.i: %.c prepare scripts FORCE
580 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
581-%.o: %.c prepare scripts FORCE
582+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
583+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
584+%.o: %.c gcc-plugins prepare scripts FORCE
585 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
586 %.lst: %.c prepare scripts FORCE
587 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
588-%.s: %.S prepare scripts FORCE
589+%.s: %.S gcc-plugins prepare scripts FORCE
590 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
591-%.o: %.S prepare scripts FORCE
592+%.o: %.S gcc-plugins prepare scripts FORCE
593 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
594 %.symtypes: %.c prepare scripts FORCE
595 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
596@@ -1532,11 +1615,15 @@ endif
597 $(cmd_crmodverdir)
598 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
599 $(build)=$(build-dir)
600-%/: prepare scripts FORCE
601+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
602+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
603+%/: gcc-plugins prepare scripts FORCE
604 $(cmd_crmodverdir)
605 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
606 $(build)=$(build-dir)
607-%.ko: prepare scripts FORCE
608+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
609+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
610+%.ko: gcc-plugins prepare scripts FORCE
611 $(cmd_crmodverdir)
612 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
613 $(build)=$(build-dir) $(@:.ko=.o)
614diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
615index ed60a1e..47f1a55 100644
616--- a/arch/alpha/include/asm/atomic.h
617+++ b/arch/alpha/include/asm/atomic.h
618@@ -292,4 +292,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
619 #define atomic_dec(v) atomic_sub(1,(v))
620 #define atomic64_dec(v) atomic64_sub(1,(v))
621
622+#define atomic64_read_unchecked(v) atomic64_read(v)
623+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
624+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
625+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
626+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
627+#define atomic64_inc_unchecked(v) atomic64_inc(v)
628+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
629+#define atomic64_dec_unchecked(v) atomic64_dec(v)
630+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
631+
632 #endif /* _ALPHA_ATOMIC_H */
633diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
634index ad368a9..fbe0f25 100644
635--- a/arch/alpha/include/asm/cache.h
636+++ b/arch/alpha/include/asm/cache.h
637@@ -4,19 +4,19 @@
638 #ifndef __ARCH_ALPHA_CACHE_H
639 #define __ARCH_ALPHA_CACHE_H
640
641+#include <linux/const.h>
642
643 /* Bytes per L1 (data) cache line. */
644 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
645-# define L1_CACHE_BYTES 64
646 # define L1_CACHE_SHIFT 6
647 #else
648 /* Both EV4 and EV5 are write-through, read-allocate,
649 direct-mapped, physical.
650 */
651-# define L1_CACHE_BYTES 32
652 # define L1_CACHE_SHIFT 5
653 #endif
654
655+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
656 #define SMP_CACHE_BYTES L1_CACHE_BYTES
657
658 #endif
659diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
660index 968d999..d36b2df 100644
661--- a/arch/alpha/include/asm/elf.h
662+++ b/arch/alpha/include/asm/elf.h
663@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
664
665 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
666
667+#ifdef CONFIG_PAX_ASLR
668+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
669+
670+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
671+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
672+#endif
673+
674 /* $0 is set by ld.so to a pointer to a function which might be
675 registered using atexit. This provides a mean for the dynamic
676 linker to call DT_FINI functions for shared libraries that have
677diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
678index aab14a0..b4fa3e7 100644
679--- a/arch/alpha/include/asm/pgalloc.h
680+++ b/arch/alpha/include/asm/pgalloc.h
681@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
682 pgd_set(pgd, pmd);
683 }
684
685+static inline void
686+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
687+{
688+ pgd_populate(mm, pgd, pmd);
689+}
690+
691 extern pgd_t *pgd_alloc(struct mm_struct *mm);
692
693 static inline void
694diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
695index d8f9b7e..f6222fa 100644
696--- a/arch/alpha/include/asm/pgtable.h
697+++ b/arch/alpha/include/asm/pgtable.h
698@@ -102,6 +102,17 @@ struct vm_area_struct;
699 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
700 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
701 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
702+
703+#ifdef CONFIG_PAX_PAGEEXEC
704+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
705+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
706+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
707+#else
708+# define PAGE_SHARED_NOEXEC PAGE_SHARED
709+# define PAGE_COPY_NOEXEC PAGE_COPY
710+# define PAGE_READONLY_NOEXEC PAGE_READONLY
711+#endif
712+
713 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
714
715 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
716diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
717index 2fd00b7..cfd5069 100644
718--- a/arch/alpha/kernel/module.c
719+++ b/arch/alpha/kernel/module.c
720@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
721
722 /* The small sections were sorted to the end of the segment.
723 The following should definitely cover them. */
724- gp = (u64)me->module_core + me->core_size - 0x8000;
725+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
726 got = sechdrs[me->arch.gotsecindex].sh_addr;
727
728 for (i = 0; i < n; i++) {
729diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
730index 1402fcc..0b1abd2 100644
731--- a/arch/alpha/kernel/osf_sys.c
732+++ b/arch/alpha/kernel/osf_sys.c
733@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
734 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
735
736 static unsigned long
737-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
738- unsigned long limit)
739+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
740+ unsigned long limit, unsigned long flags)
741 {
742 struct vm_unmapped_area_info info;
743+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
744
745 info.flags = 0;
746 info.length = len;
747@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
748 info.high_limit = limit;
749 info.align_mask = 0;
750 info.align_offset = 0;
751+ info.threadstack_offset = offset;
752 return vm_unmapped_area(&info);
753 }
754
755@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
756 merely specific addresses, but regions of memory -- perhaps
757 this feature should be incorporated into all ports? */
758
759+#ifdef CONFIG_PAX_RANDMMAP
760+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
761+#endif
762+
763 if (addr) {
764- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
765+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
766 if (addr != (unsigned long) -ENOMEM)
767 return addr;
768 }
769
770 /* Next, try allocating at TASK_UNMAPPED_BASE. */
771- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
772- len, limit);
773+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
774+
775 if (addr != (unsigned long) -ENOMEM)
776 return addr;
777
778 /* Finally, try allocating in low memory. */
779- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
780+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
781
782 return addr;
783 }
784diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
785index 98838a0..b304fb4 100644
786--- a/arch/alpha/mm/fault.c
787+++ b/arch/alpha/mm/fault.c
788@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
789 __reload_thread(pcb);
790 }
791
792+#ifdef CONFIG_PAX_PAGEEXEC
793+/*
794+ * PaX: decide what to do with offenders (regs->pc = fault address)
795+ *
796+ * returns 1 when task should be killed
797+ * 2 when patched PLT trampoline was detected
798+ * 3 when unpatched PLT trampoline was detected
799+ */
800+static int pax_handle_fetch_fault(struct pt_regs *regs)
801+{
802+
803+#ifdef CONFIG_PAX_EMUPLT
804+ int err;
805+
806+ do { /* PaX: patched PLT emulation #1 */
807+ unsigned int ldah, ldq, jmp;
808+
809+ err = get_user(ldah, (unsigned int *)regs->pc);
810+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
811+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
812+
813+ if (err)
814+ break;
815+
816+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
817+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
818+ jmp == 0x6BFB0000U)
819+ {
820+ unsigned long r27, addr;
821+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
822+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
823+
824+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
825+ err = get_user(r27, (unsigned long *)addr);
826+ if (err)
827+ break;
828+
829+ regs->r27 = r27;
830+ regs->pc = r27;
831+ return 2;
832+ }
833+ } while (0);
834+
835+ do { /* PaX: patched PLT emulation #2 */
836+ unsigned int ldah, lda, br;
837+
838+ err = get_user(ldah, (unsigned int *)regs->pc);
839+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
840+ err |= get_user(br, (unsigned int *)(regs->pc+8));
841+
842+ if (err)
843+ break;
844+
845+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
846+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
847+ (br & 0xFFE00000U) == 0xC3E00000U)
848+ {
849+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
850+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
851+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
852+
853+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
854+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
855+ return 2;
856+ }
857+ } while (0);
858+
859+ do { /* PaX: unpatched PLT emulation */
860+ unsigned int br;
861+
862+ err = get_user(br, (unsigned int *)regs->pc);
863+
864+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
865+ unsigned int br2, ldq, nop, jmp;
866+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
867+
868+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
869+ err = get_user(br2, (unsigned int *)addr);
870+ err |= get_user(ldq, (unsigned int *)(addr+4));
871+ err |= get_user(nop, (unsigned int *)(addr+8));
872+ err |= get_user(jmp, (unsigned int *)(addr+12));
873+ err |= get_user(resolver, (unsigned long *)(addr+16));
874+
875+ if (err)
876+ break;
877+
878+ if (br2 == 0xC3600000U &&
879+ ldq == 0xA77B000CU &&
880+ nop == 0x47FF041FU &&
881+ jmp == 0x6B7B0000U)
882+ {
883+ regs->r28 = regs->pc+4;
884+ regs->r27 = addr+16;
885+ regs->pc = resolver;
886+ return 3;
887+ }
888+ }
889+ } while (0);
890+#endif
891+
892+ return 1;
893+}
894+
895+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
896+{
897+ unsigned long i;
898+
899+ printk(KERN_ERR "PAX: bytes at PC: ");
900+ for (i = 0; i < 5; i++) {
901+ unsigned int c;
902+ if (get_user(c, (unsigned int *)pc+i))
903+ printk(KERN_CONT "???????? ");
904+ else
905+ printk(KERN_CONT "%08x ", c);
906+ }
907+ printk("\n");
908+}
909+#endif
910
911 /*
912 * This routine handles page faults. It determines the address,
913@@ -133,8 +251,29 @@ retry:
914 good_area:
915 si_code = SEGV_ACCERR;
916 if (cause < 0) {
917- if (!(vma->vm_flags & VM_EXEC))
918+ if (!(vma->vm_flags & VM_EXEC)) {
919+
920+#ifdef CONFIG_PAX_PAGEEXEC
921+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
922+ goto bad_area;
923+
924+ up_read(&mm->mmap_sem);
925+ switch (pax_handle_fetch_fault(regs)) {
926+
927+#ifdef CONFIG_PAX_EMUPLT
928+ case 2:
929+ case 3:
930+ return;
931+#endif
932+
933+ }
934+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
935+ do_group_exit(SIGKILL);
936+#else
937 goto bad_area;
938+#endif
939+
940+ }
941 } else if (!cause) {
942 /* Allow reads even for write-only mappings */
943 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
944diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
945index a2ff5c5..ecf6a78 100644
946--- a/arch/arc/kernel/kgdb.c
947+++ b/arch/arc/kernel/kgdb.c
948@@ -158,11 +158,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
949 return -1;
950 }
951
952-unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
953-{
954- return instruction_pointer(regs);
955-}
956-
957 int kgdb_arch_init(void)
958 {
959 single_step_data.armed = 0;
960diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
961index 32cbbd5..c102df9 100644
962--- a/arch/arm/Kconfig
963+++ b/arch/arm/Kconfig
964@@ -1719,7 +1719,7 @@ config ALIGNMENT_TRAP
965
966 config UACCESS_WITH_MEMCPY
967 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
968- depends on MMU
969+ depends on MMU && !PAX_MEMORY_UDEREF
970 default y if CPU_FEROCEON
971 help
972 Implement faster copy_to_user and clear_user methods for CPU
973@@ -1983,6 +1983,7 @@ config XIP_PHYS_ADDR
974 config KEXEC
975 bool "Kexec system call (EXPERIMENTAL)"
976 depends on (!SMP || PM_SLEEP_SMP)
977+ depends on !GRKERNSEC_KMEM
978 help
979 kexec is a system call that implements the ability to shutdown your
980 current kernel, and to start another kernel. It is like a reboot
981diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
982index 3040359..a494fa3 100644
983--- a/arch/arm/include/asm/atomic.h
984+++ b/arch/arm/include/asm/atomic.h
985@@ -18,17 +18,41 @@
986 #include <asm/barrier.h>
987 #include <asm/cmpxchg.h>
988
989+#ifdef CONFIG_GENERIC_ATOMIC64
990+#include <asm-generic/atomic64.h>
991+#endif
992+
993 #define ATOMIC_INIT(i) { (i) }
994
995 #ifdef __KERNEL__
996
997+#ifdef CONFIG_THUMB2_KERNEL
998+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
999+#else
1000+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
1001+#endif
1002+
1003+#define _ASM_EXTABLE(from, to) \
1004+" .pushsection __ex_table,\"a\"\n"\
1005+" .align 3\n" \
1006+" .long " #from ", " #to"\n" \
1007+" .popsection"
1008+
1009 /*
1010 * On ARM, ordinary assignment (str instruction) doesn't clear the local
1011 * strex/ldrex monitor on some implementations. The reason we can use it for
1012 * atomic_set() is the clrex or dummy strex done on every exception return.
1013 */
1014 #define atomic_read(v) (*(volatile int *)&(v)->counter)
1015+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1016+{
1017+ return v->counter;
1018+}
1019 #define atomic_set(v,i) (((v)->counter) = (i))
1020+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1021+{
1022+ v->counter = i;
1023+}
1024
1025 #if __LINUX_ARM_ARCH__ >= 6
1026
1027@@ -44,6 +68,36 @@ static inline void atomic_add(int i, atomic_t *v)
1028
1029 prefetchw(&v->counter);
1030 __asm__ __volatile__("@ atomic_add\n"
1031+"1: ldrex %1, [%3]\n"
1032+" adds %0, %1, %4\n"
1033+
1034+#ifdef CONFIG_PAX_REFCOUNT
1035+" bvc 3f\n"
1036+"2: " REFCOUNT_TRAP_INSN "\n"
1037+"3:\n"
1038+#endif
1039+
1040+" strex %1, %0, [%3]\n"
1041+" teq %1, #0\n"
1042+" bne 1b"
1043+
1044+#ifdef CONFIG_PAX_REFCOUNT
1045+"\n4:\n"
1046+ _ASM_EXTABLE(2b, 4b)
1047+#endif
1048+
1049+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1050+ : "r" (&v->counter), "Ir" (i)
1051+ : "cc");
1052+}
1053+
1054+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1055+{
1056+ unsigned long tmp;
1057+ int result;
1058+
1059+ prefetchw(&v->counter);
1060+ __asm__ __volatile__("@ atomic_add_unchecked\n"
1061 "1: ldrex %0, [%3]\n"
1062 " add %0, %0, %4\n"
1063 " strex %1, %0, [%3]\n"
1064@@ -63,6 +117,43 @@ static inline int atomic_add_return(int i, atomic_t *v)
1065 prefetchw(&v->counter);
1066
1067 __asm__ __volatile__("@ atomic_add_return\n"
1068+"1: ldrex %1, [%3]\n"
1069+" adds %0, %1, %4\n"
1070+
1071+#ifdef CONFIG_PAX_REFCOUNT
1072+" bvc 3f\n"
1073+" mov %0, %1\n"
1074+"2: " REFCOUNT_TRAP_INSN "\n"
1075+"3:\n"
1076+#endif
1077+
1078+" strex %1, %0, [%3]\n"
1079+" teq %1, #0\n"
1080+" bne 1b"
1081+
1082+#ifdef CONFIG_PAX_REFCOUNT
1083+"\n4:\n"
1084+ _ASM_EXTABLE(2b, 4b)
1085+#endif
1086+
1087+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1088+ : "r" (&v->counter), "Ir" (i)
1089+ : "cc");
1090+
1091+ smp_mb();
1092+
1093+ return result;
1094+}
1095+
1096+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1097+{
1098+ unsigned long tmp;
1099+ int result;
1100+
1101+ smp_mb();
1102+ prefetchw(&v->counter);
1103+
1104+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
1105 "1: ldrex %0, [%3]\n"
1106 " add %0, %0, %4\n"
1107 " strex %1, %0, [%3]\n"
1108@@ -84,6 +175,36 @@ static inline void atomic_sub(int i, atomic_t *v)
1109
1110 prefetchw(&v->counter);
1111 __asm__ __volatile__("@ atomic_sub\n"
1112+"1: ldrex %1, [%3]\n"
1113+" subs %0, %1, %4\n"
1114+
1115+#ifdef CONFIG_PAX_REFCOUNT
1116+" bvc 3f\n"
1117+"2: " REFCOUNT_TRAP_INSN "\n"
1118+"3:\n"
1119+#endif
1120+
1121+" strex %1, %0, [%3]\n"
1122+" teq %1, #0\n"
1123+" bne 1b"
1124+
1125+#ifdef CONFIG_PAX_REFCOUNT
1126+"\n4:\n"
1127+ _ASM_EXTABLE(2b, 4b)
1128+#endif
1129+
1130+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1131+ : "r" (&v->counter), "Ir" (i)
1132+ : "cc");
1133+}
1134+
1135+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1136+{
1137+ unsigned long tmp;
1138+ int result;
1139+
1140+ prefetchw(&v->counter);
1141+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
1142 "1: ldrex %0, [%3]\n"
1143 " sub %0, %0, %4\n"
1144 " strex %1, %0, [%3]\n"
1145@@ -103,11 +224,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1146 prefetchw(&v->counter);
1147
1148 __asm__ __volatile__("@ atomic_sub_return\n"
1149-"1: ldrex %0, [%3]\n"
1150-" sub %0, %0, %4\n"
1151+"1: ldrex %1, [%3]\n"
1152+" subs %0, %1, %4\n"
1153+
1154+#ifdef CONFIG_PAX_REFCOUNT
1155+" bvc 3f\n"
1156+" mov %0, %1\n"
1157+"2: " REFCOUNT_TRAP_INSN "\n"
1158+"3:\n"
1159+#endif
1160+
1161 " strex %1, %0, [%3]\n"
1162 " teq %1, #0\n"
1163 " bne 1b"
1164+
1165+#ifdef CONFIG_PAX_REFCOUNT
1166+"\n4:\n"
1167+ _ASM_EXTABLE(2b, 4b)
1168+#endif
1169+
1170 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1171 : "r" (&v->counter), "Ir" (i)
1172 : "cc");
1173@@ -152,12 +287,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1174 __asm__ __volatile__ ("@ atomic_add_unless\n"
1175 "1: ldrex %0, [%4]\n"
1176 " teq %0, %5\n"
1177-" beq 2f\n"
1178-" add %1, %0, %6\n"
1179+" beq 4f\n"
1180+" adds %1, %0, %6\n"
1181+
1182+#ifdef CONFIG_PAX_REFCOUNT
1183+" bvc 3f\n"
1184+"2: " REFCOUNT_TRAP_INSN "\n"
1185+"3:\n"
1186+#endif
1187+
1188 " strex %2, %1, [%4]\n"
1189 " teq %2, #0\n"
1190 " bne 1b\n"
1191-"2:"
1192+"4:"
1193+
1194+#ifdef CONFIG_PAX_REFCOUNT
1195+ _ASM_EXTABLE(2b, 4b)
1196+#endif
1197+
1198 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1199 : "r" (&v->counter), "r" (u), "r" (a)
1200 : "cc");
1201@@ -168,6 +315,28 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1202 return oldval;
1203 }
1204
1205+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1206+{
1207+ unsigned long oldval, res;
1208+
1209+ smp_mb();
1210+
1211+ do {
1212+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1213+ "ldrex %1, [%3]\n"
1214+ "mov %0, #0\n"
1215+ "teq %1, %4\n"
1216+ "strexeq %0, %5, [%3]\n"
1217+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1218+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1219+ : "cc");
1220+ } while (res);
1221+
1222+ smp_mb();
1223+
1224+ return oldval;
1225+}
1226+
1227 #else /* ARM_ARCH_6 */
1228
1229 #ifdef CONFIG_SMP
1230@@ -186,7 +355,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1231
1232 return val;
1233 }
1234+
1235+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1236+{
1237+ return atomic_add_return(i, v);
1238+}
1239+
1240 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1241+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1242+{
1243+ (void) atomic_add_return(i, v);
1244+}
1245
1246 static inline int atomic_sub_return(int i, atomic_t *v)
1247 {
1248@@ -201,6 +380,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1249 return val;
1250 }
1251 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1252+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1253+{
1254+ (void) atomic_sub_return(i, v);
1255+}
1256
1257 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1258 {
1259@@ -216,6 +399,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1260 return ret;
1261 }
1262
1263+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1264+{
1265+ return atomic_cmpxchg(v, old, new);
1266+}
1267+
1268 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1269 {
1270 int c, old;
1271@@ -229,13 +417,33 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1272 #endif /* __LINUX_ARM_ARCH__ */
1273
1274 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1275+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1276+{
1277+ return xchg(&v->counter, new);
1278+}
1279
1280 #define atomic_inc(v) atomic_add(1, v)
1281+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1282+{
1283+ atomic_add_unchecked(1, v);
1284+}
1285 #define atomic_dec(v) atomic_sub(1, v)
1286+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1287+{
1288+ atomic_sub_unchecked(1, v);
1289+}
1290
1291 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1292+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1293+{
1294+ return atomic_add_return_unchecked(1, v) == 0;
1295+}
1296 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1297 #define atomic_inc_return(v) (atomic_add_return(1, v))
1298+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1299+{
1300+ return atomic_add_return_unchecked(1, v);
1301+}
1302 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1303 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1304
1305@@ -246,6 +454,14 @@ typedef struct {
1306 long long counter;
1307 } atomic64_t;
1308
1309+#ifdef CONFIG_PAX_REFCOUNT
1310+typedef struct {
1311+ long long counter;
1312+} atomic64_unchecked_t;
1313+#else
1314+typedef atomic64_t atomic64_unchecked_t;
1315+#endif
1316+
1317 #define ATOMIC64_INIT(i) { (i) }
1318
1319 #ifdef CONFIG_ARM_LPAE
1320@@ -262,6 +478,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1321 return result;
1322 }
1323
1324+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1325+{
1326+ long long result;
1327+
1328+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1329+" ldrd %0, %H0, [%1]"
1330+ : "=&r" (result)
1331+ : "r" (&v->counter), "Qo" (v->counter)
1332+ );
1333+
1334+ return result;
1335+}
1336+
1337 static inline void atomic64_set(atomic64_t *v, long long i)
1338 {
1339 __asm__ __volatile__("@ atomic64_set\n"
1340@@ -270,6 +499,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1341 : "r" (&v->counter), "r" (i)
1342 );
1343 }
1344+
1345+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1346+{
1347+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1348+" strd %2, %H2, [%1]"
1349+ : "=Qo" (v->counter)
1350+ : "r" (&v->counter), "r" (i)
1351+ );
1352+}
1353 #else
1354 static inline long long atomic64_read(const atomic64_t *v)
1355 {
1356@@ -284,6 +522,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1357 return result;
1358 }
1359
1360+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1361+{
1362+ long long result;
1363+
1364+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1365+" ldrexd %0, %H0, [%1]"
1366+ : "=&r" (result)
1367+ : "r" (&v->counter), "Qo" (v->counter)
1368+ );
1369+
1370+ return result;
1371+}
1372+
1373 static inline void atomic64_set(atomic64_t *v, long long i)
1374 {
1375 long long tmp;
1376@@ -298,6 +549,21 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1377 : "r" (&v->counter), "r" (i)
1378 : "cc");
1379 }
1380+
1381+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1382+{
1383+ long long tmp;
1384+
1385+ prefetchw(&v->counter);
1386+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1387+"1: ldrexd %0, %H0, [%2]\n"
1388+" strexd %0, %3, %H3, [%2]\n"
1389+" teq %0, #0\n"
1390+" bne 1b"
1391+ : "=&r" (tmp), "=Qo" (v->counter)
1392+ : "r" (&v->counter), "r" (i)
1393+ : "cc");
1394+}
1395 #endif
1396
1397 static inline void atomic64_add(long long i, atomic64_t *v)
1398@@ -309,6 +575,37 @@ static inline void atomic64_add(long long i, atomic64_t *v)
1399 __asm__ __volatile__("@ atomic64_add\n"
1400 "1: ldrexd %0, %H0, [%3]\n"
1401 " adds %Q0, %Q0, %Q4\n"
1402+" adcs %R0, %R0, %R4\n"
1403+
1404+#ifdef CONFIG_PAX_REFCOUNT
1405+" bvc 3f\n"
1406+"2: " REFCOUNT_TRAP_INSN "\n"
1407+"3:\n"
1408+#endif
1409+
1410+" strexd %1, %0, %H0, [%3]\n"
1411+" teq %1, #0\n"
1412+" bne 1b"
1413+
1414+#ifdef CONFIG_PAX_REFCOUNT
1415+"\n4:\n"
1416+ _ASM_EXTABLE(2b, 4b)
1417+#endif
1418+
1419+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1420+ : "r" (&v->counter), "r" (i)
1421+ : "cc");
1422+}
1423+
1424+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
1425+{
1426+ long long result;
1427+ unsigned long tmp;
1428+
1429+ prefetchw(&v->counter);
1430+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1431+"1: ldrexd %0, %H0, [%3]\n"
1432+" adds %Q0, %Q0, %Q4\n"
1433 " adc %R0, %R0, %R4\n"
1434 " strexd %1, %0, %H0, [%3]\n"
1435 " teq %1, #0\n"
1436@@ -329,6 +626,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
1437 __asm__ __volatile__("@ atomic64_add_return\n"
1438 "1: ldrexd %0, %H0, [%3]\n"
1439 " adds %Q0, %Q0, %Q4\n"
1440+" adcs %R0, %R0, %R4\n"
1441+
1442+#ifdef CONFIG_PAX_REFCOUNT
1443+" bvc 3f\n"
1444+" mov %0, %1\n"
1445+" mov %H0, %H1\n"
1446+"2: " REFCOUNT_TRAP_INSN "\n"
1447+"3:\n"
1448+#endif
1449+
1450+" strexd %1, %0, %H0, [%3]\n"
1451+" teq %1, #0\n"
1452+" bne 1b"
1453+
1454+#ifdef CONFIG_PAX_REFCOUNT
1455+"\n4:\n"
1456+ _ASM_EXTABLE(2b, 4b)
1457+#endif
1458+
1459+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1460+ : "r" (&v->counter), "r" (i)
1461+ : "cc");
1462+
1463+ smp_mb();
1464+
1465+ return result;
1466+}
1467+
1468+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
1469+{
1470+ long long result;
1471+ unsigned long tmp;
1472+
1473+ smp_mb();
1474+
1475+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1476+"1: ldrexd %0, %H0, [%3]\n"
1477+" adds %Q0, %Q0, %Q4\n"
1478 " adc %R0, %R0, %R4\n"
1479 " strexd %1, %0, %H0, [%3]\n"
1480 " teq %1, #0\n"
1481@@ -351,6 +686,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v)
1482 __asm__ __volatile__("@ atomic64_sub\n"
1483 "1: ldrexd %0, %H0, [%3]\n"
1484 " subs %Q0, %Q0, %Q4\n"
1485+" sbcs %R0, %R0, %R4\n"
1486+
1487+#ifdef CONFIG_PAX_REFCOUNT
1488+" bvc 3f\n"
1489+"2: " REFCOUNT_TRAP_INSN "\n"
1490+"3:\n"
1491+#endif
1492+
1493+" strexd %1, %0, %H0, [%3]\n"
1494+" teq %1, #0\n"
1495+" bne 1b"
1496+
1497+#ifdef CONFIG_PAX_REFCOUNT
1498+"\n4:\n"
1499+ _ASM_EXTABLE(2b, 4b)
1500+#endif
1501+
1502+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1503+ : "r" (&v->counter), "r" (i)
1504+ : "cc");
1505+}
1506+
1507+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v)
1508+{
1509+ long long result;
1510+ unsigned long tmp;
1511+
1512+ prefetchw(&v->counter);
1513+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1514+"1: ldrexd %0, %H0, [%3]\n"
1515+" subs %Q0, %Q0, %Q4\n"
1516 " sbc %R0, %R0, %R4\n"
1517 " strexd %1, %0, %H0, [%3]\n"
1518 " teq %1, #0\n"
1519@@ -371,10 +737,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v)
1520 __asm__ __volatile__("@ atomic64_sub_return\n"
1521 "1: ldrexd %0, %H0, [%3]\n"
1522 " subs %Q0, %Q0, %Q4\n"
1523-" sbc %R0, %R0, %R4\n"
1524+" sbcs %R0, %R0, %R4\n"
1525+
1526+#ifdef CONFIG_PAX_REFCOUNT
1527+" bvc 3f\n"
1528+" mov %0, %1\n"
1529+" mov %H0, %H1\n"
1530+"2: " REFCOUNT_TRAP_INSN "\n"
1531+"3:\n"
1532+#endif
1533+
1534 " strexd %1, %0, %H0, [%3]\n"
1535 " teq %1, #0\n"
1536 " bne 1b"
1537+
1538+#ifdef CONFIG_PAX_REFCOUNT
1539+"\n4:\n"
1540+ _ASM_EXTABLE(2b, 4b)
1541+#endif
1542+
1543 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1544 : "r" (&v->counter), "r" (i)
1545 : "cc");
1546@@ -410,6 +791,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1547 return oldval;
1548 }
1549
1550+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1551+ long long new)
1552+{
1553+ long long oldval;
1554+ unsigned long res;
1555+
1556+ smp_mb();
1557+
1558+ do {
1559+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1560+ "ldrexd %1, %H1, [%3]\n"
1561+ "mov %0, #0\n"
1562+ "teq %1, %4\n"
1563+ "teqeq %H1, %H4\n"
1564+ "strexdeq %0, %5, %H5, [%3]"
1565+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1566+ : "r" (&ptr->counter), "r" (old), "r" (new)
1567+ : "cc");
1568+ } while (res);
1569+
1570+ smp_mb();
1571+
1572+ return oldval;
1573+}
1574+
1575 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1576 {
1577 long long result;
1578@@ -435,21 +841,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1579 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1580 {
1581 long long result;
1582- unsigned long tmp;
1583+ u64 tmp;
1584
1585 smp_mb();
1586 prefetchw(&v->counter);
1587
1588 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1589-"1: ldrexd %0, %H0, [%3]\n"
1590-" subs %Q0, %Q0, #1\n"
1591-" sbc %R0, %R0, #0\n"
1592+"1: ldrexd %1, %H1, [%3]\n"
1593+" subs %Q0, %Q1, #1\n"
1594+" sbcs %R0, %R1, #0\n"
1595+
1596+#ifdef CONFIG_PAX_REFCOUNT
1597+" bvc 3f\n"
1598+" mov %Q0, %Q1\n"
1599+" mov %R0, %R1\n"
1600+"2: " REFCOUNT_TRAP_INSN "\n"
1601+"3:\n"
1602+#endif
1603+
1604 " teq %R0, #0\n"
1605-" bmi 2f\n"
1606+" bmi 4f\n"
1607 " strexd %1, %0, %H0, [%3]\n"
1608 " teq %1, #0\n"
1609 " bne 1b\n"
1610-"2:"
1611+"4:\n"
1612+
1613+#ifdef CONFIG_PAX_REFCOUNT
1614+ _ASM_EXTABLE(2b, 4b)
1615+#endif
1616+
1617 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1618 : "r" (&v->counter)
1619 : "cc");
1620@@ -473,13 +893,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1621 " teq %0, %5\n"
1622 " teqeq %H0, %H5\n"
1623 " moveq %1, #0\n"
1624-" beq 2f\n"
1625+" beq 4f\n"
1626 " adds %Q0, %Q0, %Q6\n"
1627-" adc %R0, %R0, %R6\n"
1628+" adcs %R0, %R0, %R6\n"
1629+
1630+#ifdef CONFIG_PAX_REFCOUNT
1631+" bvc 3f\n"
1632+"2: " REFCOUNT_TRAP_INSN "\n"
1633+"3:\n"
1634+#endif
1635+
1636 " strexd %2, %0, %H0, [%4]\n"
1637 " teq %2, #0\n"
1638 " bne 1b\n"
1639-"2:"
1640+"4:\n"
1641+
1642+#ifdef CONFIG_PAX_REFCOUNT
1643+ _ASM_EXTABLE(2b, 4b)
1644+#endif
1645+
1646 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1647 : "r" (&v->counter), "r" (u), "r" (a)
1648 : "cc");
1649@@ -492,10 +924,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1650
1651 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1652 #define atomic64_inc(v) atomic64_add(1LL, (v))
1653+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1654 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1655+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1656 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1657 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1658 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1659+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1660 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1661 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1662 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1663diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1664index c6a3e73..35cca85 100644
1665--- a/arch/arm/include/asm/barrier.h
1666+++ b/arch/arm/include/asm/barrier.h
1667@@ -63,7 +63,7 @@
1668 do { \
1669 compiletime_assert_atomic_type(*p); \
1670 smp_mb(); \
1671- ACCESS_ONCE(*p) = (v); \
1672+ ACCESS_ONCE_RW(*p) = (v); \
1673 } while (0)
1674
1675 #define smp_load_acquire(p) \
1676diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1677index 75fe66b..ba3dee4 100644
1678--- a/arch/arm/include/asm/cache.h
1679+++ b/arch/arm/include/asm/cache.h
1680@@ -4,8 +4,10 @@
1681 #ifndef __ASMARM_CACHE_H
1682 #define __ASMARM_CACHE_H
1683
1684+#include <linux/const.h>
1685+
1686 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1687-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1688+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1689
1690 /*
1691 * Memory returned by kmalloc() may be used for DMA, so we must make
1692@@ -24,5 +26,6 @@
1693 #endif
1694
1695 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1696+#define __read_only __attribute__ ((__section__(".data..read_only")))
1697
1698 #endif
1699diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1700index 10e78d0..dc8505d 100644
1701--- a/arch/arm/include/asm/cacheflush.h
1702+++ b/arch/arm/include/asm/cacheflush.h
1703@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1704 void (*dma_unmap_area)(const void *, size_t, int);
1705
1706 void (*dma_flush_range)(const void *, const void *);
1707-};
1708+} __no_const;
1709
1710 /*
1711 * Select the calling method
1712diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1713index 5233151..87a71fa 100644
1714--- a/arch/arm/include/asm/checksum.h
1715+++ b/arch/arm/include/asm/checksum.h
1716@@ -37,7 +37,19 @@ __wsum
1717 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1718
1719 __wsum
1720-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1721+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1722+
1723+static inline __wsum
1724+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1725+{
1726+ __wsum ret;
1727+ pax_open_userland();
1728+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1729+ pax_close_userland();
1730+ return ret;
1731+}
1732+
1733+
1734
1735 /*
1736 * Fold a partial checksum without adding pseudo headers
1737diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1738index abb2c37..96db950 100644
1739--- a/arch/arm/include/asm/cmpxchg.h
1740+++ b/arch/arm/include/asm/cmpxchg.h
1741@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1742
1743 #define xchg(ptr,x) \
1744 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1745+#define xchg_unchecked(ptr,x) \
1746+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1747
1748 #include <asm-generic/cmpxchg-local.h>
1749
1750diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1751index 6ddbe44..b5e38b1 100644
1752--- a/arch/arm/include/asm/domain.h
1753+++ b/arch/arm/include/asm/domain.h
1754@@ -48,18 +48,37 @@
1755 * Domain types
1756 */
1757 #define DOMAIN_NOACCESS 0
1758-#define DOMAIN_CLIENT 1
1759 #ifdef CONFIG_CPU_USE_DOMAINS
1760+#define DOMAIN_USERCLIENT 1
1761+#define DOMAIN_KERNELCLIENT 1
1762 #define DOMAIN_MANAGER 3
1763+#define DOMAIN_VECTORS DOMAIN_USER
1764 #else
1765+
1766+#ifdef CONFIG_PAX_KERNEXEC
1767 #define DOMAIN_MANAGER 1
1768+#define DOMAIN_KERNEXEC 3
1769+#else
1770+#define DOMAIN_MANAGER 1
1771+#endif
1772+
1773+#ifdef CONFIG_PAX_MEMORY_UDEREF
1774+#define DOMAIN_USERCLIENT 0
1775+#define DOMAIN_UDEREF 1
1776+#define DOMAIN_VECTORS DOMAIN_KERNEL
1777+#else
1778+#define DOMAIN_USERCLIENT 1
1779+#define DOMAIN_VECTORS DOMAIN_USER
1780+#endif
1781+#define DOMAIN_KERNELCLIENT 1
1782+
1783 #endif
1784
1785 #define domain_val(dom,type) ((type) << (2*(dom)))
1786
1787 #ifndef __ASSEMBLY__
1788
1789-#ifdef CONFIG_CPU_USE_DOMAINS
1790+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1791 static inline void set_domain(unsigned val)
1792 {
1793 asm volatile(
1794@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1795 isb();
1796 }
1797
1798-#define modify_domain(dom,type) \
1799- do { \
1800- struct thread_info *thread = current_thread_info(); \
1801- unsigned int domain = thread->cpu_domain; \
1802- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1803- thread->cpu_domain = domain | domain_val(dom, type); \
1804- set_domain(thread->cpu_domain); \
1805- } while (0)
1806-
1807+extern void modify_domain(unsigned int dom, unsigned int type);
1808 #else
1809 static inline void set_domain(unsigned val) { }
1810 static inline void modify_domain(unsigned dom, unsigned type) { }
1811diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1812index afb9caf..9a0bac0 100644
1813--- a/arch/arm/include/asm/elf.h
1814+++ b/arch/arm/include/asm/elf.h
1815@@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1816 the loader. We need to make sure that it is out of the way of the program
1817 that it will "exec", and that there is sufficient room for the brk. */
1818
1819-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1820+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1821+
1822+#ifdef CONFIG_PAX_ASLR
1823+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1824+
1825+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1826+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1827+#endif
1828
1829 /* When the program starts, a1 contains a pointer to a function to be
1830 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1831@@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1832 extern void elf_set_personality(const struct elf32_hdr *);
1833 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1834
1835-struct mm_struct;
1836-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1837-#define arch_randomize_brk arch_randomize_brk
1838-
1839 #ifdef CONFIG_MMU
1840 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1841 struct linux_binprm;
1842diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1843index de53547..52b9a28 100644
1844--- a/arch/arm/include/asm/fncpy.h
1845+++ b/arch/arm/include/asm/fncpy.h
1846@@ -81,7 +81,9 @@
1847 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1848 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1849 \
1850+ pax_open_kernel(); \
1851 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1852+ pax_close_kernel(); \
1853 flush_icache_range((unsigned long)(dest_buf), \
1854 (unsigned long)(dest_buf) + (size)); \
1855 \
1856diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1857index 53e69da..3fdc896 100644
1858--- a/arch/arm/include/asm/futex.h
1859+++ b/arch/arm/include/asm/futex.h
1860@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1861 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1862 return -EFAULT;
1863
1864+ pax_open_userland();
1865+
1866 smp_mb();
1867 /* Prefetching cannot fault */
1868 prefetchw(uaddr);
1869@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1870 : "cc", "memory");
1871 smp_mb();
1872
1873+ pax_close_userland();
1874+
1875 *uval = val;
1876 return ret;
1877 }
1878@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1879 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1880 return -EFAULT;
1881
1882+ pax_open_userland();
1883+
1884 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1885 "1: " TUSER(ldr) " %1, [%4]\n"
1886 " teq %1, %2\n"
1887@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1888 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1889 : "cc", "memory");
1890
1891+ pax_close_userland();
1892+
1893 *uval = val;
1894 return ret;
1895 }
1896@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1897 return -EFAULT;
1898
1899 pagefault_disable(); /* implies preempt_disable() */
1900+ pax_open_userland();
1901
1902 switch (op) {
1903 case FUTEX_OP_SET:
1904@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1905 ret = -ENOSYS;
1906 }
1907
1908+ pax_close_userland();
1909 pagefault_enable(); /* subsumes preempt_enable() */
1910
1911 if (!ret) {
1912diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1913index 83eb2f7..ed77159 100644
1914--- a/arch/arm/include/asm/kmap_types.h
1915+++ b/arch/arm/include/asm/kmap_types.h
1916@@ -4,6 +4,6 @@
1917 /*
1918 * This is the "bare minimum". AIO seems to require this.
1919 */
1920-#define KM_TYPE_NR 16
1921+#define KM_TYPE_NR 17
1922
1923 #endif
1924diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1925index 9e614a1..3302cca 100644
1926--- a/arch/arm/include/asm/mach/dma.h
1927+++ b/arch/arm/include/asm/mach/dma.h
1928@@ -22,7 +22,7 @@ struct dma_ops {
1929 int (*residue)(unsigned int, dma_t *); /* optional */
1930 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1931 const char *type;
1932-};
1933+} __do_const;
1934
1935 struct dma_struct {
1936 void *addr; /* single DMA address */
1937diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1938index f98c7f3..e5c626d 100644
1939--- a/arch/arm/include/asm/mach/map.h
1940+++ b/arch/arm/include/asm/mach/map.h
1941@@ -23,17 +23,19 @@ struct map_desc {
1942
1943 /* types 0-3 are defined in asm/io.h */
1944 enum {
1945- MT_UNCACHED = 4,
1946- MT_CACHECLEAN,
1947- MT_MINICLEAN,
1948+ MT_UNCACHED_RW = 4,
1949+ MT_CACHECLEAN_RO,
1950+ MT_MINICLEAN_RO,
1951 MT_LOW_VECTORS,
1952 MT_HIGH_VECTORS,
1953- MT_MEMORY_RWX,
1954+ __MT_MEMORY_RWX,
1955 MT_MEMORY_RW,
1956- MT_ROM,
1957- MT_MEMORY_RWX_NONCACHED,
1958+ MT_MEMORY_RX,
1959+ MT_ROM_RX,
1960+ MT_MEMORY_RW_NONCACHED,
1961+ MT_MEMORY_RX_NONCACHED,
1962 MT_MEMORY_RW_DTCM,
1963- MT_MEMORY_RWX_ITCM,
1964+ MT_MEMORY_RX_ITCM,
1965 MT_MEMORY_RW_SO,
1966 MT_MEMORY_DMA_READY,
1967 };
1968diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1969index 891a56b..48f337e 100644
1970--- a/arch/arm/include/asm/outercache.h
1971+++ b/arch/arm/include/asm/outercache.h
1972@@ -36,7 +36,7 @@ struct outer_cache_fns {
1973
1974 /* This is an ARM L2C thing */
1975 void (*write_sec)(unsigned long, unsigned);
1976-};
1977+} __no_const;
1978
1979 extern struct outer_cache_fns outer_cache;
1980
1981diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1982index 4355f0e..cd9168e 100644
1983--- a/arch/arm/include/asm/page.h
1984+++ b/arch/arm/include/asm/page.h
1985@@ -23,6 +23,7 @@
1986
1987 #else
1988
1989+#include <linux/compiler.h>
1990 #include <asm/glue.h>
1991
1992 /*
1993@@ -114,7 +115,7 @@ struct cpu_user_fns {
1994 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1995 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1996 unsigned long vaddr, struct vm_area_struct *vma);
1997-};
1998+} __no_const;
1999
2000 #ifdef MULTI_USER
2001 extern struct cpu_user_fns cpu_user;
2002diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
2003index 78a7793..e3dc06c 100644
2004--- a/arch/arm/include/asm/pgalloc.h
2005+++ b/arch/arm/include/asm/pgalloc.h
2006@@ -17,6 +17,7 @@
2007 #include <asm/processor.h>
2008 #include <asm/cacheflush.h>
2009 #include <asm/tlbflush.h>
2010+#include <asm/system_info.h>
2011
2012 #define check_pgt_cache() do { } while (0)
2013
2014@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2015 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
2016 }
2017
2018+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2019+{
2020+ pud_populate(mm, pud, pmd);
2021+}
2022+
2023 #else /* !CONFIG_ARM_LPAE */
2024
2025 /*
2026@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2027 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
2028 #define pmd_free(mm, pmd) do { } while (0)
2029 #define pud_populate(mm,pmd,pte) BUG()
2030+#define pud_populate_kernel(mm,pmd,pte) BUG()
2031
2032 #endif /* CONFIG_ARM_LPAE */
2033
2034@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
2035 __free_page(pte);
2036 }
2037
2038+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
2039+{
2040+#ifdef CONFIG_ARM_LPAE
2041+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2042+#else
2043+ if (addr & SECTION_SIZE)
2044+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
2045+ else
2046+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
2047+#endif
2048+ flush_pmd_entry(pmdp);
2049+}
2050+
2051 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
2052 pmdval_t prot)
2053 {
2054@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
2055 static inline void
2056 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
2057 {
2058- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
2059+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
2060 }
2061 #define pmd_pgtable(pmd) pmd_page(pmd)
2062
2063diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
2064index 5cfba15..f415e1a 100644
2065--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
2066+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
2067@@ -20,12 +20,15 @@
2068 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
2069 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
2070 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
2071+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
2072 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
2073 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
2074 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
2075+
2076 /*
2077 * - section
2078 */
2079+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
2080 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
2081 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
2082 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
2083@@ -37,6 +40,7 @@
2084 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
2085 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
2086 #define PMD_SECT_AF (_AT(pmdval_t, 0))
2087+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
2088
2089 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
2090 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
2091@@ -66,6 +70,7 @@
2092 * - extended small page/tiny page
2093 */
2094 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
2095+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
2096 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
2097 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
2098 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
2099diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
2100index 219ac88..73ec32a 100644
2101--- a/arch/arm/include/asm/pgtable-2level.h
2102+++ b/arch/arm/include/asm/pgtable-2level.h
2103@@ -126,6 +126,9 @@
2104 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
2105 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
2106
2107+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
2108+#define L_PTE_PXN (_AT(pteval_t, 0))
2109+
2110 /*
2111 * These are the memory types, defined to be compatible with
2112 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
2113diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
2114index 9fd61c7..f8f1cff 100644
2115--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
2116+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
2117@@ -76,6 +76,7 @@
2118 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2119 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
2120 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
2121+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2122 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
2123
2124 /*
2125diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2126index 06e0bc0..c65bca8 100644
2127--- a/arch/arm/include/asm/pgtable-3level.h
2128+++ b/arch/arm/include/asm/pgtable-3level.h
2129@@ -81,6 +81,7 @@
2130 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
2131 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2132 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2133+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2134 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2135 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
2136 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
2137@@ -92,10 +93,12 @@
2138 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
2139 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
2140 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
2141+#define PMD_SECT_RDONLY PMD_SECT_AP2
2142
2143 /*
2144 * To be used in assembly code with the upper page attributes.
2145 */
2146+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2147 #define L_PTE_XN_HIGH (1 << (54 - 32))
2148 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2149
2150diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2151index 01baef0..73c156e 100644
2152--- a/arch/arm/include/asm/pgtable.h
2153+++ b/arch/arm/include/asm/pgtable.h
2154@@ -33,6 +33,9 @@
2155 #include <asm/pgtable-2level.h>
2156 #endif
2157
2158+#define ktla_ktva(addr) (addr)
2159+#define ktva_ktla(addr) (addr)
2160+
2161 /*
2162 * Just any arbitrary offset to the start of the vmalloc VM area: the
2163 * current 8MB value just means that there will be a 8MB "hole" after the
2164@@ -48,6 +51,9 @@
2165 #define LIBRARY_TEXT_START 0x0c000000
2166
2167 #ifndef __ASSEMBLY__
2168+extern pteval_t __supported_pte_mask;
2169+extern pmdval_t __supported_pmd_mask;
2170+
2171 extern void __pte_error(const char *file, int line, pte_t);
2172 extern void __pmd_error(const char *file, int line, pmd_t);
2173 extern void __pgd_error(const char *file, int line, pgd_t);
2174@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2175 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2176 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2177
2178+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2179+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2180+
2181+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2182+#include <asm/domain.h>
2183+#include <linux/thread_info.h>
2184+#include <linux/preempt.h>
2185+
2186+static inline int test_domain(int domain, int domaintype)
2187+{
2188+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2189+}
2190+#endif
2191+
2192+#ifdef CONFIG_PAX_KERNEXEC
2193+static inline unsigned long pax_open_kernel(void) {
2194+#ifdef CONFIG_ARM_LPAE
2195+ /* TODO */
2196+#else
2197+ preempt_disable();
2198+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2199+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2200+#endif
2201+ return 0;
2202+}
2203+
2204+static inline unsigned long pax_close_kernel(void) {
2205+#ifdef CONFIG_ARM_LPAE
2206+ /* TODO */
2207+#else
2208+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2209+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2210+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2211+ preempt_enable_no_resched();
2212+#endif
2213+ return 0;
2214+}
2215+#else
2216+static inline unsigned long pax_open_kernel(void) { return 0; }
2217+static inline unsigned long pax_close_kernel(void) { return 0; }
2218+#endif
2219+
2220 /*
2221 * This is the lowest virtual address we can permit any user space
2222 * mapping to be mapped at. This is particularly important for
2223@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2224 /*
2225 * The pgprot_* and protection_map entries will be fixed up in runtime
2226 * to include the cachable and bufferable bits based on memory policy,
2227- * as well as any architecture dependent bits like global/ASID and SMP
2228- * shared mapping bits.
2229+ * as well as any architecture dependent bits like global/ASID, PXN,
2230+ * and SMP shared mapping bits.
2231 */
2232 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2233
2234@@ -269,7 +317,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2235 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2236 {
2237 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2238- L_PTE_NONE | L_PTE_VALID;
2239+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2240 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2241 return pte;
2242 }
2243diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2244index c25ef3e..735f14b 100644
2245--- a/arch/arm/include/asm/psci.h
2246+++ b/arch/arm/include/asm/psci.h
2247@@ -32,7 +32,7 @@ struct psci_operations {
2248 int (*affinity_info)(unsigned long target_affinity,
2249 unsigned long lowest_affinity_level);
2250 int (*migrate_info_type)(void);
2251-};
2252+} __no_const;
2253
2254 extern struct psci_operations psci_ops;
2255 extern struct smp_operations psci_smp_ops;
2256diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2257index 2ec765c..beb1fe16 100644
2258--- a/arch/arm/include/asm/smp.h
2259+++ b/arch/arm/include/asm/smp.h
2260@@ -113,7 +113,7 @@ struct smp_operations {
2261 int (*cpu_disable)(unsigned int cpu);
2262 #endif
2263 #endif
2264-};
2265+} __no_const;
2266
2267 struct of_cpu_method {
2268 const char *method;
2269diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2270index ce73ab6..7310f8a 100644
2271--- a/arch/arm/include/asm/thread_info.h
2272+++ b/arch/arm/include/asm/thread_info.h
2273@@ -78,9 +78,9 @@ struct thread_info {
2274 .flags = 0, \
2275 .preempt_count = INIT_PREEMPT_COUNT, \
2276 .addr_limit = KERNEL_DS, \
2277- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2278- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2279- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2280+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2281+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2282+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2283 .restart_block = { \
2284 .fn = do_no_restart_syscall, \
2285 }, \
2286@@ -154,7 +154,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2287 #define TIF_SYSCALL_AUDIT 9
2288 #define TIF_SYSCALL_TRACEPOINT 10
2289 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2290-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2291+/* within 8 bits of TIF_SYSCALL_TRACE
2292+ * to meet flexible second operand requirements
2293+ */
2294+#define TIF_GRSEC_SETXID 12
2295+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2296 #define TIF_USING_IWMMXT 17
2297 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2298 #define TIF_RESTORE_SIGMASK 20
2299@@ -168,10 +172,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2300 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2301 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2302 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2303+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2304
2305 /* Checks for any syscall work in entry-common.S */
2306 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2307- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2308+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2309
2310 /*
2311 * Change these and you break ASM code in entry-common.S
2312diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2313index 5f833f7..76e6644 100644
2314--- a/arch/arm/include/asm/tls.h
2315+++ b/arch/arm/include/asm/tls.h
2316@@ -3,6 +3,7 @@
2317
2318 #include <linux/compiler.h>
2319 #include <asm/thread_info.h>
2320+#include <asm/pgtable.h>
2321
2322 #ifdef __ASSEMBLY__
2323 #include <asm/asm-offsets.h>
2324@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2325 * at 0xffff0fe0 must be used instead. (see
2326 * entry-armv.S for details)
2327 */
2328+ pax_open_kernel();
2329 *((unsigned int *)0xffff0ff0) = val;
2330+ pax_close_kernel();
2331 #endif
2332 }
2333
2334diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2335index 4767eb9..bf00668 100644
2336--- a/arch/arm/include/asm/uaccess.h
2337+++ b/arch/arm/include/asm/uaccess.h
2338@@ -18,6 +18,7 @@
2339 #include <asm/domain.h>
2340 #include <asm/unified.h>
2341 #include <asm/compiler.h>
2342+#include <asm/pgtable.h>
2343
2344 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2345 #include <asm-generic/uaccess-unaligned.h>
2346@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2347 static inline void set_fs(mm_segment_t fs)
2348 {
2349 current_thread_info()->addr_limit = fs;
2350- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2351+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2352 }
2353
2354 #define segment_eq(a,b) ((a) == (b))
2355
2356+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2357+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2358+
2359+static inline void pax_open_userland(void)
2360+{
2361+
2362+#ifdef CONFIG_PAX_MEMORY_UDEREF
2363+ if (segment_eq(get_fs(), USER_DS)) {
2364+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2365+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2366+ }
2367+#endif
2368+
2369+}
2370+
2371+static inline void pax_close_userland(void)
2372+{
2373+
2374+#ifdef CONFIG_PAX_MEMORY_UDEREF
2375+ if (segment_eq(get_fs(), USER_DS)) {
2376+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2377+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2378+ }
2379+#endif
2380+
2381+}
2382+
2383 #define __addr_ok(addr) ({ \
2384 unsigned long flag; \
2385 __asm__("cmp %2, %0; movlo %0, #0" \
2386@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2387
2388 #define get_user(x,p) \
2389 ({ \
2390+ int __e; \
2391 might_fault(); \
2392- __get_user_check(x,p); \
2393+ pax_open_userland(); \
2394+ __e = __get_user_check(x,p); \
2395+ pax_close_userland(); \
2396+ __e; \
2397 })
2398
2399 extern int __put_user_1(void *, unsigned int);
2400@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2401
2402 #define put_user(x,p) \
2403 ({ \
2404+ int __e; \
2405 might_fault(); \
2406- __put_user_check(x,p); \
2407+ pax_open_userland(); \
2408+ __e = __put_user_check(x,p); \
2409+ pax_close_userland(); \
2410+ __e; \
2411 })
2412
2413 #else /* CONFIG_MMU */
2414@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2415
2416 #endif /* CONFIG_MMU */
2417
2418+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size))
2419 #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
2420
2421 #define user_addr_max() \
2422@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2423 #define __get_user(x,ptr) \
2424 ({ \
2425 long __gu_err = 0; \
2426+ pax_open_userland(); \
2427 __get_user_err((x),(ptr),__gu_err); \
2428+ pax_close_userland(); \
2429 __gu_err; \
2430 })
2431
2432 #define __get_user_error(x,ptr,err) \
2433 ({ \
2434+ pax_open_userland(); \
2435 __get_user_err((x),(ptr),err); \
2436+ pax_close_userland(); \
2437 (void) 0; \
2438 })
2439
2440@@ -368,13 +409,17 @@ do { \
2441 #define __put_user(x,ptr) \
2442 ({ \
2443 long __pu_err = 0; \
2444+ pax_open_userland(); \
2445 __put_user_err((x),(ptr),__pu_err); \
2446+ pax_close_userland(); \
2447 __pu_err; \
2448 })
2449
2450 #define __put_user_error(x,ptr,err) \
2451 ({ \
2452+ pax_open_userland(); \
2453 __put_user_err((x),(ptr),err); \
2454+ pax_close_userland(); \
2455 (void) 0; \
2456 })
2457
2458@@ -474,11 +519,44 @@ do { \
2459
2460
2461 #ifdef CONFIG_MMU
2462-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2463-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2464+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2465+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2466+
2467+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2468+{
2469+ unsigned long ret;
2470+
2471+ check_object_size(to, n, false);
2472+ pax_open_userland();
2473+ ret = ___copy_from_user(to, from, n);
2474+ pax_close_userland();
2475+ return ret;
2476+}
2477+
2478+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2479+{
2480+ unsigned long ret;
2481+
2482+ check_object_size(from, n, true);
2483+ pax_open_userland();
2484+ ret = ___copy_to_user(to, from, n);
2485+ pax_close_userland();
2486+ return ret;
2487+}
2488+
2489 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2490-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2491+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2492 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2493+
2494+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2495+{
2496+ unsigned long ret;
2497+ pax_open_userland();
2498+ ret = ___clear_user(addr, n);
2499+ pax_close_userland();
2500+ return ret;
2501+}
2502+
2503 #else
2504 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2505 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2506@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2507
2508 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2509 {
2510+ if ((long)n < 0)
2511+ return n;
2512+
2513 if (access_ok(VERIFY_READ, from, n))
2514 n = __copy_from_user(to, from, n);
2515 else /* security hole - plug it */
2516@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2517
2518 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2519 {
2520+ if ((long)n < 0)
2521+ return n;
2522+
2523 if (access_ok(VERIFY_WRITE, to, n))
2524 n = __copy_to_user(to, from, n);
2525 return n;
2526diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2527index 5af0ed1..cea83883 100644
2528--- a/arch/arm/include/uapi/asm/ptrace.h
2529+++ b/arch/arm/include/uapi/asm/ptrace.h
2530@@ -92,7 +92,7 @@
2531 * ARMv7 groups of PSR bits
2532 */
2533 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2534-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2535+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2536 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2537 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2538
2539diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2540index a88671c..1cc895e 100644
2541--- a/arch/arm/kernel/armksyms.c
2542+++ b/arch/arm/kernel/armksyms.c
2543@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2544
2545 /* networking */
2546 EXPORT_SYMBOL(csum_partial);
2547-EXPORT_SYMBOL(csum_partial_copy_from_user);
2548+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2549 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2550 EXPORT_SYMBOL(__csum_ipv6_magic);
2551
2552@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
2553 #ifdef CONFIG_MMU
2554 EXPORT_SYMBOL(copy_page);
2555
2556-EXPORT_SYMBOL(__copy_from_user);
2557-EXPORT_SYMBOL(__copy_to_user);
2558-EXPORT_SYMBOL(__clear_user);
2559+EXPORT_SYMBOL(___copy_from_user);
2560+EXPORT_SYMBOL(___copy_to_user);
2561+EXPORT_SYMBOL(___clear_user);
2562
2563 EXPORT_SYMBOL(__get_user_1);
2564 EXPORT_SYMBOL(__get_user_2);
2565diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2566index 36276cd..9d7b13b 100644
2567--- a/arch/arm/kernel/entry-armv.S
2568+++ b/arch/arm/kernel/entry-armv.S
2569@@ -47,6 +47,87 @@
2570 9997:
2571 .endm
2572
2573+ .macro pax_enter_kernel
2574+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2575+ @ make aligned space for saved DACR
2576+ sub sp, sp, #8
2577+ @ save regs
2578+ stmdb sp!, {r1, r2}
2579+ @ read DACR from cpu_domain into r1
2580+ mov r2, sp
2581+ @ assume 8K pages, since we have to split the immediate in two
2582+ bic r2, r2, #(0x1fc0)
2583+ bic r2, r2, #(0x3f)
2584+ ldr r1, [r2, #TI_CPU_DOMAIN]
2585+ @ store old DACR on stack
2586+ str r1, [sp, #8]
2587+#ifdef CONFIG_PAX_KERNEXEC
2588+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2589+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2590+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2591+#endif
2592+#ifdef CONFIG_PAX_MEMORY_UDEREF
2593+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2594+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2595+#endif
2596+ @ write r1 to current_thread_info()->cpu_domain
2597+ str r1, [r2, #TI_CPU_DOMAIN]
2598+ @ write r1 to DACR
2599+ mcr p15, 0, r1, c3, c0, 0
2600+ @ instruction sync
2601+ instr_sync
2602+ @ restore regs
2603+ ldmia sp!, {r1, r2}
2604+#endif
2605+ .endm
2606+
2607+ .macro pax_open_userland
2608+#ifdef CONFIG_PAX_MEMORY_UDEREF
2609+ @ save regs
2610+ stmdb sp!, {r0, r1}
2611+ @ read DACR from cpu_domain into r1
2612+ mov r0, sp
2613+ @ assume 8K pages, since we have to split the immediate in two
2614+ bic r0, r0, #(0x1fc0)
2615+ bic r0, r0, #(0x3f)
2616+ ldr r1, [r0, #TI_CPU_DOMAIN]
2617+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2618+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2619+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2620+ @ write r1 to current_thread_info()->cpu_domain
2621+ str r1, [r0, #TI_CPU_DOMAIN]
2622+ @ write r1 to DACR
2623+ mcr p15, 0, r1, c3, c0, 0
2624+ @ instruction sync
2625+ instr_sync
2626+ @ restore regs
2627+ ldmia sp!, {r0, r1}
2628+#endif
2629+ .endm
2630+
2631+ .macro pax_close_userland
2632+#ifdef CONFIG_PAX_MEMORY_UDEREF
2633+ @ save regs
2634+ stmdb sp!, {r0, r1}
2635+ @ read DACR from cpu_domain into r1
2636+ mov r0, sp
2637+ @ assume 8K pages, since we have to split the immediate in two
2638+ bic r0, r0, #(0x1fc0)
2639+ bic r0, r0, #(0x3f)
2640+ ldr r1, [r0, #TI_CPU_DOMAIN]
2641+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2642+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2643+ @ write r1 to current_thread_info()->cpu_domain
2644+ str r1, [r0, #TI_CPU_DOMAIN]
2645+ @ write r1 to DACR
2646+ mcr p15, 0, r1, c3, c0, 0
2647+ @ instruction sync
2648+ instr_sync
2649+ @ restore regs
2650+ ldmia sp!, {r0, r1}
2651+#endif
2652+ .endm
2653+
2654 .macro pabt_helper
2655 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2656 #ifdef MULTI_PABORT
2657@@ -89,11 +170,15 @@
2658 * Invalid mode handlers
2659 */
2660 .macro inv_entry, reason
2661+
2662+ pax_enter_kernel
2663+
2664 sub sp, sp, #S_FRAME_SIZE
2665 ARM( stmib sp, {r1 - lr} )
2666 THUMB( stmia sp, {r0 - r12} )
2667 THUMB( str sp, [sp, #S_SP] )
2668 THUMB( str lr, [sp, #S_LR] )
2669+
2670 mov r1, #\reason
2671 .endm
2672
2673@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2674 .macro svc_entry, stack_hole=0
2675 UNWIND(.fnstart )
2676 UNWIND(.save {r0 - pc} )
2677+
2678+ pax_enter_kernel
2679+
2680 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2681+
2682 #ifdef CONFIG_THUMB2_KERNEL
2683 SPFIX( str r0, [sp] ) @ temporarily saved
2684 SPFIX( mov r0, sp )
2685@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2686 ldmia r0, {r3 - r5}
2687 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2688 mov r6, #-1 @ "" "" "" ""
2689+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2690+ @ offset sp by 8 as done in pax_enter_kernel
2691+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2692+#else
2693 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2694+#endif
2695 SPFIX( addeq r2, r2, #4 )
2696 str r3, [sp, #-4]! @ save the "real" r0 copied
2697 @ from the exception stack
2698@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
2699 .macro usr_entry
2700 UNWIND(.fnstart )
2701 UNWIND(.cantunwind ) @ don't unwind the user space
2702+
2703+ pax_enter_kernel_user
2704+
2705 sub sp, sp, #S_FRAME_SIZE
2706 ARM( stmib sp, {r1 - r12} )
2707 THUMB( stmia sp, {r0 - r12} )
2708@@ -421,7 +518,9 @@ __und_usr:
2709 tst r3, #PSR_T_BIT @ Thumb mode?
2710 bne __und_usr_thumb
2711 sub r4, r2, #4 @ ARM instr at LR - 4
2712+ pax_open_userland
2713 1: ldrt r0, [r4]
2714+ pax_close_userland
2715 ARM_BE8(rev r0, r0) @ little endian instruction
2716
2717 @ r0 = 32-bit ARM instruction which caused the exception
2718@@ -455,11 +554,15 @@ __und_usr_thumb:
2719 */
2720 .arch armv6t2
2721 #endif
2722+ pax_open_userland
2723 2: ldrht r5, [r4]
2724+ pax_close_userland
2725 ARM_BE8(rev16 r5, r5) @ little endian instruction
2726 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2727 blo __und_usr_fault_16 @ 16bit undefined instruction
2728+ pax_open_userland
2729 3: ldrht r0, [r2]
2730+ pax_close_userland
2731 ARM_BE8(rev16 r0, r0) @ little endian instruction
2732 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2733 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2734@@ -489,7 +592,8 @@ ENDPROC(__und_usr)
2735 */
2736 .pushsection .fixup, "ax"
2737 .align 2
2738-4: str r4, [sp, #S_PC] @ retry current instruction
2739+4: pax_close_userland
2740+ str r4, [sp, #S_PC] @ retry current instruction
2741 ret r9
2742 .popsection
2743 .pushsection __ex_table,"a"
2744@@ -698,7 +802,7 @@ ENTRY(__switch_to)
2745 THUMB( str lr, [ip], #4 )
2746 ldr r4, [r2, #TI_TP_VALUE]
2747 ldr r5, [r2, #TI_TP_VALUE + 4]
2748-#ifdef CONFIG_CPU_USE_DOMAINS
2749+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2750 ldr r6, [r2, #TI_CPU_DOMAIN]
2751 #endif
2752 switch_tls r1, r4, r5, r3, r7
2753@@ -707,7 +811,7 @@ ENTRY(__switch_to)
2754 ldr r8, =__stack_chk_guard
2755 ldr r7, [r7, #TSK_STACK_CANARY]
2756 #endif
2757-#ifdef CONFIG_CPU_USE_DOMAINS
2758+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2759 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2760 #endif
2761 mov r5, r0
2762diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2763index e52fe5a..1b0a924 100644
2764--- a/arch/arm/kernel/entry-common.S
2765+++ b/arch/arm/kernel/entry-common.S
2766@@ -11,18 +11,46 @@
2767 #include <asm/assembler.h>
2768 #include <asm/unistd.h>
2769 #include <asm/ftrace.h>
2770+#include <asm/domain.h>
2771 #include <asm/unwind.h>
2772
2773+#include "entry-header.S"
2774+
2775 #ifdef CONFIG_NEED_RET_TO_USER
2776 #include <mach/entry-macro.S>
2777 #else
2778 .macro arch_ret_to_user, tmp1, tmp2
2779+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2780+ @ save regs
2781+ stmdb sp!, {r1, r2}
2782+ @ read DACR from cpu_domain into r1
2783+ mov r2, sp
2784+ @ assume 8K pages, since we have to split the immediate in two
2785+ bic r2, r2, #(0x1fc0)
2786+ bic r2, r2, #(0x3f)
2787+ ldr r1, [r2, #TI_CPU_DOMAIN]
2788+#ifdef CONFIG_PAX_KERNEXEC
2789+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2790+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2791+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2792+#endif
2793+#ifdef CONFIG_PAX_MEMORY_UDEREF
2794+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2795+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2796+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2797+#endif
2798+ @ write r1 to current_thread_info()->cpu_domain
2799+ str r1, [r2, #TI_CPU_DOMAIN]
2800+ @ write r1 to DACR
2801+ mcr p15, 0, r1, c3, c0, 0
2802+ @ instruction sync
2803+ instr_sync
2804+ @ restore regs
2805+ ldmia sp!, {r1, r2}
2806+#endif
2807 .endm
2808 #endif
2809
2810-#include "entry-header.S"
2811-
2812-
2813 .align 5
2814 /*
2815 * This is the fast syscall return path. We do as little as
2816@@ -406,6 +434,12 @@ ENTRY(vector_swi)
2817 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2818 #endif
2819
2820+ /*
2821+ * do this here to avoid a performance hit of wrapping the code above
2822+ * that directly dereferences userland to parse the SWI instruction
2823+ */
2824+ pax_enter_kernel_user
2825+
2826 adr tbl, sys_call_table @ load syscall table pointer
2827
2828 #if defined(CONFIG_OABI_COMPAT)
2829diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2830index 2fdf867..6e909e4 100644
2831--- a/arch/arm/kernel/entry-header.S
2832+++ b/arch/arm/kernel/entry-header.S
2833@@ -188,6 +188,60 @@
2834 msr cpsr_c, \rtemp @ switch back to the SVC mode
2835 .endm
2836
2837+ .macro pax_enter_kernel_user
2838+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2839+ @ save regs
2840+ stmdb sp!, {r0, r1}
2841+ @ read DACR from cpu_domain into r1
2842+ mov r0, sp
2843+ @ assume 8K pages, since we have to split the immediate in two
2844+ bic r0, r0, #(0x1fc0)
2845+ bic r0, r0, #(0x3f)
2846+ ldr r1, [r0, #TI_CPU_DOMAIN]
2847+#ifdef CONFIG_PAX_MEMORY_UDEREF
2848+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2849+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2850+#endif
2851+#ifdef CONFIG_PAX_KERNEXEC
2852+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2853+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2854+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2855+#endif
2856+ @ write r1 to current_thread_info()->cpu_domain
2857+ str r1, [r0, #TI_CPU_DOMAIN]
2858+ @ write r1 to DACR
2859+ mcr p15, 0, r1, c3, c0, 0
2860+ @ instruction sync
2861+ instr_sync
2862+ @ restore regs
2863+ ldmia sp!, {r0, r1}
2864+#endif
2865+ .endm
2866+
2867+ .macro pax_exit_kernel
2868+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2869+ @ save regs
2870+ stmdb sp!, {r0, r1}
2871+ @ read old DACR from stack into r1
2872+ ldr r1, [sp, #(8 + S_SP)]
2873+ sub r1, r1, #8
2874+ ldr r1, [r1]
2875+
2876+ @ write r1 to current_thread_info()->cpu_domain
2877+ mov r0, sp
2878+ @ assume 8K pages, since we have to split the immediate in two
2879+ bic r0, r0, #(0x1fc0)
2880+ bic r0, r0, #(0x3f)
2881+ str r1, [r0, #TI_CPU_DOMAIN]
2882+ @ write r1 to DACR
2883+ mcr p15, 0, r1, c3, c0, 0
2884+ @ instruction sync
2885+ instr_sync
2886+ @ restore regs
2887+ ldmia sp!, {r0, r1}
2888+#endif
2889+ .endm
2890+
2891 #ifndef CONFIG_THUMB2_KERNEL
2892 .macro svc_exit, rpsr, irq = 0
2893 .if \irq != 0
2894@@ -207,6 +261,9 @@
2895 blne trace_hardirqs_off
2896 #endif
2897 .endif
2898+
2899+ pax_exit_kernel
2900+
2901 msr spsr_cxsf, \rpsr
2902 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2903 @ We must avoid clrex due to Cortex-A15 erratum #830321
2904@@ -254,6 +311,9 @@
2905 blne trace_hardirqs_off
2906 #endif
2907 .endif
2908+
2909+ pax_exit_kernel
2910+
2911 ldr lr, [sp, #S_SP] @ top of the stack
2912 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2913
2914diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2915index 918875d..cd5fa27 100644
2916--- a/arch/arm/kernel/fiq.c
2917+++ b/arch/arm/kernel/fiq.c
2918@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
2919 void *base = vectors_page;
2920 unsigned offset = FIQ_OFFSET;
2921
2922+ pax_open_kernel();
2923 memcpy(base + offset, start, length);
2924+ pax_close_kernel();
2925+
2926 if (!cache_is_vipt_nonaliasing())
2927 flush_icache_range((unsigned long)base + offset, offset +
2928 length);
2929diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2930index 664eee8..f470938 100644
2931--- a/arch/arm/kernel/head.S
2932+++ b/arch/arm/kernel/head.S
2933@@ -437,7 +437,7 @@ __enable_mmu:
2934 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2935 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2936 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2937- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2938+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2939 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2940 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2941 #endif
2942diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2943index 6a4dffe..4a86a70 100644
2944--- a/arch/arm/kernel/module.c
2945+++ b/arch/arm/kernel/module.c
2946@@ -38,12 +38,39 @@
2947 #endif
2948
2949 #ifdef CONFIG_MMU
2950-void *module_alloc(unsigned long size)
2951+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2952 {
2953+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2954+ return NULL;
2955 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2956- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
2957+ GFP_KERNEL, prot, NUMA_NO_NODE,
2958 __builtin_return_address(0));
2959 }
2960+
2961+void *module_alloc(unsigned long size)
2962+{
2963+
2964+#ifdef CONFIG_PAX_KERNEXEC
2965+ return __module_alloc(size, PAGE_KERNEL);
2966+#else
2967+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2968+#endif
2969+
2970+}
2971+
2972+#ifdef CONFIG_PAX_KERNEXEC
2973+void module_free_exec(struct module *mod, void *module_region)
2974+{
2975+ module_free(mod, module_region);
2976+}
2977+EXPORT_SYMBOL(module_free_exec);
2978+
2979+void *module_alloc_exec(unsigned long size)
2980+{
2981+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2982+}
2983+EXPORT_SYMBOL(module_alloc_exec);
2984+#endif
2985 #endif
2986
2987 int
2988diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2989index 07314af..c46655c 100644
2990--- a/arch/arm/kernel/patch.c
2991+++ b/arch/arm/kernel/patch.c
2992@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2993 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2994 int size;
2995
2996+ pax_open_kernel();
2997 if (thumb2 && __opcode_is_thumb16(insn)) {
2998 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2999 size = sizeof(u16);
3000@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
3001 *(u32 *)addr = insn;
3002 size = sizeof(u32);
3003 }
3004+ pax_close_kernel();
3005
3006 flush_icache_range((uintptr_t)(addr),
3007 (uintptr_t)(addr) + size);
3008diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
3009index a35f6eb..7af43a0 100644
3010--- a/arch/arm/kernel/process.c
3011+++ b/arch/arm/kernel/process.c
3012@@ -212,6 +212,7 @@ void machine_power_off(void)
3013
3014 if (pm_power_off)
3015 pm_power_off();
3016+ BUG();
3017 }
3018
3019 /*
3020@@ -225,7 +226,7 @@ void machine_power_off(void)
3021 * executing pre-reset code, and using RAM that the primary CPU's code wishes
3022 * to use. Implementing such co-ordination would be essentially impossible.
3023 */
3024-void machine_restart(char *cmd)
3025+__noreturn void machine_restart(char *cmd)
3026 {
3027 local_irq_disable();
3028 smp_send_stop();
3029@@ -248,8 +249,8 @@ void __show_regs(struct pt_regs *regs)
3030
3031 show_regs_print_info(KERN_DEFAULT);
3032
3033- print_symbol("PC is at %s\n", instruction_pointer(regs));
3034- print_symbol("LR is at %s\n", regs->ARM_lr);
3035+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
3036+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
3037 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
3038 "sp : %08lx ip : %08lx fp : %08lx\n",
3039 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
3040@@ -427,12 +428,6 @@ unsigned long get_wchan(struct task_struct *p)
3041 return 0;
3042 }
3043
3044-unsigned long arch_randomize_brk(struct mm_struct *mm)
3045-{
3046- unsigned long range_end = mm->brk + 0x02000000;
3047- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
3048-}
3049-
3050 #ifdef CONFIG_MMU
3051 #ifdef CONFIG_KUSER_HELPERS
3052 /*
3053@@ -448,7 +443,7 @@ static struct vm_area_struct gate_vma = {
3054
3055 static int __init gate_vma_init(void)
3056 {
3057- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
3058+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
3059 return 0;
3060 }
3061 arch_initcall(gate_vma_init);
3062@@ -474,41 +469,16 @@ int in_gate_area_no_mm(unsigned long addr)
3063
3064 const char *arch_vma_name(struct vm_area_struct *vma)
3065 {
3066- return is_gate_vma(vma) ? "[vectors]" :
3067- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
3068- "[sigpage]" : NULL;
3069+ return is_gate_vma(vma) ? "[vectors]" : NULL;
3070 }
3071
3072-static struct page *signal_page;
3073-extern struct page *get_signal_page(void);
3074-
3075 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3076 {
3077 struct mm_struct *mm = current->mm;
3078- unsigned long addr;
3079- int ret;
3080-
3081- if (!signal_page)
3082- signal_page = get_signal_page();
3083- if (!signal_page)
3084- return -ENOMEM;
3085
3086 down_write(&mm->mmap_sem);
3087- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
3088- if (IS_ERR_VALUE(addr)) {
3089- ret = addr;
3090- goto up_fail;
3091- }
3092-
3093- ret = install_special_mapping(mm, addr, PAGE_SIZE,
3094- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3095- &signal_page);
3096-
3097- if (ret == 0)
3098- mm->context.sigpage = addr;
3099-
3100- up_fail:
3101+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3102 up_write(&mm->mmap_sem);
3103- return ret;
3104+ return 0;
3105 }
3106 #endif
3107diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3108index f73891b..cf3004e 100644
3109--- a/arch/arm/kernel/psci.c
3110+++ b/arch/arm/kernel/psci.c
3111@@ -28,7 +28,7 @@
3112 #include <asm/psci.h>
3113 #include <asm/system_misc.h>
3114
3115-struct psci_operations psci_ops;
3116+struct psci_operations psci_ops __read_only;
3117
3118 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3119 typedef int (*psci_initcall_t)(const struct device_node *);
3120diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3121index 0c27ed6..b67388e 100644
3122--- a/arch/arm/kernel/ptrace.c
3123+++ b/arch/arm/kernel/ptrace.c
3124@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3125 regs->ARM_ip = ip;
3126 }
3127
3128+#ifdef CONFIG_GRKERNSEC_SETXID
3129+extern void gr_delayed_cred_worker(void);
3130+#endif
3131+
3132 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3133 {
3134 current_thread_info()->syscall = scno;
3135
3136+#ifdef CONFIG_GRKERNSEC_SETXID
3137+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3138+ gr_delayed_cred_worker();
3139+#endif
3140+
3141 /* Do the secure computing check first; failures should be fast. */
3142 if (secure_computing(scno) == -1)
3143 return -1;
3144diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3145index 84db893d..bd8213a 100644
3146--- a/arch/arm/kernel/setup.c
3147+++ b/arch/arm/kernel/setup.c
3148@@ -104,21 +104,23 @@ EXPORT_SYMBOL(elf_hwcap);
3149 unsigned int elf_hwcap2 __read_mostly;
3150 EXPORT_SYMBOL(elf_hwcap2);
3151
3152+pteval_t __supported_pte_mask __read_only;
3153+pmdval_t __supported_pmd_mask __read_only;
3154
3155 #ifdef MULTI_CPU
3156-struct processor processor __read_mostly;
3157+struct processor processor __read_only;
3158 #endif
3159 #ifdef MULTI_TLB
3160-struct cpu_tlb_fns cpu_tlb __read_mostly;
3161+struct cpu_tlb_fns cpu_tlb __read_only;
3162 #endif
3163 #ifdef MULTI_USER
3164-struct cpu_user_fns cpu_user __read_mostly;
3165+struct cpu_user_fns cpu_user __read_only;
3166 #endif
3167 #ifdef MULTI_CACHE
3168-struct cpu_cache_fns cpu_cache __read_mostly;
3169+struct cpu_cache_fns cpu_cache __read_only;
3170 #endif
3171 #ifdef CONFIG_OUTER_CACHE
3172-struct outer_cache_fns outer_cache __read_mostly;
3173+struct outer_cache_fns outer_cache __read_only;
3174 EXPORT_SYMBOL(outer_cache);
3175 #endif
3176
3177@@ -251,9 +253,13 @@ static int __get_cpu_architecture(void)
3178 asm("mrc p15, 0, %0, c0, c1, 4"
3179 : "=r" (mmfr0));
3180 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3181- (mmfr0 & 0x000000f0) >= 0x00000030)
3182+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3183 cpu_arch = CPU_ARCH_ARMv7;
3184- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3185+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3186+ __supported_pte_mask |= L_PTE_PXN;
3187+ __supported_pmd_mask |= PMD_PXNTABLE;
3188+ }
3189+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3190 (mmfr0 & 0x000000f0) == 0x00000020)
3191 cpu_arch = CPU_ARCH_ARMv6;
3192 else
3193diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3194index bd19834..e4d8c66 100644
3195--- a/arch/arm/kernel/signal.c
3196+++ b/arch/arm/kernel/signal.c
3197@@ -24,8 +24,6 @@
3198
3199 extern const unsigned long sigreturn_codes[7];
3200
3201-static unsigned long signal_return_offset;
3202-
3203 #ifdef CONFIG_CRUNCH
3204 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3205 {
3206@@ -396,8 +394,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3207 * except when the MPU has protected the vectors
3208 * page from PL0
3209 */
3210- retcode = mm->context.sigpage + signal_return_offset +
3211- (idx << 2) + thumb;
3212+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3213 } else
3214 #endif
3215 {
3216@@ -604,33 +601,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3217 } while (thread_flags & _TIF_WORK_MASK);
3218 return 0;
3219 }
3220-
3221-struct page *get_signal_page(void)
3222-{
3223- unsigned long ptr;
3224- unsigned offset;
3225- struct page *page;
3226- void *addr;
3227-
3228- page = alloc_pages(GFP_KERNEL, 0);
3229-
3230- if (!page)
3231- return NULL;
3232-
3233- addr = page_address(page);
3234-
3235- /* Give the signal return code some randomness */
3236- offset = 0x200 + (get_random_int() & 0x7fc);
3237- signal_return_offset = offset;
3238-
3239- /*
3240- * Copy signal return handlers into the vector page, and
3241- * set sigreturn to be a pointer to these.
3242- */
3243- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3244-
3245- ptr = (unsigned long)addr + offset;
3246- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3247-
3248- return page;
3249-}
3250diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3251index bbe22fc..d7737f5 100644
3252--- a/arch/arm/kernel/smp.c
3253+++ b/arch/arm/kernel/smp.c
3254@@ -76,7 +76,7 @@ enum ipi_msg_type {
3255
3256 static DECLARE_COMPLETION(cpu_running);
3257
3258-static struct smp_operations smp_ops;
3259+static struct smp_operations smp_ops __read_only;
3260
3261 void __init smp_set_ops(struct smp_operations *ops)
3262 {
3263diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3264index 7a3be1d..b00c7de 100644
3265--- a/arch/arm/kernel/tcm.c
3266+++ b/arch/arm/kernel/tcm.c
3267@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3268 .virtual = ITCM_OFFSET,
3269 .pfn = __phys_to_pfn(ITCM_OFFSET),
3270 .length = 0,
3271- .type = MT_MEMORY_RWX_ITCM,
3272+ .type = MT_MEMORY_RX_ITCM,
3273 }
3274 };
3275
3276@@ -267,7 +267,9 @@ no_dtcm:
3277 start = &__sitcm_text;
3278 end = &__eitcm_text;
3279 ram = &__itcm_start;
3280+ pax_open_kernel();
3281 memcpy(start, ram, itcm_code_sz);
3282+ pax_close_kernel();
3283 pr_debug("CPU ITCM: copied code from %p - %p\n",
3284 start, end);
3285 itcm_present = true;
3286diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3287index bea63f5..bc660a7 100644
3288--- a/arch/arm/kernel/traps.c
3289+++ b/arch/arm/kernel/traps.c
3290@@ -64,7 +64,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3291 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3292 {
3293 #ifdef CONFIG_KALLSYMS
3294- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3295+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3296 #else
3297 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3298 #endif
3299@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3300 static int die_owner = -1;
3301 static unsigned int die_nest_count;
3302
3303+extern void gr_handle_kernel_exploit(void);
3304+
3305 static unsigned long oops_begin(void)
3306 {
3307 int cpu;
3308@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3309 panic("Fatal exception in interrupt");
3310 if (panic_on_oops)
3311 panic("Fatal exception");
3312+
3313+ gr_handle_kernel_exploit();
3314+
3315 if (signr)
3316 do_exit(signr);
3317 }
3318@@ -860,7 +865,11 @@ void __init early_trap_init(void *vectors_base)
3319 kuser_init(vectors_base);
3320
3321 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3322- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3323+
3324+#ifndef CONFIG_PAX_MEMORY_UDEREF
3325+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3326+#endif
3327+
3328 #else /* ifndef CONFIG_CPU_V7M */
3329 /*
3330 * on V7-M there is no need to copy the vector table to a dedicated
3331diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3332index 6f57cb9..645f8c4 100644
3333--- a/arch/arm/kernel/vmlinux.lds.S
3334+++ b/arch/arm/kernel/vmlinux.lds.S
3335@@ -8,7 +8,11 @@
3336 #include <asm/thread_info.h>
3337 #include <asm/memory.h>
3338 #include <asm/page.h>
3339-
3340+
3341+#ifdef CONFIG_PAX_KERNEXEC
3342+#include <asm/pgtable.h>
3343+#endif
3344+
3345 #define PROC_INFO \
3346 . = ALIGN(4); \
3347 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3348@@ -34,7 +38,7 @@
3349 #endif
3350
3351 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3352- defined(CONFIG_GENERIC_BUG)
3353+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3354 #define ARM_EXIT_KEEP(x) x
3355 #define ARM_EXIT_DISCARD(x)
3356 #else
3357@@ -90,6 +94,11 @@ SECTIONS
3358 _text = .;
3359 HEAD_TEXT
3360 }
3361+
3362+#ifdef CONFIG_PAX_KERNEXEC
3363+ . = ALIGN(1<<SECTION_SHIFT);
3364+#endif
3365+
3366 .text : { /* Real text segment */
3367 _stext = .; /* Text and read-only data */
3368 __exception_text_start = .;
3369@@ -112,6 +121,8 @@ SECTIONS
3370 ARM_CPU_KEEP(PROC_INFO)
3371 }
3372
3373+ _etext = .; /* End of text section */
3374+
3375 RO_DATA(PAGE_SIZE)
3376
3377 . = ALIGN(4);
3378@@ -142,7 +153,9 @@ SECTIONS
3379
3380 NOTES
3381
3382- _etext = .; /* End of text and rodata section */
3383+#ifdef CONFIG_PAX_KERNEXEC
3384+ . = ALIGN(1<<SECTION_SHIFT);
3385+#endif
3386
3387 #ifndef CONFIG_XIP_KERNEL
3388 . = ALIGN(PAGE_SIZE);
3389@@ -220,6 +233,11 @@ SECTIONS
3390 . = PAGE_OFFSET + TEXT_OFFSET;
3391 #else
3392 __init_end = .;
3393+
3394+#ifdef CONFIG_PAX_KERNEXEC
3395+ . = ALIGN(1<<SECTION_SHIFT);
3396+#endif
3397+
3398 . = ALIGN(THREAD_SIZE);
3399 __data_loc = .;
3400 #endif
3401diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3402index a99e0cd..ab56421d 100644
3403--- a/arch/arm/kvm/arm.c
3404+++ b/arch/arm/kvm/arm.c
3405@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3406 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3407
3408 /* The VMID used in the VTTBR */
3409-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3410+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3411 static u8 kvm_next_vmid;
3412 static DEFINE_SPINLOCK(kvm_vmid_lock);
3413
3414@@ -372,7 +372,7 @@ void force_vm_exit(const cpumask_t *mask)
3415 */
3416 static bool need_new_vmid_gen(struct kvm *kvm)
3417 {
3418- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3419+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3420 }
3421
3422 /**
3423@@ -405,7 +405,7 @@ static void update_vttbr(struct kvm *kvm)
3424
3425 /* First user of a new VMID generation? */
3426 if (unlikely(kvm_next_vmid == 0)) {
3427- atomic64_inc(&kvm_vmid_gen);
3428+ atomic64_inc_unchecked(&kvm_vmid_gen);
3429 kvm_next_vmid = 1;
3430
3431 /*
3432@@ -422,7 +422,7 @@ static void update_vttbr(struct kvm *kvm)
3433 kvm_call_hyp(__kvm_flush_vm_context);
3434 }
3435
3436- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3437+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3438 kvm->arch.vmid = kvm_next_vmid;
3439 kvm_next_vmid++;
3440
3441@@ -997,7 +997,7 @@ static void check_kvm_target_cpu(void *ret)
3442 /**
3443 * Initialize Hyp-mode and memory mappings on all CPUs.
3444 */
3445-int kvm_arch_init(void *opaque)
3446+int kvm_arch_init(const void *opaque)
3447 {
3448 int err;
3449 int ret, cpu;
3450diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3451index 14a0d98..7771a7d 100644
3452--- a/arch/arm/lib/clear_user.S
3453+++ b/arch/arm/lib/clear_user.S
3454@@ -12,14 +12,14 @@
3455
3456 .text
3457
3458-/* Prototype: int __clear_user(void *addr, size_t sz)
3459+/* Prototype: int ___clear_user(void *addr, size_t sz)
3460 * Purpose : clear some user memory
3461 * Params : addr - user memory address to clear
3462 * : sz - number of bytes to clear
3463 * Returns : number of bytes NOT cleared
3464 */
3465 ENTRY(__clear_user_std)
3466-WEAK(__clear_user)
3467+WEAK(___clear_user)
3468 stmfd sp!, {r1, lr}
3469 mov r2, #0
3470 cmp r1, #4
3471@@ -44,7 +44,7 @@ WEAK(__clear_user)
3472 USER( strnebt r2, [r0])
3473 mov r0, #0
3474 ldmfd sp!, {r1, pc}
3475-ENDPROC(__clear_user)
3476+ENDPROC(___clear_user)
3477 ENDPROC(__clear_user_std)
3478
3479 .pushsection .fixup,"ax"
3480diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3481index 66a477a..bee61d3 100644
3482--- a/arch/arm/lib/copy_from_user.S
3483+++ b/arch/arm/lib/copy_from_user.S
3484@@ -16,7 +16,7 @@
3485 /*
3486 * Prototype:
3487 *
3488- * size_t __copy_from_user(void *to, const void *from, size_t n)
3489+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3490 *
3491 * Purpose:
3492 *
3493@@ -84,11 +84,11 @@
3494
3495 .text
3496
3497-ENTRY(__copy_from_user)
3498+ENTRY(___copy_from_user)
3499
3500 #include "copy_template.S"
3501
3502-ENDPROC(__copy_from_user)
3503+ENDPROC(___copy_from_user)
3504
3505 .pushsection .fixup,"ax"
3506 .align 0
3507diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3508index 6ee2f67..d1cce76 100644
3509--- a/arch/arm/lib/copy_page.S
3510+++ b/arch/arm/lib/copy_page.S
3511@@ -10,6 +10,7 @@
3512 * ASM optimised string functions
3513 */
3514 #include <linux/linkage.h>
3515+#include <linux/const.h>
3516 #include <asm/assembler.h>
3517 #include <asm/asm-offsets.h>
3518 #include <asm/cache.h>
3519diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3520index d066df6..df28194 100644
3521--- a/arch/arm/lib/copy_to_user.S
3522+++ b/arch/arm/lib/copy_to_user.S
3523@@ -16,7 +16,7 @@
3524 /*
3525 * Prototype:
3526 *
3527- * size_t __copy_to_user(void *to, const void *from, size_t n)
3528+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3529 *
3530 * Purpose:
3531 *
3532@@ -88,11 +88,11 @@
3533 .text
3534
3535 ENTRY(__copy_to_user_std)
3536-WEAK(__copy_to_user)
3537+WEAK(___copy_to_user)
3538
3539 #include "copy_template.S"
3540
3541-ENDPROC(__copy_to_user)
3542+ENDPROC(___copy_to_user)
3543 ENDPROC(__copy_to_user_std)
3544
3545 .pushsection .fixup,"ax"
3546diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3547index 7d08b43..f7ca7ea 100644
3548--- a/arch/arm/lib/csumpartialcopyuser.S
3549+++ b/arch/arm/lib/csumpartialcopyuser.S
3550@@ -57,8 +57,8 @@
3551 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3552 */
3553
3554-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3555-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3556+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3557+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3558
3559 #include "csumpartialcopygeneric.S"
3560
3561diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3562index 312d43e..21d2322 100644
3563--- a/arch/arm/lib/delay.c
3564+++ b/arch/arm/lib/delay.c
3565@@ -29,7 +29,7 @@
3566 /*
3567 * Default to the loop-based delay implementation.
3568 */
3569-struct arm_delay_ops arm_delay_ops = {
3570+struct arm_delay_ops arm_delay_ops __read_only = {
3571 .delay = __loop_delay,
3572 .const_udelay = __loop_const_udelay,
3573 .udelay = __loop_udelay,
3574diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3575index 3e58d71..029817c 100644
3576--- a/arch/arm/lib/uaccess_with_memcpy.c
3577+++ b/arch/arm/lib/uaccess_with_memcpy.c
3578@@ -136,7 +136,7 @@ out:
3579 }
3580
3581 unsigned long
3582-__copy_to_user(void __user *to, const void *from, unsigned long n)
3583+___copy_to_user(void __user *to, const void *from, unsigned long n)
3584 {
3585 /*
3586 * This test is stubbed out of the main function above to keep
3587@@ -190,7 +190,7 @@ out:
3588 return n;
3589 }
3590
3591-unsigned long __clear_user(void __user *addr, unsigned long n)
3592+unsigned long ___clear_user(void __user *addr, unsigned long n)
3593 {
3594 /* See rational for this in __copy_to_user() above. */
3595 if (n < 64)
3596diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c
3597index f7a07a5..258e1f7 100644
3598--- a/arch/arm/mach-at91/setup.c
3599+++ b/arch/arm/mach-at91/setup.c
3600@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length)
3601
3602 desc->pfn = __phys_to_pfn(base);
3603 desc->length = length;
3604- desc->type = MT_MEMORY_RWX_NONCACHED;
3605+ desc->type = MT_MEMORY_RW_NONCACHED;
3606
3607 pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n",
3608 base, length, desc->virtual);
3609diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
3610index 7f352de..6dc0929 100644
3611--- a/arch/arm/mach-keystone/keystone.c
3612+++ b/arch/arm/mach-keystone/keystone.c
3613@@ -27,7 +27,7 @@
3614
3615 #include "keystone.h"
3616
3617-static struct notifier_block platform_nb;
3618+static notifier_block_no_const platform_nb;
3619 static unsigned long keystone_dma_pfn_offset __read_mostly;
3620
3621 static int keystone_platform_notifier(struct notifier_block *nb,
3622diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3623index 044b511..afd1da8 100644
3624--- a/arch/arm/mach-mvebu/coherency.c
3625+++ b/arch/arm/mach-mvebu/coherency.c
3626@@ -316,7 +316,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3627
3628 /*
3629 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3630- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3631+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3632 * is needed as a workaround for a deadlock issue between the PCIe
3633 * interface and the cache controller.
3634 */
3635@@ -329,7 +329,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3636 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3637
3638 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3639- mtype = MT_UNCACHED;
3640+ mtype = MT_UNCACHED_RW;
3641
3642 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3643 }
3644diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3645index aead77a..a2253fa 100644
3646--- a/arch/arm/mach-omap2/board-n8x0.c
3647+++ b/arch/arm/mach-omap2/board-n8x0.c
3648@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3649 }
3650 #endif
3651
3652-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3653+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3654 .late_init = n8x0_menelaus_late_init,
3655 };
3656
3657diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3658index 2f97228..6ce10e1 100644
3659--- a/arch/arm/mach-omap2/gpmc.c
3660+++ b/arch/arm/mach-omap2/gpmc.c
3661@@ -151,7 +151,6 @@ struct omap3_gpmc_regs {
3662 };
3663
3664 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3665-static struct irq_chip gpmc_irq_chip;
3666 static int gpmc_irq_start;
3667
3668 static struct resource gpmc_mem_root;
3669@@ -736,6 +735,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3670
3671 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3672
3673+static struct irq_chip gpmc_irq_chip = {
3674+ .name = "gpmc",
3675+ .irq_startup = gpmc_irq_noop_ret,
3676+ .irq_enable = gpmc_irq_enable,
3677+ .irq_disable = gpmc_irq_disable,
3678+ .irq_shutdown = gpmc_irq_noop,
3679+ .irq_ack = gpmc_irq_noop,
3680+ .irq_mask = gpmc_irq_noop,
3681+ .irq_unmask = gpmc_irq_noop,
3682+
3683+};
3684+
3685 static int gpmc_setup_irq(void)
3686 {
3687 int i;
3688@@ -750,15 +761,6 @@ static int gpmc_setup_irq(void)
3689 return gpmc_irq_start;
3690 }
3691
3692- gpmc_irq_chip.name = "gpmc";
3693- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3694- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3695- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3696- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3697- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3698- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3699- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3700-
3701 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3702 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3703
3704diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3705index 4001325..b14e2a0 100644
3706--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3707+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3708@@ -84,7 +84,7 @@ struct cpu_pm_ops {
3709 int (*finish_suspend)(unsigned long cpu_state);
3710 void (*resume)(void);
3711 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3712-};
3713+} __no_const;
3714
3715 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3716 static struct powerdomain *mpuss_pd;
3717@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
3718 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3719 {}
3720
3721-struct cpu_pm_ops omap_pm_ops = {
3722+static struct cpu_pm_ops omap_pm_ops __read_only = {
3723 .finish_suspend = default_finish_suspend,
3724 .resume = dummy_cpu_resume,
3725 .scu_prepare = dummy_scu_prepare,
3726diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3727index 37843a7..a98df13 100644
3728--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3729+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3730@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3731 return NOTIFY_OK;
3732 }
3733
3734-static struct notifier_block __refdata irq_hotplug_notifier = {
3735+static struct notifier_block irq_hotplug_notifier = {
3736 .notifier_call = irq_cpu_hotplug_notify,
3737 };
3738
3739diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3740index d22c30d..23697a1 100644
3741--- a/arch/arm/mach-omap2/omap_device.c
3742+++ b/arch/arm/mach-omap2/omap_device.c
3743@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
3744 struct platform_device __init *omap_device_build(const char *pdev_name,
3745 int pdev_id,
3746 struct omap_hwmod *oh,
3747- void *pdata, int pdata_len)
3748+ const void *pdata, int pdata_len)
3749 {
3750 struct omap_hwmod *ohs[] = { oh };
3751
3752@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3753 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3754 int pdev_id,
3755 struct omap_hwmod **ohs,
3756- int oh_cnt, void *pdata,
3757+ int oh_cnt, const void *pdata,
3758 int pdata_len)
3759 {
3760 int ret = -ENOMEM;
3761diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3762index 78c02b3..c94109a 100644
3763--- a/arch/arm/mach-omap2/omap_device.h
3764+++ b/arch/arm/mach-omap2/omap_device.h
3765@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3766 /* Core code interface */
3767
3768 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3769- struct omap_hwmod *oh, void *pdata,
3770+ struct omap_hwmod *oh, const void *pdata,
3771 int pdata_len);
3772
3773 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3774 struct omap_hwmod **oh, int oh_cnt,
3775- void *pdata, int pdata_len);
3776+ const void *pdata, int pdata_len);
3777
3778 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3779 struct omap_hwmod **ohs, int oh_cnt);
3780diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3781index 9e91a4e..357ed0d 100644
3782--- a/arch/arm/mach-omap2/omap_hwmod.c
3783+++ b/arch/arm/mach-omap2/omap_hwmod.c
3784@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3785 int (*init_clkdm)(struct omap_hwmod *oh);
3786 void (*update_context_lost)(struct omap_hwmod *oh);
3787 int (*get_context_lost)(struct omap_hwmod *oh);
3788-};
3789+} __no_const;
3790
3791 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3792-static struct omap_hwmod_soc_ops soc_ops;
3793+static struct omap_hwmod_soc_ops soc_ops __read_only;
3794
3795 /* omap_hwmod_list contains all registered struct omap_hwmods */
3796 static LIST_HEAD(omap_hwmod_list);
3797diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3798index 95fee54..cfa9cf1 100644
3799--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3800+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3801@@ -10,6 +10,7 @@
3802
3803 #include <linux/kernel.h>
3804 #include <linux/init.h>
3805+#include <asm/pgtable.h>
3806
3807 #include "powerdomain.h"
3808
3809@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3810
3811 void __init am43xx_powerdomains_init(void)
3812 {
3813- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3814+ pax_open_kernel();
3815+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3816+ pax_close_kernel();
3817 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3818 pwrdm_register_pwrdms(powerdomains_am43xx);
3819 pwrdm_complete_init();
3820diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3821index 97d6607..8429d14 100644
3822--- a/arch/arm/mach-omap2/wd_timer.c
3823+++ b/arch/arm/mach-omap2/wd_timer.c
3824@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3825 struct omap_hwmod *oh;
3826 char *oh_name = "wd_timer2";
3827 char *dev_name = "omap_wdt";
3828- struct omap_wd_timer_platform_data pdata;
3829+ static struct omap_wd_timer_platform_data pdata = {
3830+ .read_reset_sources = prm_read_reset_sources
3831+ };
3832
3833 if (!cpu_class_is_omap2() || of_have_populated_dt())
3834 return 0;
3835@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3836 return -EINVAL;
3837 }
3838
3839- pdata.read_reset_sources = prm_read_reset_sources;
3840-
3841 pdev = omap_device_build(dev_name, id, oh, &pdata,
3842 sizeof(struct omap_wd_timer_platform_data));
3843 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3844diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3845index b30bf5c..d0825bf 100644
3846--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3847+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3848@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3849 bool entered_lp2 = false;
3850
3851 if (tegra_pending_sgi())
3852- ACCESS_ONCE(abort_flag) = true;
3853+ ACCESS_ONCE_RW(abort_flag) = true;
3854
3855 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3856
3857diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3858index 2dea8b5..6499da2 100644
3859--- a/arch/arm/mach-ux500/setup.h
3860+++ b/arch/arm/mach-ux500/setup.h
3861@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
3862 .type = MT_DEVICE, \
3863 }
3864
3865-#define __MEM_DEV_DESC(x, sz) { \
3866- .virtual = IO_ADDRESS(x), \
3867- .pfn = __phys_to_pfn(x), \
3868- .length = sz, \
3869- .type = MT_MEMORY_RWX, \
3870-}
3871-
3872 extern struct smp_operations ux500_smp_ops;
3873 extern void ux500_cpu_die(unsigned int cpu);
3874
3875diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3876index 7eb94e6..799ad3e 100644
3877--- a/arch/arm/mm/Kconfig
3878+++ b/arch/arm/mm/Kconfig
3879@@ -446,6 +446,7 @@ config CPU_32v5
3880
3881 config CPU_32v6
3882 bool
3883+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3884 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3885
3886 config CPU_32v6K
3887@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3888
3889 config CPU_USE_DOMAINS
3890 bool
3891+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3892 help
3893 This option enables or disables the use of domain switching
3894 via the set_fs() function.
3895@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
3896
3897 config KUSER_HELPERS
3898 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3899- depends on MMU
3900+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3901 default y
3902 help
3903 Warning: disabling this option may break user programs.
3904@@ -812,7 +814,7 @@ config KUSER_HELPERS
3905 See Documentation/arm/kernel_user_helpers.txt for details.
3906
3907 However, the fixed address nature of these helpers can be used
3908- by ROP (return orientated programming) authors when creating
3909+ by ROP (Return Oriented Programming) authors when creating
3910 exploits.
3911
3912 If all of the binaries and libraries which run on your platform
3913diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3914index 83792f4..c25d36b 100644
3915--- a/arch/arm/mm/alignment.c
3916+++ b/arch/arm/mm/alignment.c
3917@@ -216,10 +216,12 @@ union offset_union {
3918 #define __get16_unaligned_check(ins,val,addr) \
3919 do { \
3920 unsigned int err = 0, v, a = addr; \
3921+ pax_open_userland(); \
3922 __get8_unaligned_check(ins,v,a,err); \
3923 val = v << ((BE) ? 8 : 0); \
3924 __get8_unaligned_check(ins,v,a,err); \
3925 val |= v << ((BE) ? 0 : 8); \
3926+ pax_close_userland(); \
3927 if (err) \
3928 goto fault; \
3929 } while (0)
3930@@ -233,6 +235,7 @@ union offset_union {
3931 #define __get32_unaligned_check(ins,val,addr) \
3932 do { \
3933 unsigned int err = 0, v, a = addr; \
3934+ pax_open_userland(); \
3935 __get8_unaligned_check(ins,v,a,err); \
3936 val = v << ((BE) ? 24 : 0); \
3937 __get8_unaligned_check(ins,v,a,err); \
3938@@ -241,6 +244,7 @@ union offset_union {
3939 val |= v << ((BE) ? 8 : 16); \
3940 __get8_unaligned_check(ins,v,a,err); \
3941 val |= v << ((BE) ? 0 : 24); \
3942+ pax_close_userland(); \
3943 if (err) \
3944 goto fault; \
3945 } while (0)
3946@@ -254,6 +258,7 @@ union offset_union {
3947 #define __put16_unaligned_check(ins,val,addr) \
3948 do { \
3949 unsigned int err = 0, v = val, a = addr; \
3950+ pax_open_userland(); \
3951 __asm__( FIRST_BYTE_16 \
3952 ARM( "1: "ins" %1, [%2], #1\n" ) \
3953 THUMB( "1: "ins" %1, [%2]\n" ) \
3954@@ -273,6 +278,7 @@ union offset_union {
3955 " .popsection\n" \
3956 : "=r" (err), "=&r" (v), "=&r" (a) \
3957 : "0" (err), "1" (v), "2" (a)); \
3958+ pax_close_userland(); \
3959 if (err) \
3960 goto fault; \
3961 } while (0)
3962@@ -286,6 +292,7 @@ union offset_union {
3963 #define __put32_unaligned_check(ins,val,addr) \
3964 do { \
3965 unsigned int err = 0, v = val, a = addr; \
3966+ pax_open_userland(); \
3967 __asm__( FIRST_BYTE_32 \
3968 ARM( "1: "ins" %1, [%2], #1\n" ) \
3969 THUMB( "1: "ins" %1, [%2]\n" ) \
3970@@ -315,6 +322,7 @@ union offset_union {
3971 " .popsection\n" \
3972 : "=r" (err), "=&r" (v), "=&r" (a) \
3973 : "0" (err), "1" (v), "2" (a)); \
3974+ pax_close_userland(); \
3975 if (err) \
3976 goto fault; \
3977 } while (0)
3978diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3979index 5f2c988..221412d 100644
3980--- a/arch/arm/mm/cache-l2x0.c
3981+++ b/arch/arm/mm/cache-l2x0.c
3982@@ -41,7 +41,7 @@ struct l2c_init_data {
3983 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
3984 void (*save)(void __iomem *);
3985 struct outer_cache_fns outer_cache;
3986-};
3987+} __do_const;
3988
3989 #define CACHE_LINE_SIZE 32
3990
3991diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3992index 6eb97b3..ac509f6 100644
3993--- a/arch/arm/mm/context.c
3994+++ b/arch/arm/mm/context.c
3995@@ -43,7 +43,7 @@
3996 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3997
3998 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3999-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4000+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4001 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
4002
4003 static DEFINE_PER_CPU(atomic64_t, active_asids);
4004@@ -182,7 +182,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4005 {
4006 static u32 cur_idx = 1;
4007 u64 asid = atomic64_read(&mm->context.id);
4008- u64 generation = atomic64_read(&asid_generation);
4009+ u64 generation = atomic64_read_unchecked(&asid_generation);
4010
4011 if (asid != 0 && is_reserved_asid(asid)) {
4012 /*
4013@@ -203,7 +203,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4014 */
4015 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
4016 if (asid == NUM_USER_ASIDS) {
4017- generation = atomic64_add_return(ASID_FIRST_VERSION,
4018+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
4019 &asid_generation);
4020 flush_context(cpu);
4021 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
4022@@ -234,14 +234,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
4023 cpu_set_reserved_ttbr0();
4024
4025 asid = atomic64_read(&mm->context.id);
4026- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
4027+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
4028 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
4029 goto switch_mm_fastpath;
4030
4031 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
4032 /* Check that our ASID belongs to the current generation. */
4033 asid = atomic64_read(&mm->context.id);
4034- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
4035+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
4036 asid = new_context(mm, cpu);
4037 atomic64_set(&mm->context.id, asid);
4038 }
4039diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
4040index eb8830a..e8ff52e 100644
4041--- a/arch/arm/mm/fault.c
4042+++ b/arch/arm/mm/fault.c
4043@@ -25,6 +25,7 @@
4044 #include <asm/system_misc.h>
4045 #include <asm/system_info.h>
4046 #include <asm/tlbflush.h>
4047+#include <asm/sections.h>
4048
4049 #include "fault.h"
4050
4051@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
4052 if (fixup_exception(regs))
4053 return;
4054
4055+#ifdef CONFIG_PAX_MEMORY_UDEREF
4056+ if (addr < TASK_SIZE) {
4057+ if (current->signal->curr_ip)
4058+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4059+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4060+ else
4061+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4062+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4063+ }
4064+#endif
4065+
4066+#ifdef CONFIG_PAX_KERNEXEC
4067+ if ((fsr & FSR_WRITE) &&
4068+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
4069+ (MODULES_VADDR <= addr && addr < MODULES_END)))
4070+ {
4071+ if (current->signal->curr_ip)
4072+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4073+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4074+ else
4075+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
4076+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4077+ }
4078+#endif
4079+
4080 /*
4081 * No handler, we'll have to terminate things with extreme prejudice.
4082 */
4083@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
4084 }
4085 #endif
4086
4087+#ifdef CONFIG_PAX_PAGEEXEC
4088+ if (fsr & FSR_LNX_PF) {
4089+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
4090+ do_group_exit(SIGKILL);
4091+ }
4092+#endif
4093+
4094 tsk->thread.address = addr;
4095 tsk->thread.error_code = fsr;
4096 tsk->thread.trap_no = 14;
4097@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4098 }
4099 #endif /* CONFIG_MMU */
4100
4101+#ifdef CONFIG_PAX_PAGEEXEC
4102+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4103+{
4104+ long i;
4105+
4106+ printk(KERN_ERR "PAX: bytes at PC: ");
4107+ for (i = 0; i < 20; i++) {
4108+ unsigned char c;
4109+ if (get_user(c, (__force unsigned char __user *)pc+i))
4110+ printk(KERN_CONT "?? ");
4111+ else
4112+ printk(KERN_CONT "%02x ", c);
4113+ }
4114+ printk("\n");
4115+
4116+ printk(KERN_ERR "PAX: bytes at SP-4: ");
4117+ for (i = -1; i < 20; i++) {
4118+ unsigned long c;
4119+ if (get_user(c, (__force unsigned long __user *)sp+i))
4120+ printk(KERN_CONT "???????? ");
4121+ else
4122+ printk(KERN_CONT "%08lx ", c);
4123+ }
4124+ printk("\n");
4125+}
4126+#endif
4127+
4128 /*
4129 * First Level Translation Fault Handler
4130 *
4131@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4132 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4133 struct siginfo info;
4134
4135+#ifdef CONFIG_PAX_MEMORY_UDEREF
4136+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4137+ if (current->signal->curr_ip)
4138+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4139+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4140+ else
4141+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4142+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4143+ goto die;
4144+ }
4145+#endif
4146+
4147 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4148 return;
4149
4150+die:
4151 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4152 inf->name, fsr, addr);
4153
4154@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4155 ifsr_info[nr].name = name;
4156 }
4157
4158+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4159+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4160+
4161 asmlinkage void __exception
4162 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4163 {
4164 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4165 struct siginfo info;
4166+ unsigned long pc = instruction_pointer(regs);
4167+
4168+ if (user_mode(regs)) {
4169+ unsigned long sigpage = current->mm->context.sigpage;
4170+
4171+ if (sigpage <= pc && pc < sigpage + 7*4) {
4172+ if (pc < sigpage + 3*4)
4173+ sys_sigreturn(regs);
4174+ else
4175+ sys_rt_sigreturn(regs);
4176+ return;
4177+ }
4178+ if (pc == 0xffff0f60UL) {
4179+ /*
4180+ * PaX: __kuser_cmpxchg64 emulation
4181+ */
4182+ // TODO
4183+ //regs->ARM_pc = regs->ARM_lr;
4184+ //return;
4185+ }
4186+ if (pc == 0xffff0fa0UL) {
4187+ /*
4188+ * PaX: __kuser_memory_barrier emulation
4189+ */
4190+ // dmb(); implied by the exception
4191+ regs->ARM_pc = regs->ARM_lr;
4192+ return;
4193+ }
4194+ if (pc == 0xffff0fc0UL) {
4195+ /*
4196+ * PaX: __kuser_cmpxchg emulation
4197+ */
4198+ // TODO
4199+ //long new;
4200+ //int op;
4201+
4202+ //op = FUTEX_OP_SET << 28;
4203+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4204+ //regs->ARM_r0 = old != new;
4205+ //regs->ARM_pc = regs->ARM_lr;
4206+ //return;
4207+ }
4208+ if (pc == 0xffff0fe0UL) {
4209+ /*
4210+ * PaX: __kuser_get_tls emulation
4211+ */
4212+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4213+ regs->ARM_pc = regs->ARM_lr;
4214+ return;
4215+ }
4216+ }
4217+
4218+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4219+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4220+ if (current->signal->curr_ip)
4221+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4222+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4223+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4224+ else
4225+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4226+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4227+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4228+ goto die;
4229+ }
4230+#endif
4231+
4232+#ifdef CONFIG_PAX_REFCOUNT
4233+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4234+#ifdef CONFIG_THUMB2_KERNEL
4235+ unsigned short bkpt;
4236+
4237+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4238+#else
4239+ unsigned int bkpt;
4240+
4241+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4242+#endif
4243+ current->thread.error_code = ifsr;
4244+ current->thread.trap_no = 0;
4245+ pax_report_refcount_overflow(regs);
4246+ fixup_exception(regs);
4247+ return;
4248+ }
4249+ }
4250+#endif
4251
4252 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4253 return;
4254
4255+die:
4256 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4257 inf->name, ifsr, addr);
4258
4259diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4260index cf08bdf..772656c 100644
4261--- a/arch/arm/mm/fault.h
4262+++ b/arch/arm/mm/fault.h
4263@@ -3,6 +3,7 @@
4264
4265 /*
4266 * Fault status register encodings. We steal bit 31 for our own purposes.
4267+ * Set when the FSR value is from an instruction fault.
4268 */
4269 #define FSR_LNX_PF (1 << 31)
4270 #define FSR_WRITE (1 << 11)
4271@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4272 }
4273 #endif
4274
4275+/* valid for LPAE and !LPAE */
4276+static inline int is_xn_fault(unsigned int fsr)
4277+{
4278+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4279+}
4280+
4281+static inline int is_domain_fault(unsigned int fsr)
4282+{
4283+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4284+}
4285+
4286 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4287 unsigned long search_exception_table(unsigned long addr);
4288
4289diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4290index 659c75d..6f8c029 100644
4291--- a/arch/arm/mm/init.c
4292+++ b/arch/arm/mm/init.c
4293@@ -31,6 +31,8 @@
4294 #include <asm/setup.h>
4295 #include <asm/tlb.h>
4296 #include <asm/fixmap.h>
4297+#include <asm/system_info.h>
4298+#include <asm/cp15.h>
4299
4300 #include <asm/mach/arch.h>
4301 #include <asm/mach/map.h>
4302@@ -619,7 +621,46 @@ void free_initmem(void)
4303 {
4304 #ifdef CONFIG_HAVE_TCM
4305 extern char __tcm_start, __tcm_end;
4306+#endif
4307
4308+#ifdef CONFIG_PAX_KERNEXEC
4309+ unsigned long addr;
4310+ pgd_t *pgd;
4311+ pud_t *pud;
4312+ pmd_t *pmd;
4313+ int cpu_arch = cpu_architecture();
4314+ unsigned int cr = get_cr();
4315+
4316+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4317+ /* make pages tables, etc before .text NX */
4318+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4319+ pgd = pgd_offset_k(addr);
4320+ pud = pud_offset(pgd, addr);
4321+ pmd = pmd_offset(pud, addr);
4322+ __section_update(pmd, addr, PMD_SECT_XN);
4323+ }
4324+ /* make init NX */
4325+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4326+ pgd = pgd_offset_k(addr);
4327+ pud = pud_offset(pgd, addr);
4328+ pmd = pmd_offset(pud, addr);
4329+ __section_update(pmd, addr, PMD_SECT_XN);
4330+ }
4331+ /* make kernel code/rodata RX */
4332+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4333+ pgd = pgd_offset_k(addr);
4334+ pud = pud_offset(pgd, addr);
4335+ pmd = pmd_offset(pud, addr);
4336+#ifdef CONFIG_ARM_LPAE
4337+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4338+#else
4339+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4340+#endif
4341+ }
4342+ }
4343+#endif
4344+
4345+#ifdef CONFIG_HAVE_TCM
4346 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4347 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4348 #endif
4349diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4350index d1e5ad7..84dcbf2 100644
4351--- a/arch/arm/mm/ioremap.c
4352+++ b/arch/arm/mm/ioremap.c
4353@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4354 unsigned int mtype;
4355
4356 if (cached)
4357- mtype = MT_MEMORY_RWX;
4358+ mtype = MT_MEMORY_RX;
4359 else
4360- mtype = MT_MEMORY_RWX_NONCACHED;
4361+ mtype = MT_MEMORY_RX_NONCACHED;
4362
4363 return __arm_ioremap_caller(phys_addr, size, mtype,
4364 __builtin_return_address(0));
4365diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4366index 5e85ed3..b10a7ed 100644
4367--- a/arch/arm/mm/mmap.c
4368+++ b/arch/arm/mm/mmap.c
4369@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4370 struct vm_area_struct *vma;
4371 int do_align = 0;
4372 int aliasing = cache_is_vipt_aliasing();
4373+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4374 struct vm_unmapped_area_info info;
4375
4376 /*
4377@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4378 if (len > TASK_SIZE)
4379 return -ENOMEM;
4380
4381+#ifdef CONFIG_PAX_RANDMMAP
4382+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4383+#endif
4384+
4385 if (addr) {
4386 if (do_align)
4387 addr = COLOUR_ALIGN(addr, pgoff);
4388@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4389 addr = PAGE_ALIGN(addr);
4390
4391 vma = find_vma(mm, addr);
4392- if (TASK_SIZE - len >= addr &&
4393- (!vma || addr + len <= vma->vm_start))
4394+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4395 return addr;
4396 }
4397
4398@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4399 info.high_limit = TASK_SIZE;
4400 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4401 info.align_offset = pgoff << PAGE_SHIFT;
4402+ info.threadstack_offset = offset;
4403 return vm_unmapped_area(&info);
4404 }
4405
4406@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4407 unsigned long addr = addr0;
4408 int do_align = 0;
4409 int aliasing = cache_is_vipt_aliasing();
4410+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4411 struct vm_unmapped_area_info info;
4412
4413 /*
4414@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4415 return addr;
4416 }
4417
4418+#ifdef CONFIG_PAX_RANDMMAP
4419+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4420+#endif
4421+
4422 /* requesting a specific address */
4423 if (addr) {
4424 if (do_align)
4425@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4426 else
4427 addr = PAGE_ALIGN(addr);
4428 vma = find_vma(mm, addr);
4429- if (TASK_SIZE - len >= addr &&
4430- (!vma || addr + len <= vma->vm_start))
4431+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4432 return addr;
4433 }
4434
4435@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4436 info.high_limit = mm->mmap_base;
4437 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4438 info.align_offset = pgoff << PAGE_SHIFT;
4439+ info.threadstack_offset = offset;
4440 addr = vm_unmapped_area(&info);
4441
4442 /*
4443@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4444 {
4445 unsigned long random_factor = 0UL;
4446
4447+#ifdef CONFIG_PAX_RANDMMAP
4448+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4449+#endif
4450+
4451 /* 8 bits of randomness in 20 address space bits */
4452 if ((current->flags & PF_RANDOMIZE) &&
4453 !(current->personality & ADDR_NO_RANDOMIZE))
4454@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4455
4456 if (mmap_is_legacy()) {
4457 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4458+
4459+#ifdef CONFIG_PAX_RANDMMAP
4460+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4461+ mm->mmap_base += mm->delta_mmap;
4462+#endif
4463+
4464 mm->get_unmapped_area = arch_get_unmapped_area;
4465 } else {
4466 mm->mmap_base = mmap_base(random_factor);
4467+
4468+#ifdef CONFIG_PAX_RANDMMAP
4469+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4470+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4471+#endif
4472+
4473 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4474 }
4475 }
4476diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4477index 8348ed6..b73a807 100644
4478--- a/arch/arm/mm/mmu.c
4479+++ b/arch/arm/mm/mmu.c
4480@@ -40,6 +40,22 @@
4481 #include "mm.h"
4482 #include "tcm.h"
4483
4484+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4485+void modify_domain(unsigned int dom, unsigned int type)
4486+{
4487+ struct thread_info *thread = current_thread_info();
4488+ unsigned int domain = thread->cpu_domain;
4489+ /*
4490+ * DOMAIN_MANAGER might be defined to some other value,
4491+ * use the arch-defined constant
4492+ */
4493+ domain &= ~domain_val(dom, 3);
4494+ thread->cpu_domain = domain | domain_val(dom, type);
4495+ set_domain(thread->cpu_domain);
4496+}
4497+EXPORT_SYMBOL(modify_domain);
4498+#endif
4499+
4500 /*
4501 * empty_zero_page is a special page that is used for
4502 * zero-initialized data and COW.
4503@@ -239,7 +255,15 @@ __setup("noalign", noalign_setup);
4504 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4505 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4506
4507-static struct mem_type mem_types[] = {
4508+#ifdef CONFIG_PAX_KERNEXEC
4509+#define L_PTE_KERNEXEC L_PTE_RDONLY
4510+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4511+#else
4512+#define L_PTE_KERNEXEC L_PTE_DIRTY
4513+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4514+#endif
4515+
4516+static struct mem_type mem_types[] __read_only = {
4517 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4518 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4519 L_PTE_SHARED,
4520@@ -268,19 +292,19 @@ static struct mem_type mem_types[] = {
4521 .prot_sect = PROT_SECT_DEVICE,
4522 .domain = DOMAIN_IO,
4523 },
4524- [MT_UNCACHED] = {
4525+ [MT_UNCACHED_RW] = {
4526 .prot_pte = PROT_PTE_DEVICE,
4527 .prot_l1 = PMD_TYPE_TABLE,
4528 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4529 .domain = DOMAIN_IO,
4530 },
4531- [MT_CACHECLEAN] = {
4532- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4533+ [MT_CACHECLEAN_RO] = {
4534+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4535 .domain = DOMAIN_KERNEL,
4536 },
4537 #ifndef CONFIG_ARM_LPAE
4538- [MT_MINICLEAN] = {
4539- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4540+ [MT_MINICLEAN_RO] = {
4541+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4542 .domain = DOMAIN_KERNEL,
4543 },
4544 #endif
4545@@ -288,15 +312,15 @@ static struct mem_type mem_types[] = {
4546 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4547 L_PTE_RDONLY,
4548 .prot_l1 = PMD_TYPE_TABLE,
4549- .domain = DOMAIN_USER,
4550+ .domain = DOMAIN_VECTORS,
4551 },
4552 [MT_HIGH_VECTORS] = {
4553 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4554 L_PTE_USER | L_PTE_RDONLY,
4555 .prot_l1 = PMD_TYPE_TABLE,
4556- .domain = DOMAIN_USER,
4557+ .domain = DOMAIN_VECTORS,
4558 },
4559- [MT_MEMORY_RWX] = {
4560+ [__MT_MEMORY_RWX] = {
4561 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4562 .prot_l1 = PMD_TYPE_TABLE,
4563 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4564@@ -309,17 +333,30 @@ static struct mem_type mem_types[] = {
4565 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4566 .domain = DOMAIN_KERNEL,
4567 },
4568- [MT_ROM] = {
4569- .prot_sect = PMD_TYPE_SECT,
4570+ [MT_MEMORY_RX] = {
4571+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4572+ .prot_l1 = PMD_TYPE_TABLE,
4573+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4574+ .domain = DOMAIN_KERNEL,
4575+ },
4576+ [MT_ROM_RX] = {
4577+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4578 .domain = DOMAIN_KERNEL,
4579 },
4580- [MT_MEMORY_RWX_NONCACHED] = {
4581+ [MT_MEMORY_RW_NONCACHED] = {
4582 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4583 L_PTE_MT_BUFFERABLE,
4584 .prot_l1 = PMD_TYPE_TABLE,
4585 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4586 .domain = DOMAIN_KERNEL,
4587 },
4588+ [MT_MEMORY_RX_NONCACHED] = {
4589+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4590+ L_PTE_MT_BUFFERABLE,
4591+ .prot_l1 = PMD_TYPE_TABLE,
4592+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4593+ .domain = DOMAIN_KERNEL,
4594+ },
4595 [MT_MEMORY_RW_DTCM] = {
4596 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4597 L_PTE_XN,
4598@@ -327,9 +364,10 @@ static struct mem_type mem_types[] = {
4599 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4600 .domain = DOMAIN_KERNEL,
4601 },
4602- [MT_MEMORY_RWX_ITCM] = {
4603- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4604+ [MT_MEMORY_RX_ITCM] = {
4605+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4606 .prot_l1 = PMD_TYPE_TABLE,
4607+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4608 .domain = DOMAIN_KERNEL,
4609 },
4610 [MT_MEMORY_RW_SO] = {
4611@@ -547,9 +585,14 @@ static void __init build_mem_type_table(void)
4612 * Mark cache clean areas and XIP ROM read only
4613 * from SVC mode and no access from userspace.
4614 */
4615- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4616- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4617- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4618+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4619+#ifdef CONFIG_PAX_KERNEXEC
4620+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4621+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4622+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4623+#endif
4624+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4625+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4626 #endif
4627
4628 /*
4629@@ -566,13 +609,17 @@ static void __init build_mem_type_table(void)
4630 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4631 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4632 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4633- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4634- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4635+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4636+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4637 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4638 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4639+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4640+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4641 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4642- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4643- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4644+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4645+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4646+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4647+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4648 }
4649 }
4650
4651@@ -583,15 +630,20 @@ static void __init build_mem_type_table(void)
4652 if (cpu_arch >= CPU_ARCH_ARMv6) {
4653 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4654 /* Non-cacheable Normal is XCB = 001 */
4655- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4656+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4657+ PMD_SECT_BUFFERED;
4658+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4659 PMD_SECT_BUFFERED;
4660 } else {
4661 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4662- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4663+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4664+ PMD_SECT_TEX(1);
4665+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4666 PMD_SECT_TEX(1);
4667 }
4668 } else {
4669- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4670+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4671+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4672 }
4673
4674 #ifdef CONFIG_ARM_LPAE
4675@@ -607,6 +659,8 @@ static void __init build_mem_type_table(void)
4676 vecs_pgprot |= PTE_EXT_AF;
4677 #endif
4678
4679+ user_pgprot |= __supported_pte_mask;
4680+
4681 for (i = 0; i < 16; i++) {
4682 pteval_t v = pgprot_val(protection_map[i]);
4683 protection_map[i] = __pgprot(v | user_pgprot);
4684@@ -624,21 +678,24 @@ static void __init build_mem_type_table(void)
4685
4686 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4687 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4688- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4689- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4690+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4691+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4692 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4693 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4694+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4695+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4696 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4697- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4698- mem_types[MT_ROM].prot_sect |= cp->pmd;
4699+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4700+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4701+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4702
4703 switch (cp->pmd) {
4704 case PMD_SECT_WT:
4705- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4706+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4707 break;
4708 case PMD_SECT_WB:
4709 case PMD_SECT_WBWA:
4710- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4711+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4712 break;
4713 }
4714 pr_info("Memory policy: %sData cache %s\n",
4715@@ -856,7 +913,7 @@ static void __init create_mapping(struct map_desc *md)
4716 return;
4717 }
4718
4719- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4720+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4721 md->virtual >= PAGE_OFFSET &&
4722 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4723 printk(KERN_WARNING "BUG: mapping for 0x%08llx"
4724@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
4725 * called function. This means you can't use any function or debugging
4726 * method which may touch any device, otherwise the kernel _will_ crash.
4727 */
4728+
4729+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4730+
4731 static void __init devicemaps_init(const struct machine_desc *mdesc)
4732 {
4733 struct map_desc map;
4734 unsigned long addr;
4735- void *vectors;
4736
4737- /*
4738- * Allocate the vector page early.
4739- */
4740- vectors = early_alloc(PAGE_SIZE * 2);
4741-
4742- early_trap_init(vectors);
4743+ early_trap_init(&vectors);
4744
4745 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4746 pmd_clear(pmd_off_k(addr));
4747@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4748 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4749 map.virtual = MODULES_VADDR;
4750 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4751- map.type = MT_ROM;
4752+ map.type = MT_ROM_RX;
4753 create_mapping(&map);
4754 #endif
4755
4756@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4757 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4758 map.virtual = FLUSH_BASE;
4759 map.length = SZ_1M;
4760- map.type = MT_CACHECLEAN;
4761+ map.type = MT_CACHECLEAN_RO;
4762 create_mapping(&map);
4763 #endif
4764 #ifdef FLUSH_BASE_MINICACHE
4765 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4766 map.virtual = FLUSH_BASE_MINICACHE;
4767 map.length = SZ_1M;
4768- map.type = MT_MINICLEAN;
4769+ map.type = MT_MINICLEAN_RO;
4770 create_mapping(&map);
4771 #endif
4772
4773@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4774 * location (0xffff0000). If we aren't using high-vectors, also
4775 * create a mapping at the low-vectors virtual address.
4776 */
4777- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4778+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4779 map.virtual = 0xffff0000;
4780 map.length = PAGE_SIZE;
4781 #ifdef CONFIG_KUSER_HELPERS
4782@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
4783 static void __init map_lowmem(void)
4784 {
4785 struct memblock_region *reg;
4786+#ifndef CONFIG_PAX_KERNEXEC
4787 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4788 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4789+#endif
4790
4791 /* Map all the lowmem memory banks. */
4792 for_each_memblock(memory, reg) {
4793@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
4794 if (start >= end)
4795 break;
4796
4797+#ifdef CONFIG_PAX_KERNEXEC
4798+ map.pfn = __phys_to_pfn(start);
4799+ map.virtual = __phys_to_virt(start);
4800+ map.length = end - start;
4801+
4802+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4803+ struct map_desc kernel;
4804+ struct map_desc initmap;
4805+
4806+ /* when freeing initmem we will make this RW */
4807+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4808+ initmap.virtual = (unsigned long)__init_begin;
4809+ initmap.length = _sdata - __init_begin;
4810+ initmap.type = __MT_MEMORY_RWX;
4811+ create_mapping(&initmap);
4812+
4813+ /* when freeing initmem we will make this RX */
4814+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4815+ kernel.virtual = (unsigned long)_stext;
4816+ kernel.length = __init_begin - _stext;
4817+ kernel.type = __MT_MEMORY_RWX;
4818+ create_mapping(&kernel);
4819+
4820+ if (map.virtual < (unsigned long)_stext) {
4821+ map.length = (unsigned long)_stext - map.virtual;
4822+ map.type = __MT_MEMORY_RWX;
4823+ create_mapping(&map);
4824+ }
4825+
4826+ map.pfn = __phys_to_pfn(__pa(_sdata));
4827+ map.virtual = (unsigned long)_sdata;
4828+ map.length = end - __pa(_sdata);
4829+ }
4830+
4831+ map.type = MT_MEMORY_RW;
4832+ create_mapping(&map);
4833+#else
4834 if (end < kernel_x_start || start >= kernel_x_end) {
4835 map.pfn = __phys_to_pfn(start);
4836 map.virtual = __phys_to_virt(start);
4837 map.length = end - start;
4838- map.type = MT_MEMORY_RWX;
4839+ map.type = __MT_MEMORY_RWX;
4840
4841 create_mapping(&map);
4842 } else {
4843@@ -1370,7 +1463,7 @@ static void __init map_lowmem(void)
4844 map.pfn = __phys_to_pfn(kernel_x_start);
4845 map.virtual = __phys_to_virt(kernel_x_start);
4846 map.length = kernel_x_end - kernel_x_start;
4847- map.type = MT_MEMORY_RWX;
4848+ map.type = __MT_MEMORY_RWX;
4849
4850 create_mapping(&map);
4851
4852@@ -1383,6 +1476,7 @@ static void __init map_lowmem(void)
4853 create_mapping(&map);
4854 }
4855 }
4856+#endif
4857 }
4858 }
4859
4860diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4861index a37b989..5c9ae75 100644
4862--- a/arch/arm/net/bpf_jit_32.c
4863+++ b/arch/arm/net/bpf_jit_32.c
4864@@ -71,7 +71,11 @@ struct jit_ctx {
4865 #endif
4866 };
4867
4868+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4869+int bpf_jit_enable __read_only;
4870+#else
4871 int bpf_jit_enable __read_mostly;
4872+#endif
4873
4874 static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4875 {
4876@@ -930,5 +934,6 @@ void bpf_jit_free(struct bpf_prog *fp)
4877 {
4878 if (fp->jited)
4879 module_free(NULL, fp->bpf_func);
4880- kfree(fp);
4881+
4882+ bpf_prog_unlock_free(fp);
4883 }
4884diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4885index 5b217f4..c23f40e 100644
4886--- a/arch/arm/plat-iop/setup.c
4887+++ b/arch/arm/plat-iop/setup.c
4888@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4889 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4890 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4891 .length = IOP3XX_PERIPHERAL_SIZE,
4892- .type = MT_UNCACHED,
4893+ .type = MT_UNCACHED_RW,
4894 },
4895 };
4896
4897diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4898index a5bc92d..0bb4730 100644
4899--- a/arch/arm/plat-omap/sram.c
4900+++ b/arch/arm/plat-omap/sram.c
4901@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4902 * Looks like we need to preserve some bootloader code at the
4903 * beginning of SRAM for jumping to flash for reboot to work...
4904 */
4905+ pax_open_kernel();
4906 memset_io(omap_sram_base + omap_sram_skip, 0,
4907 omap_sram_size - omap_sram_skip);
4908+ pax_close_kernel();
4909 }
4910diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4911index ce6d763..cfea917 100644
4912--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4913+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4914@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4915 int (*started)(unsigned ch);
4916 int (*flush)(unsigned ch);
4917 int (*stop)(unsigned ch);
4918-};
4919+} __no_const;
4920
4921 extern void *samsung_dmadev_get_ops(void);
4922 extern void *s3c_dma_get_ops(void);
4923diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
4924index 6389d60..b5d3bdd 100644
4925--- a/arch/arm64/include/asm/barrier.h
4926+++ b/arch/arm64/include/asm/barrier.h
4927@@ -41,7 +41,7 @@
4928 do { \
4929 compiletime_assert_atomic_type(*p); \
4930 barrier(); \
4931- ACCESS_ONCE(*p) = (v); \
4932+ ACCESS_ONCE_RW(*p) = (v); \
4933 } while (0)
4934
4935 #define smp_load_acquire(p) \
4936diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
4937index 3bf8f4e..5dd5491 100644
4938--- a/arch/arm64/include/asm/uaccess.h
4939+++ b/arch/arm64/include/asm/uaccess.h
4940@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
4941 flag; \
4942 })
4943
4944+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
4945 #define access_ok(type, addr, size) __range_ok(addr, size)
4946 #define user_addr_max get_fs
4947
4948diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4949index c3a58a1..78fbf54 100644
4950--- a/arch/avr32/include/asm/cache.h
4951+++ b/arch/avr32/include/asm/cache.h
4952@@ -1,8 +1,10 @@
4953 #ifndef __ASM_AVR32_CACHE_H
4954 #define __ASM_AVR32_CACHE_H
4955
4956+#include <linux/const.h>
4957+
4958 #define L1_CACHE_SHIFT 5
4959-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4960+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4961
4962 /*
4963 * Memory returned by kmalloc() may be used for DMA, so we must make
4964diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4965index d232888..87c8df1 100644
4966--- a/arch/avr32/include/asm/elf.h
4967+++ b/arch/avr32/include/asm/elf.h
4968@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4969 the loader. We need to make sure that it is out of the way of the program
4970 that it will "exec", and that there is sufficient room for the brk. */
4971
4972-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4973+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4974
4975+#ifdef CONFIG_PAX_ASLR
4976+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4977+
4978+#define PAX_DELTA_MMAP_LEN 15
4979+#define PAX_DELTA_STACK_LEN 15
4980+#endif
4981
4982 /* This yields a mask that user programs can use to figure out what
4983 instruction set this CPU supports. This could be done in user space,
4984diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4985index 479330b..53717a8 100644
4986--- a/arch/avr32/include/asm/kmap_types.h
4987+++ b/arch/avr32/include/asm/kmap_types.h
4988@@ -2,9 +2,9 @@
4989 #define __ASM_AVR32_KMAP_TYPES_H
4990
4991 #ifdef CONFIG_DEBUG_HIGHMEM
4992-# define KM_TYPE_NR 29
4993+# define KM_TYPE_NR 30
4994 #else
4995-# define KM_TYPE_NR 14
4996+# define KM_TYPE_NR 15
4997 #endif
4998
4999 #endif /* __ASM_AVR32_KMAP_TYPES_H */
5000diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
5001index 0eca933..eb78c7b 100644
5002--- a/arch/avr32/mm/fault.c
5003+++ b/arch/avr32/mm/fault.c
5004@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
5005
5006 int exception_trace = 1;
5007
5008+#ifdef CONFIG_PAX_PAGEEXEC
5009+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5010+{
5011+ unsigned long i;
5012+
5013+ printk(KERN_ERR "PAX: bytes at PC: ");
5014+ for (i = 0; i < 20; i++) {
5015+ unsigned char c;
5016+ if (get_user(c, (unsigned char *)pc+i))
5017+ printk(KERN_CONT "???????? ");
5018+ else
5019+ printk(KERN_CONT "%02x ", c);
5020+ }
5021+ printk("\n");
5022+}
5023+#endif
5024+
5025 /*
5026 * This routine handles page faults. It determines the address and the
5027 * problem, and then passes it off to one of the appropriate routines.
5028@@ -176,6 +193,16 @@ bad_area:
5029 up_read(&mm->mmap_sem);
5030
5031 if (user_mode(regs)) {
5032+
5033+#ifdef CONFIG_PAX_PAGEEXEC
5034+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5035+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
5036+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
5037+ do_group_exit(SIGKILL);
5038+ }
5039+ }
5040+#endif
5041+
5042 if (exception_trace && printk_ratelimit())
5043 printk("%s%s[%d]: segfault at %08lx pc %08lx "
5044 "sp %08lx ecr %lu\n",
5045diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
5046index 568885a..f8008df 100644
5047--- a/arch/blackfin/include/asm/cache.h
5048+++ b/arch/blackfin/include/asm/cache.h
5049@@ -7,6 +7,7 @@
5050 #ifndef __ARCH_BLACKFIN_CACHE_H
5051 #define __ARCH_BLACKFIN_CACHE_H
5052
5053+#include <linux/const.h>
5054 #include <linux/linkage.h> /* for asmlinkage */
5055
5056 /*
5057@@ -14,7 +15,7 @@
5058 * Blackfin loads 32 bytes for cache
5059 */
5060 #define L1_CACHE_SHIFT 5
5061-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5062+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5063 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5064
5065 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5066diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5067index aea2718..3639a60 100644
5068--- a/arch/cris/include/arch-v10/arch/cache.h
5069+++ b/arch/cris/include/arch-v10/arch/cache.h
5070@@ -1,8 +1,9 @@
5071 #ifndef _ASM_ARCH_CACHE_H
5072 #define _ASM_ARCH_CACHE_H
5073
5074+#include <linux/const.h>
5075 /* Etrax 100LX have 32-byte cache-lines. */
5076-#define L1_CACHE_BYTES 32
5077 #define L1_CACHE_SHIFT 5
5078+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5079
5080 #endif /* _ASM_ARCH_CACHE_H */
5081diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5082index 7caf25d..ee65ac5 100644
5083--- a/arch/cris/include/arch-v32/arch/cache.h
5084+++ b/arch/cris/include/arch-v32/arch/cache.h
5085@@ -1,11 +1,12 @@
5086 #ifndef _ASM_CRIS_ARCH_CACHE_H
5087 #define _ASM_CRIS_ARCH_CACHE_H
5088
5089+#include <linux/const.h>
5090 #include <arch/hwregs/dma.h>
5091
5092 /* A cache-line is 32 bytes. */
5093-#define L1_CACHE_BYTES 32
5094 #define L1_CACHE_SHIFT 5
5095+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5096
5097 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5098
5099diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5100index f6c3a16..cd422a4 100644
5101--- a/arch/frv/include/asm/atomic.h
5102+++ b/arch/frv/include/asm/atomic.h
5103@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5104 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5105 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5106
5107+#define atomic64_read_unchecked(v) atomic64_read(v)
5108+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5109+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5110+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5111+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5112+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5113+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5114+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5115+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5116+
5117 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5118 {
5119 int c, old;
5120diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5121index 2797163..c2a401df9 100644
5122--- a/arch/frv/include/asm/cache.h
5123+++ b/arch/frv/include/asm/cache.h
5124@@ -12,10 +12,11 @@
5125 #ifndef __ASM_CACHE_H
5126 #define __ASM_CACHE_H
5127
5128+#include <linux/const.h>
5129
5130 /* bytes per L1 cache line */
5131 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5132-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5133+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5134
5135 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5136 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5137diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5138index 43901f2..0d8b865 100644
5139--- a/arch/frv/include/asm/kmap_types.h
5140+++ b/arch/frv/include/asm/kmap_types.h
5141@@ -2,6 +2,6 @@
5142 #ifndef _ASM_KMAP_TYPES_H
5143 #define _ASM_KMAP_TYPES_H
5144
5145-#define KM_TYPE_NR 17
5146+#define KM_TYPE_NR 18
5147
5148 #endif
5149diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5150index 836f147..4cf23f5 100644
5151--- a/arch/frv/mm/elf-fdpic.c
5152+++ b/arch/frv/mm/elf-fdpic.c
5153@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5154 {
5155 struct vm_area_struct *vma;
5156 struct vm_unmapped_area_info info;
5157+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5158
5159 if (len > TASK_SIZE)
5160 return -ENOMEM;
5161@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5162 if (addr) {
5163 addr = PAGE_ALIGN(addr);
5164 vma = find_vma(current->mm, addr);
5165- if (TASK_SIZE - len >= addr &&
5166- (!vma || addr + len <= vma->vm_start))
5167+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5168 goto success;
5169 }
5170
5171@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5172 info.high_limit = (current->mm->start_stack - 0x00200000);
5173 info.align_mask = 0;
5174 info.align_offset = 0;
5175+ info.threadstack_offset = offset;
5176 addr = vm_unmapped_area(&info);
5177 if (!(addr & ~PAGE_MASK))
5178 goto success;
5179diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5180index 2635117..fa223cb 100644
5181--- a/arch/hexagon/include/asm/cache.h
5182+++ b/arch/hexagon/include/asm/cache.h
5183@@ -21,9 +21,11 @@
5184 #ifndef __ASM_CACHE_H
5185 #define __ASM_CACHE_H
5186
5187+#include <linux/const.h>
5188+
5189 /* Bytes per L1 cache line */
5190-#define L1_CACHE_SHIFT (5)
5191-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5192+#define L1_CACHE_SHIFT 5
5193+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5194
5195 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
5196 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
5197diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5198index c84c88b..2a6e1ba 100644
5199--- a/arch/ia64/Kconfig
5200+++ b/arch/ia64/Kconfig
5201@@ -549,6 +549,7 @@ source "drivers/sn/Kconfig"
5202 config KEXEC
5203 bool "kexec system call"
5204 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5205+ depends on !GRKERNSEC_KMEM
5206 help
5207 kexec is a system call that implements the ability to shutdown your
5208 current kernel, and to start another kernel. It is like a reboot
5209diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5210index 5441b14..039a446 100644
5211--- a/arch/ia64/Makefile
5212+++ b/arch/ia64/Makefile
5213@@ -99,5 +99,6 @@ endef
5214 archprepare: make_nr_irqs_h FORCE
5215 PHONY += make_nr_irqs_h FORCE
5216
5217+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5218 make_nr_irqs_h: FORCE
5219 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5220diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5221index 0f8bf48..40ea950 100644
5222--- a/arch/ia64/include/asm/atomic.h
5223+++ b/arch/ia64/include/asm/atomic.h
5224@@ -209,4 +209,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5225 #define atomic64_inc(v) atomic64_add(1, (v))
5226 #define atomic64_dec(v) atomic64_sub(1, (v))
5227
5228+#define atomic64_read_unchecked(v) atomic64_read(v)
5229+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5230+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5231+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5232+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5233+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5234+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5235+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5236+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5237+
5238 #endif /* _ASM_IA64_ATOMIC_H */
5239diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5240index a48957c..e097b56 100644
5241--- a/arch/ia64/include/asm/barrier.h
5242+++ b/arch/ia64/include/asm/barrier.h
5243@@ -67,7 +67,7 @@
5244 do { \
5245 compiletime_assert_atomic_type(*p); \
5246 barrier(); \
5247- ACCESS_ONCE(*p) = (v); \
5248+ ACCESS_ONCE_RW(*p) = (v); \
5249 } while (0)
5250
5251 #define smp_load_acquire(p) \
5252diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5253index 988254a..e1ee885 100644
5254--- a/arch/ia64/include/asm/cache.h
5255+++ b/arch/ia64/include/asm/cache.h
5256@@ -1,6 +1,7 @@
5257 #ifndef _ASM_IA64_CACHE_H
5258 #define _ASM_IA64_CACHE_H
5259
5260+#include <linux/const.h>
5261
5262 /*
5263 * Copyright (C) 1998-2000 Hewlett-Packard Co
5264@@ -9,7 +10,7 @@
5265
5266 /* Bytes per L1 (data) cache line. */
5267 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5268-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5269+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5270
5271 #ifdef CONFIG_SMP
5272 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5273diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5274index 5a83c5c..4d7f553 100644
5275--- a/arch/ia64/include/asm/elf.h
5276+++ b/arch/ia64/include/asm/elf.h
5277@@ -42,6 +42,13 @@
5278 */
5279 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5280
5281+#ifdef CONFIG_PAX_ASLR
5282+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5283+
5284+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5285+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5286+#endif
5287+
5288 #define PT_IA_64_UNWIND 0x70000001
5289
5290 /* IA-64 relocations: */
5291diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5292index 5767cdf..7462574 100644
5293--- a/arch/ia64/include/asm/pgalloc.h
5294+++ b/arch/ia64/include/asm/pgalloc.h
5295@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5296 pgd_val(*pgd_entry) = __pa(pud);
5297 }
5298
5299+static inline void
5300+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5301+{
5302+ pgd_populate(mm, pgd_entry, pud);
5303+}
5304+
5305 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5306 {
5307 return quicklist_alloc(0, GFP_KERNEL, NULL);
5308@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5309 pud_val(*pud_entry) = __pa(pmd);
5310 }
5311
5312+static inline void
5313+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5314+{
5315+ pud_populate(mm, pud_entry, pmd);
5316+}
5317+
5318 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5319 {
5320 return quicklist_alloc(0, GFP_KERNEL, NULL);
5321diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5322index 7935115..c0eca6a 100644
5323--- a/arch/ia64/include/asm/pgtable.h
5324+++ b/arch/ia64/include/asm/pgtable.h
5325@@ -12,7 +12,7 @@
5326 * David Mosberger-Tang <davidm@hpl.hp.com>
5327 */
5328
5329-
5330+#include <linux/const.h>
5331 #include <asm/mman.h>
5332 #include <asm/page.h>
5333 #include <asm/processor.h>
5334@@ -142,6 +142,17 @@
5335 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5336 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5337 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5338+
5339+#ifdef CONFIG_PAX_PAGEEXEC
5340+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5341+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5342+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5343+#else
5344+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5345+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5346+# define PAGE_COPY_NOEXEC PAGE_COPY
5347+#endif
5348+
5349 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5350 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5351 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5352diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5353index 45698cd..e8e2dbc 100644
5354--- a/arch/ia64/include/asm/spinlock.h
5355+++ b/arch/ia64/include/asm/spinlock.h
5356@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5357 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5358
5359 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5360- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5361+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5362 }
5363
5364 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5365diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5366index 449c8c0..3d4b1e9 100644
5367--- a/arch/ia64/include/asm/uaccess.h
5368+++ b/arch/ia64/include/asm/uaccess.h
5369@@ -70,6 +70,7 @@
5370 && ((segment).seg == KERNEL_DS.seg \
5371 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5372 })
5373+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5374 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5375
5376 /*
5377@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5378 static inline unsigned long
5379 __copy_to_user (void __user *to, const void *from, unsigned long count)
5380 {
5381+ if (count > INT_MAX)
5382+ return count;
5383+
5384+ if (!__builtin_constant_p(count))
5385+ check_object_size(from, count, true);
5386+
5387 return __copy_user(to, (__force void __user *) from, count);
5388 }
5389
5390 static inline unsigned long
5391 __copy_from_user (void *to, const void __user *from, unsigned long count)
5392 {
5393+ if (count > INT_MAX)
5394+ return count;
5395+
5396+ if (!__builtin_constant_p(count))
5397+ check_object_size(to, count, false);
5398+
5399 return __copy_user((__force void __user *) to, from, count);
5400 }
5401
5402@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5403 ({ \
5404 void __user *__cu_to = (to); \
5405 const void *__cu_from = (from); \
5406- long __cu_len = (n); \
5407+ unsigned long __cu_len = (n); \
5408 \
5409- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5410+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5411+ if (!__builtin_constant_p(n)) \
5412+ check_object_size(__cu_from, __cu_len, true); \
5413 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5414+ } \
5415 __cu_len; \
5416 })
5417
5418@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5419 ({ \
5420 void *__cu_to = (to); \
5421 const void __user *__cu_from = (from); \
5422- long __cu_len = (n); \
5423+ unsigned long __cu_len = (n); \
5424 \
5425 __chk_user_ptr(__cu_from); \
5426- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5427+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5428+ if (!__builtin_constant_p(n)) \
5429+ check_object_size(__cu_to, __cu_len, false); \
5430 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5431+ } \
5432 __cu_len; \
5433 })
5434
5435diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5436index 24603be..948052d 100644
5437--- a/arch/ia64/kernel/module.c
5438+++ b/arch/ia64/kernel/module.c
5439@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5440 void
5441 module_free (struct module *mod, void *module_region)
5442 {
5443- if (mod && mod->arch.init_unw_table &&
5444- module_region == mod->module_init) {
5445+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5446 unw_remove_unwind_table(mod->arch.init_unw_table);
5447 mod->arch.init_unw_table = NULL;
5448 }
5449@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5450 }
5451
5452 static inline int
5453+in_init_rx (const struct module *mod, uint64_t addr)
5454+{
5455+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5456+}
5457+
5458+static inline int
5459+in_init_rw (const struct module *mod, uint64_t addr)
5460+{
5461+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5462+}
5463+
5464+static inline int
5465 in_init (const struct module *mod, uint64_t addr)
5466 {
5467- return addr - (uint64_t) mod->module_init < mod->init_size;
5468+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5469+}
5470+
5471+static inline int
5472+in_core_rx (const struct module *mod, uint64_t addr)
5473+{
5474+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5475+}
5476+
5477+static inline int
5478+in_core_rw (const struct module *mod, uint64_t addr)
5479+{
5480+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5481 }
5482
5483 static inline int
5484 in_core (const struct module *mod, uint64_t addr)
5485 {
5486- return addr - (uint64_t) mod->module_core < mod->core_size;
5487+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5488 }
5489
5490 static inline int
5491@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5492 break;
5493
5494 case RV_BDREL:
5495- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5496+ if (in_init_rx(mod, val))
5497+ val -= (uint64_t) mod->module_init_rx;
5498+ else if (in_init_rw(mod, val))
5499+ val -= (uint64_t) mod->module_init_rw;
5500+ else if (in_core_rx(mod, val))
5501+ val -= (uint64_t) mod->module_core_rx;
5502+ else if (in_core_rw(mod, val))
5503+ val -= (uint64_t) mod->module_core_rw;
5504 break;
5505
5506 case RV_LTV:
5507@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5508 * addresses have been selected...
5509 */
5510 uint64_t gp;
5511- if (mod->core_size > MAX_LTOFF)
5512+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5513 /*
5514 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5515 * at the end of the module.
5516 */
5517- gp = mod->core_size - MAX_LTOFF / 2;
5518+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5519 else
5520- gp = mod->core_size / 2;
5521- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5522+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5523+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5524 mod->arch.gp = gp;
5525 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5526 }
5527diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5528index c39c3cd..3c77738 100644
5529--- a/arch/ia64/kernel/palinfo.c
5530+++ b/arch/ia64/kernel/palinfo.c
5531@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5532 return NOTIFY_OK;
5533 }
5534
5535-static struct notifier_block __refdata palinfo_cpu_notifier =
5536+static struct notifier_block palinfo_cpu_notifier =
5537 {
5538 .notifier_call = palinfo_cpu_callback,
5539 .priority = 0,
5540diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5541index 41e33f8..65180b2a 100644
5542--- a/arch/ia64/kernel/sys_ia64.c
5543+++ b/arch/ia64/kernel/sys_ia64.c
5544@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5545 unsigned long align_mask = 0;
5546 struct mm_struct *mm = current->mm;
5547 struct vm_unmapped_area_info info;
5548+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5549
5550 if (len > RGN_MAP_LIMIT)
5551 return -ENOMEM;
5552@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5553 if (REGION_NUMBER(addr) == RGN_HPAGE)
5554 addr = 0;
5555 #endif
5556+
5557+#ifdef CONFIG_PAX_RANDMMAP
5558+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5559+ addr = mm->free_area_cache;
5560+ else
5561+#endif
5562+
5563 if (!addr)
5564 addr = TASK_UNMAPPED_BASE;
5565
5566@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5567 info.high_limit = TASK_SIZE;
5568 info.align_mask = align_mask;
5569 info.align_offset = 0;
5570+ info.threadstack_offset = offset;
5571 return vm_unmapped_area(&info);
5572 }
5573
5574diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5575index 84f8a52..7c76178 100644
5576--- a/arch/ia64/kernel/vmlinux.lds.S
5577+++ b/arch/ia64/kernel/vmlinux.lds.S
5578@@ -192,7 +192,7 @@ SECTIONS {
5579 /* Per-cpu data: */
5580 . = ALIGN(PERCPU_PAGE_SIZE);
5581 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5582- __phys_per_cpu_start = __per_cpu_load;
5583+ __phys_per_cpu_start = per_cpu_load;
5584 /*
5585 * ensure percpu data fits
5586 * into percpu page size
5587diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5588index 7225dad..2a7c8256 100644
5589--- a/arch/ia64/mm/fault.c
5590+++ b/arch/ia64/mm/fault.c
5591@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5592 return pte_present(pte);
5593 }
5594
5595+#ifdef CONFIG_PAX_PAGEEXEC
5596+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5597+{
5598+ unsigned long i;
5599+
5600+ printk(KERN_ERR "PAX: bytes at PC: ");
5601+ for (i = 0; i < 8; i++) {
5602+ unsigned int c;
5603+ if (get_user(c, (unsigned int *)pc+i))
5604+ printk(KERN_CONT "???????? ");
5605+ else
5606+ printk(KERN_CONT "%08x ", c);
5607+ }
5608+ printk("\n");
5609+}
5610+#endif
5611+
5612 # define VM_READ_BIT 0
5613 # define VM_WRITE_BIT 1
5614 # define VM_EXEC_BIT 2
5615@@ -151,8 +168,21 @@ retry:
5616 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5617 goto bad_area;
5618
5619- if ((vma->vm_flags & mask) != mask)
5620+ if ((vma->vm_flags & mask) != mask) {
5621+
5622+#ifdef CONFIG_PAX_PAGEEXEC
5623+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5624+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5625+ goto bad_area;
5626+
5627+ up_read(&mm->mmap_sem);
5628+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5629+ do_group_exit(SIGKILL);
5630+ }
5631+#endif
5632+
5633 goto bad_area;
5634+ }
5635
5636 /*
5637 * If for any reason at all we couldn't handle the fault, make
5638diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5639index 76069c1..c2aa816 100644
5640--- a/arch/ia64/mm/hugetlbpage.c
5641+++ b/arch/ia64/mm/hugetlbpage.c
5642@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5643 unsigned long pgoff, unsigned long flags)
5644 {
5645 struct vm_unmapped_area_info info;
5646+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5647
5648 if (len > RGN_MAP_LIMIT)
5649 return -ENOMEM;
5650@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5651 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5652 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5653 info.align_offset = 0;
5654+ info.threadstack_offset = offset;
5655 return vm_unmapped_area(&info);
5656 }
5657
5658diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5659index 6b33457..88b5124 100644
5660--- a/arch/ia64/mm/init.c
5661+++ b/arch/ia64/mm/init.c
5662@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5663 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5664 vma->vm_end = vma->vm_start + PAGE_SIZE;
5665 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5666+
5667+#ifdef CONFIG_PAX_PAGEEXEC
5668+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5669+ vma->vm_flags &= ~VM_EXEC;
5670+
5671+#ifdef CONFIG_PAX_MPROTECT
5672+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5673+ vma->vm_flags &= ~VM_MAYEXEC;
5674+#endif
5675+
5676+ }
5677+#endif
5678+
5679 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5680 down_write(&current->mm->mmap_sem);
5681 if (insert_vm_struct(current->mm, vma)) {
5682@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
5683 gate_vma.vm_start = FIXADDR_USER_START;
5684 gate_vma.vm_end = FIXADDR_USER_END;
5685 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5686- gate_vma.vm_page_prot = __P101;
5687+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5688
5689 return 0;
5690 }
5691diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5692index 40b3ee9..8c2c112 100644
5693--- a/arch/m32r/include/asm/cache.h
5694+++ b/arch/m32r/include/asm/cache.h
5695@@ -1,8 +1,10 @@
5696 #ifndef _ASM_M32R_CACHE_H
5697 #define _ASM_M32R_CACHE_H
5698
5699+#include <linux/const.h>
5700+
5701 /* L1 cache line size */
5702 #define L1_CACHE_SHIFT 4
5703-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5704+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5705
5706 #endif /* _ASM_M32R_CACHE_H */
5707diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5708index 82abd15..d95ae5d 100644
5709--- a/arch/m32r/lib/usercopy.c
5710+++ b/arch/m32r/lib/usercopy.c
5711@@ -14,6 +14,9 @@
5712 unsigned long
5713 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5714 {
5715+ if ((long)n < 0)
5716+ return n;
5717+
5718 prefetch(from);
5719 if (access_ok(VERIFY_WRITE, to, n))
5720 __copy_user(to,from,n);
5721@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5722 unsigned long
5723 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5724 {
5725+ if ((long)n < 0)
5726+ return n;
5727+
5728 prefetchw(to);
5729 if (access_ok(VERIFY_READ, from, n))
5730 __copy_user_zeroing(to,from,n);
5731diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5732index 0395c51..5f26031 100644
5733--- a/arch/m68k/include/asm/cache.h
5734+++ b/arch/m68k/include/asm/cache.h
5735@@ -4,9 +4,11 @@
5736 #ifndef __ARCH_M68K_CACHE_H
5737 #define __ARCH_M68K_CACHE_H
5738
5739+#include <linux/const.h>
5740+
5741 /* bytes per L1 cache line */
5742 #define L1_CACHE_SHIFT 4
5743-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5744+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5745
5746 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5747
5748diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5749index c7591e8..ecef036 100644
5750--- a/arch/metag/include/asm/barrier.h
5751+++ b/arch/metag/include/asm/barrier.h
5752@@ -89,7 +89,7 @@ static inline void fence(void)
5753 do { \
5754 compiletime_assert_atomic_type(*p); \
5755 smp_mb(); \
5756- ACCESS_ONCE(*p) = (v); \
5757+ ACCESS_ONCE_RW(*p) = (v); \
5758 } while (0)
5759
5760 #define smp_load_acquire(p) \
5761diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5762index 3c32075..ae0ae75 100644
5763--- a/arch/metag/mm/hugetlbpage.c
5764+++ b/arch/metag/mm/hugetlbpage.c
5765@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5766 info.high_limit = TASK_SIZE;
5767 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5768 info.align_offset = 0;
5769+ info.threadstack_offset = 0;
5770 return vm_unmapped_area(&info);
5771 }
5772
5773diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5774index 4efe96a..60e8699 100644
5775--- a/arch/microblaze/include/asm/cache.h
5776+++ b/arch/microblaze/include/asm/cache.h
5777@@ -13,11 +13,12 @@
5778 #ifndef _ASM_MICROBLAZE_CACHE_H
5779 #define _ASM_MICROBLAZE_CACHE_H
5780
5781+#include <linux/const.h>
5782 #include <asm/registers.h>
5783
5784 #define L1_CACHE_SHIFT 5
5785 /* word-granular cache in microblaze */
5786-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5787+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5788
5789 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5790
5791diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5792index 574c430..470200d 100644
5793--- a/arch/mips/Kconfig
5794+++ b/arch/mips/Kconfig
5795@@ -2399,6 +2399,7 @@ source "kernel/Kconfig.preempt"
5796
5797 config KEXEC
5798 bool "Kexec system call"
5799+ depends on !GRKERNSEC_KMEM
5800 help
5801 kexec is a system call that implements the ability to shutdown your
5802 current kernel, and to start another kernel. It is like a reboot
5803diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5804index 02f2444..506969c 100644
5805--- a/arch/mips/cavium-octeon/dma-octeon.c
5806+++ b/arch/mips/cavium-octeon/dma-octeon.c
5807@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5808 if (dma_release_from_coherent(dev, order, vaddr))
5809 return;
5810
5811- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5812+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5813 }
5814
5815 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5816diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5817index 37b2bef..02122b8 100644
5818--- a/arch/mips/include/asm/atomic.h
5819+++ b/arch/mips/include/asm/atomic.h
5820@@ -21,15 +21,39 @@
5821 #include <asm/cmpxchg.h>
5822 #include <asm/war.h>
5823
5824+#ifdef CONFIG_GENERIC_ATOMIC64
5825+#include <asm-generic/atomic64.h>
5826+#endif
5827+
5828 #define ATOMIC_INIT(i) { (i) }
5829
5830+#ifdef CONFIG_64BIT
5831+#define _ASM_EXTABLE(from, to) \
5832+" .section __ex_table,\"a\"\n" \
5833+" .dword " #from ", " #to"\n" \
5834+" .previous\n"
5835+#else
5836+#define _ASM_EXTABLE(from, to) \
5837+" .section __ex_table,\"a\"\n" \
5838+" .word " #from ", " #to"\n" \
5839+" .previous\n"
5840+#endif
5841+
5842 /*
5843 * atomic_read - read atomic variable
5844 * @v: pointer of type atomic_t
5845 *
5846 * Atomically reads the value of @v.
5847 */
5848-#define atomic_read(v) (*(volatile int *)&(v)->counter)
5849+static inline int atomic_read(const atomic_t *v)
5850+{
5851+ return (*(volatile const int *) &v->counter);
5852+}
5853+
5854+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5855+{
5856+ return (*(volatile const int *) &v->counter);
5857+}
5858
5859 /*
5860 * atomic_set - set atomic variable
5861@@ -38,7 +62,15 @@
5862 *
5863 * Atomically sets the value of @v to @i.
5864 */
5865-#define atomic_set(v, i) ((v)->counter = (i))
5866+static inline void atomic_set(atomic_t *v, int i)
5867+{
5868+ v->counter = i;
5869+}
5870+
5871+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5872+{
5873+ v->counter = i;
5874+}
5875
5876 /*
5877 * atomic_add - add integer to atomic variable
5878@@ -47,7 +79,67 @@
5879 *
5880 * Atomically adds @i to @v.
5881 */
5882-static __inline__ void atomic_add(int i, atomic_t * v)
5883+static __inline__ void atomic_add(int i, atomic_t *v)
5884+{
5885+ int temp;
5886+
5887+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5888+ __asm__ __volatile__(
5889+ " .set mips3 \n"
5890+ "1: ll %0, %1 # atomic_add \n"
5891+#ifdef CONFIG_PAX_REFCOUNT
5892+ /* Exception on overflow. */
5893+ "2: add %0, %2 \n"
5894+#else
5895+ " addu %0, %2 \n"
5896+#endif
5897+ " sc %0, %1 \n"
5898+ " beqzl %0, 1b \n"
5899+#ifdef CONFIG_PAX_REFCOUNT
5900+ "3: \n"
5901+ _ASM_EXTABLE(2b, 3b)
5902+#endif
5903+ " .set mips0 \n"
5904+ : "=&r" (temp), "+m" (v->counter)
5905+ : "Ir" (i));
5906+ } else if (kernel_uses_llsc) {
5907+ __asm__ __volatile__(
5908+ " .set mips3 \n"
5909+ "1: ll %0, %1 # atomic_add \n"
5910+#ifdef CONFIG_PAX_REFCOUNT
5911+ /* Exception on overflow. */
5912+ "2: add %0, %2 \n"
5913+#else
5914+ " addu %0, %2 \n"
5915+#endif
5916+ " sc %0, %1 \n"
5917+ " beqz %0, 1b \n"
5918+#ifdef CONFIG_PAX_REFCOUNT
5919+ "3: \n"
5920+ _ASM_EXTABLE(2b, 3b)
5921+#endif
5922+ " .set mips0 \n"
5923+ : "=&r" (temp), "+m" (v->counter)
5924+ : "Ir" (i));
5925+ } else {
5926+ unsigned long flags;
5927+
5928+ raw_local_irq_save(flags);
5929+ __asm__ __volatile__(
5930+#ifdef CONFIG_PAX_REFCOUNT
5931+ /* Exception on overflow. */
5932+ "1: add %0, %1 \n"
5933+ "2: \n"
5934+ _ASM_EXTABLE(1b, 2b)
5935+#else
5936+ " addu %0, %1 \n"
5937+#endif
5938+ : "+r" (v->counter) : "Ir" (i));
5939+ raw_local_irq_restore(flags);
5940+ }
5941+}
5942+
5943+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5944 {
5945 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5946 int temp;
5947@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5948 *
5949 * Atomically subtracts @i from @v.
5950 */
5951-static __inline__ void atomic_sub(int i, atomic_t * v)
5952+static __inline__ void atomic_sub(int i, atomic_t *v)
5953+{
5954+ int temp;
5955+
5956+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
5957+ __asm__ __volatile__(
5958+ " .set mips3 \n"
5959+"1:	ll	%0, %1		# atomic_sub		\n"
5960+#ifdef CONFIG_PAX_REFCOUNT
5961+ /* Exception on overflow. */
5962+ "2: sub %0, %2 \n"
5963+#else
5964+ " subu %0, %2 \n"
5965+#endif
5966+ " sc %0, %1 \n"
5967+ " beqzl %0, 1b \n"
5968+#ifdef CONFIG_PAX_REFCOUNT
5969+ "3: \n"
5970+ _ASM_EXTABLE(2b, 3b)
5971+#endif
5972+ " .set mips0 \n"
5973+ : "=&r" (temp), "+m" (v->counter)
5974+ : "Ir" (i));
5975+ } else if (kernel_uses_llsc) {
5976+ __asm__ __volatile__(
5977+ " .set mips3 \n"
5978+"1:	ll	%0, %1		# atomic_sub		\n"
5979+#ifdef CONFIG_PAX_REFCOUNT
5980+ /* Exception on overflow. */
5981+ "2: sub %0, %2 \n"
5982+#else
5983+ " subu %0, %2 \n"
5984+#endif
5985+ " sc %0, %1 \n"
5986+ " beqz %0, 1b \n"
5987+#ifdef CONFIG_PAX_REFCOUNT
5988+ "3: \n"
5989+ _ASM_EXTABLE(2b, 3b)
5990+#endif
5991+ " .set mips0 \n"
5992+ : "=&r" (temp), "+m" (v->counter)
5993+ : "Ir" (i));
5994+ } else {
5995+ unsigned long flags;
5996+
5997+ raw_local_irq_save(flags);
5998+ __asm__ __volatile__(
5999+#ifdef CONFIG_PAX_REFCOUNT
6000+ /* Exception on overflow. */
6001+ "1: sub %0, %1 \n"
6002+ "2: \n"
6003+ _ASM_EXTABLE(1b, 2b)
6004+#else
6005+ " subu %0, %1 \n"
6006+#endif
6007+ : "+r" (v->counter) : "Ir" (i));
6008+ raw_local_irq_restore(flags);
6009+ }
6010+}
6011+
6012+static __inline__ void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6013 {
6014 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6015 int temp;
6016@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
6017 /*
6018 * Same as above, but return the result value
6019 */
6020-static __inline__ int atomic_add_return(int i, atomic_t * v)
6021+static __inline__ int atomic_add_return(int i, atomic_t *v)
6022+{
6023+ int result;
6024+ int temp;
6025+
6026+ smp_mb__before_llsc();
6027+
6028+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6029+ __asm__ __volatile__(
6030+ " .set mips3 \n"
6031+ "1: ll %1, %2 # atomic_add_return \n"
6032+#ifdef CONFIG_PAX_REFCOUNT
6033+ "2: add %0, %1, %3 \n"
6034+#else
6035+ " addu %0, %1, %3 \n"
6036+#endif
6037+ " sc %0, %2 \n"
6038+ " beqzl %0, 1b \n"
6039+#ifdef CONFIG_PAX_REFCOUNT
6040+ " b 4f \n"
6041+ " .set noreorder \n"
6042+ "3: b 5f \n"
6043+ " move %0, %1 \n"
6044+ " .set reorder \n"
6045+ _ASM_EXTABLE(2b, 3b)
6046+#endif
6047+ "4: addu %0, %1, %3 \n"
6048+#ifdef CONFIG_PAX_REFCOUNT
6049+ "5: \n"
6050+#endif
6051+ " .set mips0 \n"
6052+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6053+ : "Ir" (i));
6054+ } else if (kernel_uses_llsc) {
6055+ __asm__ __volatile__(
6056+ " .set mips3 \n"
6057+ "1: ll %1, %2 # atomic_add_return \n"
6058+#ifdef CONFIG_PAX_REFCOUNT
6059+ "2: add %0, %1, %3 \n"
6060+#else
6061+ " addu %0, %1, %3 \n"
6062+#endif
6063+ " sc %0, %2 \n"
6064+ " bnez %0, 4f \n"
6065+ " b 1b \n"
6066+#ifdef CONFIG_PAX_REFCOUNT
6067+ " .set noreorder \n"
6068+ "3: b 5f \n"
6069+ " move %0, %1 \n"
6070+ " .set reorder \n"
6071+ _ASM_EXTABLE(2b, 3b)
6072+#endif
6073+ "4: addu %0, %1, %3 \n"
6074+#ifdef CONFIG_PAX_REFCOUNT
6075+ "5: \n"
6076+#endif
6077+ " .set mips0 \n"
6078+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6079+ : "Ir" (i));
6080+ } else {
6081+ unsigned long flags;
6082+
6083+ raw_local_irq_save(flags);
6084+ __asm__ __volatile__(
6085+ " lw %0, %1 \n"
6086+#ifdef CONFIG_PAX_REFCOUNT
6087+ /* Exception on overflow. */
6088+ "1: add %0, %2 \n"
6089+#else
6090+ " addu %0, %2 \n"
6091+#endif
6092+ " sw %0, %1 \n"
6093+#ifdef CONFIG_PAX_REFCOUNT
6094+ /* Note: Dest reg is not modified on overflow */
6095+ "2: \n"
6096+ _ASM_EXTABLE(1b, 2b)
6097+#endif
6098+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6099+ raw_local_irq_restore(flags);
6100+ }
6101+
6102+ smp_llsc_mb();
6103+
6104+ return result;
6105+}
6106+
6107+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6108 {
6109 int result;
6110
6111@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
6112 return result;
6113 }
6114
6115-static __inline__ int atomic_sub_return(int i, atomic_t * v)
6116+static __inline__ int atomic_sub_return(int i, atomic_t *v)
6117+{
6118+ int result;
6119+ int temp;
6120+
6121+ smp_mb__before_llsc();
6122+
6123+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6124+ __asm__ __volatile__(
6125+ " .set mips3 \n"
6126+ "1: ll %1, %2 # atomic_sub_return \n"
6127+#ifdef CONFIG_PAX_REFCOUNT
6128+ "2: sub %0, %1, %3 \n"
6129+#else
6130+ " subu %0, %1, %3 \n"
6131+#endif
6132+ " sc %0, %2 \n"
6133+ " beqzl %0, 1b \n"
6134+#ifdef CONFIG_PAX_REFCOUNT
6135+ " b 4f \n"
6136+ " .set noreorder \n"
6137+ "3: b 5f \n"
6138+ " move %0, %1 \n"
6139+ " .set reorder \n"
6140+ _ASM_EXTABLE(2b, 3b)
6141+#endif
6142+ "4: subu %0, %1, %3 \n"
6143+#ifdef CONFIG_PAX_REFCOUNT
6144+ "5: \n"
6145+#endif
6146+ " .set mips0 \n"
6147+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6148+ : "Ir" (i), "m" (v->counter)
6149+ : "memory");
6150+ } else if (kernel_uses_llsc) {
6151+ __asm__ __volatile__(
6152+ " .set mips3 \n"
6153+ "1: ll %1, %2 # atomic_sub_return \n"
6154+#ifdef CONFIG_PAX_REFCOUNT
6155+ "2: sub %0, %1, %3 \n"
6156+#else
6157+ " subu %0, %1, %3 \n"
6158+#endif
6159+ " sc %0, %2 \n"
6160+ " bnez %0, 4f \n"
6161+ " b 1b \n"
6162+#ifdef CONFIG_PAX_REFCOUNT
6163+ " .set noreorder \n"
6164+ "3: b 5f \n"
6165+ " move %0, %1 \n"
6166+ " .set reorder \n"
6167+ _ASM_EXTABLE(2b, 3b)
6168+#endif
6169+ "4: subu %0, %1, %3 \n"
6170+#ifdef CONFIG_PAX_REFCOUNT
6171+ "5: \n"
6172+#endif
6173+ " .set mips0 \n"
6174+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6175+ : "Ir" (i));
6176+ } else {
6177+ unsigned long flags;
6178+
6179+ raw_local_irq_save(flags);
6180+ __asm__ __volatile__(
6181+ " lw %0, %1 \n"
6182+#ifdef CONFIG_PAX_REFCOUNT
6183+ /* Exception on overflow. */
6184+ "1: sub %0, %2 \n"
6185+#else
6186+ " subu %0, %2 \n"
6187+#endif
6188+ " sw %0, %1 \n"
6189+#ifdef CONFIG_PAX_REFCOUNT
6190+ /* Note: Dest reg is not modified on overflow */
6191+ "2: \n"
6192+ _ASM_EXTABLE(1b, 2b)
6193+#endif
6194+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6195+ raw_local_irq_restore(flags);
6196+ }
6197+
6198+ smp_llsc_mb();
6199+
6200+ return result;
6201+}
6202+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
6203 {
6204 int result;
6205
6206@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
6207 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6208 * The function returns the old value of @v minus @i.
6209 */
6210-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6211+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6212 {
6213 int result;
6214
6215@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6216 return result;
6217 }
6218
6219-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6220-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6221+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6222+{
6223+ return cmpxchg(&v->counter, old, new);
6224+}
6225+
6226+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6227+ int new)
6228+{
6229+ return cmpxchg(&(v->counter), old, new);
6230+}
6231+
6232+static inline int atomic_xchg(atomic_t *v, int new)
6233+{
6234+ return xchg(&v->counter, new);
6235+}
6236+
6237+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6238+{
6239+ return xchg(&(v->counter), new);
6240+}
6241
6242 /**
6243 * __atomic_add_unless - add unless the number is a given value
6244@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6245
6246 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6247 #define atomic_inc_return(v) atomic_add_return(1, (v))
6248+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6249+{
6250+ return atomic_add_return_unchecked(1, v);
6251+}
6252
6253 /*
6254 * atomic_sub_and_test - subtract value from variable and test result
6255@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6256 * other cases.
6257 */
6258 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6259+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6260+{
6261+ return atomic_add_return_unchecked(1, v) == 0;
6262+}
6263
6264 /*
6265 * atomic_dec_and_test - decrement by 1 and test
6266@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6267 * Atomically increments @v by 1.
6268 */
6269 #define atomic_inc(v) atomic_add(1, (v))
6270+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6271+{
6272+ atomic_add_unchecked(1, v);
6273+}
6274
6275 /*
6276 * atomic_dec - decrement and test
6277@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6278 * Atomically decrements @v by 1.
6279 */
6280 #define atomic_dec(v) atomic_sub(1, (v))
6281+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6282+{
6283+ atomic_sub_unchecked(1, v);
6284+}
6285
6286 /*
6287 * atomic_add_negative - add and test if negative
6288@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6289 * @v: pointer of type atomic64_t
6290 *
6291 */
6292-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
6293+static inline long atomic64_read(const atomic64_t *v)
6294+{
6295+ return (*(volatile const long *) &v->counter);
6296+}
6297+
6298+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6299+{
6300+ return (*(volatile const long *) &v->counter);
6301+}
6302
6303 /*
6304 * atomic64_set - set atomic variable
6305 * @v: pointer of type atomic64_t
6306 * @i: required value
6307 */
6308-#define atomic64_set(v, i) ((v)->counter = (i))
6309+static inline void atomic64_set(atomic64_t *v, long i)
6310+{
6311+ v->counter = i;
6312+}
6313+
6314+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6315+{
6316+ v->counter = i;
6317+}
6318
6319 /*
6320 * atomic64_add - add integer to atomic variable
6321@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6322 *
6323 * Atomically adds @i to @v.
6324 */
6325-static __inline__ void atomic64_add(long i, atomic64_t * v)
6326+static __inline__ void atomic64_add(long i, atomic64_t *v)
6327+{
6328+ long temp;
6329+
6330+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6331+ __asm__ __volatile__(
6332+ " .set mips3 \n"
6333+ "1: lld %0, %1 # atomic64_add \n"
6334+#ifdef CONFIG_PAX_REFCOUNT
6335+ /* Exception on overflow. */
6336+ "2: dadd %0, %2 \n"
6337+#else
6338+ " daddu %0, %2 \n"
6339+#endif
6340+ " scd %0, %1 \n"
6341+ " beqzl %0, 1b \n"
6342+#ifdef CONFIG_PAX_REFCOUNT
6343+ "3: \n"
6344+ _ASM_EXTABLE(2b, 3b)
6345+#endif
6346+ " .set mips0 \n"
6347+ : "=&r" (temp), "+m" (v->counter)
6348+ : "Ir" (i));
6349+ } else if (kernel_uses_llsc) {
6350+ __asm__ __volatile__(
6351+ " .set mips3 \n"
6352+ "1: lld %0, %1 # atomic64_add \n"
6353+#ifdef CONFIG_PAX_REFCOUNT
6354+ /* Exception on overflow. */
6355+ "2: dadd %0, %2 \n"
6356+#else
6357+ " daddu %0, %2 \n"
6358+#endif
6359+ " scd %0, %1 \n"
6360+ " beqz %0, 1b \n"
6361+#ifdef CONFIG_PAX_REFCOUNT
6362+ "3: \n"
6363+ _ASM_EXTABLE(2b, 3b)
6364+#endif
6365+ " .set mips0 \n"
6366+ : "=&r" (temp), "+m" (v->counter)
6367+ : "Ir" (i));
6368+ } else {
6369+ unsigned long flags;
6370+
6371+ raw_local_irq_save(flags);
6372+ __asm__ __volatile__(
6373+#ifdef CONFIG_PAX_REFCOUNT
6374+ /* Exception on overflow. */
6375+ "1: dadd %0, %1 \n"
6376+ "2: \n"
6377+ _ASM_EXTABLE(1b, 2b)
6378+#else
6379+ " daddu %0, %1 \n"
6380+#endif
6381+ : "+r" (v->counter) : "Ir" (i));
6382+ raw_local_irq_restore(flags);
6383+ }
6384+}
6385+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6386 {
6387 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6388 long temp;
6389@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
6390 *
6391 * Atomically subtracts @i from @v.
6392 */
6393-static __inline__ void atomic64_sub(long i, atomic64_t * v)
6394+static __inline__ void atomic64_sub(long i, atomic64_t *v)
6395+{
6396+ long temp;
6397+
6398+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6399+ __asm__ __volatile__(
6400+ " .set mips3 \n"
6401+ "1: lld %0, %1 # atomic64_sub \n"
6402+#ifdef CONFIG_PAX_REFCOUNT
6403+ /* Exception on overflow. */
6404+ "2: dsub %0, %2 \n"
6405+#else
6406+ " dsubu %0, %2 \n"
6407+#endif
6408+ " scd %0, %1 \n"
6409+ " beqzl %0, 1b \n"
6410+#ifdef CONFIG_PAX_REFCOUNT
6411+ "3: \n"
6412+ _ASM_EXTABLE(2b, 3b)
6413+#endif
6414+ " .set mips0 \n"
6415+ : "=&r" (temp), "+m" (v->counter)
6416+ : "Ir" (i));
6417+ } else if (kernel_uses_llsc) {
6418+ __asm__ __volatile__(
6419+ " .set mips3 \n"
6420+ "1: lld %0, %1 # atomic64_sub \n"
6421+#ifdef CONFIG_PAX_REFCOUNT
6422+ /* Exception on overflow. */
6423+ "2: dsub %0, %2 \n"
6424+#else
6425+ " dsubu %0, %2 \n"
6426+#endif
6427+ " scd %0, %1 \n"
6428+ " beqz %0, 1b \n"
6429+#ifdef CONFIG_PAX_REFCOUNT
6430+ "3: \n"
6431+ _ASM_EXTABLE(2b, 3b)
6432+#endif
6433+ " .set mips0 \n"
6434+ : "=&r" (temp), "+m" (v->counter)
6435+ : "Ir" (i));
6436+ } else {
6437+ unsigned long flags;
6438+
6439+ raw_local_irq_save(flags);
6440+ __asm__ __volatile__(
6441+#ifdef CONFIG_PAX_REFCOUNT
6442+ /* Exception on overflow. */
6443+ "1: dsub %0, %1 \n"
6444+ "2: \n"
6445+ _ASM_EXTABLE(1b, 2b)
6446+#else
6447+ " dsubu %0, %1 \n"
6448+#endif
6449+ : "+r" (v->counter) : "Ir" (i));
6450+ raw_local_irq_restore(flags);
6451+ }
6452+}
6453+
6454+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6455 {
6456 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6457 long temp;
6458@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6459 /*
6460 * Same as above, but return the result value
6461 */
6462-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6463+static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6464+{
6465+ long result;
6466+ long temp;
6467+
6468+ smp_mb__before_llsc();
6469+
6470+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6471+ __asm__ __volatile__(
6472+ " .set mips3 \n"
6473+ "1: lld %1, %2 # atomic64_add_return \n"
6474+#ifdef CONFIG_PAX_REFCOUNT
6475+ "2: dadd %0, %1, %3 \n"
6476+#else
6477+ " daddu %0, %1, %3 \n"
6478+#endif
6479+ " scd %0, %2 \n"
6480+ " beqzl %0, 1b \n"
6481+#ifdef CONFIG_PAX_REFCOUNT
6482+ " b 4f \n"
6483+ " .set noreorder \n"
6484+ "3: b 5f \n"
6485+ " move %0, %1 \n"
6486+ " .set reorder \n"
6487+ _ASM_EXTABLE(2b, 3b)
6488+#endif
6489+ "4: daddu %0, %1, %3 \n"
6490+#ifdef CONFIG_PAX_REFCOUNT
6491+ "5: \n"
6492+#endif
6493+ " .set mips0 \n"
6494+ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6495+ : "Ir" (i));
6496+ } else if (kernel_uses_llsc) {
6497+ __asm__ __volatile__(
6498+ " .set mips3 \n"
6499+ "1: lld %1, %2 # atomic64_add_return \n"
6500+#ifdef CONFIG_PAX_REFCOUNT
6501+ "2: dadd %0, %1, %3 \n"
6502+#else
6503+ " daddu %0, %1, %3 \n"
6504+#endif
6505+ " scd %0, %2 \n"
6506+ " bnez %0, 4f \n"
6507+ " b 1b \n"
6508+#ifdef CONFIG_PAX_REFCOUNT
6509+ " .set noreorder \n"
6510+ "3: b 5f \n"
6511+ " move %0, %1 \n"
6512+ " .set reorder \n"
6513+ _ASM_EXTABLE(2b, 3b)
6514+#endif
6515+ "4: daddu %0, %1, %3 \n"
6516+#ifdef CONFIG_PAX_REFCOUNT
6517+ "5: \n"
6518+#endif
6519+ " .set mips0 \n"
6520+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6521+ : "Ir" (i), "m" (v->counter)
6522+ : "memory");
6523+ } else {
6524+ unsigned long flags;
6525+
6526+ raw_local_irq_save(flags);
6527+ __asm__ __volatile__(
6528+ " ld %0, %1 \n"
6529+#ifdef CONFIG_PAX_REFCOUNT
6530+ /* Exception on overflow. */
6531+ "1: dadd %0, %2 \n"
6532+#else
6533+ " daddu %0, %2 \n"
6534+#endif
6535+ " sd %0, %1 \n"
6536+#ifdef CONFIG_PAX_REFCOUNT
6537+ /* Note: Dest reg is not modified on overflow */
6538+ "2: \n"
6539+ _ASM_EXTABLE(1b, 2b)
6540+#endif
6541+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6542+ raw_local_irq_restore(flags);
6543+ }
6544+
6545+ smp_llsc_mb();
6546+
6547+ return result;
6548+}
6549+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6550 {
6551 long result;
6552
6553@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6554 return result;
6555 }
6556
6557-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6558+static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6559+{
6560+ long result;
6561+ long temp;
6562+
6563+ smp_mb__before_llsc();
6564+
6565+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6566+ long temp;
6567+
6568+ __asm__ __volatile__(
6569+ " .set mips3 \n"
6570+ "1: lld %1, %2 # atomic64_sub_return \n"
6571+#ifdef CONFIG_PAX_REFCOUNT
6572+ "2: dsub %0, %1, %3 \n"
6573+#else
6574+ " dsubu %0, %1, %3 \n"
6575+#endif
6576+ " scd %0, %2 \n"
6577+ " beqzl %0, 1b \n"
6578+#ifdef CONFIG_PAX_REFCOUNT
6579+ " b 4f \n"
6580+ " .set noreorder \n"
6581+ "3: b 5f \n"
6582+ " move %0, %1 \n"
6583+ " .set reorder \n"
6584+ _ASM_EXTABLE(2b, 3b)
6585+#endif
6586+ "4: dsubu %0, %1, %3 \n"
6587+#ifdef CONFIG_PAX_REFCOUNT
6588+ "5: \n"
6589+#endif
6590+ " .set mips0 \n"
6591+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6592+ : "Ir" (i), "m" (v->counter)
6593+ : "memory");
6594+ } else if (kernel_uses_llsc) {
6595+ __asm__ __volatile__(
6596+ " .set mips3 \n"
6597+ "1: lld %1, %2 # atomic64_sub_return \n"
6598+#ifdef CONFIG_PAX_REFCOUNT
6599+ "2: dsub %0, %1, %3 \n"
6600+#else
6601+ " dsubu %0, %1, %3 \n"
6602+#endif
6603+ " scd %0, %2 \n"
6604+ " bnez %0, 4f \n"
6605+ " b 1b \n"
6606+#ifdef CONFIG_PAX_REFCOUNT
6607+ " .set noreorder \n"
6608+ "3: b 5f \n"
6609+ " move %0, %1 \n"
6610+ " .set reorder \n"
6611+ _ASM_EXTABLE(2b, 3b)
6612+#endif
6613+ "4: dsubu %0, %1, %3 \n"
6614+#ifdef CONFIG_PAX_REFCOUNT
6615+ "5: \n"
6616+#endif
6617+ " .set mips0 \n"
6618+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6619+ : "Ir" (i), "m" (v->counter)
6620+ : "memory");
6621+ } else {
6622+ unsigned long flags;
6623+
6624+ raw_local_irq_save(flags);
6625+ __asm__ __volatile__(
6626+ " ld %0, %1 \n"
6627+#ifdef CONFIG_PAX_REFCOUNT
6628+ /* Exception on overflow. */
6629+ "1: dsub %0, %2 \n"
6630+#else
6631+ " dsubu %0, %2 \n"
6632+#endif
6633+ " sd %0, %1 \n"
6634+#ifdef CONFIG_PAX_REFCOUNT
6635+ /* Note: Dest reg is not modified on overflow */
6636+ "2: \n"
6637+ _ASM_EXTABLE(1b, 2b)
6638+#endif
6639+ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6640+ raw_local_irq_restore(flags);
6641+ }
6642+
6643+ smp_llsc_mb();
6644+
6645+ return result;
6646+}
6647+
6648+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6649 {
6650 long result;
6651
6652@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6653 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6654 * The function returns the old value of @v minus @i.
6655 */
6656-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6657+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6658 {
6659 long result;
6660
6661@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6662 return result;
6663 }
6664
6665-#define atomic64_cmpxchg(v, o, n) \
6666- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6667-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6668+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6669+{
6670+ return cmpxchg(&v->counter, old, new);
6671+}
6672+
6673+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6674+ long new)
6675+{
6676+ return cmpxchg(&(v->counter), old, new);
6677+}
6678+
6679+static inline long atomic64_xchg(atomic64_t *v, long new)
6680+{
6681+ return xchg(&v->counter, new);
6682+}
6683+
6684+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6685+{
6686+ return xchg(&(v->counter), new);
6687+}
6688
6689 /**
6690 * atomic64_add_unless - add unless the number is a given value
6691@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6692
6693 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6694 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6695+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6696
6697 /*
6698 * atomic64_sub_and_test - subtract value from variable and test result
6699@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6700 * other cases.
6701 */
6702 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6703+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6704
6705 /*
6706 * atomic64_dec_and_test - decrement by 1 and test
6707@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6708 * Atomically increments @v by 1.
6709 */
6710 #define atomic64_inc(v) atomic64_add(1, (v))
6711+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6712
6713 /*
6714 * atomic64_dec - decrement and test
6715@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6716 * Atomically decrements @v by 1.
6717 */
6718 #define atomic64_dec(v) atomic64_sub(1, (v))
6719+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6720
6721 /*
6722 * atomic64_add_negative - add and test if negative
6723diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6724index d0101dd..266982c 100644
6725--- a/arch/mips/include/asm/barrier.h
6726+++ b/arch/mips/include/asm/barrier.h
6727@@ -184,7 +184,7 @@
6728 do { \
6729 compiletime_assert_atomic_type(*p); \
6730 smp_mb(); \
6731- ACCESS_ONCE(*p) = (v); \
6732+ ACCESS_ONCE_RW(*p) = (v); \
6733 } while (0)
6734
6735 #define smp_load_acquire(p) \
6736diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6737index b4db69f..8f3b093 100644
6738--- a/arch/mips/include/asm/cache.h
6739+++ b/arch/mips/include/asm/cache.h
6740@@ -9,10 +9,11 @@
6741 #ifndef _ASM_CACHE_H
6742 #define _ASM_CACHE_H
6743
6744+#include <linux/const.h>
6745 #include <kmalloc.h>
6746
6747 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6748-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6749+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6750
6751 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6752 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6753diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6754index 1d38fe0..9beabc9 100644
6755--- a/arch/mips/include/asm/elf.h
6756+++ b/arch/mips/include/asm/elf.h
6757@@ -381,13 +381,16 @@ extern const char *__elf_platform;
6758 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6759 #endif
6760
6761+#ifdef CONFIG_PAX_ASLR
6762+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6763+
6764+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6765+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6766+#endif
6767+
6768 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6769 struct linux_binprm;
6770 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6771 int uses_interp);
6772
6773-struct mm_struct;
6774-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6775-#define arch_randomize_brk arch_randomize_brk
6776-
6777 #endif /* _ASM_ELF_H */
6778diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6779index c1f6afa..38cc6e9 100644
6780--- a/arch/mips/include/asm/exec.h
6781+++ b/arch/mips/include/asm/exec.h
6782@@ -12,6 +12,6 @@
6783 #ifndef _ASM_EXEC_H
6784 #define _ASM_EXEC_H
6785
6786-extern unsigned long arch_align_stack(unsigned long sp);
6787+#define arch_align_stack(x) ((x) & ~0xfUL)
6788
6789 #endif /* _ASM_EXEC_H */
6790diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6791index 9e8ef59..1139d6b 100644
6792--- a/arch/mips/include/asm/hw_irq.h
6793+++ b/arch/mips/include/asm/hw_irq.h
6794@@ -10,7 +10,7 @@
6795
6796 #include <linux/atomic.h>
6797
6798-extern atomic_t irq_err_count;
6799+extern atomic_unchecked_t irq_err_count;
6800
6801 /*
6802 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6803diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6804index 46dfc3c..a16b13a 100644
6805--- a/arch/mips/include/asm/local.h
6806+++ b/arch/mips/include/asm/local.h
6807@@ -12,15 +12,25 @@ typedef struct
6808 atomic_long_t a;
6809 } local_t;
6810
6811+typedef struct {
6812+ atomic_long_unchecked_t a;
6813+} local_unchecked_t;
6814+
6815 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6816
6817 #define local_read(l) atomic_long_read(&(l)->a)
6818+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6819 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6820+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6821
6822 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6823+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6824 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6825+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6826 #define local_inc(l) atomic_long_inc(&(l)->a)
6827+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6828 #define local_dec(l) atomic_long_dec(&(l)->a)
6829+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6830
6831 /*
6832 * Same as above, but return the result value
6833@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6834 return result;
6835 }
6836
6837+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6838+{
6839+ unsigned long result;
6840+
6841+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6842+ unsigned long temp;
6843+
6844+ __asm__ __volatile__(
6845+ " .set mips3 \n"
6846+ "1:" __LL "%1, %2 # local_add_return \n"
6847+ " addu %0, %1, %3 \n"
6848+ __SC "%0, %2 \n"
6849+ " beqzl %0, 1b \n"
6850+ " addu %0, %1, %3 \n"
6851+ " .set mips0 \n"
6852+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6853+ : "Ir" (i), "m" (l->a.counter)
6854+ : "memory");
6855+ } else if (kernel_uses_llsc) {
6856+ unsigned long temp;
6857+
6858+ __asm__ __volatile__(
6859+ " .set mips3 \n"
6860+ "1:" __LL "%1, %2 # local_add_return \n"
6861+ " addu %0, %1, %3 \n"
6862+ __SC "%0, %2 \n"
6863+ " beqz %0, 1b \n"
6864+ " addu %0, %1, %3 \n"
6865+ " .set mips0 \n"
6866+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6867+ : "Ir" (i), "m" (l->a.counter)
6868+ : "memory");
6869+ } else {
6870+ unsigned long flags;
6871+
6872+ local_irq_save(flags);
6873+ result = l->a.counter;
6874+ result += i;
6875+ l->a.counter = result;
6876+ local_irq_restore(flags);
6877+ }
6878+
6879+ return result;
6880+}
6881+
6882 static __inline__ long local_sub_return(long i, local_t * l)
6883 {
6884 unsigned long result;
6885@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6886
6887 #define local_cmpxchg(l, o, n) \
6888 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6889+#define local_cmpxchg_unchecked(l, o, n) \
6890+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6891 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6892
6893 /**
6894diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6895index 3be8180..c4798d5 100644
6896--- a/arch/mips/include/asm/page.h
6897+++ b/arch/mips/include/asm/page.h
6898@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6899 #ifdef CONFIG_CPU_MIPS32
6900 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6901 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6902- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6903+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6904 #else
6905 typedef struct { unsigned long long pte; } pte_t;
6906 #define pte_val(x) ((x).pte)
6907diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6908index b336037..5b874cc 100644
6909--- a/arch/mips/include/asm/pgalloc.h
6910+++ b/arch/mips/include/asm/pgalloc.h
6911@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6912 {
6913 set_pud(pud, __pud((unsigned long)pmd));
6914 }
6915+
6916+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6917+{
6918+ pud_populate(mm, pud, pmd);
6919+}
6920 #endif
6921
6922 /*
6923diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6924index df49a30..c0d3dd6 100644
6925--- a/arch/mips/include/asm/pgtable.h
6926+++ b/arch/mips/include/asm/pgtable.h
6927@@ -20,6 +20,9 @@
6928 #include <asm/io.h>
6929 #include <asm/pgtable-bits.h>
6930
6931+#define ktla_ktva(addr) (addr)
6932+#define ktva_ktla(addr) (addr)
6933+
6934 struct mm_struct;
6935 struct vm_area_struct;
6936
6937diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6938index 7de8658..c109224 100644
6939--- a/arch/mips/include/asm/thread_info.h
6940+++ b/arch/mips/include/asm/thread_info.h
6941@@ -105,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6942 #define TIF_SECCOMP 4 /* secure computing */
6943 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6944 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6945+/* li takes a 32bit immediate */
6946+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6947+
6948 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6949 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6950 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6951@@ -138,14 +141,16 @@ static inline struct thread_info *current_thread_info(void)
6952 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6953 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6954 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6955+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6956
6957 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6958 _TIF_SYSCALL_AUDIT | \
6959- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6960+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6961+ _TIF_GRSEC_SETXID)
6962
6963 /* work to do in syscall_trace_leave() */
6964 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6965- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6966+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6967
6968 /* work to do on interrupt/exception return */
6969 #define _TIF_WORK_MASK \
6970@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
6971 /* work to do on any return to u-space */
6972 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6973 _TIF_WORK_SYSCALL_EXIT | \
6974- _TIF_SYSCALL_TRACEPOINT)
6975+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6976
6977 /*
6978 * We stash processor id into a COP0 register to retrieve it fast
6979diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6980index b9ab717..3a15c28 100644
6981--- a/arch/mips/include/asm/uaccess.h
6982+++ b/arch/mips/include/asm/uaccess.h
6983@@ -130,6 +130,7 @@ extern u64 __ua_limit;
6984 __ok == 0; \
6985 })
6986
6987+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6988 #define access_ok(type, addr, size) \
6989 likely(__access_ok((addr), (size), __access_mask))
6990
6991@@ -301,7 +302,8 @@ do { \
6992 __get_kernel_common((x), size, __gu_ptr); \
6993 else \
6994 __get_user_common((x), size, __gu_ptr); \
6995- } \
6996+ } else \
6997+ (x) = 0; \
6998 \
6999 __gu_err; \
7000 })
7001@@ -316,6 +318,7 @@ do { \
7002 " .insn \n" \
7003 " .section .fixup,\"ax\" \n" \
7004 "3: li %0, %4 \n" \
7005+ " move %1, $0 \n" \
7006 " j 2b \n" \
7007 " .previous \n" \
7008 " .section __ex_table,\"a\" \n" \
7009@@ -630,6 +633,7 @@ do { \
7010 " .insn \n" \
7011 " .section .fixup,\"ax\" \n" \
7012 "3: li %0, %4 \n" \
7013+ " move %1, $0 \n" \
7014 " j 2b \n" \
7015 " .previous \n" \
7016 " .section __ex_table,\"a\" \n" \
7017diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
7018index 1188e00..41cf144 100644
7019--- a/arch/mips/kernel/binfmt_elfn32.c
7020+++ b/arch/mips/kernel/binfmt_elfn32.c
7021@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7022 #undef ELF_ET_DYN_BASE
7023 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7024
7025+#ifdef CONFIG_PAX_ASLR
7026+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7027+
7028+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7029+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7030+#endif
7031+
7032 #include <asm/processor.h>
7033 #include <linux/module.h>
7034 #include <linux/elfcore.h>
7035diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
7036index 9287678..f870e47 100644
7037--- a/arch/mips/kernel/binfmt_elfo32.c
7038+++ b/arch/mips/kernel/binfmt_elfo32.c
7039@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
7040 #undef ELF_ET_DYN_BASE
7041 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
7042
7043+#ifdef CONFIG_PAX_ASLR
7044+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
7045+
7046+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7047+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
7048+#endif
7049+
7050 #include <asm/processor.h>
7051
7052 #include <linux/module.h>
7053diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
7054index 50b3648..c2f3cec 100644
7055--- a/arch/mips/kernel/i8259.c
7056+++ b/arch/mips/kernel/i8259.c
7057@@ -201,7 +201,7 @@ spurious_8259A_irq:
7058 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
7059 spurious_irq_mask |= irqmask;
7060 }
7061- atomic_inc(&irq_err_count);
7062+ atomic_inc_unchecked(&irq_err_count);
7063 /*
7064 * Theoretically we do not have to handle this IRQ,
7065 * but in Linux this does not cause problems and is
7066diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
7067index 44a1f79..2bd6aa3 100644
7068--- a/arch/mips/kernel/irq-gt641xx.c
7069+++ b/arch/mips/kernel/irq-gt641xx.c
7070@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
7071 }
7072 }
7073
7074- atomic_inc(&irq_err_count);
7075+ atomic_inc_unchecked(&irq_err_count);
7076 }
7077
7078 void __init gt641xx_irq_init(void)
7079diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
7080index d2bfbc2..a8eacd2 100644
7081--- a/arch/mips/kernel/irq.c
7082+++ b/arch/mips/kernel/irq.c
7083@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
7084 printk("unexpected IRQ # %d\n", irq);
7085 }
7086
7087-atomic_t irq_err_count;
7088+atomic_unchecked_t irq_err_count;
7089
7090 int arch_show_interrupts(struct seq_file *p, int prec)
7091 {
7092- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
7093+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
7094 return 0;
7095 }
7096
7097 asmlinkage void spurious_interrupt(void)
7098 {
7099- atomic_inc(&irq_err_count);
7100+ atomic_inc_unchecked(&irq_err_count);
7101 }
7102
7103 void __init init_IRQ(void)
7104@@ -109,7 +109,10 @@ void __init init_IRQ(void)
7105 #endif
7106 }
7107
7108+
7109 #ifdef DEBUG_STACKOVERFLOW
7110+extern void gr_handle_kernel_exploit(void);
7111+
7112 static inline void check_stack_overflow(void)
7113 {
7114 unsigned long sp;
7115@@ -125,6 +128,7 @@ static inline void check_stack_overflow(void)
7116 printk("do_IRQ: stack overflow: %ld\n",
7117 sp - sizeof(struct thread_info));
7118 dump_stack();
7119+ gr_handle_kernel_exploit();
7120 }
7121 }
7122 #else
7123diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
7124index 0614717..002fa43 100644
7125--- a/arch/mips/kernel/pm-cps.c
7126+++ b/arch/mips/kernel/pm-cps.c
7127@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
7128 nc_core_ready_count = nc_addr;
7129
7130 /* Ensure ready_count is zero-initialised before the assembly runs */
7131- ACCESS_ONCE(*nc_core_ready_count) = 0;
7132+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
7133 coupled_barrier(&per_cpu(pm_barrier, core), online);
7134
7135 /* Run the generated entry code */
7136diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
7137index 636b074..8fbb91f 100644
7138--- a/arch/mips/kernel/process.c
7139+++ b/arch/mips/kernel/process.c
7140@@ -520,15 +520,3 @@ unsigned long get_wchan(struct task_struct *task)
7141 out:
7142 return pc;
7143 }
7144-
7145-/*
7146- * Don't forget that the stack pointer must be aligned on a 8 bytes
7147- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
7148- */
7149-unsigned long arch_align_stack(unsigned long sp)
7150-{
7151- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7152- sp -= get_random_int() & ~PAGE_MASK;
7153-
7154- return sp & ALMASK;
7155-}
7156diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
7157index 645b3c4..909c75a 100644
7158--- a/arch/mips/kernel/ptrace.c
7159+++ b/arch/mips/kernel/ptrace.c
7160@@ -761,6 +761,10 @@ long arch_ptrace(struct task_struct *child, long request,
7161 return ret;
7162 }
7163
7164+#ifdef CONFIG_GRKERNSEC_SETXID
7165+extern void gr_delayed_cred_worker(void);
7166+#endif
7167+
7168 /*
7169 * Notification of system call entry/exit
7170 * - triggered by current->work.syscall_trace
7171@@ -777,6 +781,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
7172 tracehook_report_syscall_entry(regs))
7173 ret = -1;
7174
7175+#ifdef CONFIG_GRKERNSEC_SETXID
7176+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7177+ gr_delayed_cred_worker();
7178+#endif
7179+
7180 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7181 trace_sys_enter(regs, regs->regs[2]);
7182
7183diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
7184index 07fc524..b9d7f28 100644
7185--- a/arch/mips/kernel/reset.c
7186+++ b/arch/mips/kernel/reset.c
7187@@ -13,6 +13,7 @@
7188 #include <linux/reboot.h>
7189
7190 #include <asm/reboot.h>
7191+#include <asm/bug.h>
7192
7193 /*
7194 * Urgs ... Too many MIPS machines to handle this in a generic way.
7195@@ -29,16 +30,19 @@ void machine_restart(char *command)
7196 {
7197 if (_machine_restart)
7198 _machine_restart(command);
7199+ BUG();
7200 }
7201
7202 void machine_halt(void)
7203 {
7204 if (_machine_halt)
7205 _machine_halt();
7206+ BUG();
7207 }
7208
7209 void machine_power_off(void)
7210 {
7211 if (pm_power_off)
7212 pm_power_off();
7213+ BUG();
7214 }
7215diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
7216index 2242bdd..b284048 100644
7217--- a/arch/mips/kernel/sync-r4k.c
7218+++ b/arch/mips/kernel/sync-r4k.c
7219@@ -18,8 +18,8 @@
7220 #include <asm/mipsregs.h>
7221
7222 static atomic_t count_start_flag = ATOMIC_INIT(0);
7223-static atomic_t count_count_start = ATOMIC_INIT(0);
7224-static atomic_t count_count_stop = ATOMIC_INIT(0);
7225+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
7226+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
7227 static atomic_t count_reference = ATOMIC_INIT(0);
7228
7229 #define COUNTON 100
7230@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
7231
7232 for (i = 0; i < NR_LOOPS; i++) {
7233 /* slaves loop on '!= 2' */
7234- while (atomic_read(&count_count_start) != 1)
7235+ while (atomic_read_unchecked(&count_count_start) != 1)
7236 mb();
7237- atomic_set(&count_count_stop, 0);
7238+ atomic_set_unchecked(&count_count_stop, 0);
7239 smp_wmb();
7240
7241 /* this lets the slaves write their count register */
7242- atomic_inc(&count_count_start);
7243+ atomic_inc_unchecked(&count_count_start);
7244
7245 /*
7246 * Everyone initialises count in the last loop:
7247@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
7248 /*
7249 * Wait for all slaves to leave the synchronization point:
7250 */
7251- while (atomic_read(&count_count_stop) != 1)
7252+ while (atomic_read_unchecked(&count_count_stop) != 1)
7253 mb();
7254- atomic_set(&count_count_start, 0);
7255+ atomic_set_unchecked(&count_count_start, 0);
7256 smp_wmb();
7257- atomic_inc(&count_count_stop);
7258+ atomic_inc_unchecked(&count_count_stop);
7259 }
7260 /* Arrange for an interrupt in a short while */
7261 write_c0_compare(read_c0_count() + COUNTON);
7262@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
7263 initcount = atomic_read(&count_reference);
7264
7265 for (i = 0; i < NR_LOOPS; i++) {
7266- atomic_inc(&count_count_start);
7267- while (atomic_read(&count_count_start) != 2)
7268+ atomic_inc_unchecked(&count_count_start);
7269+ while (atomic_read_unchecked(&count_count_start) != 2)
7270 mb();
7271
7272 /*
7273@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
7274 if (i == NR_LOOPS-1)
7275 write_c0_count(initcount);
7276
7277- atomic_inc(&count_count_stop);
7278- while (atomic_read(&count_count_stop) != 2)
7279+ atomic_inc_unchecked(&count_count_stop);
7280+ while (atomic_read_unchecked(&count_count_stop) != 2)
7281 mb();
7282 }
7283 /* Arrange for an interrupt in a short while */
7284diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7285index 22b19c2..c5cc8c4 100644
7286--- a/arch/mips/kernel/traps.c
7287+++ b/arch/mips/kernel/traps.c
7288@@ -688,7 +688,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7289 siginfo_t info;
7290
7291 prev_state = exception_enter();
7292- die_if_kernel("Integer overflow", regs);
7293+ if (unlikely(!user_mode(regs))) {
7294+
7295+#ifdef CONFIG_PAX_REFCOUNT
7296+ if (fixup_exception(regs)) {
7297+ pax_report_refcount_overflow(regs);
7298+ exception_exit(prev_state);
7299+ return;
7300+ }
7301+#endif
7302+
7303+ die("Integer overflow", regs);
7304+ }
7305
7306 info.si_code = FPE_INTOVF;
7307 info.si_signo = SIGFPE;
7308diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
7309index cd71141..e02c4df 100644
7310--- a/arch/mips/kvm/mips.c
7311+++ b/arch/mips/kvm/mips.c
7312@@ -839,7 +839,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
7313 return r;
7314 }
7315
7316-int kvm_arch_init(void *opaque)
7317+int kvm_arch_init(const void *opaque)
7318 {
7319 if (kvm_mips_callbacks) {
7320 kvm_err("kvm: module already exists\n");
7321diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7322index becc42b..9e43d4b 100644
7323--- a/arch/mips/mm/fault.c
7324+++ b/arch/mips/mm/fault.c
7325@@ -28,6 +28,23 @@
7326 #include <asm/highmem.h> /* For VMALLOC_END */
7327 #include <linux/kdebug.h>
7328
7329+#ifdef CONFIG_PAX_PAGEEXEC
7330+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7331+{
7332+ unsigned long i;
7333+
7334+ printk(KERN_ERR "PAX: bytes at PC: ");
7335+ for (i = 0; i < 5; i++) {
7336+ unsigned int c;
7337+ if (get_user(c, (unsigned int *)pc+i))
7338+ printk(KERN_CONT "???????? ");
7339+ else
7340+ printk(KERN_CONT "%08x ", c);
7341+ }
7342+ printk("\n");
7343+}
7344+#endif
7345+
7346 /*
7347 * This routine handles page faults. It determines the address,
7348 * and the problem, and then passes it off to one of the appropriate
7349@@ -199,6 +216,14 @@ bad_area:
7350 bad_area_nosemaphore:
7351 /* User mode accesses just cause a SIGSEGV */
7352 if (user_mode(regs)) {
7353+
7354+#ifdef CONFIG_PAX_PAGEEXEC
7355+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7356+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7357+ do_group_exit(SIGKILL);
7358+ }
7359+#endif
7360+
7361 tsk->thread.cp0_badvaddr = address;
7362 tsk->thread.error_code = write;
7363 #if 0
7364diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7365index f1baadd..5472dca 100644
7366--- a/arch/mips/mm/mmap.c
7367+++ b/arch/mips/mm/mmap.c
7368@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7369 struct vm_area_struct *vma;
7370 unsigned long addr = addr0;
7371 int do_color_align;
7372+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7373 struct vm_unmapped_area_info info;
7374
7375 if (unlikely(len > TASK_SIZE))
7376@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7377 do_color_align = 1;
7378
7379 /* requesting a specific address */
7380+
7381+#ifdef CONFIG_PAX_RANDMMAP
7382+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7383+#endif
7384+
7385 if (addr) {
7386 if (do_color_align)
7387 addr = COLOUR_ALIGN(addr, pgoff);
7388@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7389 addr = PAGE_ALIGN(addr);
7390
7391 vma = find_vma(mm, addr);
7392- if (TASK_SIZE - len >= addr &&
7393- (!vma || addr + len <= vma->vm_start))
7394+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7395 return addr;
7396 }
7397
7398 info.length = len;
7399 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7400 info.align_offset = pgoff << PAGE_SHIFT;
7401+ info.threadstack_offset = offset;
7402
7403 if (dir == DOWN) {
7404 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7405@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7406 {
7407 unsigned long random_factor = 0UL;
7408
7409+#ifdef CONFIG_PAX_RANDMMAP
7410+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7411+#endif
7412+
7413 if (current->flags & PF_RANDOMIZE) {
7414 random_factor = get_random_int();
7415 random_factor = random_factor << PAGE_SHIFT;
7416@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7417
7418 if (mmap_is_legacy()) {
7419 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
7420+
7421+#ifdef CONFIG_PAX_RANDMMAP
7422+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7423+ mm->mmap_base += mm->delta_mmap;
7424+#endif
7425+
7426 mm->get_unmapped_area = arch_get_unmapped_area;
7427 } else {
7428 mm->mmap_base = mmap_base(random_factor);
7429+
7430+#ifdef CONFIG_PAX_RANDMMAP
7431+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7432+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7433+#endif
7434+
7435 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7436 }
7437 }
7438
7439-static inline unsigned long brk_rnd(void)
7440-{
7441- unsigned long rnd = get_random_int();
7442-
7443- rnd = rnd << PAGE_SHIFT;
7444- /* 8MB for 32bit, 256MB for 64bit */
7445- if (TASK_IS_32BIT_ADDR)
7446- rnd = rnd & 0x7ffffful;
7447- else
7448- rnd = rnd & 0xffffffful;
7449-
7450- return rnd;
7451-}
7452-
7453-unsigned long arch_randomize_brk(struct mm_struct *mm)
7454-{
7455- unsigned long base = mm->brk;
7456- unsigned long ret;
7457-
7458- ret = PAGE_ALIGN(base + brk_rnd());
7459-
7460- if (ret < mm->brk)
7461- return mm->brk;
7462-
7463- return ret;
7464-}
7465-
7466 int __virt_addr_valid(const volatile void *kaddr)
7467 {
7468 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
7469diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
7470index 9f7ecbd..6e370fc 100644
7471--- a/arch/mips/net/bpf_jit.c
7472+++ b/arch/mips/net/bpf_jit.c
7473@@ -1428,5 +1428,6 @@ void bpf_jit_free(struct bpf_prog *fp)
7474 {
7475 if (fp->jited)
7476 module_free(NULL, fp->bpf_func);
7477- kfree(fp);
7478+
7479+ bpf_prog_unlock_free(fp);
7480 }
7481diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
7482index 59cccd9..f39ac2f 100644
7483--- a/arch/mips/pci/pci-octeon.c
7484+++ b/arch/mips/pci/pci-octeon.c
7485@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
7486
7487
7488 static struct pci_ops octeon_pci_ops = {
7489- octeon_read_config,
7490- octeon_write_config,
7491+ .read = octeon_read_config,
7492+ .write = octeon_write_config,
7493 };
7494
7495 static struct resource octeon_pci_mem_resource = {
7496diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
7497index 5e36c33..eb4a17b 100644
7498--- a/arch/mips/pci/pcie-octeon.c
7499+++ b/arch/mips/pci/pcie-octeon.c
7500@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
7501 }
7502
7503 static struct pci_ops octeon_pcie0_ops = {
7504- octeon_pcie0_read_config,
7505- octeon_pcie0_write_config,
7506+ .read = octeon_pcie0_read_config,
7507+ .write = octeon_pcie0_write_config,
7508 };
7509
7510 static struct resource octeon_pcie0_mem_resource = {
7511@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
7512 };
7513
7514 static struct pci_ops octeon_pcie1_ops = {
7515- octeon_pcie1_read_config,
7516- octeon_pcie1_write_config,
7517+ .read = octeon_pcie1_read_config,
7518+ .write = octeon_pcie1_write_config,
7519 };
7520
7521 static struct resource octeon_pcie1_mem_resource = {
7522@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
7523 };
7524
7525 static struct pci_ops octeon_dummy_ops = {
7526- octeon_dummy_read_config,
7527- octeon_dummy_write_config,
7528+ .read = octeon_dummy_read_config,
7529+ .write = octeon_dummy_write_config,
7530 };
7531
7532 static struct resource octeon_dummy_mem_resource = {
7533diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
7534index a2358b4..7cead4f 100644
7535--- a/arch/mips/sgi-ip27/ip27-nmi.c
7536+++ b/arch/mips/sgi-ip27/ip27-nmi.c
7537@@ -187,9 +187,9 @@ void
7538 cont_nmi_dump(void)
7539 {
7540 #ifndef REAL_NMI_SIGNAL
7541- static atomic_t nmied_cpus = ATOMIC_INIT(0);
7542+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
7543
7544- atomic_inc(&nmied_cpus);
7545+ atomic_inc_unchecked(&nmied_cpus);
7546 #endif
7547 /*
7548 * Only allow 1 cpu to proceed
7549@@ -233,7 +233,7 @@ cont_nmi_dump(void)
7550 udelay(10000);
7551 }
7552 #else
7553- while (atomic_read(&nmied_cpus) != num_online_cpus());
7554+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
7555 #endif
7556
7557 /*
7558diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
7559index a046b30..6799527 100644
7560--- a/arch/mips/sni/rm200.c
7561+++ b/arch/mips/sni/rm200.c
7562@@ -270,7 +270,7 @@ spurious_8259A_irq:
7563 "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
7564 spurious_irq_mask |= irqmask;
7565 }
7566- atomic_inc(&irq_err_count);
7567+ atomic_inc_unchecked(&irq_err_count);
7568 /*
7569 * Theoretically we do not have to handle this IRQ,
7570 * but in Linux this does not cause problems and is
7571diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
7572index 41e873b..34d33a7 100644
7573--- a/arch/mips/vr41xx/common/icu.c
7574+++ b/arch/mips/vr41xx/common/icu.c
7575@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
7576
7577 printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
7578
7579- atomic_inc(&irq_err_count);
7580+ atomic_inc_unchecked(&irq_err_count);
7581
7582 return -1;
7583 }
7584diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
7585index ae0e4ee..e8f0692 100644
7586--- a/arch/mips/vr41xx/common/irq.c
7587+++ b/arch/mips/vr41xx/common/irq.c
7588@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
7589 irq_cascade_t *cascade;
7590
7591 if (irq >= NR_IRQS) {
7592- atomic_inc(&irq_err_count);
7593+ atomic_inc_unchecked(&irq_err_count);
7594 return;
7595 }
7596
7597@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
7598 ret = cascade->get_irq(irq);
7599 irq = ret;
7600 if (ret < 0)
7601- atomic_inc(&irq_err_count);
7602+ atomic_inc_unchecked(&irq_err_count);
7603 else
7604 irq_dispatch(irq);
7605 if (!irqd_irq_disabled(idata) && chip->irq_unmask)
7606diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7607index 967d144..db12197 100644
7608--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
7609+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
7610@@ -11,12 +11,14 @@
7611 #ifndef _ASM_PROC_CACHE_H
7612 #define _ASM_PROC_CACHE_H
7613
7614+#include <linux/const.h>
7615+
7616 /* L1 cache */
7617
7618 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7619 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
7620-#define L1_CACHE_BYTES 16 /* bytes per entry */
7621 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
7622+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7623 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
7624
7625 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7626diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7627index bcb5df2..84fabd2 100644
7628--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7629+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
7630@@ -16,13 +16,15 @@
7631 #ifndef _ASM_PROC_CACHE_H
7632 #define _ASM_PROC_CACHE_H
7633
7634+#include <linux/const.h>
7635+
7636 /*
7637 * L1 cache
7638 */
7639 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
7640 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
7641-#define L1_CACHE_BYTES 32 /* bytes per entry */
7642 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7643+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7644 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7645
7646 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7647diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7648index 4ce7a01..449202a 100644
7649--- a/arch/openrisc/include/asm/cache.h
7650+++ b/arch/openrisc/include/asm/cache.h
7651@@ -19,11 +19,13 @@
7652 #ifndef __ASM_OPENRISC_CACHE_H
7653 #define __ASM_OPENRISC_CACHE_H
7654
7655+#include <linux/const.h>
7656+
7657 /* FIXME: How can we replace these with values from the CPU...
7658 * they shouldn't be hard-coded!
7659 */
7660
7661-#define L1_CACHE_BYTES 16
7662 #define L1_CACHE_SHIFT 4
7663+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7664
7665 #endif /* __ASM_OPENRISC_CACHE_H */
7666diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7667index 0be2db2..1b0f26d 100644
7668--- a/arch/parisc/include/asm/atomic.h
7669+++ b/arch/parisc/include/asm/atomic.h
7670@@ -248,6 +248,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7671 return dec;
7672 }
7673
7674+#define atomic64_read_unchecked(v) atomic64_read(v)
7675+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7676+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7677+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7678+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7679+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7680+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7681+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7682+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7683+
7684 #endif /* !CONFIG_64BIT */
7685
7686
7687diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7688index 47f11c7..3420df2 100644
7689--- a/arch/parisc/include/asm/cache.h
7690+++ b/arch/parisc/include/asm/cache.h
7691@@ -5,6 +5,7 @@
7692 #ifndef __ARCH_PARISC_CACHE_H
7693 #define __ARCH_PARISC_CACHE_H
7694
7695+#include <linux/const.h>
7696
7697 /*
7698 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7699@@ -15,13 +16,13 @@
7700 * just ruin performance.
7701 */
7702 #ifdef CONFIG_PA20
7703-#define L1_CACHE_BYTES 64
7704 #define L1_CACHE_SHIFT 6
7705 #else
7706-#define L1_CACHE_BYTES 32
7707 #define L1_CACHE_SHIFT 5
7708 #endif
7709
7710+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7711+
7712 #ifndef __ASSEMBLY__
7713
7714 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7715diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7716index 3391d06..c23a2cc 100644
7717--- a/arch/parisc/include/asm/elf.h
7718+++ b/arch/parisc/include/asm/elf.h
7719@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7720
7721 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7722
7723+#ifdef CONFIG_PAX_ASLR
7724+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7725+
7726+#define PAX_DELTA_MMAP_LEN 16
7727+#define PAX_DELTA_STACK_LEN 16
7728+#endif
7729+
7730 /* This yields a mask that user programs can use to figure out what
7731 instruction set this CPU supports. This could be done in user space,
7732 but it's not easy, and we've already done it here. */
7733diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7734index f213f5b..0af3e8e 100644
7735--- a/arch/parisc/include/asm/pgalloc.h
7736+++ b/arch/parisc/include/asm/pgalloc.h
7737@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7738 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7739 }
7740
7741+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7742+{
7743+ pgd_populate(mm, pgd, pmd);
7744+}
7745+
7746 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7747 {
7748 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7749@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7750 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7751 #define pmd_free(mm, x) do { } while (0)
7752 #define pgd_populate(mm, pmd, pte) BUG()
7753+#define pgd_populate_kernel(mm, pmd, pte) BUG()
7754
7755 #endif
7756
7757diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7758index 22b89d1..ce34230 100644
7759--- a/arch/parisc/include/asm/pgtable.h
7760+++ b/arch/parisc/include/asm/pgtable.h
7761@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7762 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7763 #define PAGE_COPY PAGE_EXECREAD
7764 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7765+
7766+#ifdef CONFIG_PAX_PAGEEXEC
7767+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7768+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7769+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7770+#else
7771+# define PAGE_SHARED_NOEXEC PAGE_SHARED
7772+# define PAGE_COPY_NOEXEC PAGE_COPY
7773+# define PAGE_READONLY_NOEXEC PAGE_READONLY
7774+#endif
7775+
7776 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7777 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7778 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7779diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7780index 4006964..fcb3cc2 100644
7781--- a/arch/parisc/include/asm/uaccess.h
7782+++ b/arch/parisc/include/asm/uaccess.h
7783@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7784 const void __user *from,
7785 unsigned long n)
7786 {
7787- int sz = __compiletime_object_size(to);
7788+ size_t sz = __compiletime_object_size(to);
7789 int ret = -EFAULT;
7790
7791- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7792+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7793 ret = __copy_from_user(to, from, n);
7794 else
7795 copy_from_user_overflow();
7796diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7797index 50dfafc..b9fc230 100644
7798--- a/arch/parisc/kernel/module.c
7799+++ b/arch/parisc/kernel/module.c
7800@@ -98,16 +98,38 @@
7801
7802 /* three functions to determine where in the module core
7803 * or init pieces the location is */
7804+static inline int in_init_rx(struct module *me, void *loc)
7805+{
7806+ return (loc >= me->module_init_rx &&
7807+ loc < (me->module_init_rx + me->init_size_rx));
7808+}
7809+
7810+static inline int in_init_rw(struct module *me, void *loc)
7811+{
7812+ return (loc >= me->module_init_rw &&
7813+ loc < (me->module_init_rw + me->init_size_rw));
7814+}
7815+
7816 static inline int in_init(struct module *me, void *loc)
7817 {
7818- return (loc >= me->module_init &&
7819- loc <= (me->module_init + me->init_size));
7820+ return in_init_rx(me, loc) || in_init_rw(me, loc);
7821+}
7822+
7823+static inline int in_core_rx(struct module *me, void *loc)
7824+{
7825+ return (loc >= me->module_core_rx &&
7826+ loc < (me->module_core_rx + me->core_size_rx));
7827+}
7828+
7829+static inline int in_core_rw(struct module *me, void *loc)
7830+{
7831+ return (loc >= me->module_core_rw &&
7832+ loc < (me->module_core_rw + me->core_size_rw));
7833 }
7834
7835 static inline int in_core(struct module *me, void *loc)
7836 {
7837- return (loc >= me->module_core &&
7838- loc <= (me->module_core + me->core_size));
7839+ return in_core_rx(me, loc) || in_core_rw(me, loc);
7840 }
7841
7842 static inline int in_local(struct module *me, void *loc)
7843@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7844 }
7845
7846 /* align things a bit */
7847- me->core_size = ALIGN(me->core_size, 16);
7848- me->arch.got_offset = me->core_size;
7849- me->core_size += gots * sizeof(struct got_entry);
7850+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7851+ me->arch.got_offset = me->core_size_rw;
7852+ me->core_size_rw += gots * sizeof(struct got_entry);
7853
7854- me->core_size = ALIGN(me->core_size, 16);
7855- me->arch.fdesc_offset = me->core_size;
7856- me->core_size += fdescs * sizeof(Elf_Fdesc);
7857+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
7858+ me->arch.fdesc_offset = me->core_size_rw;
7859+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7860
7861 me->arch.got_max = gots;
7862 me->arch.fdesc_max = fdescs;
7863@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7864
7865 BUG_ON(value == 0);
7866
7867- got = me->module_core + me->arch.got_offset;
7868+ got = me->module_core_rw + me->arch.got_offset;
7869 for (i = 0; got[i].addr; i++)
7870 if (got[i].addr == value)
7871 goto out;
7872@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7873 #ifdef CONFIG_64BIT
7874 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7875 {
7876- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7877+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7878
7879 if (!value) {
7880 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7881@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7882
7883 /* Create new one */
7884 fdesc->addr = value;
7885- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7886+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7887 return (Elf_Addr)fdesc;
7888 }
7889 #endif /* CONFIG_64BIT */
7890@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7891
7892 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7893 end = table + sechdrs[me->arch.unwind_section].sh_size;
7894- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7895+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7896
7897 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7898 me->arch.unwind_section, table, end, gp);
7899diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7900index e1ffea2..46ed66e 100644
7901--- a/arch/parisc/kernel/sys_parisc.c
7902+++ b/arch/parisc/kernel/sys_parisc.c
7903@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7904 unsigned long task_size = TASK_SIZE;
7905 int do_color_align, last_mmap;
7906 struct vm_unmapped_area_info info;
7907+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7908
7909 if (len > task_size)
7910 return -ENOMEM;
7911@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7912 goto found_addr;
7913 }
7914
7915+#ifdef CONFIG_PAX_RANDMMAP
7916+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7917+#endif
7918+
7919 if (addr) {
7920 if (do_color_align && last_mmap)
7921 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7922@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7923 info.high_limit = mmap_upper_limit();
7924 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7925 info.align_offset = shared_align_offset(last_mmap, pgoff);
7926+ info.threadstack_offset = offset;
7927 addr = vm_unmapped_area(&info);
7928
7929 found_addr:
7930@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7931 unsigned long addr = addr0;
7932 int do_color_align, last_mmap;
7933 struct vm_unmapped_area_info info;
7934+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7935
7936 #ifdef CONFIG_64BIT
7937 /* This should only ever run for 32-bit processes. */
7938@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7939 }
7940
7941 /* requesting a specific address */
7942+#ifdef CONFIG_PAX_RANDMMAP
7943+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7944+#endif
7945+
7946 if (addr) {
7947 if (do_color_align && last_mmap)
7948 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
7949@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7950 info.high_limit = mm->mmap_base;
7951 info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
7952 info.align_offset = shared_align_offset(last_mmap, pgoff);
7953+ info.threadstack_offset = offset;
7954 addr = vm_unmapped_area(&info);
7955 if (!(addr & ~PAGE_MASK))
7956 goto found_addr;
7957@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7958 mm->mmap_legacy_base = mmap_legacy_base();
7959 mm->mmap_base = mmap_upper_limit();
7960
7961+#ifdef CONFIG_PAX_RANDMMAP
7962+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
7963+ mm->mmap_legacy_base += mm->delta_mmap;
7964+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7965+ }
7966+#endif
7967+
7968 if (mmap_is_legacy()) {
7969 mm->mmap_base = mm->mmap_legacy_base;
7970 mm->get_unmapped_area = arch_get_unmapped_area;
7971diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7972index 47ee620..1107387 100644
7973--- a/arch/parisc/kernel/traps.c
7974+++ b/arch/parisc/kernel/traps.c
7975@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7976
7977 down_read(&current->mm->mmap_sem);
7978 vma = find_vma(current->mm,regs->iaoq[0]);
7979- if (vma && (regs->iaoq[0] >= vma->vm_start)
7980- && (vma->vm_flags & VM_EXEC)) {
7981-
7982+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7983 fault_address = regs->iaoq[0];
7984 fault_space = regs->iasq[0];
7985
7986diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7987index 3ca9c11..d163ef7 100644
7988--- a/arch/parisc/mm/fault.c
7989+++ b/arch/parisc/mm/fault.c
7990@@ -15,6 +15,7 @@
7991 #include <linux/sched.h>
7992 #include <linux/interrupt.h>
7993 #include <linux/module.h>
7994+#include <linux/unistd.h>
7995
7996 #include <asm/uaccess.h>
7997 #include <asm/traps.h>
7998@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
7999 static unsigned long
8000 parisc_acctyp(unsigned long code, unsigned int inst)
8001 {
8002- if (code == 6 || code == 16)
8003+ if (code == 6 || code == 7 || code == 16)
8004 return VM_EXEC;
8005
8006 switch (inst & 0xf0000000) {
8007@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
8008 }
8009 #endif
8010
8011+#ifdef CONFIG_PAX_PAGEEXEC
8012+/*
8013+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
8014+ *
8015+ * returns 1 when task should be killed
8016+ * 2 when rt_sigreturn trampoline was detected
8017+ * 3 when unpatched PLT trampoline was detected
8018+ */
8019+static int pax_handle_fetch_fault(struct pt_regs *regs)
8020+{
8021+
8022+#ifdef CONFIG_PAX_EMUPLT
8023+ int err;
8024+
8025+ do { /* PaX: unpatched PLT emulation */
8026+ unsigned int bl, depwi;
8027+
8028+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
8029+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
8030+
8031+ if (err)
8032+ break;
8033+
8034+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
8035+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
8036+
8037+ err = get_user(ldw, (unsigned int *)addr);
8038+ err |= get_user(bv, (unsigned int *)(addr+4));
8039+ err |= get_user(ldw2, (unsigned int *)(addr+8));
8040+
8041+ if (err)
8042+ break;
8043+
8044+ if (ldw == 0x0E801096U &&
8045+ bv == 0xEAC0C000U &&
8046+ ldw2 == 0x0E881095U)
8047+ {
8048+ unsigned int resolver, map;
8049+
8050+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
8051+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
8052+ if (err)
8053+ break;
8054+
8055+ regs->gr[20] = instruction_pointer(regs)+8;
8056+ regs->gr[21] = map;
8057+ regs->gr[22] = resolver;
8058+ regs->iaoq[0] = resolver | 3UL;
8059+ regs->iaoq[1] = regs->iaoq[0] + 4;
8060+ return 3;
8061+ }
8062+ }
8063+ } while (0);
8064+#endif
8065+
8066+#ifdef CONFIG_PAX_EMUTRAMP
8067+
8068+#ifndef CONFIG_PAX_EMUSIGRT
8069+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
8070+ return 1;
8071+#endif
8072+
8073+ do { /* PaX: rt_sigreturn emulation */
8074+ unsigned int ldi1, ldi2, bel, nop;
8075+
8076+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
8077+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
8078+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
8079+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
8080+
8081+ if (err)
8082+ break;
8083+
8084+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
8085+ ldi2 == 0x3414015AU &&
8086+ bel == 0xE4008200U &&
8087+ nop == 0x08000240U)
8088+ {
8089+ regs->gr[25] = (ldi1 & 2) >> 1;
8090+ regs->gr[20] = __NR_rt_sigreturn;
8091+ regs->gr[31] = regs->iaoq[1] + 16;
8092+ regs->sr[0] = regs->iasq[1];
8093+ regs->iaoq[0] = 0x100UL;
8094+ regs->iaoq[1] = regs->iaoq[0] + 4;
8095+ regs->iasq[0] = regs->sr[2];
8096+ regs->iasq[1] = regs->sr[2];
8097+ return 2;
8098+ }
8099+ } while (0);
8100+#endif
8101+
8102+ return 1;
8103+}
8104+
8105+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8106+{
8107+ unsigned long i;
8108+
8109+ printk(KERN_ERR "PAX: bytes at PC: ");
8110+ for (i = 0; i < 5; i++) {
8111+ unsigned int c;
8112+ if (get_user(c, (unsigned int *)pc+i))
8113+ printk(KERN_CONT "???????? ");
8114+ else
8115+ printk(KERN_CONT "%08x ", c);
8116+ }
8117+ printk("\n");
8118+}
8119+#endif
8120+
8121 int fixup_exception(struct pt_regs *regs)
8122 {
8123 const struct exception_table_entry *fix;
8124@@ -234,8 +345,33 @@ retry:
8125
8126 good_area:
8127
8128- if ((vma->vm_flags & acc_type) != acc_type)
8129+ if ((vma->vm_flags & acc_type) != acc_type) {
8130+
8131+#ifdef CONFIG_PAX_PAGEEXEC
8132+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
8133+ (address & ~3UL) == instruction_pointer(regs))
8134+ {
8135+ up_read(&mm->mmap_sem);
8136+ switch (pax_handle_fetch_fault(regs)) {
8137+
8138+#ifdef CONFIG_PAX_EMUPLT
8139+ case 3:
8140+ return;
8141+#endif
8142+
8143+#ifdef CONFIG_PAX_EMUTRAMP
8144+ case 2:
8145+ return;
8146+#endif
8147+
8148+ }
8149+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
8150+ do_group_exit(SIGKILL);
8151+ }
8152+#endif
8153+
8154 goto bad_area;
8155+ }
8156
8157 /*
8158 * If for any reason at all we couldn't handle the fault, make
8159diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
8160index 4bc7b62..107e0b2 100644
8161--- a/arch/powerpc/Kconfig
8162+++ b/arch/powerpc/Kconfig
8163@@ -399,6 +399,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
8164 config KEXEC
8165 bool "kexec system call"
8166 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
8167+ depends on !GRKERNSEC_KMEM
8168 help
8169 kexec is a system call that implements the ability to shutdown your
8170 current kernel, and to start another kernel. It is like a reboot
8171diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
8172index 28992d0..434c881 100644
8173--- a/arch/powerpc/include/asm/atomic.h
8174+++ b/arch/powerpc/include/asm/atomic.h
8175@@ -12,6 +12,11 @@
8176
8177 #define ATOMIC_INIT(i) { (i) }
8178
8179+#define _ASM_EXTABLE(from, to) \
8180+" .section __ex_table,\"a\"\n" \
8181+ PPC_LONG" " #from ", " #to"\n" \
8182+" .previous\n"
8183+
8184 static __inline__ int atomic_read(const atomic_t *v)
8185 {
8186 int t;
8187@@ -21,16 +26,61 @@ static __inline__ int atomic_read(const atomic_t *v)
8188 return t;
8189 }
8190
8191+static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
8192+{
8193+ int t;
8194+
8195+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8196+
8197+ return t;
8198+}
8199+
8200 static __inline__ void atomic_set(atomic_t *v, int i)
8201 {
8202 __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8203 }
8204
8205+static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8206+{
8207+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8208+}
8209+
8210 static __inline__ void atomic_add(int a, atomic_t *v)
8211 {
8212 int t;
8213
8214 __asm__ __volatile__(
8215+"1: lwarx %0,0,%3 # atomic_add\n"
8216+
8217+#ifdef CONFIG_PAX_REFCOUNT
8218+" mcrxr cr0\n"
8219+" addo. %0,%2,%0\n"
8220+" bf 4*cr0+so, 3f\n"
8221+"2:.long " "0x00c00b00""\n"
8222+#else
8223+" add %0,%2,%0\n"
8224+#endif
8225+
8226+"3:\n"
8227+ PPC405_ERR77(0,%3)
8228+" stwcx. %0,0,%3 \n\
8229+ bne- 1b"
8230+
8231+#ifdef CONFIG_PAX_REFCOUNT
8232+"\n4:\n"
8233+ _ASM_EXTABLE(2b, 4b)
8234+#endif
8235+
8236+ : "=&r" (t), "+m" (v->counter)
8237+ : "r" (a), "r" (&v->counter)
8238+ : "cc");
8239+}
8240+
8241+static __inline__ void atomic_add_unchecked(int a, atomic_unchecked_t *v)
8242+{
8243+ int t;
8244+
8245+ __asm__ __volatile__(
8246 "1: lwarx %0,0,%3 # atomic_add\n\
8247 add %0,%2,%0\n"
8248 PPC405_ERR77(0,%3)
8249@@ -41,12 +91,49 @@ static __inline__ void atomic_add(int a, atomic_t *v)
8250 : "cc");
8251 }
8252
8253+/* Same as atomic_add but return the value */
8254 static __inline__ int atomic_add_return(int a, atomic_t *v)
8255 {
8256 int t;
8257
8258 __asm__ __volatile__(
8259 PPC_ATOMIC_ENTRY_BARRIER
8260+"1: lwarx %0,0,%2 # atomic_add_return\n"
8261+
8262+#ifdef CONFIG_PAX_REFCOUNT
8263+" mcrxr cr0\n"
8264+" addo. %0,%1,%0\n"
8265+" bf 4*cr0+so, 3f\n"
8266+"2:.long " "0x00c00b00""\n"
8267+#else
8268+" add %0,%1,%0\n"
8269+#endif
8270+
8271+"3:\n"
8272+ PPC405_ERR77(0,%2)
8273+" stwcx. %0,0,%2 \n\
8274+ bne- 1b\n"
8275+"4:"
8276+
8277+#ifdef CONFIG_PAX_REFCOUNT
8278+ _ASM_EXTABLE(2b, 4b)
8279+#endif
8280+
8281+ PPC_ATOMIC_EXIT_BARRIER
8282+ : "=&r" (t)
8283+ : "r" (a), "r" (&v->counter)
8284+ : "cc", "memory");
8285+
8286+ return t;
8287+}
8288+
8289+/* Same as atomic_add_unchecked but return the value */
8290+static __inline__ int atomic_add_return_unchecked(int a, atomic_unchecked_t *v)
8291+{
8292+ int t;
8293+
8294+ __asm__ __volatile__(
8295+ PPC_ATOMIC_ENTRY_BARRIER
8296 "1: lwarx %0,0,%2 # atomic_add_return\n\
8297 add %0,%1,%0\n"
8298 PPC405_ERR77(0,%2)
8299@@ -67,6 +154,37 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
8300 int t;
8301
8302 __asm__ __volatile__(
8303+"1: lwarx %0,0,%3 # atomic_sub\n"
8304+
8305+#ifdef CONFIG_PAX_REFCOUNT
8306+" mcrxr cr0\n"
8307+" subfo. %0,%2,%0\n"
8308+" bf 4*cr0+so, 3f\n"
8309+"2:.long " "0x00c00b00""\n"
8310+#else
8311+" subf %0,%2,%0\n"
8312+#endif
8313+
8314+"3:\n"
8315+ PPC405_ERR77(0,%3)
8316+" stwcx. %0,0,%3 \n\
8317+ bne- 1b\n"
8318+"4:"
8319+
8320+#ifdef CONFIG_PAX_REFCOUNT
8321+ _ASM_EXTABLE(2b, 4b)
8322+#endif
8323+
8324+ : "=&r" (t), "+m" (v->counter)
8325+ : "r" (a), "r" (&v->counter)
8326+ : "cc");
8327+}
8328+
8329+static __inline__ void atomic_sub_unchecked(int a, atomic_unchecked_t *v)
8330+{
8331+ int t;
8332+
8333+ __asm__ __volatile__(
8334 "1: lwarx %0,0,%3 # atomic_sub\n\
8335 subf %0,%2,%0\n"
8336 PPC405_ERR77(0,%3)
8337@@ -77,12 +195,49 @@ static __inline__ void atomic_sub(int a, atomic_t *v)
8338 : "cc");
8339 }
8340
8341+/* Same as atomic_sub but return the value */
8342 static __inline__ int atomic_sub_return(int a, atomic_t *v)
8343 {
8344 int t;
8345
8346 __asm__ __volatile__(
8347 PPC_ATOMIC_ENTRY_BARRIER
8348+"1: lwarx %0,0,%2 # atomic_sub_return\n"
8349+
8350+#ifdef CONFIG_PAX_REFCOUNT
8351+" mcrxr cr0\n"
8352+" subfo. %0,%1,%0\n"
8353+" bf 4*cr0+so, 3f\n"
8354+"2:.long " "0x00c00b00""\n"
8355+#else
8356+" subf %0,%1,%0\n"
8357+#endif
8358+
8359+"3:\n"
8360+ PPC405_ERR77(0,%2)
8361+" stwcx. %0,0,%2 \n\
8362+ bne- 1b\n"
8363+ PPC_ATOMIC_EXIT_BARRIER
8364+"4:"
8365+
8366+#ifdef CONFIG_PAX_REFCOUNT
8367+ _ASM_EXTABLE(2b, 4b)
8368+#endif
8369+
8370+ : "=&r" (t)
8371+ : "r" (a), "r" (&v->counter)
8372+ : "cc", "memory");
8373+
8374+ return t;
8375+}
8376+
8377+/* Same as atomic_sub_unchecked but return the value */
8378+static __inline__ int atomic_sub_return_unchecked(int a, atomic_unchecked_t *v)
8379+{
8380+ int t;
8381+
8382+ __asm__ __volatile__(
8383+ PPC_ATOMIC_ENTRY_BARRIER
8384 "1: lwarx %0,0,%2 # atomic_sub_return\n\
8385 subf %0,%1,%0\n"
8386 PPC405_ERR77(0,%2)
8387@@ -96,38 +251,23 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
8388 return t;
8389 }
8390
8391-static __inline__ void atomic_inc(atomic_t *v)
8392-{
8393- int t;
8394+/*
8395+ * atomic_inc - increment atomic variable
8396+ * @v: pointer of type atomic_t
8397+ *
8398+ * Automatically increments @v by 1
8399+ */
8400+#define atomic_inc(v) atomic_add(1, (v))
8401+#define atomic_inc_return(v) atomic_add_return(1, (v))
8402
8403- __asm__ __volatile__(
8404-"1: lwarx %0,0,%2 # atomic_inc\n\
8405- addic %0,%0,1\n"
8406- PPC405_ERR77(0,%2)
8407-" stwcx. %0,0,%2 \n\
8408- bne- 1b"
8409- : "=&r" (t), "+m" (v->counter)
8410- : "r" (&v->counter)
8411- : "cc", "xer");
8412+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
8413+{
8414+ atomic_add_unchecked(1, v);
8415 }
8416
8417-static __inline__ int atomic_inc_return(atomic_t *v)
8418+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8419 {
8420- int t;
8421-
8422- __asm__ __volatile__(
8423- PPC_ATOMIC_ENTRY_BARRIER
8424-"1: lwarx %0,0,%1 # atomic_inc_return\n\
8425- addic %0,%0,1\n"
8426- PPC405_ERR77(0,%1)
8427-" stwcx. %0,0,%1 \n\
8428- bne- 1b"
8429- PPC_ATOMIC_EXIT_BARRIER
8430- : "=&r" (t)
8431- : "r" (&v->counter)
8432- : "cc", "xer", "memory");
8433-
8434- return t;
8435+ return atomic_add_return_unchecked(1, v);
8436 }
8437
8438 /*
8439@@ -140,43 +280,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
8440 */
8441 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8442
8443-static __inline__ void atomic_dec(atomic_t *v)
8444+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8445 {
8446- int t;
8447-
8448- __asm__ __volatile__(
8449-"1: lwarx %0,0,%2 # atomic_dec\n\
8450- addic %0,%0,-1\n"
8451- PPC405_ERR77(0,%2)\
8452-" stwcx. %0,0,%2\n\
8453- bne- 1b"
8454- : "=&r" (t), "+m" (v->counter)
8455- : "r" (&v->counter)
8456- : "cc", "xer");
8457+ return atomic_add_return_unchecked(1, v) == 0;
8458 }
8459
8460-static __inline__ int atomic_dec_return(atomic_t *v)
8461+/*
8462+ * atomic_dec - decrement atomic variable
8463+ * @v: pointer of type atomic_t
8464+ *
8465+ * Atomically decrements @v by 1
8466+ */
8467+#define atomic_dec(v) atomic_sub(1, (v))
8468+#define atomic_dec_return(v) atomic_sub_return(1, (v))
8469+
8470+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
8471 {
8472- int t;
8473-
8474- __asm__ __volatile__(
8475- PPC_ATOMIC_ENTRY_BARRIER
8476-"1: lwarx %0,0,%1 # atomic_dec_return\n\
8477- addic %0,%0,-1\n"
8478- PPC405_ERR77(0,%1)
8479-" stwcx. %0,0,%1\n\
8480- bne- 1b"
8481- PPC_ATOMIC_EXIT_BARRIER
8482- : "=&r" (t)
8483- : "r" (&v->counter)
8484- : "cc", "xer", "memory");
8485-
8486- return t;
8487+ atomic_sub_unchecked(1, v);
8488 }
8489
8490 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8491 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
8492
8493+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8494+{
8495+ return cmpxchg(&(v->counter), old, new);
8496+}
8497+
8498+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8499+{
8500+ return xchg(&(v->counter), new);
8501+}
8502+
8503 /**
8504 * __atomic_add_unless - add unless the number is a given value
8505 * @v: pointer of type atomic_t
8506@@ -271,6 +406,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
8507 }
8508 #define atomic_dec_if_positive atomic_dec_if_positive
8509
8510+#define smp_mb__before_atomic_dec() smp_mb()
8511+#define smp_mb__after_atomic_dec() smp_mb()
8512+#define smp_mb__before_atomic_inc() smp_mb()
8513+#define smp_mb__after_atomic_inc() smp_mb()
8514+
8515 #ifdef __powerpc64__
8516
8517 #define ATOMIC64_INIT(i) { (i) }
8518@@ -284,11 +424,25 @@ static __inline__ long atomic64_read(const atomic64_t *v)
8519 return t;
8520 }
8521
8522+static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8523+{
8524+ long t;
8525+
8526+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
8527+
8528+ return t;
8529+}
8530+
8531 static __inline__ void atomic64_set(atomic64_t *v, long i)
8532 {
8533 __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8534 }
8535
8536+static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8537+{
8538+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
8539+}
8540+
8541 static __inline__ void atomic64_add(long a, atomic64_t *v)
8542 {
8543 long t;
8544@@ -303,12 +457,76 @@ static __inline__ void atomic64_add(long a, atomic64_t *v)
8545 : "cc");
8546 }
8547
8548+static __inline__ void atomic64_add_unchecked(long a, atomic64_unchecked_t *v)
8549+{
8550+ long t;
8551+
8552+ __asm__ __volatile__(
8553+"1: ldarx %0,0,%3 # atomic64_add\n"
8554+
8555+#ifdef CONFIG_PAX_REFCOUNT
8556+" mcrxr cr0\n"
8557+" addo. %0,%2,%0\n"
8558+" bf 4*cr0+so, 3f\n"
8559+"2:.long " "0x00c00b00""\n"
8560+#else
8561+" add %0,%2,%0\n"
8562+#endif
8563+
8564+"3:\n"
8565+" stdcx. %0,0,%3 \n\
8566+ bne- 1b\n"
8567+"4:"
8568+
8569+#ifdef CONFIG_PAX_REFCOUNT
8570+ _ASM_EXTABLE(2b, 4b)
8571+#endif
8572+
8573+ : "=&r" (t), "+m" (v->counter)
8574+ : "r" (a), "r" (&v->counter)
8575+ : "cc");
8576+}
8577+
8578 static __inline__ long atomic64_add_return(long a, atomic64_t *v)
8579 {
8580 long t;
8581
8582 __asm__ __volatile__(
8583 PPC_ATOMIC_ENTRY_BARRIER
8584+"1: ldarx %0,0,%2 # atomic64_add_return\n"
8585+
8586+#ifdef CONFIG_PAX_REFCOUNT
8587+" mcrxr cr0\n"
8588+" addo. %0,%1,%0\n"
8589+" bf 4*cr0+so, 3f\n"
8590+"2:.long " "0x00c00b00""\n"
8591+#else
8592+" add %0,%1,%0\n"
8593+#endif
8594+
8595+"3:\n"
8596+" stdcx. %0,0,%2 \n\
8597+ bne- 1b\n"
8598+ PPC_ATOMIC_EXIT_BARRIER
8599+"4:"
8600+
8601+#ifdef CONFIG_PAX_REFCOUNT
8602+ _ASM_EXTABLE(2b, 4b)
8603+#endif
8604+
8605+ : "=&r" (t)
8606+ : "r" (a), "r" (&v->counter)
8607+ : "cc", "memory");
8608+
8609+ return t;
8610+}
8611+
8612+static __inline__ long atomic64_add_return_unchecked(long a, atomic64_unchecked_t *v)
8613+{
8614+ long t;
8615+
8616+ __asm__ __volatile__(
8617+ PPC_ATOMIC_ENTRY_BARRIER
8618 "1: ldarx %0,0,%2 # atomic64_add_return\n\
8619 add %0,%1,%0\n\
8620 stdcx. %0,0,%2 \n\
8621@@ -328,6 +546,36 @@ static __inline__ void atomic64_sub(long a, atomic64_t *v)
8622 long t;
8623
8624 __asm__ __volatile__(
8625+"1: ldarx %0,0,%3 # atomic64_sub\n"
8626+
8627+#ifdef CONFIG_PAX_REFCOUNT
8628+" mcrxr cr0\n"
8629+" subfo. %0,%2,%0\n"
8630+" bf 4*cr0+so, 3f\n"
8631+"2:.long " "0x00c00b00""\n"
8632+#else
8633+" subf %0,%2,%0\n"
8634+#endif
8635+
8636+"3:\n"
8637+" stdcx. %0,0,%3 \n\
8638+ bne- 1b"
8639+"4:"
8640+
8641+#ifdef CONFIG_PAX_REFCOUNT
8642+ _ASM_EXTABLE(2b, 4b)
8643+#endif
8644+
8645+ : "=&r" (t), "+m" (v->counter)
8646+ : "r" (a), "r" (&v->counter)
8647+ : "cc");
8648+}
8649+
8650+static __inline__ void atomic64_sub_unchecked(long a, atomic64_unchecked_t *v)
8651+{
8652+ long t;
8653+
8654+ __asm__ __volatile__(
8655 "1: ldarx %0,0,%3 # atomic64_sub\n\
8656 subf %0,%2,%0\n\
8657 stdcx. %0,0,%3 \n\
8658@@ -343,6 +591,40 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
8659
8660 __asm__ __volatile__(
8661 PPC_ATOMIC_ENTRY_BARRIER
8662+"1: ldarx %0,0,%2 # atomic64_sub_return\n"
8663+
8664+#ifdef CONFIG_PAX_REFCOUNT
8665+" mcrxr cr0\n"
8666+" subfo. %0,%1,%0\n"
8667+" bf 4*cr0+so, 3f\n"
8668+"2:.long " "0x00c00b00""\n"
8669+#else
8670+" subf %0,%1,%0\n"
8671+#endif
8672+
8673+"3:\n"
8674+" stdcx. %0,0,%2 \n\
8675+ bne- 1b\n"
8676+ PPC_ATOMIC_EXIT_BARRIER
8677+"4:"
8678+
8679+#ifdef CONFIG_PAX_REFCOUNT
8680+ _ASM_EXTABLE(2b, 4b)
8681+#endif
8682+
8683+ : "=&r" (t)
8684+ : "r" (a), "r" (&v->counter)
8685+ : "cc", "memory");
8686+
8687+ return t;
8688+}
8689+
8690+static __inline__ long atomic64_sub_return_unchecked(long a, atomic64_unchecked_t *v)
8691+{
8692+ long t;
8693+
8694+ __asm__ __volatile__(
8695+ PPC_ATOMIC_ENTRY_BARRIER
8696 "1: ldarx %0,0,%2 # atomic64_sub_return\n\
8697 subf %0,%1,%0\n\
8698 stdcx. %0,0,%2 \n\
8699@@ -355,36 +637,23 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
8700 return t;
8701 }
8702
8703-static __inline__ void atomic64_inc(atomic64_t *v)
8704-{
8705- long t;
8706+/*
8707+ * atomic64_inc - increment atomic variable
8708+ * @v: pointer of type atomic64_t
8709+ *
8710+ * Automatically increments @v by 1
8711+ */
8712+#define atomic64_inc(v) atomic64_add(1, (v))
8713+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
8714
8715- __asm__ __volatile__(
8716-"1: ldarx %0,0,%2 # atomic64_inc\n\
8717- addic %0,%0,1\n\
8718- stdcx. %0,0,%2 \n\
8719- bne- 1b"
8720- : "=&r" (t), "+m" (v->counter)
8721- : "r" (&v->counter)
8722- : "cc", "xer");
8723+static __inline__ void atomic64_inc_unchecked(atomic64_unchecked_t *v)
8724+{
8725+ atomic64_add_unchecked(1, v);
8726 }
8727
8728-static __inline__ long atomic64_inc_return(atomic64_t *v)
8729+static __inline__ int atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8730 {
8731- long t;
8732-
8733- __asm__ __volatile__(
8734- PPC_ATOMIC_ENTRY_BARRIER
8735-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
8736- addic %0,%0,1\n\
8737- stdcx. %0,0,%1 \n\
8738- bne- 1b"
8739- PPC_ATOMIC_EXIT_BARRIER
8740- : "=&r" (t)
8741- : "r" (&v->counter)
8742- : "cc", "xer", "memory");
8743-
8744- return t;
8745+ return atomic64_add_return_unchecked(1, v);
8746 }
8747
8748 /*
8749@@ -397,36 +666,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
8750 */
8751 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
8752
8753-static __inline__ void atomic64_dec(atomic64_t *v)
8754+/*
8755+ * atomic64_dec - decrement atomic variable
8756+ * @v: pointer of type atomic64_t
8757+ *
8758+ * Atomically decrements @v by 1
8759+ */
8760+#define atomic64_dec(v) atomic64_sub(1, (v))
8761+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
8762+
8763+static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
8764 {
8765- long t;
8766-
8767- __asm__ __volatile__(
8768-"1: ldarx %0,0,%2 # atomic64_dec\n\
8769- addic %0,%0,-1\n\
8770- stdcx. %0,0,%2\n\
8771- bne- 1b"
8772- : "=&r" (t), "+m" (v->counter)
8773- : "r" (&v->counter)
8774- : "cc", "xer");
8775-}
8776-
8777-static __inline__ long atomic64_dec_return(atomic64_t *v)
8778-{
8779- long t;
8780-
8781- __asm__ __volatile__(
8782- PPC_ATOMIC_ENTRY_BARRIER
8783-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
8784- addic %0,%0,-1\n\
8785- stdcx. %0,0,%1\n\
8786- bne- 1b"
8787- PPC_ATOMIC_EXIT_BARRIER
8788- : "=&r" (t)
8789- : "r" (&v->counter)
8790- : "cc", "xer", "memory");
8791-
8792- return t;
8793+ atomic64_sub_unchecked(1, v);
8794 }
8795
8796 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
8797@@ -459,6 +710,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
8798 #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
8799 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
8800
8801+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8802+{
8803+ return cmpxchg(&(v->counter), old, new);
8804+}
8805+
8806+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8807+{
8808+ return xchg(&(v->counter), new);
8809+}
8810+
8811 /**
8812 * atomic64_add_unless - add unless the number is a given value
8813 * @v: pointer of type atomic64_t
8814diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
8815index bab79a1..4a3eabc 100644
8816--- a/arch/powerpc/include/asm/barrier.h
8817+++ b/arch/powerpc/include/asm/barrier.h
8818@@ -73,7 +73,7 @@
8819 do { \
8820 compiletime_assert_atomic_type(*p); \
8821 __lwsync(); \
8822- ACCESS_ONCE(*p) = (v); \
8823+ ACCESS_ONCE_RW(*p) = (v); \
8824 } while (0)
8825
8826 #define smp_load_acquire(p) \
8827diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
8828index 34a05a1..a1f2c67 100644
8829--- a/arch/powerpc/include/asm/cache.h
8830+++ b/arch/powerpc/include/asm/cache.h
8831@@ -4,6 +4,7 @@
8832 #ifdef __KERNEL__
8833
8834 #include <asm/reg.h>
8835+#include <linux/const.h>
8836
8837 /* bytes per L1 cache line */
8838 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
8839@@ -23,7 +24,7 @@
8840 #define L1_CACHE_SHIFT 7
8841 #endif
8842
8843-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8844+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8845
8846 #define SMP_CACHE_BYTES L1_CACHE_BYTES
8847
8848diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
8849index 888d8f3..66f581c 100644
8850--- a/arch/powerpc/include/asm/elf.h
8851+++ b/arch/powerpc/include/asm/elf.h
8852@@ -28,8 +28,19 @@
8853 the loader. We need to make sure that it is out of the way of the program
8854 that it will "exec", and that there is sufficient room for the brk. */
8855
8856-extern unsigned long randomize_et_dyn(unsigned long base);
8857-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
8858+#define ELF_ET_DYN_BASE (0x20000000)
8859+
8860+#ifdef CONFIG_PAX_ASLR
8861+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
8862+
8863+#ifdef __powerpc64__
8864+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
8865+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
8866+#else
8867+#define PAX_DELTA_MMAP_LEN 15
8868+#define PAX_DELTA_STACK_LEN 15
8869+#endif
8870+#endif
8871
8872 #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
8873
8874@@ -129,10 +140,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
8875 (0x7ff >> (PAGE_SHIFT - 12)) : \
8876 (0x3ffff >> (PAGE_SHIFT - 12)))
8877
8878-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8879-#define arch_randomize_brk arch_randomize_brk
8880-
8881-
8882 #ifdef CONFIG_SPU_BASE
8883 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
8884 #define NT_SPU 1
8885diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
8886index 8196e9c..d83a9f3 100644
8887--- a/arch/powerpc/include/asm/exec.h
8888+++ b/arch/powerpc/include/asm/exec.h
8889@@ -4,6 +4,6 @@
8890 #ifndef _ASM_POWERPC_EXEC_H
8891 #define _ASM_POWERPC_EXEC_H
8892
8893-extern unsigned long arch_align_stack(unsigned long sp);
8894+#define arch_align_stack(x) ((x) & ~0xfUL)
8895
8896 #endif /* _ASM_POWERPC_EXEC_H */
8897diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
8898index 5acabbd..7ea14fa 100644
8899--- a/arch/powerpc/include/asm/kmap_types.h
8900+++ b/arch/powerpc/include/asm/kmap_types.h
8901@@ -10,7 +10,7 @@
8902 * 2 of the License, or (at your option) any later version.
8903 */
8904
8905-#define KM_TYPE_NR 16
8906+#define KM_TYPE_NR 17
8907
8908 #endif /* __KERNEL__ */
8909 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
8910diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
8911index b8da913..c02b593 100644
8912--- a/arch/powerpc/include/asm/local.h
8913+++ b/arch/powerpc/include/asm/local.h
8914@@ -9,21 +9,65 @@ typedef struct
8915 atomic_long_t a;
8916 } local_t;
8917
8918+typedef struct
8919+{
8920+ atomic_long_unchecked_t a;
8921+} local_unchecked_t;
8922+
8923 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
8924
8925 #define local_read(l) atomic_long_read(&(l)->a)
8926+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
8927 #define local_set(l,i) atomic_long_set(&(l)->a, (i))
8928+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
8929
8930 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
8931+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
8932 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
8933+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
8934 #define local_inc(l) atomic_long_inc(&(l)->a)
8935+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
8936 #define local_dec(l) atomic_long_dec(&(l)->a)
8937+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
8938
8939 static __inline__ long local_add_return(long a, local_t *l)
8940 {
8941 long t;
8942
8943 __asm__ __volatile__(
8944+"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
8945+
8946+#ifdef CONFIG_PAX_REFCOUNT
8947+" mcrxr cr0\n"
8948+" addo. %0,%1,%0\n"
8949+" bf 4*cr0+so, 3f\n"
8950+"2:.long " "0x00c00b00""\n"
8951+#else
8952+" add %0,%1,%0\n"
8953+#endif
8954+
8955+"3:\n"
8956+ PPC405_ERR77(0,%2)
8957+ PPC_STLCX "%0,0,%2 \n\
8958+ bne- 1b"
8959+
8960+#ifdef CONFIG_PAX_REFCOUNT
8961+"\n4:\n"
8962+ _ASM_EXTABLE(2b, 4b)
8963+#endif
8964+
8965+ : "=&r" (t)
8966+ : "r" (a), "r" (&(l->a.counter))
8967+ : "cc", "memory");
8968+
8969+ return t;
8970+}
8971+
8972+static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
8973+{
8974+ long t;
8975+
8976+ __asm__ __volatile__(
8977 "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
8978 add %0,%1,%0\n"
8979 PPC405_ERR77(0,%2)
8980@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
8981
8982 #define local_cmpxchg(l, o, n) \
8983 (cmpxchg_local(&((l)->a.counter), (o), (n)))
8984+#define local_cmpxchg_unchecked(l, o, n) \
8985+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
8986 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
8987
8988 /**
8989diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
8990index 8565c25..2865190 100644
8991--- a/arch/powerpc/include/asm/mman.h
8992+++ b/arch/powerpc/include/asm/mman.h
8993@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
8994 }
8995 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
8996
8997-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
8998+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
8999 {
9000 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
9001 }
9002diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
9003index 26fe1ae..987ffc5 100644
9004--- a/arch/powerpc/include/asm/page.h
9005+++ b/arch/powerpc/include/asm/page.h
9006@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
9007 * and needs to be executable. This means the whole heap ends
9008 * up being executable.
9009 */
9010-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
9011- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9012+#define VM_DATA_DEFAULT_FLAGS32 \
9013+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
9014+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9015
9016 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
9017 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9018@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
9019 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
9020 #endif
9021
9022+#define ktla_ktva(addr) (addr)
9023+#define ktva_ktla(addr) (addr)
9024+
9025 #ifndef CONFIG_PPC_BOOK3S_64
9026 /*
9027 * Use the top bit of the higher-level page table entries to indicate whether
9028diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
9029index 88693ce..ac6f9ab 100644
9030--- a/arch/powerpc/include/asm/page_64.h
9031+++ b/arch/powerpc/include/asm/page_64.h
9032@@ -153,15 +153,18 @@ do { \
9033 * stack by default, so in the absence of a PT_GNU_STACK program header
9034 * we turn execute permission off.
9035 */
9036-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
9037- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9038+#define VM_STACK_DEFAULT_FLAGS32 \
9039+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
9040+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9041
9042 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
9043 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
9044
9045+#ifndef CONFIG_PAX_PAGEEXEC
9046 #define VM_STACK_DEFAULT_FLAGS \
9047 (is_32bit_task() ? \
9048 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
9049+#endif
9050
9051 #include <asm-generic/getorder.h>
9052
9053diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
9054index 4b0be20..c15a27d 100644
9055--- a/arch/powerpc/include/asm/pgalloc-64.h
9056+++ b/arch/powerpc/include/asm/pgalloc-64.h
9057@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9058 #ifndef CONFIG_PPC_64K_PAGES
9059
9060 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
9061+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
9062
9063 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
9064 {
9065@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
9066 pud_set(pud, (unsigned long)pmd);
9067 }
9068
9069+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
9070+{
9071+ pud_populate(mm, pud, pmd);
9072+}
9073+
9074 #define pmd_populate(mm, pmd, pte_page) \
9075 pmd_populate_kernel(mm, pmd, page_address(pte_page))
9076 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
9077@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
9078 #endif
9079
9080 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
9081+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
9082
9083 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
9084 pte_t *pte)
9085diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
9086index d98c1ec..9f61569 100644
9087--- a/arch/powerpc/include/asm/pgtable.h
9088+++ b/arch/powerpc/include/asm/pgtable.h
9089@@ -2,6 +2,7 @@
9090 #define _ASM_POWERPC_PGTABLE_H
9091 #ifdef __KERNEL__
9092
9093+#include <linux/const.h>
9094 #ifndef __ASSEMBLY__
9095 #include <linux/mmdebug.h>
9096 #include <asm/processor.h> /* For TASK_SIZE */
9097diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
9098index 4aad413..85d86bf 100644
9099--- a/arch/powerpc/include/asm/pte-hash32.h
9100+++ b/arch/powerpc/include/asm/pte-hash32.h
9101@@ -21,6 +21,7 @@
9102 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
9103 #define _PAGE_USER 0x004 /* usermode access allowed */
9104 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
9105+#define _PAGE_EXEC _PAGE_GUARDED
9106 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
9107 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
9108 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
9109diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
9110index 0c05059..7e056e4 100644
9111--- a/arch/powerpc/include/asm/reg.h
9112+++ b/arch/powerpc/include/asm/reg.h
9113@@ -251,6 +251,7 @@
9114 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
9115 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
9116 #define DSISR_NOHPTE 0x40000000 /* no translation found */
9117+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
9118 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
9119 #define DSISR_ISSTORE 0x02000000 /* access was a store */
9120 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
9121diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
9122index 5a6614a..d89995d1 100644
9123--- a/arch/powerpc/include/asm/smp.h
9124+++ b/arch/powerpc/include/asm/smp.h
9125@@ -51,7 +51,7 @@ struct smp_ops_t {
9126 int (*cpu_disable)(void);
9127 void (*cpu_die)(unsigned int nr);
9128 int (*cpu_bootable)(unsigned int nr);
9129-};
9130+} __no_const;
9131
9132 extern void smp_send_debugger_break(void);
9133 extern void start_secondary_resume(void);
9134diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
9135index 4dbe072..b803275 100644
9136--- a/arch/powerpc/include/asm/spinlock.h
9137+++ b/arch/powerpc/include/asm/spinlock.h
9138@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
9139 __asm__ __volatile__(
9140 "1: " PPC_LWARX(%0,0,%1,1) "\n"
9141 __DO_SIGN_EXTEND
9142-" addic. %0,%0,1\n\
9143- ble- 2f\n"
9144+
9145+#ifdef CONFIG_PAX_REFCOUNT
9146+" mcrxr cr0\n"
9147+" addico. %0,%0,1\n"
9148+" bf 4*cr0+so, 3f\n"
9149+"2:.long " "0x00c00b00""\n"
9150+#else
9151+" addic. %0,%0,1\n"
9152+#endif
9153+
9154+"3:\n"
9155+ "ble- 4f\n"
9156 PPC405_ERR77(0,%1)
9157 " stwcx. %0,0,%1\n\
9158 bne- 1b\n"
9159 PPC_ACQUIRE_BARRIER
9160-"2:" : "=&r" (tmp)
9161+"4:"
9162+
9163+#ifdef CONFIG_PAX_REFCOUNT
9164+ _ASM_EXTABLE(2b,4b)
9165+#endif
9166+
9167+ : "=&r" (tmp)
9168 : "r" (&rw->lock)
9169 : "cr0", "xer", "memory");
9170
9171@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
9172 __asm__ __volatile__(
9173 "# read_unlock\n\t"
9174 PPC_RELEASE_BARRIER
9175-"1: lwarx %0,0,%1\n\
9176- addic %0,%0,-1\n"
9177+"1: lwarx %0,0,%1\n"
9178+
9179+#ifdef CONFIG_PAX_REFCOUNT
9180+" mcrxr cr0\n"
9181+" addico. %0,%0,-1\n"
9182+" bf 4*cr0+so, 3f\n"
9183+"2:.long " "0x00c00b00""\n"
9184+#else
9185+" addic. %0,%0,-1\n"
9186+#endif
9187+
9188+"3:\n"
9189 PPC405_ERR77(0,%1)
9190 " stwcx. %0,0,%1\n\
9191 bne- 1b"
9192+
9193+#ifdef CONFIG_PAX_REFCOUNT
9194+"\n4:\n"
9195+ _ASM_EXTABLE(2b, 4b)
9196+#endif
9197+
9198 : "=&r"(tmp)
9199 : "r"(&rw->lock)
9200 : "cr0", "xer", "memory");
9201diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
9202index b034ecd..af7e31f 100644
9203--- a/arch/powerpc/include/asm/thread_info.h
9204+++ b/arch/powerpc/include/asm/thread_info.h
9205@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
9206 #if defined(CONFIG_PPC64)
9207 #define TIF_ELF2ABI 18 /* function descriptors must die! */
9208 #endif
9209+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
9210+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
9211
9212 /* as above, but as bit values */
9213 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
9214@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
9215 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9216 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
9217 #define _TIF_NOHZ (1<<TIF_NOHZ)
9218+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9219 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
9220 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
9221- _TIF_NOHZ)
9222+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
9223
9224 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
9225 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
9226diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
9227index 9485b43..3bd3c16 100644
9228--- a/arch/powerpc/include/asm/uaccess.h
9229+++ b/arch/powerpc/include/asm/uaccess.h
9230@@ -58,6 +58,7 @@
9231
9232 #endif
9233
9234+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
9235 #define access_ok(type, addr, size) \
9236 (__chk_user_ptr(addr), \
9237 __access_ok((__force unsigned long)(addr), (size), get_fs()))
9238@@ -318,52 +319,6 @@ do { \
9239 extern unsigned long __copy_tofrom_user(void __user *to,
9240 const void __user *from, unsigned long size);
9241
9242-#ifndef __powerpc64__
9243-
9244-static inline unsigned long copy_from_user(void *to,
9245- const void __user *from, unsigned long n)
9246-{
9247- unsigned long over;
9248-
9249- if (access_ok(VERIFY_READ, from, n))
9250- return __copy_tofrom_user((__force void __user *)to, from, n);
9251- if ((unsigned long)from < TASK_SIZE) {
9252- over = (unsigned long)from + n - TASK_SIZE;
9253- return __copy_tofrom_user((__force void __user *)to, from,
9254- n - over) + over;
9255- }
9256- return n;
9257-}
9258-
9259-static inline unsigned long copy_to_user(void __user *to,
9260- const void *from, unsigned long n)
9261-{
9262- unsigned long over;
9263-
9264- if (access_ok(VERIFY_WRITE, to, n))
9265- return __copy_tofrom_user(to, (__force void __user *)from, n);
9266- if ((unsigned long)to < TASK_SIZE) {
9267- over = (unsigned long)to + n - TASK_SIZE;
9268- return __copy_tofrom_user(to, (__force void __user *)from,
9269- n - over) + over;
9270- }
9271- return n;
9272-}
9273-
9274-#else /* __powerpc64__ */
9275-
9276-#define __copy_in_user(to, from, size) \
9277- __copy_tofrom_user((to), (from), (size))
9278-
9279-extern unsigned long copy_from_user(void *to, const void __user *from,
9280- unsigned long n);
9281-extern unsigned long copy_to_user(void __user *to, const void *from,
9282- unsigned long n);
9283-extern unsigned long copy_in_user(void __user *to, const void __user *from,
9284- unsigned long n);
9285-
9286-#endif /* __powerpc64__ */
9287-
9288 static inline unsigned long __copy_from_user_inatomic(void *to,
9289 const void __user *from, unsigned long n)
9290 {
9291@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
9292 if (ret == 0)
9293 return 0;
9294 }
9295+
9296+ if (!__builtin_constant_p(n))
9297+ check_object_size(to, n, false);
9298+
9299 return __copy_tofrom_user((__force void __user *)to, from, n);
9300 }
9301
9302@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
9303 if (ret == 0)
9304 return 0;
9305 }
9306+
9307+ if (!__builtin_constant_p(n))
9308+ check_object_size(from, n, true);
9309+
9310 return __copy_tofrom_user(to, (__force const void __user *)from, n);
9311 }
9312
9313@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
9314 return __copy_to_user_inatomic(to, from, size);
9315 }
9316
9317+#ifndef __powerpc64__
9318+
9319+static inline unsigned long __must_check copy_from_user(void *to,
9320+ const void __user *from, unsigned long n)
9321+{
9322+ unsigned long over;
9323+
9324+ if ((long)n < 0)
9325+ return n;
9326+
9327+ if (access_ok(VERIFY_READ, from, n)) {
9328+ if (!__builtin_constant_p(n))
9329+ check_object_size(to, n, false);
9330+ return __copy_tofrom_user((__force void __user *)to, from, n);
9331+ }
9332+ if ((unsigned long)from < TASK_SIZE) {
9333+ over = (unsigned long)from + n - TASK_SIZE;
9334+ if (!__builtin_constant_p(n - over))
9335+ check_object_size(to, n - over, false);
9336+ return __copy_tofrom_user((__force void __user *)to, from,
9337+ n - over) + over;
9338+ }
9339+ return n;
9340+}
9341+
9342+static inline unsigned long __must_check copy_to_user(void __user *to,
9343+ const void *from, unsigned long n)
9344+{
9345+ unsigned long over;
9346+
9347+ if ((long)n < 0)
9348+ return n;
9349+
9350+ if (access_ok(VERIFY_WRITE, to, n)) {
9351+ if (!__builtin_constant_p(n))
9352+ check_object_size(from, n, true);
9353+ return __copy_tofrom_user(to, (__force void __user *)from, n);
9354+ }
9355+ if ((unsigned long)to < TASK_SIZE) {
9356+ over = (unsigned long)to + n - TASK_SIZE;
9357+ if (!__builtin_constant_p(n))
9358+ check_object_size(from, n - over, true);
9359+ return __copy_tofrom_user(to, (__force void __user *)from,
9360+ n - over) + over;
9361+ }
9362+ return n;
9363+}
9364+
9365+#else /* __powerpc64__ */
9366+
9367+#define __copy_in_user(to, from, size) \
9368+ __copy_tofrom_user((to), (from), (size))
9369+
9370+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
9371+{
9372+ if ((long)n < 0 || n > INT_MAX)
9373+ return n;
9374+
9375+ if (!__builtin_constant_p(n))
9376+ check_object_size(to, n, false);
9377+
9378+ if (likely(access_ok(VERIFY_READ, from, n)))
9379+ n = __copy_from_user(to, from, n);
9380+ else
9381+ memset(to, 0, n);
9382+ return n;
9383+}
9384+
9385+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
9386+{
9387+ if ((long)n < 0 || n > INT_MAX)
9388+ return n;
9389+
9390+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
9391+ if (!__builtin_constant_p(n))
9392+ check_object_size(from, n, true);
9393+ n = __copy_to_user(to, from, n);
9394+ }
9395+ return n;
9396+}
9397+
9398+extern unsigned long copy_in_user(void __user *to, const void __user *from,
9399+ unsigned long n);
9400+
9401+#endif /* __powerpc64__ */
9402+
9403 extern unsigned long __clear_user(void __user *addr, unsigned long size);
9404
9405 static inline unsigned long clear_user(void __user *addr, unsigned long size)
9406diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
9407index 670c312..60c2b52 100644
9408--- a/arch/powerpc/kernel/Makefile
9409+++ b/arch/powerpc/kernel/Makefile
9410@@ -27,6 +27,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
9411 CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
9412 endif
9413
9414+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
9415+
9416 obj-y := cputable.o ptrace.o syscalls.o \
9417 irq.o align.o signal_32.o pmc.o vdso.o \
9418 process.o systbl.o idle.o \
9419diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
9420index bb9cac6..5181202 100644
9421--- a/arch/powerpc/kernel/exceptions-64e.S
9422+++ b/arch/powerpc/kernel/exceptions-64e.S
9423@@ -1010,6 +1010,7 @@ storage_fault_common:
9424 std r14,_DAR(r1)
9425 std r15,_DSISR(r1)
9426 addi r3,r1,STACK_FRAME_OVERHEAD
9427+ bl save_nvgprs
9428 mr r4,r14
9429 mr r5,r15
9430 ld r14,PACA_EXGEN+EX_R14(r13)
9431@@ -1018,8 +1019,7 @@ storage_fault_common:
9432 cmpdi r3,0
9433 bne- 1f
9434 b ret_from_except_lite
9435-1: bl save_nvgprs
9436- mr r5,r3
9437+1: mr r5,r3
9438 addi r3,r1,STACK_FRAME_OVERHEAD
9439 ld r4,_DAR(r1)
9440 bl bad_page_fault
9441diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
9442index 050f79a..f385bfe 100644
9443--- a/arch/powerpc/kernel/exceptions-64s.S
9444+++ b/arch/powerpc/kernel/exceptions-64s.S
9445@@ -1593,10 +1593,10 @@ handle_page_fault:
9446 11: ld r4,_DAR(r1)
9447 ld r5,_DSISR(r1)
9448 addi r3,r1,STACK_FRAME_OVERHEAD
9449+ bl save_nvgprs
9450 bl do_page_fault
9451 cmpdi r3,0
9452 beq+ 12f
9453- bl save_nvgprs
9454 mr r5,r3
9455 addi r3,r1,STACK_FRAME_OVERHEAD
9456 lwz r4,_DAR(r1)
9457diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
9458index 4c5891d..a5d88bb 100644
9459--- a/arch/powerpc/kernel/irq.c
9460+++ b/arch/powerpc/kernel/irq.c
9461@@ -461,6 +461,8 @@ void migrate_irqs(void)
9462 }
9463 #endif
9464
9465+extern void gr_handle_kernel_exploit(void);
9466+
9467 static inline void check_stack_overflow(void)
9468 {
9469 #ifdef CONFIG_DEBUG_STACKOVERFLOW
9470@@ -473,6 +475,7 @@ static inline void check_stack_overflow(void)
9471 printk("do_IRQ: stack overflow: %ld\n",
9472 sp - sizeof(struct thread_info));
9473 dump_stack();
9474+ gr_handle_kernel_exploit();
9475 }
9476 #endif
9477 }
9478diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
9479index 6cff040..74ac5d1b 100644
9480--- a/arch/powerpc/kernel/module_32.c
9481+++ b/arch/powerpc/kernel/module_32.c
9482@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
9483 me->arch.core_plt_section = i;
9484 }
9485 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
9486- printk("Module doesn't contain .plt or .init.plt sections.\n");
9487+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
9488 return -ENOEXEC;
9489 }
9490
9491@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location,
9492
9493 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
9494 /* Init, or core PLT? */
9495- if (location >= mod->module_core
9496- && location < mod->module_core + mod->core_size)
9497+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
9498+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
9499 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
9500- else
9501+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
9502+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
9503 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
9504+ else {
9505+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
9506+ return ~0UL;
9507+ }
9508
9509 /* Find this entry, or if that fails, the next avail. entry */
9510 while (entry->jump[0]) {
9511@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
9512 }
9513 #ifdef CONFIG_DYNAMIC_FTRACE
9514 module->arch.tramp =
9515- do_plt_call(module->module_core,
9516+ do_plt_call(module->module_core_rx,
9517 (unsigned long)ftrace_caller,
9518 sechdrs, module);
9519 #endif
9520diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
9521index bf44ae9..6d2ce71 100644
9522--- a/arch/powerpc/kernel/process.c
9523+++ b/arch/powerpc/kernel/process.c
9524@@ -1039,8 +1039,8 @@ void show_regs(struct pt_regs * regs)
9525 * Lookup NIP late so we have the best change of getting the
9526 * above info out without failing
9527 */
9528- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
9529- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
9530+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
9531+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
9532 #endif
9533 show_stack(current, (unsigned long *) regs->gpr[1]);
9534 if (!user_mode(regs))
9535@@ -1558,10 +1558,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9536 newsp = stack[0];
9537 ip = stack[STACK_FRAME_LR_SAVE];
9538 if (!firstframe || ip != lr) {
9539- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
9540+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
9541 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
9542 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
9543- printk(" (%pS)",
9544+ printk(" (%pA)",
9545 (void *)current->ret_stack[curr_frame].ret);
9546 curr_frame--;
9547 }
9548@@ -1581,7 +1581,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
9549 struct pt_regs *regs = (struct pt_regs *)
9550 (sp + STACK_FRAME_OVERHEAD);
9551 lr = regs->link;
9552- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
9553+ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
9554 regs->trap, (void *)regs->nip, (void *)lr);
9555 firstframe = 1;
9556 }
9557@@ -1617,58 +1617,3 @@ void notrace __ppc64_runlatch_off(void)
9558 mtspr(SPRN_CTRLT, ctrl);
9559 }
9560 #endif /* CONFIG_PPC64 */
9561-
9562-unsigned long arch_align_stack(unsigned long sp)
9563-{
9564- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9565- sp -= get_random_int() & ~PAGE_MASK;
9566- return sp & ~0xf;
9567-}
9568-
9569-static inline unsigned long brk_rnd(void)
9570-{
9571- unsigned long rnd = 0;
9572-
9573- /* 8MB for 32bit, 1GB for 64bit */
9574- if (is_32bit_task())
9575- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
9576- else
9577- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
9578-
9579- return rnd << PAGE_SHIFT;
9580-}
9581-
9582-unsigned long arch_randomize_brk(struct mm_struct *mm)
9583-{
9584- unsigned long base = mm->brk;
9585- unsigned long ret;
9586-
9587-#ifdef CONFIG_PPC_STD_MMU_64
9588- /*
9589- * If we are using 1TB segments and we are allowed to randomise
9590- * the heap, we can put it above 1TB so it is backed by a 1TB
9591- * segment. Otherwise the heap will be in the bottom 1TB
9592- * which always uses 256MB segments and this may result in a
9593- * performance penalty.
9594- */
9595- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
9596- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
9597-#endif
9598-
9599- ret = PAGE_ALIGN(base + brk_rnd());
9600-
9601- if (ret < mm->brk)
9602- return mm->brk;
9603-
9604- return ret;
9605-}
9606-
9607-unsigned long randomize_et_dyn(unsigned long base)
9608-{
9609- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
9610-
9611- if (ret < base)
9612- return base;
9613-
9614- return ret;
9615-}
9616diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
9617index 2e3d2bf..35df241 100644
9618--- a/arch/powerpc/kernel/ptrace.c
9619+++ b/arch/powerpc/kernel/ptrace.c
9620@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
9621 return ret;
9622 }
9623
9624+#ifdef CONFIG_GRKERNSEC_SETXID
9625+extern void gr_delayed_cred_worker(void);
9626+#endif
9627+
9628 /*
9629 * We must return the syscall number to actually look up in the table.
9630 * This can be -1L to skip running any syscall at all.
9631@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
9632
9633 secure_computing_strict(regs->gpr[0]);
9634
9635+#ifdef CONFIG_GRKERNSEC_SETXID
9636+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9637+ gr_delayed_cred_worker();
9638+#endif
9639+
9640 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
9641 tracehook_report_syscall_entry(regs))
9642 /*
9643@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
9644 {
9645 int step;
9646
9647+#ifdef CONFIG_GRKERNSEC_SETXID
9648+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9649+ gr_delayed_cred_worker();
9650+#endif
9651+
9652 audit_syscall_exit(regs);
9653
9654 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9655diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
9656index b171001..4ac7ac5 100644
9657--- a/arch/powerpc/kernel/signal_32.c
9658+++ b/arch/powerpc/kernel/signal_32.c
9659@@ -1011,7 +1011,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
9660 /* Save user registers on the stack */
9661 frame = &rt_sf->uc.uc_mcontext;
9662 addr = frame;
9663- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
9664+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9665 sigret = 0;
9666 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
9667 } else {
9668diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
9669index 2cb0c94..c0c0bc9 100644
9670--- a/arch/powerpc/kernel/signal_64.c
9671+++ b/arch/powerpc/kernel/signal_64.c
9672@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
9673 current->thread.fp_state.fpscr = 0;
9674
9675 /* Set up to return from userspace. */
9676- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
9677+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
9678 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
9679 } else {
9680 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
9681diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
9682index 0dc43f9..a885d33 100644
9683--- a/arch/powerpc/kernel/traps.c
9684+++ b/arch/powerpc/kernel/traps.c
9685@@ -36,6 +36,7 @@
9686 #include <linux/debugfs.h>
9687 #include <linux/ratelimit.h>
9688 #include <linux/context_tracking.h>
9689+#include <linux/uaccess.h>
9690
9691 #include <asm/emulated_ops.h>
9692 #include <asm/pgtable.h>
9693@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
9694 return flags;
9695 }
9696
9697+extern void gr_handle_kernel_exploit(void);
9698+
9699 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9700 int signr)
9701 {
9702@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
9703 panic("Fatal exception in interrupt");
9704 if (panic_on_oops)
9705 panic("Fatal exception");
9706+
9707+ gr_handle_kernel_exploit();
9708+
9709 do_exit(signr);
9710 }
9711
9712@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
9713 enum ctx_state prev_state = exception_enter();
9714 unsigned int reason = get_reason(regs);
9715
9716+#ifdef CONFIG_PAX_REFCOUNT
9717+ unsigned int bkpt;
9718+ const struct exception_table_entry *entry;
9719+
9720+ if (reason & REASON_ILLEGAL) {
9721+ /* Check if PaX bad instruction */
9722+ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
9723+ current->thread.trap_nr = 0;
9724+ pax_report_refcount_overflow(regs);
9725+ /* fixup_exception() for PowerPC does not exist, simulate its job */
9726+ if ((entry = search_exception_tables(regs->nip)) != NULL) {
9727+ regs->nip = entry->fixup;
9728+ return;
9729+ }
9730+ /* fixup_exception() could not handle */
9731+ goto bail;
9732+ }
9733+ }
9734+#endif
9735+
9736 /* We can now get here via a FP Unavailable exception if the core
9737 * has no FPU, in that case the reason flags will be 0 */
9738
9739diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
9740index f174351..5722009 100644
9741--- a/arch/powerpc/kernel/vdso.c
9742+++ b/arch/powerpc/kernel/vdso.c
9743@@ -35,6 +35,7 @@
9744 #include <asm/vdso.h>
9745 #include <asm/vdso_datapage.h>
9746 #include <asm/setup.h>
9747+#include <asm/mman.h>
9748
9749 #undef DEBUG
9750
9751@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9752 vdso_base = VDSO32_MBASE;
9753 #endif
9754
9755- current->mm->context.vdso_base = 0;
9756+ current->mm->context.vdso_base = ~0UL;
9757
9758 /* vDSO has a problem and was disabled, just don't "enable" it for the
9759 * process
9760@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
9761 vdso_base = get_unmapped_area(NULL, vdso_base,
9762 (vdso_pages << PAGE_SHIFT) +
9763 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
9764- 0, 0);
9765+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
9766 if (IS_ERR_VALUE(vdso_base)) {
9767 rc = vdso_base;
9768 goto fail_mmapsem;
9769diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
9770index 4c79284..0e462c3 100644
9771--- a/arch/powerpc/kvm/powerpc.c
9772+++ b/arch/powerpc/kvm/powerpc.c
9773@@ -1338,7 +1338,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
9774 }
9775 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
9776
9777-int kvm_arch_init(void *opaque)
9778+int kvm_arch_init(const void *opaque)
9779 {
9780 return 0;
9781 }
9782diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
9783index 5eea6f3..5d10396 100644
9784--- a/arch/powerpc/lib/usercopy_64.c
9785+++ b/arch/powerpc/lib/usercopy_64.c
9786@@ -9,22 +9,6 @@
9787 #include <linux/module.h>
9788 #include <asm/uaccess.h>
9789
9790-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9791-{
9792- if (likely(access_ok(VERIFY_READ, from, n)))
9793- n = __copy_from_user(to, from, n);
9794- else
9795- memset(to, 0, n);
9796- return n;
9797-}
9798-
9799-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9800-{
9801- if (likely(access_ok(VERIFY_WRITE, to, n)))
9802- n = __copy_to_user(to, from, n);
9803- return n;
9804-}
9805-
9806 unsigned long copy_in_user(void __user *to, const void __user *from,
9807 unsigned long n)
9808 {
9809@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
9810 return n;
9811 }
9812
9813-EXPORT_SYMBOL(copy_from_user);
9814-EXPORT_SYMBOL(copy_to_user);
9815 EXPORT_SYMBOL(copy_in_user);
9816
9817diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
9818index 51ab9e7..7d3c78b 100644
9819--- a/arch/powerpc/mm/fault.c
9820+++ b/arch/powerpc/mm/fault.c
9821@@ -33,6 +33,10 @@
9822 #include <linux/magic.h>
9823 #include <linux/ratelimit.h>
9824 #include <linux/context_tracking.h>
9825+#include <linux/slab.h>
9826+#include <linux/pagemap.h>
9827+#include <linux/compiler.h>
9828+#include <linux/unistd.h>
9829
9830 #include <asm/firmware.h>
9831 #include <asm/page.h>
9832@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
9833 }
9834 #endif
9835
9836+#ifdef CONFIG_PAX_PAGEEXEC
9837+/*
9838+ * PaX: decide what to do with offenders (regs->nip = fault address)
9839+ *
9840+ * returns 1 when task should be killed
9841+ */
9842+static int pax_handle_fetch_fault(struct pt_regs *regs)
9843+{
9844+ return 1;
9845+}
9846+
9847+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9848+{
9849+ unsigned long i;
9850+
9851+ printk(KERN_ERR "PAX: bytes at PC: ");
9852+ for (i = 0; i < 5; i++) {
9853+ unsigned int c;
9854+ if (get_user(c, (unsigned int __user *)pc+i))
9855+ printk(KERN_CONT "???????? ");
9856+ else
9857+ printk(KERN_CONT "%08x ", c);
9858+ }
9859+ printk("\n");
9860+}
9861+#endif
9862+
9863 /*
9864 * Check whether the instruction at regs->nip is a store using
9865 * an update addressing form which will update r1.
9866@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
9867 * indicate errors in DSISR but can validly be set in SRR1.
9868 */
9869 if (trap == 0x400)
9870- error_code &= 0x48200000;
9871+ error_code &= 0x58200000;
9872 else
9873 is_write = error_code & DSISR_ISSTORE;
9874 #else
9875@@ -378,7 +409,7 @@ good_area:
9876 * "undefined". Of those that can be set, this is the only
9877 * one which seems bad.
9878 */
9879- if (error_code & 0x10000000)
9880+ if (error_code & DSISR_GUARDED)
9881 /* Guarded storage error. */
9882 goto bad_area;
9883 #endif /* CONFIG_8xx */
9884@@ -393,7 +424,7 @@ good_area:
9885 * processors use the same I/D cache coherency mechanism
9886 * as embedded.
9887 */
9888- if (error_code & DSISR_PROTFAULT)
9889+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
9890 goto bad_area;
9891 #endif /* CONFIG_PPC_STD_MMU */
9892
9893@@ -483,6 +514,23 @@ bad_area:
9894 bad_area_nosemaphore:
9895 /* User mode accesses cause a SIGSEGV */
9896 if (user_mode(regs)) {
9897+
9898+#ifdef CONFIG_PAX_PAGEEXEC
9899+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
9900+#ifdef CONFIG_PPC_STD_MMU
9901+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
9902+#else
9903+ if (is_exec && regs->nip == address) {
9904+#endif
9905+ switch (pax_handle_fetch_fault(regs)) {
9906+ }
9907+
9908+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
9909+ do_group_exit(SIGKILL);
9910+ }
9911+ }
9912+#endif
9913+
9914 _exception(SIGSEGV, regs, code, address);
9915 goto bail;
9916 }
9917diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
9918index cb8bdbe..cde4bc7 100644
9919--- a/arch/powerpc/mm/mmap.c
9920+++ b/arch/powerpc/mm/mmap.c
9921@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
9922 return sysctl_legacy_va_layout;
9923 }
9924
9925-static unsigned long mmap_rnd(void)
9926+static unsigned long mmap_rnd(struct mm_struct *mm)
9927 {
9928 unsigned long rnd = 0;
9929
9930+#ifdef CONFIG_PAX_RANDMMAP
9931+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9932+#endif
9933+
9934 if (current->flags & PF_RANDOMIZE) {
9935 /* 8MB for 32bit, 1GB for 64bit */
9936 if (is_32bit_task())
9937@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
9938 return rnd << PAGE_SHIFT;
9939 }
9940
9941-static inline unsigned long mmap_base(void)
9942+static inline unsigned long mmap_base(struct mm_struct *mm)
9943 {
9944 unsigned long gap = rlimit(RLIMIT_STACK);
9945
9946@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
9947 else if (gap > MAX_GAP)
9948 gap = MAX_GAP;
9949
9950- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
9951+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
9952 }
9953
9954 /*
9955@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9956 */
9957 if (mmap_is_legacy()) {
9958 mm->mmap_base = TASK_UNMAPPED_BASE;
9959+
9960+#ifdef CONFIG_PAX_RANDMMAP
9961+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9962+ mm->mmap_base += mm->delta_mmap;
9963+#endif
9964+
9965 mm->get_unmapped_area = arch_get_unmapped_area;
9966 } else {
9967- mm->mmap_base = mmap_base();
9968+ mm->mmap_base = mmap_base(mm);
9969+
9970+#ifdef CONFIG_PAX_RANDMMAP
9971+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9972+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9973+#endif
9974+
9975 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9976 }
9977 }
9978diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
9979index b0c75cc..ef7fb93 100644
9980--- a/arch/powerpc/mm/slice.c
9981+++ b/arch/powerpc/mm/slice.c
9982@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
9983 if ((mm->task_size - len) < addr)
9984 return 0;
9985 vma = find_vma(mm, addr);
9986- return (!vma || (addr + len) <= vma->vm_start);
9987+ return check_heap_stack_gap(vma, addr, len, 0);
9988 }
9989
9990 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
9991@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
9992 info.align_offset = 0;
9993
9994 addr = TASK_UNMAPPED_BASE;
9995+
9996+#ifdef CONFIG_PAX_RANDMMAP
9997+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9998+ addr += mm->delta_mmap;
9999+#endif
10000+
10001 while (addr < TASK_SIZE) {
10002 info.low_limit = addr;
10003 if (!slice_scan_available(addr, available, 1, &addr))
10004@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
10005 if (fixed && addr > (mm->task_size - len))
10006 return -ENOMEM;
10007
10008+#ifdef CONFIG_PAX_RANDMMAP
10009+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
10010+ addr = 0;
10011+#endif
10012+
10013 /* If hint, make sure it matches our alignment restrictions */
10014 if (!fixed && addr) {
10015 addr = _ALIGN_UP(addr, 1ul << pshift);
10016diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
10017index 3afa6f4..40c53ff 100644
10018--- a/arch/powerpc/net/bpf_jit_comp.c
10019+++ b/arch/powerpc/net/bpf_jit_comp.c
10020@@ -697,5 +697,6 @@ void bpf_jit_free(struct bpf_prog *fp)
10021 {
10022 if (fp->jited)
10023 module_free(NULL, fp->bpf_func);
10024- kfree(fp);
10025+
10026+ bpf_prog_unlock_free(fp);
10027 }
10028diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10029index 4278acf..67fd0e6 100644
10030--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10031+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
10032@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
10033 }
10034
10035 static struct pci_ops scc_pciex_pci_ops = {
10036- scc_pciex_read_config,
10037- scc_pciex_write_config,
10038+ .read = scc_pciex_read_config,
10039+ .write = scc_pciex_write_config,
10040 };
10041
10042 static void pciex_clear_intr_all(unsigned int __iomem *base)
10043diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
10044index d966bbe..372124a 100644
10045--- a/arch/powerpc/platforms/cell/spufs/file.c
10046+++ b/arch/powerpc/platforms/cell/spufs/file.c
10047@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10048 return VM_FAULT_NOPAGE;
10049 }
10050
10051-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
10052+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
10053 unsigned long address,
10054- void *buf, int len, int write)
10055+ void *buf, size_t len, int write)
10056 {
10057 struct spu_context *ctx = vma->vm_file->private_data;
10058 unsigned long offset = address - vma->vm_start;
10059diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
10060index fa934fe..c296056 100644
10061--- a/arch/s390/include/asm/atomic.h
10062+++ b/arch/s390/include/asm/atomic.h
10063@@ -412,4 +412,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
10064 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
10065 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10066
10067+#define atomic64_read_unchecked(v) atomic64_read(v)
10068+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10069+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10070+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10071+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10072+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10073+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10074+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10075+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10076+
10077 #endif /* __ARCH_S390_ATOMIC__ */
10078diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
10079index 19ff956..8d39cb1 100644
10080--- a/arch/s390/include/asm/barrier.h
10081+++ b/arch/s390/include/asm/barrier.h
10082@@ -37,7 +37,7 @@
10083 do { \
10084 compiletime_assert_atomic_type(*p); \
10085 barrier(); \
10086- ACCESS_ONCE(*p) = (v); \
10087+ ACCESS_ONCE_RW(*p) = (v); \
10088 } while (0)
10089
10090 #define smp_load_acquire(p) \
10091diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
10092index 4d7ccac..d03d0ad 100644
10093--- a/arch/s390/include/asm/cache.h
10094+++ b/arch/s390/include/asm/cache.h
10095@@ -9,8 +9,10 @@
10096 #ifndef __ARCH_S390_CACHE_H
10097 #define __ARCH_S390_CACHE_H
10098
10099-#define L1_CACHE_BYTES 256
10100+#include <linux/const.h>
10101+
10102 #define L1_CACHE_SHIFT 8
10103+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10104 #define NET_SKB_PAD 32
10105
10106 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10107diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
10108index 78f4f87..598ce39 100644
10109--- a/arch/s390/include/asm/elf.h
10110+++ b/arch/s390/include/asm/elf.h
10111@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
10112 the loader. We need to make sure that it is out of the way of the program
10113 that it will "exec", and that there is sufficient room for the brk. */
10114
10115-extern unsigned long randomize_et_dyn(unsigned long base);
10116-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
10117+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
10118+
10119+#ifdef CONFIG_PAX_ASLR
10120+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
10121+
10122+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
10123+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
10124+#endif
10125
10126 /* This yields a mask that user programs can use to figure out what
10127 instruction set this CPU supports. */
10128@@ -222,9 +228,6 @@ struct linux_binprm;
10129 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
10130 int arch_setup_additional_pages(struct linux_binprm *, int);
10131
10132-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10133-#define arch_randomize_brk arch_randomize_brk
10134-
10135 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
10136
10137 #endif
10138diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
10139index c4a93d6..4d2a9b4 100644
10140--- a/arch/s390/include/asm/exec.h
10141+++ b/arch/s390/include/asm/exec.h
10142@@ -7,6 +7,6 @@
10143 #ifndef __ASM_EXEC_H
10144 #define __ASM_EXEC_H
10145
10146-extern unsigned long arch_align_stack(unsigned long sp);
10147+#define arch_align_stack(x) ((x) & ~0xfUL)
10148
10149 #endif /* __ASM_EXEC_H */
10150diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
10151index cd4c68e..6764641 100644
10152--- a/arch/s390/include/asm/uaccess.h
10153+++ b/arch/s390/include/asm/uaccess.h
10154@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
10155 __range_ok((unsigned long)(addr), (size)); \
10156 })
10157
10158+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
10159 #define access_ok(type, addr, size) __access_ok(addr, size)
10160
10161 /*
10162@@ -275,6 +276,10 @@ static inline unsigned long __must_check
10163 copy_to_user(void __user *to, const void *from, unsigned long n)
10164 {
10165 might_fault();
10166+
10167+ if ((long)n < 0)
10168+ return n;
10169+
10170 return __copy_to_user(to, from, n);
10171 }
10172
10173@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
10174 static inline unsigned long __must_check
10175 copy_from_user(void *to, const void __user *from, unsigned long n)
10176 {
10177- unsigned int sz = __compiletime_object_size(to);
10178+ size_t sz = __compiletime_object_size(to);
10179
10180 might_fault();
10181- if (unlikely(sz != -1 && sz < n)) {
10182+
10183+ if ((long)n < 0)
10184+ return n;
10185+
10186+ if (unlikely(sz != (size_t)-1 && sz < n)) {
10187 copy_from_user_overflow();
10188 return n;
10189 }
10190diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
10191index b89b591..fd9609d 100644
10192--- a/arch/s390/kernel/module.c
10193+++ b/arch/s390/kernel/module.c
10194@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
10195
10196 /* Increase core size by size of got & plt and set start
10197 offsets for got and plt. */
10198- me->core_size = ALIGN(me->core_size, 4);
10199- me->arch.got_offset = me->core_size;
10200- me->core_size += me->arch.got_size;
10201- me->arch.plt_offset = me->core_size;
10202- me->core_size += me->arch.plt_size;
10203+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
10204+ me->arch.got_offset = me->core_size_rw;
10205+ me->core_size_rw += me->arch.got_size;
10206+ me->arch.plt_offset = me->core_size_rx;
10207+ me->core_size_rx += me->arch.plt_size;
10208 return 0;
10209 }
10210
10211@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10212 if (info->got_initialized == 0) {
10213 Elf_Addr *gotent;
10214
10215- gotent = me->module_core + me->arch.got_offset +
10216+ gotent = me->module_core_rw + me->arch.got_offset +
10217 info->got_offset;
10218 *gotent = val;
10219 info->got_initialized = 1;
10220@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10221 rc = apply_rela_bits(loc, val, 0, 64, 0);
10222 else if (r_type == R_390_GOTENT ||
10223 r_type == R_390_GOTPLTENT) {
10224- val += (Elf_Addr) me->module_core - loc;
10225+ val += (Elf_Addr) me->module_core_rw - loc;
10226 rc = apply_rela_bits(loc, val, 1, 32, 1);
10227 }
10228 break;
10229@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10230 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
10231 if (info->plt_initialized == 0) {
10232 unsigned int *ip;
10233- ip = me->module_core + me->arch.plt_offset +
10234+ ip = me->module_core_rx + me->arch.plt_offset +
10235 info->plt_offset;
10236 #ifndef CONFIG_64BIT
10237 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
10238@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10239 val - loc + 0xffffUL < 0x1ffffeUL) ||
10240 (r_type == R_390_PLT32DBL &&
10241 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
10242- val = (Elf_Addr) me->module_core +
10243+ val = (Elf_Addr) me->module_core_rx +
10244 me->arch.plt_offset +
10245 info->plt_offset;
10246 val += rela->r_addend - loc;
10247@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10248 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
10249 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
10250 val = val + rela->r_addend -
10251- ((Elf_Addr) me->module_core + me->arch.got_offset);
10252+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
10253 if (r_type == R_390_GOTOFF16)
10254 rc = apply_rela_bits(loc, val, 0, 16, 0);
10255 else if (r_type == R_390_GOTOFF32)
10256@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
10257 break;
10258 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
10259 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
10260- val = (Elf_Addr) me->module_core + me->arch.got_offset +
10261+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
10262 rela->r_addend - loc;
10263 if (r_type == R_390_GOTPC)
10264 rc = apply_rela_bits(loc, val, 1, 32, 0);
10265diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
10266index 93b9ca4..4ea1454 100644
10267--- a/arch/s390/kernel/process.c
10268+++ b/arch/s390/kernel/process.c
10269@@ -242,37 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
10270 }
10271 return 0;
10272 }
10273-
10274-unsigned long arch_align_stack(unsigned long sp)
10275-{
10276- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10277- sp -= get_random_int() & ~PAGE_MASK;
10278- return sp & ~0xf;
10279-}
10280-
10281-static inline unsigned long brk_rnd(void)
10282-{
10283- /* 8MB for 32bit, 1GB for 64bit */
10284- if (is_32bit_task())
10285- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
10286- else
10287- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
10288-}
10289-
10290-unsigned long arch_randomize_brk(struct mm_struct *mm)
10291-{
10292- unsigned long ret;
10293-
10294- ret = PAGE_ALIGN(mm->brk + brk_rnd());
10295- return (ret > mm->brk) ? ret : mm->brk;
10296-}
10297-
10298-unsigned long randomize_et_dyn(unsigned long base)
10299-{
10300- unsigned long ret;
10301-
10302- if (!(current->flags & PF_RANDOMIZE))
10303- return base;
10304- ret = PAGE_ALIGN(base + brk_rnd());
10305- return (ret > base) ? ret : base;
10306-}
10307diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
10308index 9b436c2..54fbf0a 100644
10309--- a/arch/s390/mm/mmap.c
10310+++ b/arch/s390/mm/mmap.c
10311@@ -95,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10312 */
10313 if (mmap_is_legacy()) {
10314 mm->mmap_base = mmap_base_legacy();
10315+
10316+#ifdef CONFIG_PAX_RANDMMAP
10317+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10318+ mm->mmap_base += mm->delta_mmap;
10319+#endif
10320+
10321 mm->get_unmapped_area = arch_get_unmapped_area;
10322 } else {
10323 mm->mmap_base = mmap_base();
10324+
10325+#ifdef CONFIG_PAX_RANDMMAP
10326+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10327+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10328+#endif
10329+
10330 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
10331 }
10332 }
10333@@ -170,9 +182,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
10334 */
10335 if (mmap_is_legacy()) {
10336 mm->mmap_base = mmap_base_legacy();
10337+
10338+#ifdef CONFIG_PAX_RANDMMAP
10339+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10340+ mm->mmap_base += mm->delta_mmap;
10341+#endif
10342+
10343 mm->get_unmapped_area = s390_get_unmapped_area;
10344 } else {
10345 mm->mmap_base = mmap_base();
10346+
10347+#ifdef CONFIG_PAX_RANDMMAP
10348+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10349+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
10350+#endif
10351+
10352 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
10353 }
10354 }
10355diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
10356index 61e45b7..f2833c5 100644
10357--- a/arch/s390/net/bpf_jit_comp.c
10358+++ b/arch/s390/net/bpf_jit_comp.c
10359@@ -887,5 +887,5 @@ void bpf_jit_free(struct bpf_prog *fp)
10360 module_free(NULL, header);
10361
10362 free_filter:
10363- kfree(fp);
10364+ bpf_prog_unlock_free(fp);
10365 }
10366diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
10367index ae3d59f..f65f075 100644
10368--- a/arch/score/include/asm/cache.h
10369+++ b/arch/score/include/asm/cache.h
10370@@ -1,7 +1,9 @@
10371 #ifndef _ASM_SCORE_CACHE_H
10372 #define _ASM_SCORE_CACHE_H
10373
10374+#include <linux/const.h>
10375+
10376 #define L1_CACHE_SHIFT 4
10377-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10378+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10379
10380 #endif /* _ASM_SCORE_CACHE_H */
10381diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
10382index f9f3cd5..58ff438 100644
10383--- a/arch/score/include/asm/exec.h
10384+++ b/arch/score/include/asm/exec.h
10385@@ -1,6 +1,6 @@
10386 #ifndef _ASM_SCORE_EXEC_H
10387 #define _ASM_SCORE_EXEC_H
10388
10389-extern unsigned long arch_align_stack(unsigned long sp);
10390+#define arch_align_stack(x) (x)
10391
10392 #endif /* _ASM_SCORE_EXEC_H */
10393diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
10394index a1519ad3..e8ac1ff 100644
10395--- a/arch/score/kernel/process.c
10396+++ b/arch/score/kernel/process.c
10397@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
10398
10399 return task_pt_regs(task)->cp0_epc;
10400 }
10401-
10402-unsigned long arch_align_stack(unsigned long sp)
10403-{
10404- return sp;
10405-}
10406diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
10407index ef9e555..331bd29 100644
10408--- a/arch/sh/include/asm/cache.h
10409+++ b/arch/sh/include/asm/cache.h
10410@@ -9,10 +9,11 @@
10411 #define __ASM_SH_CACHE_H
10412 #ifdef __KERNEL__
10413
10414+#include <linux/const.h>
10415 #include <linux/init.h>
10416 #include <cpu/cache.h>
10417
10418-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10419+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10420
10421 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10422
10423diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
10424index 6777177..cb5e44f 100644
10425--- a/arch/sh/mm/mmap.c
10426+++ b/arch/sh/mm/mmap.c
10427@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10428 struct mm_struct *mm = current->mm;
10429 struct vm_area_struct *vma;
10430 int do_colour_align;
10431+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10432 struct vm_unmapped_area_info info;
10433
10434 if (flags & MAP_FIXED) {
10435@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10436 if (filp || (flags & MAP_SHARED))
10437 do_colour_align = 1;
10438
10439+#ifdef CONFIG_PAX_RANDMMAP
10440+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10441+#endif
10442+
10443 if (addr) {
10444 if (do_colour_align)
10445 addr = COLOUR_ALIGN(addr, pgoff);
10446@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
10447 addr = PAGE_ALIGN(addr);
10448
10449 vma = find_vma(mm, addr);
10450- if (TASK_SIZE - len >= addr &&
10451- (!vma || addr + len <= vma->vm_start))
10452+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10453 return addr;
10454 }
10455
10456 info.flags = 0;
10457 info.length = len;
10458- info.low_limit = TASK_UNMAPPED_BASE;
10459+ info.low_limit = mm->mmap_base;
10460 info.high_limit = TASK_SIZE;
10461 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
10462 info.align_offset = pgoff << PAGE_SHIFT;
10463@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10464 struct mm_struct *mm = current->mm;
10465 unsigned long addr = addr0;
10466 int do_colour_align;
10467+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
10468 struct vm_unmapped_area_info info;
10469
10470 if (flags & MAP_FIXED) {
10471@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10472 if (filp || (flags & MAP_SHARED))
10473 do_colour_align = 1;
10474
10475+#ifdef CONFIG_PAX_RANDMMAP
10476+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10477+#endif
10478+
10479 /* requesting a specific address */
10480 if (addr) {
10481 if (do_colour_align)
10482@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10483 addr = PAGE_ALIGN(addr);
10484
10485 vma = find_vma(mm, addr);
10486- if (TASK_SIZE - len >= addr &&
10487- (!vma || addr + len <= vma->vm_start))
10488+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10489 return addr;
10490 }
10491
10492@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
10493 VM_BUG_ON(addr != -ENOMEM);
10494 info.flags = 0;
10495 info.low_limit = TASK_UNMAPPED_BASE;
10496+
10497+#ifdef CONFIG_PAX_RANDMMAP
10498+ if (mm->pax_flags & MF_PAX_RANDMMAP)
10499+ info.low_limit += mm->delta_mmap;
10500+#endif
10501+
10502 info.high_limit = TASK_SIZE;
10503 addr = vm_unmapped_area(&info);
10504 }
10505diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
10506index bb894c8..8141d5c 100644
10507--- a/arch/sparc/include/asm/atomic_64.h
10508+++ b/arch/sparc/include/asm/atomic_64.h
10509@@ -15,18 +15,40 @@
10510 #define ATOMIC64_INIT(i) { (i) }
10511
10512 #define atomic_read(v) (*(volatile int *)&(v)->counter)
10513+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
10514+{
10515+ return v->counter;
10516+}
10517 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
10518+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10519+{
10520+ return v->counter;
10521+}
10522
10523 #define atomic_set(v, i) (((v)->counter) = i)
10524+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
10525+{
10526+ v->counter = i;
10527+}
10528 #define atomic64_set(v, i) (((v)->counter) = i)
10529+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10530+{
10531+ v->counter = i;
10532+}
10533
10534 void atomic_add(int, atomic_t *);
10535+void atomic_add_unchecked(int, atomic_unchecked_t *);
10536 void atomic64_add(long, atomic64_t *);
10537+void atomic64_add_unchecked(long, atomic64_unchecked_t *);
10538 void atomic_sub(int, atomic_t *);
10539+void atomic_sub_unchecked(int, atomic_unchecked_t *);
10540 void atomic64_sub(long, atomic64_t *);
10541+void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
10542
10543 int atomic_add_ret(int, atomic_t *);
10544+int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
10545 long atomic64_add_ret(long, atomic64_t *);
10546+long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
10547 int atomic_sub_ret(int, atomic_t *);
10548 long atomic64_sub_ret(long, atomic64_t *);
10549
10550@@ -34,13 +56,29 @@ long atomic64_sub_ret(long, atomic64_t *);
10551 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
10552
10553 #define atomic_inc_return(v) atomic_add_ret(1, v)
10554+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10555+{
10556+ return atomic_add_ret_unchecked(1, v);
10557+}
10558 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
10559+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10560+{
10561+ return atomic64_add_ret_unchecked(1, v);
10562+}
10563
10564 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
10565 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
10566
10567 #define atomic_add_return(i, v) atomic_add_ret(i, v)
10568+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10569+{
10570+ return atomic_add_ret_unchecked(i, v);
10571+}
10572 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
10573+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10574+{
10575+ return atomic64_add_ret_unchecked(i, v);
10576+}
10577
10578 /*
10579 * atomic_inc_and_test - increment and test
10580@@ -51,6 +89,10 @@ long atomic64_sub_ret(long, atomic64_t *);
10581 * other cases.
10582 */
10583 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
10584+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10585+{
10586+ return atomic_inc_return_unchecked(v) == 0;
10587+}
10588 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
10589
10590 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
10591@@ -60,25 +102,60 @@ long atomic64_sub_ret(long, atomic64_t *);
10592 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
10593
10594 #define atomic_inc(v) atomic_add(1, v)
10595+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10596+{
10597+ atomic_add_unchecked(1, v);
10598+}
10599 #define atomic64_inc(v) atomic64_add(1, v)
10600+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10601+{
10602+ atomic64_add_unchecked(1, v);
10603+}
10604
10605 #define atomic_dec(v) atomic_sub(1, v)
10606+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10607+{
10608+ atomic_sub_unchecked(1, v);
10609+}
10610 #define atomic64_dec(v) atomic64_sub(1, v)
10611+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10612+{
10613+ atomic64_sub_unchecked(1, v);
10614+}
10615
10616 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
10617 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
10618
10619 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
10620+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10621+{
10622+ return cmpxchg(&v->counter, old, new);
10623+}
10624 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
10625+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10626+{
10627+ return xchg(&v->counter, new);
10628+}
10629
10630 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10631 {
10632- int c, old;
10633+ int c, old, new;
10634 c = atomic_read(v);
10635 for (;;) {
10636- if (unlikely(c == (u)))
10637+ if (unlikely(c == u))
10638 break;
10639- old = atomic_cmpxchg((v), c, c + (a));
10640+
10641+ asm volatile("addcc %2, %0, %0\n"
10642+
10643+#ifdef CONFIG_PAX_REFCOUNT
10644+ "tvs %%icc, 6\n"
10645+#endif
10646+
10647+ : "=r" (new)
10648+ : "0" (c), "ir" (a)
10649+ : "cc");
10650+
10651+ old = atomic_cmpxchg(v, c, new);
10652 if (likely(old == c))
10653 break;
10654 c = old;
10655@@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
10656 #define atomic64_cmpxchg(v, o, n) \
10657 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
10658 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
10659+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10660+{
10661+ return xchg(&v->counter, new);
10662+}
10663
10664 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10665 {
10666- long c, old;
10667+ long c, old, new;
10668 c = atomic64_read(v);
10669 for (;;) {
10670- if (unlikely(c == (u)))
10671+ if (unlikely(c == u))
10672 break;
10673- old = atomic64_cmpxchg((v), c, c + (a));
10674+
10675+ asm volatile("addcc %2, %0, %0\n"
10676+
10677+#ifdef CONFIG_PAX_REFCOUNT
10678+ "tvs %%xcc, 6\n"
10679+#endif
10680+
10681+ : "=r" (new)
10682+ : "0" (c), "ir" (a)
10683+ : "cc");
10684+
10685+ old = atomic64_cmpxchg(v, c, new);
10686 if (likely(old == c))
10687 break;
10688 c = old;
10689 }
10690- return c != (u);
10691+ return c != u;
10692 }
10693
10694 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10695diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
10696index 305dcc3..7835030 100644
10697--- a/arch/sparc/include/asm/barrier_64.h
10698+++ b/arch/sparc/include/asm/barrier_64.h
10699@@ -57,7 +57,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
10700 do { \
10701 compiletime_assert_atomic_type(*p); \
10702 barrier(); \
10703- ACCESS_ONCE(*p) = (v); \
10704+ ACCESS_ONCE_RW(*p) = (v); \
10705 } while (0)
10706
10707 #define smp_load_acquire(p) \
10708diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
10709index 5bb6991..5c2132e 100644
10710--- a/arch/sparc/include/asm/cache.h
10711+++ b/arch/sparc/include/asm/cache.h
10712@@ -7,10 +7,12 @@
10713 #ifndef _SPARC_CACHE_H
10714 #define _SPARC_CACHE_H
10715
10716+#include <linux/const.h>
10717+
10718 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
10719
10720 #define L1_CACHE_SHIFT 5
10721-#define L1_CACHE_BYTES 32
10722+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10723
10724 #ifdef CONFIG_SPARC32
10725 #define SMP_CACHE_BYTES_SHIFT 5
10726diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
10727index a24e41f..47677ff 100644
10728--- a/arch/sparc/include/asm/elf_32.h
10729+++ b/arch/sparc/include/asm/elf_32.h
10730@@ -114,6 +114,13 @@ typedef struct {
10731
10732 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
10733
10734+#ifdef CONFIG_PAX_ASLR
10735+#define PAX_ELF_ET_DYN_BASE 0x10000UL
10736+
10737+#define PAX_DELTA_MMAP_LEN 16
10738+#define PAX_DELTA_STACK_LEN 16
10739+#endif
10740+
10741 /* This yields a mask that user programs can use to figure out what
10742 instruction set this cpu supports. This can NOT be done in userspace
10743 on Sparc. */
10744diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
10745index 370ca1e..d4f4a98 100644
10746--- a/arch/sparc/include/asm/elf_64.h
10747+++ b/arch/sparc/include/asm/elf_64.h
10748@@ -189,6 +189,13 @@ typedef struct {
10749 #define ELF_ET_DYN_BASE 0x0000010000000000UL
10750 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
10751
10752+#ifdef CONFIG_PAX_ASLR
10753+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
10754+
10755+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
10756+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
10757+#endif
10758+
10759 extern unsigned long sparc64_elf_hwcap;
10760 #define ELF_HWCAP sparc64_elf_hwcap
10761
10762diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
10763index a3890da..f6a408e 100644
10764--- a/arch/sparc/include/asm/pgalloc_32.h
10765+++ b/arch/sparc/include/asm/pgalloc_32.h
10766@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
10767 }
10768
10769 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
10770+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10771
10772 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
10773 unsigned long address)
10774diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
10775index 5e31871..13469c6 100644
10776--- a/arch/sparc/include/asm/pgalloc_64.h
10777+++ b/arch/sparc/include/asm/pgalloc_64.h
10778@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
10779 }
10780
10781 #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
10782+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
10783
10784 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
10785 {
10786@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
10787 }
10788
10789 #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
10790+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
10791
10792 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
10793 {
10794diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
10795index 59ba6f6..4518128 100644
10796--- a/arch/sparc/include/asm/pgtable.h
10797+++ b/arch/sparc/include/asm/pgtable.h
10798@@ -5,4 +5,8 @@
10799 #else
10800 #include <asm/pgtable_32.h>
10801 #endif
10802+
10803+#define ktla_ktva(addr) (addr)
10804+#define ktva_ktla(addr) (addr)
10805+
10806 #endif
10807diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
10808index b9b91ae..950b91e 100644
10809--- a/arch/sparc/include/asm/pgtable_32.h
10810+++ b/arch/sparc/include/asm/pgtable_32.h
10811@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
10812 #define PAGE_SHARED SRMMU_PAGE_SHARED
10813 #define PAGE_COPY SRMMU_PAGE_COPY
10814 #define PAGE_READONLY SRMMU_PAGE_RDONLY
10815+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
10816+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
10817+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
10818 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
10819
10820 /* Top-level page directory - dummy used by init-mm.
10821@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
10822
10823 /* xwr */
10824 #define __P000 PAGE_NONE
10825-#define __P001 PAGE_READONLY
10826-#define __P010 PAGE_COPY
10827-#define __P011 PAGE_COPY
10828+#define __P001 PAGE_READONLY_NOEXEC
10829+#define __P010 PAGE_COPY_NOEXEC
10830+#define __P011 PAGE_COPY_NOEXEC
10831 #define __P100 PAGE_READONLY
10832 #define __P101 PAGE_READONLY
10833 #define __P110 PAGE_COPY
10834 #define __P111 PAGE_COPY
10835
10836 #define __S000 PAGE_NONE
10837-#define __S001 PAGE_READONLY
10838-#define __S010 PAGE_SHARED
10839-#define __S011 PAGE_SHARED
10840+#define __S001 PAGE_READONLY_NOEXEC
10841+#define __S010 PAGE_SHARED_NOEXEC
10842+#define __S011 PAGE_SHARED_NOEXEC
10843 #define __S100 PAGE_READONLY
10844 #define __S101 PAGE_READONLY
10845 #define __S110 PAGE_SHARED
10846diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
10847index 79da178..c2eede8 100644
10848--- a/arch/sparc/include/asm/pgtsrmmu.h
10849+++ b/arch/sparc/include/asm/pgtsrmmu.h
10850@@ -115,6 +115,11 @@
10851 SRMMU_EXEC | SRMMU_REF)
10852 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
10853 SRMMU_EXEC | SRMMU_REF)
10854+
10855+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
10856+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10857+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
10858+
10859 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
10860 SRMMU_DIRTY | SRMMU_REF)
10861
10862diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
10863index 29d64b1..4272fe8 100644
10864--- a/arch/sparc/include/asm/setup.h
10865+++ b/arch/sparc/include/asm/setup.h
10866@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
10867 void handle_ld_nf(u32 insn, struct pt_regs *regs);
10868
10869 /* init_64.c */
10870-extern atomic_t dcpage_flushes;
10871-extern atomic_t dcpage_flushes_xcall;
10872+extern atomic_unchecked_t dcpage_flushes;
10873+extern atomic_unchecked_t dcpage_flushes_xcall;
10874
10875 extern int sysctl_tsb_ratio;
10876 #endif
10877diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
10878index 9689176..63c18ea 100644
10879--- a/arch/sparc/include/asm/spinlock_64.h
10880+++ b/arch/sparc/include/asm/spinlock_64.h
10881@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
10882
10883 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
10884
10885-static void inline arch_read_lock(arch_rwlock_t *lock)
10886+static inline void arch_read_lock(arch_rwlock_t *lock)
10887 {
10888 unsigned long tmp1, tmp2;
10889
10890 __asm__ __volatile__ (
10891 "1: ldsw [%2], %0\n"
10892 " brlz,pn %0, 2f\n"
10893-"4: add %0, 1, %1\n"
10894+"4: addcc %0, 1, %1\n"
10895+
10896+#ifdef CONFIG_PAX_REFCOUNT
10897+" tvs %%icc, 6\n"
10898+#endif
10899+
10900 " cas [%2], %0, %1\n"
10901 " cmp %0, %1\n"
10902 " bne,pn %%icc, 1b\n"
10903@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
10904 " .previous"
10905 : "=&r" (tmp1), "=&r" (tmp2)
10906 : "r" (lock)
10907- : "memory");
10908+ : "memory", "cc");
10909 }
10910
10911-static int inline arch_read_trylock(arch_rwlock_t *lock)
10912+static inline int arch_read_trylock(arch_rwlock_t *lock)
10913 {
10914 int tmp1, tmp2;
10915
10916@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10917 "1: ldsw [%2], %0\n"
10918 " brlz,a,pn %0, 2f\n"
10919 " mov 0, %0\n"
10920-" add %0, 1, %1\n"
10921+" addcc %0, 1, %1\n"
10922+
10923+#ifdef CONFIG_PAX_REFCOUNT
10924+" tvs %%icc, 6\n"
10925+#endif
10926+
10927 " cas [%2], %0, %1\n"
10928 " cmp %0, %1\n"
10929 " bne,pn %%icc, 1b\n"
10930@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
10931 return tmp1;
10932 }
10933
10934-static void inline arch_read_unlock(arch_rwlock_t *lock)
10935+static inline void arch_read_unlock(arch_rwlock_t *lock)
10936 {
10937 unsigned long tmp1, tmp2;
10938
10939 __asm__ __volatile__(
10940 "1: lduw [%2], %0\n"
10941-" sub %0, 1, %1\n"
10942+" subcc %0, 1, %1\n"
10943+
10944+#ifdef CONFIG_PAX_REFCOUNT
10945+" tvs %%icc, 6\n"
10946+#endif
10947+
10948 " cas [%2], %0, %1\n"
10949 " cmp %0, %1\n"
10950 " bne,pn %%xcc, 1b\n"
10951@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
10952 : "memory");
10953 }
10954
10955-static void inline arch_write_lock(arch_rwlock_t *lock)
10956+static inline void arch_write_lock(arch_rwlock_t *lock)
10957 {
10958 unsigned long mask, tmp1, tmp2;
10959
10960@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
10961 : "memory");
10962 }
10963
10964-static void inline arch_write_unlock(arch_rwlock_t *lock)
10965+static inline void arch_write_unlock(arch_rwlock_t *lock)
10966 {
10967 __asm__ __volatile__(
10968 " stw %%g0, [%0]"
10969@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
10970 : "memory");
10971 }
10972
10973-static int inline arch_write_trylock(arch_rwlock_t *lock)
10974+static inline int arch_write_trylock(arch_rwlock_t *lock)
10975 {
10976 unsigned long mask, tmp1, tmp2, result;
10977
10978diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
10979index 96efa7a..16858bf 100644
10980--- a/arch/sparc/include/asm/thread_info_32.h
10981+++ b/arch/sparc/include/asm/thread_info_32.h
10982@@ -49,6 +49,8 @@ struct thread_info {
10983 unsigned long w_saved;
10984
10985 struct restart_block restart_block;
10986+
10987+ unsigned long lowest_stack;
10988 };
10989
10990 /*
10991diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
10992index cc6275c..7eb8e21 100644
10993--- a/arch/sparc/include/asm/thread_info_64.h
10994+++ b/arch/sparc/include/asm/thread_info_64.h
10995@@ -63,6 +63,8 @@ struct thread_info {
10996 struct pt_regs *kern_una_regs;
10997 unsigned int kern_una_insn;
10998
10999+ unsigned long lowest_stack;
11000+
11001 unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
11002 __attribute__ ((aligned(64)));
11003 };
11004@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
11005 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
11006 /* flag bit 4 is available */
11007 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
11008-/* flag bit 6 is available */
11009+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
11010 #define TIF_32BIT 7 /* 32-bit binary */
11011 #define TIF_NOHZ 8 /* in adaptive nohz mode */
11012 #define TIF_SECCOMP 9 /* secure computing */
11013 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
11014 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
11015+
11016 /* NOTE: Thread flags >= 12 should be ones we have no interest
11017 * in using in assembly, else we can't use the mask as
11018 * an immediate value in instructions such as andcc.
11019@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
11020 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
11021 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
11022 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
11023+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
11024
11025 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
11026 _TIF_DO_NOTIFY_RESUME_MASK | \
11027 _TIF_NEED_RESCHED)
11028 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
11029
11030+#define _TIF_WORK_SYSCALL \
11031+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
11032+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
11033+
11034+
11035 /*
11036 * Thread-synchronous status.
11037 *
11038diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
11039index bd56c28..4b63d83 100644
11040--- a/arch/sparc/include/asm/uaccess.h
11041+++ b/arch/sparc/include/asm/uaccess.h
11042@@ -1,5 +1,6 @@
11043 #ifndef ___ASM_SPARC_UACCESS_H
11044 #define ___ASM_SPARC_UACCESS_H
11045+
11046 #if defined(__sparc__) && defined(__arch64__)
11047 #include <asm/uaccess_64.h>
11048 #else
11049diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
11050index 9634d08..f55fe4f 100644
11051--- a/arch/sparc/include/asm/uaccess_32.h
11052+++ b/arch/sparc/include/asm/uaccess_32.h
11053@@ -250,27 +250,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
11054
11055 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
11056 {
11057- if (n && __access_ok((unsigned long) to, n))
11058+ if ((long)n < 0)
11059+ return n;
11060+
11061+ if (n && __access_ok((unsigned long) to, n)) {
11062+ if (!__builtin_constant_p(n))
11063+ check_object_size(from, n, true);
11064 return __copy_user(to, (__force void __user *) from, n);
11065- else
11066+ } else
11067 return n;
11068 }
11069
11070 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
11071 {
11072+ if ((long)n < 0)
11073+ return n;
11074+
11075+ if (!__builtin_constant_p(n))
11076+ check_object_size(from, n, true);
11077+
11078 return __copy_user(to, (__force void __user *) from, n);
11079 }
11080
11081 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
11082 {
11083- if (n && __access_ok((unsigned long) from, n))
11084+ if ((long)n < 0)
11085+ return n;
11086+
11087+ if (n && __access_ok((unsigned long) from, n)) {
11088+ if (!__builtin_constant_p(n))
11089+ check_object_size(to, n, false);
11090 return __copy_user((__force void __user *) to, from, n);
11091- else
11092+ } else
11093 return n;
11094 }
11095
11096 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
11097 {
11098+ if ((long)n < 0)
11099+ return n;
11100+
11101 return __copy_user((__force void __user *) to, from, n);
11102 }
11103
11104diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
11105index c990a5e..f17b9c1 100644
11106--- a/arch/sparc/include/asm/uaccess_64.h
11107+++ b/arch/sparc/include/asm/uaccess_64.h
11108@@ -10,6 +10,7 @@
11109 #include <linux/compiler.h>
11110 #include <linux/string.h>
11111 #include <linux/thread_info.h>
11112+#include <linux/kernel.h>
11113 #include <asm/asi.h>
11114 #include <asm/spitfire.h>
11115 #include <asm-generic/uaccess-unaligned.h>
11116@@ -214,8 +215,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
11117 static inline unsigned long __must_check
11118 copy_from_user(void *to, const void __user *from, unsigned long size)
11119 {
11120- unsigned long ret = ___copy_from_user(to, from, size);
11121+ unsigned long ret;
11122
11123+ if ((long)size < 0 || size > INT_MAX)
11124+ return size;
11125+
11126+ if (!__builtin_constant_p(size))
11127+ check_object_size(to, size, false);
11128+
11129+ ret = ___copy_from_user(to, from, size);
11130 if (unlikely(ret))
11131 ret = copy_from_user_fixup(to, from, size);
11132
11133@@ -231,8 +239,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
11134 static inline unsigned long __must_check
11135 copy_to_user(void __user *to, const void *from, unsigned long size)
11136 {
11137- unsigned long ret = ___copy_to_user(to, from, size);
11138+ unsigned long ret;
11139
11140+ if ((long)size < 0 || size > INT_MAX)
11141+ return size;
11142+
11143+ if (!__builtin_constant_p(size))
11144+ check_object_size(from, size, true);
11145+
11146+ ret = ___copy_to_user(to, from, size);
11147 if (unlikely(ret))
11148 ret = copy_to_user_fixup(to, from, size);
11149 return ret;
11150diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
11151index 7cf9c6e..6206648 100644
11152--- a/arch/sparc/kernel/Makefile
11153+++ b/arch/sparc/kernel/Makefile
11154@@ -4,7 +4,7 @@
11155 #
11156
11157 asflags-y := -ansi
11158-ccflags-y := -Werror
11159+#ccflags-y := -Werror
11160
11161 extra-y := head_$(BITS).o
11162
11163diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
11164index 50e7b62..79fae35 100644
11165--- a/arch/sparc/kernel/process_32.c
11166+++ b/arch/sparc/kernel/process_32.c
11167@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
11168
11169 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
11170 r->psr, r->pc, r->npc, r->y, print_tainted());
11171- printk("PC: <%pS>\n", (void *) r->pc);
11172+ printk("PC: <%pA>\n", (void *) r->pc);
11173 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11174 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
11175 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
11176 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11177 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
11178 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
11179- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
11180+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
11181
11182 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
11183 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
11184@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11185 rw = (struct reg_window32 *) fp;
11186 pc = rw->ins[7];
11187 printk("[%08lx : ", pc);
11188- printk("%pS ] ", (void *) pc);
11189+ printk("%pA ] ", (void *) pc);
11190 fp = rw->ins[6];
11191 } while (++count < 16);
11192 printk("\n");
11193diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
11194index 0be7bf9..2b1cba8 100644
11195--- a/arch/sparc/kernel/process_64.c
11196+++ b/arch/sparc/kernel/process_64.c
11197@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
11198 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
11199 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
11200 if (regs->tstate & TSTATE_PRIV)
11201- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
11202+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
11203 }
11204
11205 void show_regs(struct pt_regs *regs)
11206@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
11207
11208 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
11209 regs->tpc, regs->tnpc, regs->y, print_tainted());
11210- printk("TPC: <%pS>\n", (void *) regs->tpc);
11211+ printk("TPC: <%pA>\n", (void *) regs->tpc);
11212 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
11213 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
11214 regs->u_regs[3]);
11215@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
11216 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
11217 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
11218 regs->u_regs[15]);
11219- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
11220+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
11221 show_regwindow(regs);
11222 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
11223 }
11224@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
11225 ((tp && tp->task) ? tp->task->pid : -1));
11226
11227 if (gp->tstate & TSTATE_PRIV) {
11228- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
11229+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
11230 (void *) gp->tpc,
11231 (void *) gp->o7,
11232 (void *) gp->i7,
11233diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
11234index 79cc0d1..ec62734 100644
11235--- a/arch/sparc/kernel/prom_common.c
11236+++ b/arch/sparc/kernel/prom_common.c
11237@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
11238
11239 unsigned int prom_early_allocated __initdata;
11240
11241-static struct of_pdt_ops prom_sparc_ops __initdata = {
11242+static struct of_pdt_ops prom_sparc_ops __initconst = {
11243 .nextprop = prom_common_nextprop,
11244 .getproplen = prom_getproplen,
11245 .getproperty = prom_getproperty,
11246diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
11247index c13c9f2..d572c34 100644
11248--- a/arch/sparc/kernel/ptrace_64.c
11249+++ b/arch/sparc/kernel/ptrace_64.c
11250@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
11251 return ret;
11252 }
11253
11254+#ifdef CONFIG_GRKERNSEC_SETXID
11255+extern void gr_delayed_cred_worker(void);
11256+#endif
11257+
11258 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
11259 {
11260 int ret = 0;
11261@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
11262 if (test_thread_flag(TIF_NOHZ))
11263 user_exit();
11264
11265+#ifdef CONFIG_GRKERNSEC_SETXID
11266+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
11267+ gr_delayed_cred_worker();
11268+#endif
11269+
11270 if (test_thread_flag(TIF_SYSCALL_TRACE))
11271 ret = tracehook_report_syscall_entry(regs);
11272
11273@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
11274 if (test_thread_flag(TIF_NOHZ))
11275 user_exit();
11276
11277+#ifdef CONFIG_GRKERNSEC_SETXID
11278+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
11279+ gr_delayed_cred_worker();
11280+#endif
11281+
11282 audit_syscall_exit(regs);
11283
11284 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
11285diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
11286index 81954ee..6cfaa98 100644
11287--- a/arch/sparc/kernel/smp_64.c
11288+++ b/arch/sparc/kernel/smp_64.c
11289@@ -887,7 +887,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
11290 return;
11291
11292 #ifdef CONFIG_DEBUG_DCFLUSH
11293- atomic_inc(&dcpage_flushes);
11294+ atomic_inc_unchecked(&dcpage_flushes);
11295 #endif
11296
11297 this_cpu = get_cpu();
11298@@ -911,7 +911,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
11299 xcall_deliver(data0, __pa(pg_addr),
11300 (u64) pg_addr, cpumask_of(cpu));
11301 #ifdef CONFIG_DEBUG_DCFLUSH
11302- atomic_inc(&dcpage_flushes_xcall);
11303+ atomic_inc_unchecked(&dcpage_flushes_xcall);
11304 #endif
11305 }
11306 }
11307@@ -930,7 +930,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
11308 preempt_disable();
11309
11310 #ifdef CONFIG_DEBUG_DCFLUSH
11311- atomic_inc(&dcpage_flushes);
11312+ atomic_inc_unchecked(&dcpage_flushes);
11313 #endif
11314 data0 = 0;
11315 pg_addr = page_address(page);
11316@@ -947,7 +947,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
11317 xcall_deliver(data0, __pa(pg_addr),
11318 (u64) pg_addr, cpu_online_mask);
11319 #ifdef CONFIG_DEBUG_DCFLUSH
11320- atomic_inc(&dcpage_flushes_xcall);
11321+ atomic_inc_unchecked(&dcpage_flushes_xcall);
11322 #endif
11323 }
11324 __local_flush_dcache_page(page);
11325diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
11326index 646988d..b88905f 100644
11327--- a/arch/sparc/kernel/sys_sparc_32.c
11328+++ b/arch/sparc/kernel/sys_sparc_32.c
11329@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11330 if (len > TASK_SIZE - PAGE_SIZE)
11331 return -ENOMEM;
11332 if (!addr)
11333- addr = TASK_UNMAPPED_BASE;
11334+ addr = current->mm->mmap_base;
11335
11336 info.flags = 0;
11337 info.length = len;
11338diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
11339index c85403d..6af95c9 100644
11340--- a/arch/sparc/kernel/sys_sparc_64.c
11341+++ b/arch/sparc/kernel/sys_sparc_64.c
11342@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11343 struct vm_area_struct * vma;
11344 unsigned long task_size = TASK_SIZE;
11345 int do_color_align;
11346+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
11347 struct vm_unmapped_area_info info;
11348
11349 if (flags & MAP_FIXED) {
11350 /* We do not accept a shared mapping if it would violate
11351 * cache aliasing constraints.
11352 */
11353- if ((flags & MAP_SHARED) &&
11354+ if ((filp || (flags & MAP_SHARED)) &&
11355 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
11356 return -EINVAL;
11357 return addr;
11358@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11359 if (filp || (flags & MAP_SHARED))
11360 do_color_align = 1;
11361
11362+#ifdef CONFIG_PAX_RANDMMAP
11363+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11364+#endif
11365+
11366 if (addr) {
11367 if (do_color_align)
11368 addr = COLOR_ALIGN(addr, pgoff);
11369@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
11370 addr = PAGE_ALIGN(addr);
11371
11372 vma = find_vma(mm, addr);
11373- if (task_size - len >= addr &&
11374- (!vma || addr + len <= vma->vm_start))
11375+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11376 return addr;
11377 }
11378
11379 info.flags = 0;
11380 info.length = len;
11381- info.low_limit = TASK_UNMAPPED_BASE;
11382+ info.low_limit = mm->mmap_base;
11383 info.high_limit = min(task_size, VA_EXCLUDE_START);
11384 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
11385 info.align_offset = pgoff << PAGE_SHIFT;
11386+ info.threadstack_offset = offset;
11387 addr = vm_unmapped_area(&info);
11388
11389 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11390 VM_BUG_ON(addr != -ENOMEM);
11391 info.low_limit = VA_EXCLUDE_END;
11392+
11393+#ifdef CONFIG_PAX_RANDMMAP
11394+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11395+ info.low_limit += mm->delta_mmap;
11396+#endif
11397+
11398 info.high_limit = task_size;
11399 addr = vm_unmapped_area(&info);
11400 }
11401@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11402 unsigned long task_size = STACK_TOP32;
11403 unsigned long addr = addr0;
11404 int do_color_align;
11405+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
11406 struct vm_unmapped_area_info info;
11407
11408 /* This should only ever run for 32-bit processes. */
11409@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11410 /* We do not accept a shared mapping if it would violate
11411 * cache aliasing constraints.
11412 */
11413- if ((flags & MAP_SHARED) &&
11414+ if ((filp || (flags & MAP_SHARED)) &&
11415 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
11416 return -EINVAL;
11417 return addr;
11418@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11419 if (filp || (flags & MAP_SHARED))
11420 do_color_align = 1;
11421
11422+#ifdef CONFIG_PAX_RANDMMAP
11423+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11424+#endif
11425+
11426 /* requesting a specific address */
11427 if (addr) {
11428 if (do_color_align)
11429@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11430 addr = PAGE_ALIGN(addr);
11431
11432 vma = find_vma(mm, addr);
11433- if (task_size - len >= addr &&
11434- (!vma || addr + len <= vma->vm_start))
11435+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11436 return addr;
11437 }
11438
11439@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11440 info.high_limit = mm->mmap_base;
11441 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
11442 info.align_offset = pgoff << PAGE_SHIFT;
11443+ info.threadstack_offset = offset;
11444 addr = vm_unmapped_area(&info);
11445
11446 /*
11447@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11448 VM_BUG_ON(addr != -ENOMEM);
11449 info.flags = 0;
11450 info.low_limit = TASK_UNMAPPED_BASE;
11451+
11452+#ifdef CONFIG_PAX_RANDMMAP
11453+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11454+ info.low_limit += mm->delta_mmap;
11455+#endif
11456+
11457 info.high_limit = STACK_TOP32;
11458 addr = vm_unmapped_area(&info);
11459 }
11460@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
11461 EXPORT_SYMBOL(get_fb_unmapped_area);
11462
11463 /* Essentially the same as PowerPC. */
11464-static unsigned long mmap_rnd(void)
11465+static unsigned long mmap_rnd(struct mm_struct *mm)
11466 {
11467 unsigned long rnd = 0UL;
11468
11469+#ifdef CONFIG_PAX_RANDMMAP
11470+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11471+#endif
11472+
11473 if (current->flags & PF_RANDOMIZE) {
11474 unsigned long val = get_random_int();
11475 if (test_thread_flag(TIF_32BIT))
11476@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
11477
11478 void arch_pick_mmap_layout(struct mm_struct *mm)
11479 {
11480- unsigned long random_factor = mmap_rnd();
11481+ unsigned long random_factor = mmap_rnd(mm);
11482 unsigned long gap;
11483
11484 /*
11485@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11486 gap == RLIM_INFINITY ||
11487 sysctl_legacy_va_layout) {
11488 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
11489+
11490+#ifdef CONFIG_PAX_RANDMMAP
11491+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11492+ mm->mmap_base += mm->delta_mmap;
11493+#endif
11494+
11495 mm->get_unmapped_area = arch_get_unmapped_area;
11496 } else {
11497 /* We know it's 32-bit */
11498@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
11499 gap = (task_size / 6 * 5);
11500
11501 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
11502+
11503+#ifdef CONFIG_PAX_RANDMMAP
11504+ if (mm->pax_flags & MF_PAX_RANDMMAP)
11505+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
11506+#endif
11507+
11508 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
11509 }
11510 }
11511diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
11512index 33a17e7..d87fb1f 100644
11513--- a/arch/sparc/kernel/syscalls.S
11514+++ b/arch/sparc/kernel/syscalls.S
11515@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
11516 #endif
11517 .align 32
11518 1: ldx [%g6 + TI_FLAGS], %l5
11519- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11520+ andcc %l5, _TIF_WORK_SYSCALL, %g0
11521 be,pt %icc, rtrap
11522 nop
11523 call syscall_trace_leave
11524@@ -184,7 +184,7 @@ linux_sparc_syscall32:
11525
11526 srl %i3, 0, %o3 ! IEU0
11527 srl %i2, 0, %o2 ! IEU0 Group
11528- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11529+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11530 bne,pn %icc, linux_syscall_trace32 ! CTI
11531 mov %i0, %l5 ! IEU1
11532 5: call %l7 ! CTI Group brk forced
11533@@ -208,7 +208,7 @@ linux_sparc_syscall:
11534
11535 mov %i3, %o3 ! IEU1
11536 mov %i4, %o4 ! IEU0 Group
11537- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11538+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11539 bne,pn %icc, linux_syscall_trace ! CTI Group
11540 mov %i0, %l5 ! IEU0
11541 2: call %l7 ! CTI Group brk forced
11542@@ -223,7 +223,7 @@ ret_sys_call:
11543
11544 cmp %o0, -ERESTART_RESTARTBLOCK
11545 bgeu,pn %xcc, 1f
11546- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
11547+ andcc %l0, _TIF_WORK_SYSCALL, %g0
11548 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
11549
11550 2:
11551diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
11552index 6fd386c5..6907d81 100644
11553--- a/arch/sparc/kernel/traps_32.c
11554+++ b/arch/sparc/kernel/traps_32.c
11555@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
11556 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
11557 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
11558
11559+extern void gr_handle_kernel_exploit(void);
11560+
11561 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11562 {
11563 static int die_counter;
11564@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11565 count++ < 30 &&
11566 (((unsigned long) rw) >= PAGE_OFFSET) &&
11567 !(((unsigned long) rw) & 0x7)) {
11568- printk("Caller[%08lx]: %pS\n", rw->ins[7],
11569+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
11570 (void *) rw->ins[7]);
11571 rw = (struct reg_window32 *)rw->ins[6];
11572 }
11573 }
11574 printk("Instruction DUMP:");
11575 instruction_dump ((unsigned long *) regs->pc);
11576- if(regs->psr & PSR_PS)
11577+ if(regs->psr & PSR_PS) {
11578+ gr_handle_kernel_exploit();
11579 do_exit(SIGKILL);
11580+ }
11581 do_exit(SIGSEGV);
11582 }
11583
11584diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
11585index 981a769..d906eda 100644
11586--- a/arch/sparc/kernel/traps_64.c
11587+++ b/arch/sparc/kernel/traps_64.c
11588@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
11589 i + 1,
11590 p->trapstack[i].tstate, p->trapstack[i].tpc,
11591 p->trapstack[i].tnpc, p->trapstack[i].tt);
11592- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
11593+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
11594 }
11595 }
11596
11597@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
11598
11599 lvl -= 0x100;
11600 if (regs->tstate & TSTATE_PRIV) {
11601+
11602+#ifdef CONFIG_PAX_REFCOUNT
11603+ if (lvl == 6)
11604+ pax_report_refcount_overflow(regs);
11605+#endif
11606+
11607 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
11608 die_if_kernel(buffer, regs);
11609 }
11610@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
11611 void bad_trap_tl1(struct pt_regs *regs, long lvl)
11612 {
11613 char buffer[32];
11614-
11615+
11616 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
11617 0, lvl, SIGTRAP) == NOTIFY_STOP)
11618 return;
11619
11620+#ifdef CONFIG_PAX_REFCOUNT
11621+ if (lvl == 6)
11622+ pax_report_refcount_overflow(regs);
11623+#endif
11624+
11625 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
11626
11627 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
11628@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
11629 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
11630 printk("%s" "ERROR(%d): ",
11631 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
11632- printk("TPC<%pS>\n", (void *) regs->tpc);
11633+ printk("TPC<%pA>\n", (void *) regs->tpc);
11634 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
11635 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
11636 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
11637@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11638 smp_processor_id(),
11639 (type & 0x1) ? 'I' : 'D',
11640 regs->tpc);
11641- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
11642+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
11643 panic("Irrecoverable Cheetah+ parity error.");
11644 }
11645
11646@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
11647 smp_processor_id(),
11648 (type & 0x1) ? 'I' : 'D',
11649 regs->tpc);
11650- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
11651+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
11652 }
11653
11654 struct sun4v_error_entry {
11655@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
11656 /*0x38*/u64 reserved_5;
11657 };
11658
11659-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11660-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11661+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
11662+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
11663
11664 static const char *sun4v_err_type_to_str(u8 type)
11665 {
11666@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
11667 }
11668
11669 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11670- int cpu, const char *pfx, atomic_t *ocnt)
11671+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
11672 {
11673 u64 *raw_ptr = (u64 *) ent;
11674 u32 attrs;
11675@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
11676
11677 show_regs(regs);
11678
11679- if ((cnt = atomic_read(ocnt)) != 0) {
11680- atomic_set(ocnt, 0);
11681+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
11682+ atomic_set_unchecked(ocnt, 0);
11683 wmb();
11684 printk("%s: Queue overflowed %d times.\n",
11685 pfx, cnt);
11686@@ -2048,7 +2059,7 @@ out:
11687 */
11688 void sun4v_resum_overflow(struct pt_regs *regs)
11689 {
11690- atomic_inc(&sun4v_resum_oflow_cnt);
11691+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
11692 }
11693
11694 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
11695@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
11696 /* XXX Actually even this can make not that much sense. Perhaps
11697 * XXX we should just pull the plug and panic directly from here?
11698 */
11699- atomic_inc(&sun4v_nonresum_oflow_cnt);
11700+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
11701 }
11702
11703 static void sun4v_tlb_error(struct pt_regs *regs)
11704@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
11705
11706 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
11707 regs->tpc, tl);
11708- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
11709+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
11710 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11711- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
11712+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
11713 (void *) regs->u_regs[UREG_I7]);
11714 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
11715 "pte[%lx] error[%lx]\n",
11716@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
11717
11718 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
11719 regs->tpc, tl);
11720- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
11721+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
11722 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
11723- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
11724+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
11725 (void *) regs->u_regs[UREG_I7]);
11726 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
11727 "pte[%lx] error[%lx]\n",
11728@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
11729 fp = (unsigned long)sf->fp + STACK_BIAS;
11730 }
11731
11732- printk(" [%016lx] %pS\n", pc, (void *) pc);
11733+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11734 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
11735 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
11736 int index = tsk->curr_ret_stack;
11737 if (tsk->ret_stack && index >= graph) {
11738 pc = tsk->ret_stack[index - graph].ret;
11739- printk(" [%016lx] %pS\n", pc, (void *) pc);
11740+ printk(" [%016lx] %pA\n", pc, (void *) pc);
11741 graph++;
11742 }
11743 }
11744@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
11745 return (struct reg_window *) (fp + STACK_BIAS);
11746 }
11747
11748+extern void gr_handle_kernel_exploit(void);
11749+
11750 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11751 {
11752 static int die_counter;
11753@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11754 while (rw &&
11755 count++ < 30 &&
11756 kstack_valid(tp, (unsigned long) rw)) {
11757- printk("Caller[%016lx]: %pS\n", rw->ins[7],
11758+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
11759 (void *) rw->ins[7]);
11760
11761 rw = kernel_stack_up(rw);
11762@@ -2427,8 +2440,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
11763 }
11764 user_instruction_dump ((unsigned int __user *) regs->tpc);
11765 }
11766- if (regs->tstate & TSTATE_PRIV)
11767+ if (regs->tstate & TSTATE_PRIV) {
11768+ gr_handle_kernel_exploit();
11769 do_exit(SIGKILL);
11770+ }
11771 do_exit(SIGSEGV);
11772 }
11773 EXPORT_SYMBOL(die_if_kernel);
11774diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
11775index 62098a8..547ab2c 100644
11776--- a/arch/sparc/kernel/unaligned_64.c
11777+++ b/arch/sparc/kernel/unaligned_64.c
11778@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
11779 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
11780
11781 if (__ratelimit(&ratelimit)) {
11782- printk("Kernel unaligned access at TPC[%lx] %pS\n",
11783+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
11784 regs->tpc, (void *) regs->tpc);
11785 }
11786 }
11787diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
11788index 3269b02..64f5231 100644
11789--- a/arch/sparc/lib/Makefile
11790+++ b/arch/sparc/lib/Makefile
11791@@ -2,7 +2,7 @@
11792 #
11793
11794 asflags-y := -ansi -DST_DIV0=0x02
11795-ccflags-y := -Werror
11796+#ccflags-y := -Werror
11797
11798 lib-$(CONFIG_SPARC32) += ashrdi3.o
11799 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
11800diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
11801index 85c233d..68500e0 100644
11802--- a/arch/sparc/lib/atomic_64.S
11803+++ b/arch/sparc/lib/atomic_64.S
11804@@ -17,7 +17,12 @@
11805 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11806 BACKOFF_SETUP(%o2)
11807 1: lduw [%o1], %g1
11808- add %g1, %o0, %g7
11809+ addcc %g1, %o0, %g7
11810+
11811+#ifdef CONFIG_PAX_REFCOUNT
11812+ tvs %icc, 6
11813+#endif
11814+
11815 cas [%o1], %g1, %g7
11816 cmp %g1, %g7
11817 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11818@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
11819 2: BACKOFF_SPIN(%o2, %o3, 1b)
11820 ENDPROC(atomic_add)
11821
11822+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11823+ BACKOFF_SETUP(%o2)
11824+1: lduw [%o1], %g1
11825+ add %g1, %o0, %g7
11826+ cas [%o1], %g1, %g7
11827+ cmp %g1, %g7
11828+ bne,pn %icc, 2f
11829+ nop
11830+ retl
11831+ nop
11832+2: BACKOFF_SPIN(%o2, %o3, 1b)
11833+ENDPROC(atomic_add_unchecked)
11834+
11835 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11836 BACKOFF_SETUP(%o2)
11837 1: lduw [%o1], %g1
11838- sub %g1, %o0, %g7
11839+ subcc %g1, %o0, %g7
11840+
11841+#ifdef CONFIG_PAX_REFCOUNT
11842+ tvs %icc, 6
11843+#endif
11844+
11845 cas [%o1], %g1, %g7
11846 cmp %g1, %g7
11847 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11848@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11849 2: BACKOFF_SPIN(%o2, %o3, 1b)
11850 ENDPROC(atomic_sub)
11851
11852+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11853+ BACKOFF_SETUP(%o2)
11854+1: lduw [%o1], %g1
11855+ sub %g1, %o0, %g7
11856+ cas [%o1], %g1, %g7
11857+ cmp %g1, %g7
11858+ bne,pn %icc, 2f
11859+ nop
11860+ retl
11861+ nop
11862+2: BACKOFF_SPIN(%o2, %o3, 1b)
11863+ENDPROC(atomic_sub_unchecked)
11864+
11865 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11866 BACKOFF_SETUP(%o2)
11867 1: lduw [%o1], %g1
11868- add %g1, %o0, %g7
11869+ addcc %g1, %o0, %g7
11870+
11871+#ifdef CONFIG_PAX_REFCOUNT
11872+ tvs %icc, 6
11873+#endif
11874+
11875 cas [%o1], %g1, %g7
11876 cmp %g1, %g7
11877 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11878@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11879 2: BACKOFF_SPIN(%o2, %o3, 1b)
11880 ENDPROC(atomic_add_ret)
11881
11882+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11883+ BACKOFF_SETUP(%o2)
11884+1: lduw [%o1], %g1
11885+ addcc %g1, %o0, %g7
11886+ cas [%o1], %g1, %g7
11887+ cmp %g1, %g7
11888+ bne,pn %icc, 2f
11889+ add %g7, %o0, %g7
11890+ sra %g7, 0, %o0
11891+ retl
11892+ nop
11893+2: BACKOFF_SPIN(%o2, %o3, 1b)
11894+ENDPROC(atomic_add_ret_unchecked)
11895+
11896 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
11897 BACKOFF_SETUP(%o2)
11898 1: lduw [%o1], %g1
11899- sub %g1, %o0, %g7
11900+ subcc %g1, %o0, %g7
11901+
11902+#ifdef CONFIG_PAX_REFCOUNT
11903+ tvs %icc, 6
11904+#endif
11905+
11906 cas [%o1], %g1, %g7
11907 cmp %g1, %g7
11908 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
11909@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
11910 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11911 BACKOFF_SETUP(%o2)
11912 1: ldx [%o1], %g1
11913- add %g1, %o0, %g7
11914+ addcc %g1, %o0, %g7
11915+
11916+#ifdef CONFIG_PAX_REFCOUNT
11917+ tvs %xcc, 6
11918+#endif
11919+
11920 casx [%o1], %g1, %g7
11921 cmp %g1, %g7
11922 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11923@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
11924 2: BACKOFF_SPIN(%o2, %o3, 1b)
11925 ENDPROC(atomic64_add)
11926
11927+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11928+ BACKOFF_SETUP(%o2)
11929+1: ldx [%o1], %g1
11930+ addcc %g1, %o0, %g7
11931+ casx [%o1], %g1, %g7
11932+ cmp %g1, %g7
11933+ bne,pn %xcc, 2f
11934+ nop
11935+ retl
11936+ nop
11937+2: BACKOFF_SPIN(%o2, %o3, 1b)
11938+ENDPROC(atomic64_add_unchecked)
11939+
11940 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11941 BACKOFF_SETUP(%o2)
11942 1: ldx [%o1], %g1
11943- sub %g1, %o0, %g7
11944+ subcc %g1, %o0, %g7
11945+
11946+#ifdef CONFIG_PAX_REFCOUNT
11947+ tvs %xcc, 6
11948+#endif
11949+
11950 casx [%o1], %g1, %g7
11951 cmp %g1, %g7
11952 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11953@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
11954 2: BACKOFF_SPIN(%o2, %o3, 1b)
11955 ENDPROC(atomic64_sub)
11956
11957+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
11958+ BACKOFF_SETUP(%o2)
11959+1: ldx [%o1], %g1
11960+ subcc %g1, %o0, %g7
11961+ casx [%o1], %g1, %g7
11962+ cmp %g1, %g7
11963+ bne,pn %xcc, 2f
11964+ nop
11965+ retl
11966+ nop
11967+2: BACKOFF_SPIN(%o2, %o3, 1b)
11968+ENDPROC(atomic64_sub_unchecked)
11969+
11970 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11971 BACKOFF_SETUP(%o2)
11972 1: ldx [%o1], %g1
11973- add %g1, %o0, %g7
11974+ addcc %g1, %o0, %g7
11975+
11976+#ifdef CONFIG_PAX_REFCOUNT
11977+ tvs %xcc, 6
11978+#endif
11979+
11980 casx [%o1], %g1, %g7
11981 cmp %g1, %g7
11982 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
11983@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
11984 2: BACKOFF_SPIN(%o2, %o3, 1b)
11985 ENDPROC(atomic64_add_ret)
11986
11987+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
11988+ BACKOFF_SETUP(%o2)
11989+1: ldx [%o1], %g1
11990+ addcc %g1, %o0, %g7
11991+ casx [%o1], %g1, %g7
11992+ cmp %g1, %g7
11993+ bne,pn %xcc, 2f
11994+ add %g7, %o0, %g7
11995+ mov %g7, %o0
11996+ retl
11997+ nop
11998+2: BACKOFF_SPIN(%o2, %o3, 1b)
11999+ENDPROC(atomic64_add_ret_unchecked)
12000+
12001 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
12002 BACKOFF_SETUP(%o2)
12003 1: ldx [%o1], %g1
12004- sub %g1, %o0, %g7
12005+ subcc %g1, %o0, %g7
12006+
12007+#ifdef CONFIG_PAX_REFCOUNT
12008+ tvs %xcc, 6
12009+#endif
12010+
12011 casx [%o1], %g1, %g7
12012 cmp %g1, %g7
12013 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
12014diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
12015index 323335b..ed85ea2 100644
12016--- a/arch/sparc/lib/ksyms.c
12017+++ b/arch/sparc/lib/ksyms.c
12018@@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
12019
12020 /* Atomic counter implementation. */
12021 EXPORT_SYMBOL(atomic_add);
12022+EXPORT_SYMBOL(atomic_add_unchecked);
12023 EXPORT_SYMBOL(atomic_add_ret);
12024+EXPORT_SYMBOL(atomic_add_ret_unchecked);
12025 EXPORT_SYMBOL(atomic_sub);
12026+EXPORT_SYMBOL(atomic_sub_unchecked);
12027 EXPORT_SYMBOL(atomic_sub_ret);
12028 EXPORT_SYMBOL(atomic64_add);
12029+EXPORT_SYMBOL(atomic64_add_unchecked);
12030 EXPORT_SYMBOL(atomic64_add_ret);
12031+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
12032 EXPORT_SYMBOL(atomic64_sub);
12033+EXPORT_SYMBOL(atomic64_sub_unchecked);
12034 EXPORT_SYMBOL(atomic64_sub_ret);
12035 EXPORT_SYMBOL(atomic64_dec_if_positive);
12036
12037diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
12038index 30c3ecc..736f015 100644
12039--- a/arch/sparc/mm/Makefile
12040+++ b/arch/sparc/mm/Makefile
12041@@ -2,7 +2,7 @@
12042 #
12043
12044 asflags-y := -ansi
12045-ccflags-y := -Werror
12046+#ccflags-y := -Werror
12047
12048 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
12049 obj-y += fault_$(BITS).o
12050diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
12051index 908e8c1..1524793 100644
12052--- a/arch/sparc/mm/fault_32.c
12053+++ b/arch/sparc/mm/fault_32.c
12054@@ -21,6 +21,9 @@
12055 #include <linux/perf_event.h>
12056 #include <linux/interrupt.h>
12057 #include <linux/kdebug.h>
12058+#include <linux/slab.h>
12059+#include <linux/pagemap.h>
12060+#include <linux/compiler.h>
12061
12062 #include <asm/page.h>
12063 #include <asm/pgtable.h>
12064@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
12065 return safe_compute_effective_address(regs, insn);
12066 }
12067
12068+#ifdef CONFIG_PAX_PAGEEXEC
12069+#ifdef CONFIG_PAX_DLRESOLVE
12070+static void pax_emuplt_close(struct vm_area_struct *vma)
12071+{
12072+ vma->vm_mm->call_dl_resolve = 0UL;
12073+}
12074+
12075+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
12076+{
12077+ unsigned int *kaddr;
12078+
12079+ vmf->page = alloc_page(GFP_HIGHUSER);
12080+ if (!vmf->page)
12081+ return VM_FAULT_OOM;
12082+
12083+ kaddr = kmap(vmf->page);
12084+ memset(kaddr, 0, PAGE_SIZE);
12085+ kaddr[0] = 0x9DE3BFA8U; /* save */
12086+ flush_dcache_page(vmf->page);
12087+ kunmap(vmf->page);
12088+ return VM_FAULT_MAJOR;
12089+}
12090+
12091+static const struct vm_operations_struct pax_vm_ops = {
12092+ .close = pax_emuplt_close,
12093+ .fault = pax_emuplt_fault
12094+};
12095+
12096+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
12097+{
12098+ int ret;
12099+
12100+ INIT_LIST_HEAD(&vma->anon_vma_chain);
12101+ vma->vm_mm = current->mm;
12102+ vma->vm_start = addr;
12103+ vma->vm_end = addr + PAGE_SIZE;
12104+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
12105+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
12106+ vma->vm_ops = &pax_vm_ops;
12107+
12108+ ret = insert_vm_struct(current->mm, vma);
12109+ if (ret)
12110+ return ret;
12111+
12112+ ++current->mm->total_vm;
12113+ return 0;
12114+}
12115+#endif
12116+
12117+/*
12118+ * PaX: decide what to do with offenders (regs->pc = fault address)
12119+ *
12120+ * returns 1 when task should be killed
12121+ * 2 when patched PLT trampoline was detected
12122+ * 3 when unpatched PLT trampoline was detected
12123+ */
12124+static int pax_handle_fetch_fault(struct pt_regs *regs)
12125+{
12126+
12127+#ifdef CONFIG_PAX_EMUPLT
12128+ int err;
12129+
12130+ do { /* PaX: patched PLT emulation #1 */
12131+ unsigned int sethi1, sethi2, jmpl;
12132+
12133+ err = get_user(sethi1, (unsigned int *)regs->pc);
12134+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
12135+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
12136+
12137+ if (err)
12138+ break;
12139+
12140+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
12141+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
12142+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
12143+ {
12144+ unsigned int addr;
12145+
12146+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
12147+ addr = regs->u_regs[UREG_G1];
12148+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12149+ regs->pc = addr;
12150+ regs->npc = addr+4;
12151+ return 2;
12152+ }
12153+ } while (0);
12154+
12155+ do { /* PaX: patched PLT emulation #2 */
12156+ unsigned int ba;
12157+
12158+ err = get_user(ba, (unsigned int *)regs->pc);
12159+
12160+ if (err)
12161+ break;
12162+
12163+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12164+ unsigned int addr;
12165+
12166+ if ((ba & 0xFFC00000U) == 0x30800000U)
12167+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
12168+ else
12169+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12170+ regs->pc = addr;
12171+ regs->npc = addr+4;
12172+ return 2;
12173+ }
12174+ } while (0);
12175+
12176+ do { /* PaX: patched PLT emulation #3 */
12177+ unsigned int sethi, bajmpl, nop;
12178+
12179+ err = get_user(sethi, (unsigned int *)regs->pc);
12180+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
12181+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
12182+
12183+ if (err)
12184+ break;
12185+
12186+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12187+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12188+ nop == 0x01000000U)
12189+ {
12190+ unsigned int addr;
12191+
12192+ addr = (sethi & 0x003FFFFFU) << 10;
12193+ regs->u_regs[UREG_G1] = addr;
12194+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12195+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12196+ else
12197+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12198+ regs->pc = addr;
12199+ regs->npc = addr+4;
12200+ return 2;
12201+ }
12202+ } while (0);
12203+
12204+ do { /* PaX: unpatched PLT emulation step 1 */
12205+ unsigned int sethi, ba, nop;
12206+
12207+ err = get_user(sethi, (unsigned int *)regs->pc);
12208+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
12209+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
12210+
12211+ if (err)
12212+ break;
12213+
12214+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12215+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12216+ nop == 0x01000000U)
12217+ {
12218+ unsigned int addr, save, call;
12219+
12220+ if ((ba & 0xFFC00000U) == 0x30800000U)
12221+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
12222+ else
12223+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
12224+
12225+ err = get_user(save, (unsigned int *)addr);
12226+ err |= get_user(call, (unsigned int *)(addr+4));
12227+ err |= get_user(nop, (unsigned int *)(addr+8));
12228+ if (err)
12229+ break;
12230+
12231+#ifdef CONFIG_PAX_DLRESOLVE
12232+ if (save == 0x9DE3BFA8U &&
12233+ (call & 0xC0000000U) == 0x40000000U &&
12234+ nop == 0x01000000U)
12235+ {
12236+ struct vm_area_struct *vma;
12237+ unsigned long call_dl_resolve;
12238+
12239+ down_read(&current->mm->mmap_sem);
12240+ call_dl_resolve = current->mm->call_dl_resolve;
12241+ up_read(&current->mm->mmap_sem);
12242+ if (likely(call_dl_resolve))
12243+ goto emulate;
12244+
12245+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12246+
12247+ down_write(&current->mm->mmap_sem);
12248+ if (current->mm->call_dl_resolve) {
12249+ call_dl_resolve = current->mm->call_dl_resolve;
12250+ up_write(&current->mm->mmap_sem);
12251+ if (vma)
12252+ kmem_cache_free(vm_area_cachep, vma);
12253+ goto emulate;
12254+ }
12255+
12256+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12257+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12258+ up_write(&current->mm->mmap_sem);
12259+ if (vma)
12260+ kmem_cache_free(vm_area_cachep, vma);
12261+ return 1;
12262+ }
12263+
12264+ if (pax_insert_vma(vma, call_dl_resolve)) {
12265+ up_write(&current->mm->mmap_sem);
12266+ kmem_cache_free(vm_area_cachep, vma);
12267+ return 1;
12268+ }
12269+
12270+ current->mm->call_dl_resolve = call_dl_resolve;
12271+ up_write(&current->mm->mmap_sem);
12272+
12273+emulate:
12274+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12275+ regs->pc = call_dl_resolve;
12276+ regs->npc = addr+4;
12277+ return 3;
12278+ }
12279+#endif
12280+
12281+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12282+ if ((save & 0xFFC00000U) == 0x05000000U &&
12283+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12284+ nop == 0x01000000U)
12285+ {
12286+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12287+ regs->u_regs[UREG_G2] = addr + 4;
12288+ addr = (save & 0x003FFFFFU) << 10;
12289+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
12290+ regs->pc = addr;
12291+ regs->npc = addr+4;
12292+ return 3;
12293+ }
12294+ }
12295+ } while (0);
12296+
12297+ do { /* PaX: unpatched PLT emulation step 2 */
12298+ unsigned int save, call, nop;
12299+
12300+ err = get_user(save, (unsigned int *)(regs->pc-4));
12301+ err |= get_user(call, (unsigned int *)regs->pc);
12302+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
12303+ if (err)
12304+ break;
12305+
12306+ if (save == 0x9DE3BFA8U &&
12307+ (call & 0xC0000000U) == 0x40000000U &&
12308+ nop == 0x01000000U)
12309+ {
12310+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
12311+
12312+ regs->u_regs[UREG_RETPC] = regs->pc;
12313+ regs->pc = dl_resolve;
12314+ regs->npc = dl_resolve+4;
12315+ return 3;
12316+ }
12317+ } while (0);
12318+#endif
12319+
12320+ return 1;
12321+}
12322+
12323+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12324+{
12325+ unsigned long i;
12326+
12327+ printk(KERN_ERR "PAX: bytes at PC: ");
12328+ for (i = 0; i < 8; i++) {
12329+ unsigned int c;
12330+ if (get_user(c, (unsigned int *)pc+i))
12331+ printk(KERN_CONT "???????? ");
12332+ else
12333+ printk(KERN_CONT "%08x ", c);
12334+ }
12335+ printk("\n");
12336+}
12337+#endif
12338+
12339 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
12340 int text_fault)
12341 {
12342@@ -226,6 +500,24 @@ good_area:
12343 if (!(vma->vm_flags & VM_WRITE))
12344 goto bad_area;
12345 } else {
12346+
12347+#ifdef CONFIG_PAX_PAGEEXEC
12348+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
12349+ up_read(&mm->mmap_sem);
12350+ switch (pax_handle_fetch_fault(regs)) {
12351+
12352+#ifdef CONFIG_PAX_EMUPLT
12353+ case 2:
12354+ case 3:
12355+ return;
12356+#endif
12357+
12358+ }
12359+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
12360+ do_group_exit(SIGKILL);
12361+ }
12362+#endif
12363+
12364 /* Allow reads even for write-only mappings */
12365 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
12366 goto bad_area;
12367diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
12368index 18fcd71..e4fe821 100644
12369--- a/arch/sparc/mm/fault_64.c
12370+++ b/arch/sparc/mm/fault_64.c
12371@@ -22,6 +22,9 @@
12372 #include <linux/kdebug.h>
12373 #include <linux/percpu.h>
12374 #include <linux/context_tracking.h>
12375+#include <linux/slab.h>
12376+#include <linux/pagemap.h>
12377+#include <linux/compiler.h>
12378
12379 #include <asm/page.h>
12380 #include <asm/pgtable.h>
12381@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
12382 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
12383 regs->tpc);
12384 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
12385- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
12386+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
12387 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
12388 dump_stack();
12389 unhandled_fault(regs->tpc, current, regs);
12390@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
12391 show_regs(regs);
12392 }
12393
12394+#ifdef CONFIG_PAX_PAGEEXEC
12395+#ifdef CONFIG_PAX_DLRESOLVE
12396+static void pax_emuplt_close(struct vm_area_struct *vma)
12397+{
12398+ vma->vm_mm->call_dl_resolve = 0UL;
12399+}
12400+
12401+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
12402+{
12403+ unsigned int *kaddr;
12404+
12405+ vmf->page = alloc_page(GFP_HIGHUSER);
12406+ if (!vmf->page)
12407+ return VM_FAULT_OOM;
12408+
12409+ kaddr = kmap(vmf->page);
12410+ memset(kaddr, 0, PAGE_SIZE);
12411+ kaddr[0] = 0x9DE3BFA8U; /* save */
12412+ flush_dcache_page(vmf->page);
12413+ kunmap(vmf->page);
12414+ return VM_FAULT_MAJOR;
12415+}
12416+
12417+static const struct vm_operations_struct pax_vm_ops = {
12418+ .close = pax_emuplt_close,
12419+ .fault = pax_emuplt_fault
12420+};
12421+
12422+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
12423+{
12424+ int ret;
12425+
12426+ INIT_LIST_HEAD(&vma->anon_vma_chain);
12427+ vma->vm_mm = current->mm;
12428+ vma->vm_start = addr;
12429+ vma->vm_end = addr + PAGE_SIZE;
12430+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
12431+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
12432+ vma->vm_ops = &pax_vm_ops;
12433+
12434+ ret = insert_vm_struct(current->mm, vma);
12435+ if (ret)
12436+ return ret;
12437+
12438+ ++current->mm->total_vm;
12439+ return 0;
12440+}
12441+#endif
12442+
12443+/*
12444+ * PaX: decide what to do with offenders (regs->tpc = fault address)
12445+ *
12446+ * returns 1 when task should be killed
12447+ * 2 when patched PLT trampoline was detected
12448+ * 3 when unpatched PLT trampoline was detected
12449+ */
12450+static int pax_handle_fetch_fault(struct pt_regs *regs)
12451+{
12452+
12453+#ifdef CONFIG_PAX_EMUPLT
12454+ int err;
12455+
12456+ do { /* PaX: patched PLT emulation #1 */
12457+ unsigned int sethi1, sethi2, jmpl;
12458+
12459+ err = get_user(sethi1, (unsigned int *)regs->tpc);
12460+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
12461+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
12462+
12463+ if (err)
12464+ break;
12465+
12466+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
12467+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
12468+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
12469+ {
12470+ unsigned long addr;
12471+
12472+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
12473+ addr = regs->u_regs[UREG_G1];
12474+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12475+
12476+ if (test_thread_flag(TIF_32BIT))
12477+ addr &= 0xFFFFFFFFUL;
12478+
12479+ regs->tpc = addr;
12480+ regs->tnpc = addr+4;
12481+ return 2;
12482+ }
12483+ } while (0);
12484+
12485+ do { /* PaX: patched PLT emulation #2 */
12486+ unsigned int ba;
12487+
12488+ err = get_user(ba, (unsigned int *)regs->tpc);
12489+
12490+ if (err)
12491+ break;
12492+
12493+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
12494+ unsigned long addr;
12495+
12496+ if ((ba & 0xFFC00000U) == 0x30800000U)
12497+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12498+ else
12499+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12500+
12501+ if (test_thread_flag(TIF_32BIT))
12502+ addr &= 0xFFFFFFFFUL;
12503+
12504+ regs->tpc = addr;
12505+ regs->tnpc = addr+4;
12506+ return 2;
12507+ }
12508+ } while (0);
12509+
12510+ do { /* PaX: patched PLT emulation #3 */
12511+ unsigned int sethi, bajmpl, nop;
12512+
12513+ err = get_user(sethi, (unsigned int *)regs->tpc);
12514+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
12515+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12516+
12517+ if (err)
12518+ break;
12519+
12520+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12521+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
12522+ nop == 0x01000000U)
12523+ {
12524+ unsigned long addr;
12525+
12526+ addr = (sethi & 0x003FFFFFU) << 10;
12527+ regs->u_regs[UREG_G1] = addr;
12528+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
12529+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12530+ else
12531+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12532+
12533+ if (test_thread_flag(TIF_32BIT))
12534+ addr &= 0xFFFFFFFFUL;
12535+
12536+ regs->tpc = addr;
12537+ regs->tnpc = addr+4;
12538+ return 2;
12539+ }
12540+ } while (0);
12541+
12542+ do { /* PaX: patched PLT emulation #4 */
12543+ unsigned int sethi, mov1, call, mov2;
12544+
12545+ err = get_user(sethi, (unsigned int *)regs->tpc);
12546+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
12547+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
12548+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
12549+
12550+ if (err)
12551+ break;
12552+
12553+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12554+ mov1 == 0x8210000FU &&
12555+ (call & 0xC0000000U) == 0x40000000U &&
12556+ mov2 == 0x9E100001U)
12557+ {
12558+ unsigned long addr;
12559+
12560+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
12561+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12562+
12563+ if (test_thread_flag(TIF_32BIT))
12564+ addr &= 0xFFFFFFFFUL;
12565+
12566+ regs->tpc = addr;
12567+ regs->tnpc = addr+4;
12568+ return 2;
12569+ }
12570+ } while (0);
12571+
12572+ do { /* PaX: patched PLT emulation #5 */
12573+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
12574+
12575+ err = get_user(sethi, (unsigned int *)regs->tpc);
12576+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12577+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12578+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
12579+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
12580+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
12581+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
12582+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
12583+
12584+ if (err)
12585+ break;
12586+
12587+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12588+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12589+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12590+ (or1 & 0xFFFFE000U) == 0x82106000U &&
12591+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12592+ sllx == 0x83287020U &&
12593+ jmpl == 0x81C04005U &&
12594+ nop == 0x01000000U)
12595+ {
12596+ unsigned long addr;
12597+
12598+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12599+ regs->u_regs[UREG_G1] <<= 32;
12600+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12601+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12602+ regs->tpc = addr;
12603+ regs->tnpc = addr+4;
12604+ return 2;
12605+ }
12606+ } while (0);
12607+
12608+ do { /* PaX: patched PLT emulation #6 */
12609+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
12610+
12611+ err = get_user(sethi, (unsigned int *)regs->tpc);
12612+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
12613+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
12614+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
12615+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
12616+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
12617+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
12618+
12619+ if (err)
12620+ break;
12621+
12622+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12623+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
12624+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12625+ sllx == 0x83287020U &&
12626+ (or & 0xFFFFE000U) == 0x8A116000U &&
12627+ jmpl == 0x81C04005U &&
12628+ nop == 0x01000000U)
12629+ {
12630+ unsigned long addr;
12631+
12632+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
12633+ regs->u_regs[UREG_G1] <<= 32;
12634+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
12635+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
12636+ regs->tpc = addr;
12637+ regs->tnpc = addr+4;
12638+ return 2;
12639+ }
12640+ } while (0);
12641+
12642+ do { /* PaX: unpatched PLT emulation step 1 */
12643+ unsigned int sethi, ba, nop;
12644+
12645+ err = get_user(sethi, (unsigned int *)regs->tpc);
12646+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12647+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12648+
12649+ if (err)
12650+ break;
12651+
12652+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12653+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
12654+ nop == 0x01000000U)
12655+ {
12656+ unsigned long addr;
12657+ unsigned int save, call;
12658+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
12659+
12660+ if ((ba & 0xFFC00000U) == 0x30800000U)
12661+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
12662+ else
12663+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12664+
12665+ if (test_thread_flag(TIF_32BIT))
12666+ addr &= 0xFFFFFFFFUL;
12667+
12668+ err = get_user(save, (unsigned int *)addr);
12669+ err |= get_user(call, (unsigned int *)(addr+4));
12670+ err |= get_user(nop, (unsigned int *)(addr+8));
12671+ if (err)
12672+ break;
12673+
12674+#ifdef CONFIG_PAX_DLRESOLVE
12675+ if (save == 0x9DE3BFA8U &&
12676+ (call & 0xC0000000U) == 0x40000000U &&
12677+ nop == 0x01000000U)
12678+ {
12679+ struct vm_area_struct *vma;
12680+ unsigned long call_dl_resolve;
12681+
12682+ down_read(&current->mm->mmap_sem);
12683+ call_dl_resolve = current->mm->call_dl_resolve;
12684+ up_read(&current->mm->mmap_sem);
12685+ if (likely(call_dl_resolve))
12686+ goto emulate;
12687+
12688+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
12689+
12690+ down_write(&current->mm->mmap_sem);
12691+ if (current->mm->call_dl_resolve) {
12692+ call_dl_resolve = current->mm->call_dl_resolve;
12693+ up_write(&current->mm->mmap_sem);
12694+ if (vma)
12695+ kmem_cache_free(vm_area_cachep, vma);
12696+ goto emulate;
12697+ }
12698+
12699+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
12700+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
12701+ up_write(&current->mm->mmap_sem);
12702+ if (vma)
12703+ kmem_cache_free(vm_area_cachep, vma);
12704+ return 1;
12705+ }
12706+
12707+ if (pax_insert_vma(vma, call_dl_resolve)) {
12708+ up_write(&current->mm->mmap_sem);
12709+ kmem_cache_free(vm_area_cachep, vma);
12710+ return 1;
12711+ }
12712+
12713+ current->mm->call_dl_resolve = call_dl_resolve;
12714+ up_write(&current->mm->mmap_sem);
12715+
12716+emulate:
12717+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12718+ regs->tpc = call_dl_resolve;
12719+ regs->tnpc = addr+4;
12720+ return 3;
12721+ }
12722+#endif
12723+
12724+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
12725+ if ((save & 0xFFC00000U) == 0x05000000U &&
12726+ (call & 0xFFFFE000U) == 0x85C0A000U &&
12727+ nop == 0x01000000U)
12728+ {
12729+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12730+ regs->u_regs[UREG_G2] = addr + 4;
12731+ addr = (save & 0x003FFFFFU) << 10;
12732+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
12733+
12734+ if (test_thread_flag(TIF_32BIT))
12735+ addr &= 0xFFFFFFFFUL;
12736+
12737+ regs->tpc = addr;
12738+ regs->tnpc = addr+4;
12739+ return 3;
12740+ }
12741+
12742+ /* PaX: 64-bit PLT stub */
12743+ err = get_user(sethi1, (unsigned int *)addr);
12744+ err |= get_user(sethi2, (unsigned int *)(addr+4));
12745+ err |= get_user(or1, (unsigned int *)(addr+8));
12746+ err |= get_user(or2, (unsigned int *)(addr+12));
12747+ err |= get_user(sllx, (unsigned int *)(addr+16));
12748+ err |= get_user(add, (unsigned int *)(addr+20));
12749+ err |= get_user(jmpl, (unsigned int *)(addr+24));
12750+ err |= get_user(nop, (unsigned int *)(addr+28));
12751+ if (err)
12752+ break;
12753+
12754+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
12755+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
12756+ (or1 & 0xFFFFE000U) == 0x88112000U &&
12757+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
12758+ sllx == 0x89293020U &&
12759+ add == 0x8A010005U &&
12760+ jmpl == 0x89C14000U &&
12761+ nop == 0x01000000U)
12762+ {
12763+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
12764+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
12765+ regs->u_regs[UREG_G4] <<= 32;
12766+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
12767+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
12768+ regs->u_regs[UREG_G4] = addr + 24;
12769+ addr = regs->u_regs[UREG_G5];
12770+ regs->tpc = addr;
12771+ regs->tnpc = addr+4;
12772+ return 3;
12773+ }
12774+ }
12775+ } while (0);
12776+
12777+#ifdef CONFIG_PAX_DLRESOLVE
12778+ do { /* PaX: unpatched PLT emulation step 2 */
12779+ unsigned int save, call, nop;
12780+
12781+ err = get_user(save, (unsigned int *)(regs->tpc-4));
12782+ err |= get_user(call, (unsigned int *)regs->tpc);
12783+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
12784+ if (err)
12785+ break;
12786+
12787+ if (save == 0x9DE3BFA8U &&
12788+ (call & 0xC0000000U) == 0x40000000U &&
12789+ nop == 0x01000000U)
12790+ {
12791+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
12792+
12793+ if (test_thread_flag(TIF_32BIT))
12794+ dl_resolve &= 0xFFFFFFFFUL;
12795+
12796+ regs->u_regs[UREG_RETPC] = regs->tpc;
12797+ regs->tpc = dl_resolve;
12798+ regs->tnpc = dl_resolve+4;
12799+ return 3;
12800+ }
12801+ } while (0);
12802+#endif
12803+
12804+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
12805+ unsigned int sethi, ba, nop;
12806+
12807+ err = get_user(sethi, (unsigned int *)regs->tpc);
12808+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
12809+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
12810+
12811+ if (err)
12812+ break;
12813+
12814+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
12815+ (ba & 0xFFF00000U) == 0x30600000U &&
12816+ nop == 0x01000000U)
12817+ {
12818+ unsigned long addr;
12819+
12820+ addr = (sethi & 0x003FFFFFU) << 10;
12821+ regs->u_regs[UREG_G1] = addr;
12822+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
12823+
12824+ if (test_thread_flag(TIF_32BIT))
12825+ addr &= 0xFFFFFFFFUL;
12826+
12827+ regs->tpc = addr;
12828+ regs->tnpc = addr+4;
12829+ return 2;
12830+ }
12831+ } while (0);
12832+
12833+#endif
12834+
12835+ return 1;
12836+}
12837+
12838+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
12839+{
12840+ unsigned long i;
12841+
12842+ printk(KERN_ERR "PAX: bytes at PC: ");
12843+ for (i = 0; i < 8; i++) {
12844+ unsigned int c;
12845+ if (get_user(c, (unsigned int *)pc+i))
12846+ printk(KERN_CONT "???????? ");
12847+ else
12848+ printk(KERN_CONT "%08x ", c);
12849+ }
12850+ printk("\n");
12851+}
12852+#endif
12853+
12854 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
12855 {
12856 enum ctx_state prev_state = exception_enter();
12857@@ -353,6 +816,29 @@ retry:
12858 if (!vma)
12859 goto bad_area;
12860
12861+#ifdef CONFIG_PAX_PAGEEXEC
12862+ /* PaX: detect ITLB misses on non-exec pages */
12863+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
12864+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
12865+ {
12866+ if (address != regs->tpc)
12867+ goto good_area;
12868+
12869+ up_read(&mm->mmap_sem);
12870+ switch (pax_handle_fetch_fault(regs)) {
12871+
12872+#ifdef CONFIG_PAX_EMUPLT
12873+ case 2:
12874+ case 3:
12875+ return;
12876+#endif
12877+
12878+ }
12879+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
12880+ do_group_exit(SIGKILL);
12881+ }
12882+#endif
12883+
12884 /* Pure DTLB misses do not tell us whether the fault causing
12885 * load/store/atomic was a write or not, it only says that there
12886 * was no match. So in such a case we (carefully) read the
12887diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
12888index d329537..2c3746a 100644
12889--- a/arch/sparc/mm/hugetlbpage.c
12890+++ b/arch/sparc/mm/hugetlbpage.c
12891@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12892 unsigned long addr,
12893 unsigned long len,
12894 unsigned long pgoff,
12895- unsigned long flags)
12896+ unsigned long flags,
12897+ unsigned long offset)
12898 {
12899+ struct mm_struct *mm = current->mm;
12900 unsigned long task_size = TASK_SIZE;
12901 struct vm_unmapped_area_info info;
12902
12903@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
12904
12905 info.flags = 0;
12906 info.length = len;
12907- info.low_limit = TASK_UNMAPPED_BASE;
12908+ info.low_limit = mm->mmap_base;
12909 info.high_limit = min(task_size, VA_EXCLUDE_START);
12910 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12911 info.align_offset = 0;
12912+ info.threadstack_offset = offset;
12913 addr = vm_unmapped_area(&info);
12914
12915 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
12916 VM_BUG_ON(addr != -ENOMEM);
12917 info.low_limit = VA_EXCLUDE_END;
12918+
12919+#ifdef CONFIG_PAX_RANDMMAP
12920+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12921+ info.low_limit += mm->delta_mmap;
12922+#endif
12923+
12924 info.high_limit = task_size;
12925 addr = vm_unmapped_area(&info);
12926 }
12927@@ -55,7 +64,8 @@ static unsigned long
12928 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12929 const unsigned long len,
12930 const unsigned long pgoff,
12931- const unsigned long flags)
12932+ const unsigned long flags,
12933+ const unsigned long offset)
12934 {
12935 struct mm_struct *mm = current->mm;
12936 unsigned long addr = addr0;
12937@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12938 info.high_limit = mm->mmap_base;
12939 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
12940 info.align_offset = 0;
12941+ info.threadstack_offset = offset;
12942 addr = vm_unmapped_area(&info);
12943
12944 /*
12945@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
12946 VM_BUG_ON(addr != -ENOMEM);
12947 info.flags = 0;
12948 info.low_limit = TASK_UNMAPPED_BASE;
12949+
12950+#ifdef CONFIG_PAX_RANDMMAP
12951+ if (mm->pax_flags & MF_PAX_RANDMMAP)
12952+ info.low_limit += mm->delta_mmap;
12953+#endif
12954+
12955 info.high_limit = STACK_TOP32;
12956 addr = vm_unmapped_area(&info);
12957 }
12958@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12959 struct mm_struct *mm = current->mm;
12960 struct vm_area_struct *vma;
12961 unsigned long task_size = TASK_SIZE;
12962+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
12963
12964 if (test_thread_flag(TIF_32BIT))
12965 task_size = STACK_TOP32;
12966@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
12967 return addr;
12968 }
12969
12970+#ifdef CONFIG_PAX_RANDMMAP
12971+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
12972+#endif
12973+
12974 if (addr) {
12975 addr = ALIGN(addr, HPAGE_SIZE);
12976 vma = find_vma(mm, addr);
12977- if (task_size - len >= addr &&
12978- (!vma || addr + len <= vma->vm_start))
12979+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
12980 return addr;
12981 }
12982 if (mm->get_unmapped_area == arch_get_unmapped_area)
12983 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
12984- pgoff, flags);
12985+ pgoff, flags, offset);
12986 else
12987 return hugetlb_get_unmapped_area_topdown(file, addr, len,
12988- pgoff, flags);
12989+ pgoff, flags, offset);
12990 }
12991
12992 pte_t *huge_pte_alloc(struct mm_struct *mm,
12993diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
12994index 04bc826..0fefab9 100644
12995--- a/arch/sparc/mm/init_64.c
12996+++ b/arch/sparc/mm/init_64.c
12997@@ -186,9 +186,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
12998 int num_kernel_image_mappings;
12999
13000 #ifdef CONFIG_DEBUG_DCFLUSH
13001-atomic_t dcpage_flushes = ATOMIC_INIT(0);
13002+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
13003 #ifdef CONFIG_SMP
13004-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
13005+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
13006 #endif
13007 #endif
13008
13009@@ -196,7 +196,7 @@ inline void flush_dcache_page_impl(struct page *page)
13010 {
13011 BUG_ON(tlb_type == hypervisor);
13012 #ifdef CONFIG_DEBUG_DCFLUSH
13013- atomic_inc(&dcpage_flushes);
13014+ atomic_inc_unchecked(&dcpage_flushes);
13015 #endif
13016
13017 #ifdef DCACHE_ALIASING_POSSIBLE
13018@@ -468,10 +468,10 @@ void mmu_info(struct seq_file *m)
13019
13020 #ifdef CONFIG_DEBUG_DCFLUSH
13021 seq_printf(m, "DCPageFlushes\t: %d\n",
13022- atomic_read(&dcpage_flushes));
13023+ atomic_read_unchecked(&dcpage_flushes));
13024 #ifdef CONFIG_SMP
13025 seq_printf(m, "DCPageFlushesXC\t: %d\n",
13026- atomic_read(&dcpage_flushes_xcall));
13027+ atomic_read_unchecked(&dcpage_flushes_xcall));
13028 #endif /* CONFIG_SMP */
13029 #endif /* CONFIG_DEBUG_DCFLUSH */
13030 }
13031diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
13032index ece4af0..f04b862 100644
13033--- a/arch/sparc/net/bpf_jit_comp.c
13034+++ b/arch/sparc/net/bpf_jit_comp.c
13035@@ -823,5 +823,6 @@ void bpf_jit_free(struct bpf_prog *fp)
13036 {
13037 if (fp->jited)
13038 module_free(NULL, fp->bpf_func);
13039- kfree(fp);
13040+
13041+ bpf_prog_unlock_free(fp);
13042 }
13043diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
13044index 7fcd492..1311074 100644
13045--- a/arch/tile/Kconfig
13046+++ b/arch/tile/Kconfig
13047@@ -191,6 +191,7 @@ source "kernel/Kconfig.hz"
13048
13049 config KEXEC
13050 bool "kexec system call"
13051+ depends on !GRKERNSEC_KMEM
13052 ---help---
13053 kexec is a system call that implements the ability to shutdown your
13054 current kernel, and to start another kernel. It is like a reboot
13055diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
13056index 7b11c5f..755a026 100644
13057--- a/arch/tile/include/asm/atomic_64.h
13058+++ b/arch/tile/include/asm/atomic_64.h
13059@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
13060
13061 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
13062
13063+#define atomic64_read_unchecked(v) atomic64_read(v)
13064+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
13065+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
13066+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
13067+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
13068+#define atomic64_inc_unchecked(v) atomic64_inc(v)
13069+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
13070+#define atomic64_dec_unchecked(v) atomic64_dec(v)
13071+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
13072+
13073 /* Define this to indicate that cmpxchg is an efficient operation. */
13074 #define __HAVE_ARCH_CMPXCHG
13075
13076diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
13077index 6160761..00cac88 100644
13078--- a/arch/tile/include/asm/cache.h
13079+++ b/arch/tile/include/asm/cache.h
13080@@ -15,11 +15,12 @@
13081 #ifndef _ASM_TILE_CACHE_H
13082 #define _ASM_TILE_CACHE_H
13083
13084+#include <linux/const.h>
13085 #include <arch/chip.h>
13086
13087 /* bytes per L1 data cache line */
13088 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
13089-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13090+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13091
13092 /* bytes per L2 cache line */
13093 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
13094diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
13095index b6cde32..c0cb736 100644
13096--- a/arch/tile/include/asm/uaccess.h
13097+++ b/arch/tile/include/asm/uaccess.h
13098@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
13099 const void __user *from,
13100 unsigned long n)
13101 {
13102- int sz = __compiletime_object_size(to);
13103+ size_t sz = __compiletime_object_size(to);
13104
13105- if (likely(sz == -1 || sz >= n))
13106+ if (likely(sz == (size_t)-1 || sz >= n))
13107 n = _copy_from_user(to, from, n);
13108 else
13109 copy_from_user_overflow();
13110diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
13111index e514899..f8743c4 100644
13112--- a/arch/tile/mm/hugetlbpage.c
13113+++ b/arch/tile/mm/hugetlbpage.c
13114@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
13115 info.high_limit = TASK_SIZE;
13116 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
13117 info.align_offset = 0;
13118+ info.threadstack_offset = 0;
13119 return vm_unmapped_area(&info);
13120 }
13121
13122@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
13123 info.high_limit = current->mm->mmap_base;
13124 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
13125 info.align_offset = 0;
13126+ info.threadstack_offset = 0;
13127 addr = vm_unmapped_area(&info);
13128
13129 /*
13130diff --git a/arch/um/Makefile b/arch/um/Makefile
13131index e4b1a96..16162f8 100644
13132--- a/arch/um/Makefile
13133+++ b/arch/um/Makefile
13134@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
13135 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
13136 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
13137
13138+ifdef CONSTIFY_PLUGIN
13139+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13140+endif
13141+
13142 #This will adjust *FLAGS accordingly to the platform.
13143 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
13144
13145diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
13146index 19e1bdd..3665b77 100644
13147--- a/arch/um/include/asm/cache.h
13148+++ b/arch/um/include/asm/cache.h
13149@@ -1,6 +1,7 @@
13150 #ifndef __UM_CACHE_H
13151 #define __UM_CACHE_H
13152
13153+#include <linux/const.h>
13154
13155 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
13156 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
13157@@ -12,6 +13,6 @@
13158 # define L1_CACHE_SHIFT 5
13159 #endif
13160
13161-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13162+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13163
13164 #endif
13165diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
13166index 2e0a6b1..a64d0f5 100644
13167--- a/arch/um/include/asm/kmap_types.h
13168+++ b/arch/um/include/asm/kmap_types.h
13169@@ -8,6 +8,6 @@
13170
13171 /* No more #include "asm/arch/kmap_types.h" ! */
13172
13173-#define KM_TYPE_NR 14
13174+#define KM_TYPE_NR 15
13175
13176 #endif
13177diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
13178index 71c5d13..4c7b9f1 100644
13179--- a/arch/um/include/asm/page.h
13180+++ b/arch/um/include/asm/page.h
13181@@ -14,6 +14,9 @@
13182 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
13183 #define PAGE_MASK (~(PAGE_SIZE-1))
13184
13185+#define ktla_ktva(addr) (addr)
13186+#define ktva_ktla(addr) (addr)
13187+
13188 #ifndef __ASSEMBLY__
13189
13190 struct page;
13191diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
13192index 0032f92..cd151e0 100644
13193--- a/arch/um/include/asm/pgtable-3level.h
13194+++ b/arch/um/include/asm/pgtable-3level.h
13195@@ -58,6 +58,7 @@
13196 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
13197 #define pud_populate(mm, pud, pmd) \
13198 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
13199+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
13200
13201 #ifdef CONFIG_64BIT
13202 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
13203diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
13204index f17bca8..48adb87 100644
13205--- a/arch/um/kernel/process.c
13206+++ b/arch/um/kernel/process.c
13207@@ -356,22 +356,6 @@ int singlestepping(void * t)
13208 return 2;
13209 }
13210
13211-/*
13212- * Only x86 and x86_64 have an arch_align_stack().
13213- * All other arches have "#define arch_align_stack(x) (x)"
13214- * in their asm/exec.h
13215- * As this is included in UML from asm-um/system-generic.h,
13216- * we can use it to behave as the subarch does.
13217- */
13218-#ifndef arch_align_stack
13219-unsigned long arch_align_stack(unsigned long sp)
13220-{
13221- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
13222- sp -= get_random_int() % 8192;
13223- return sp & ~0xf;
13224-}
13225-#endif
13226-
13227 unsigned long get_wchan(struct task_struct *p)
13228 {
13229 unsigned long stack_page, sp, ip;
13230diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
13231index ad8f795..2c7eec6 100644
13232--- a/arch/unicore32/include/asm/cache.h
13233+++ b/arch/unicore32/include/asm/cache.h
13234@@ -12,8 +12,10 @@
13235 #ifndef __UNICORE_CACHE_H__
13236 #define __UNICORE_CACHE_H__
13237
13238-#define L1_CACHE_SHIFT (5)
13239-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
13240+#include <linux/const.h>
13241+
13242+#define L1_CACHE_SHIFT 5
13243+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
13244
13245 /*
13246 * Memory returned by kmalloc() may be used for DMA, so we must make
13247diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
13248index 3632743..630a8bb 100644
13249--- a/arch/x86/Kconfig
13250+++ b/arch/x86/Kconfig
13251@@ -130,7 +130,7 @@ config X86
13252 select RTC_LIB
13253 select HAVE_DEBUG_STACKOVERFLOW
13254 select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
13255- select HAVE_CC_STACKPROTECTOR
13256+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
13257 select GENERIC_CPU_AUTOPROBE
13258 select HAVE_ARCH_AUDITSYSCALL
13259 select ARCH_SUPPORTS_ATOMIC_RMW
13260@@ -258,7 +258,7 @@ config X86_HT
13261
13262 config X86_32_LAZY_GS
13263 def_bool y
13264- depends on X86_32 && !CC_STACKPROTECTOR
13265+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
13266
13267 config ARCH_HWEIGHT_CFLAGS
13268 string
13269@@ -555,6 +555,7 @@ config SCHED_OMIT_FRAME_POINTER
13270
13271 menuconfig HYPERVISOR_GUEST
13272 bool "Linux guest support"
13273+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
13274 ---help---
13275 Say Y here to enable options for running Linux under various hyper-
13276 visors. This option enables basic hypervisor detection and platform
13277@@ -1083,6 +1084,7 @@ choice
13278
13279 config NOHIGHMEM
13280 bool "off"
13281+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
13282 ---help---
13283 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
13284 However, the address space of 32-bit x86 processors is only 4
13285@@ -1119,6 +1121,7 @@ config NOHIGHMEM
13286
13287 config HIGHMEM4G
13288 bool "4GB"
13289+ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
13290 ---help---
13291 Select this if you have a 32-bit processor and between 1 and 4
13292 gigabytes of physical RAM.
13293@@ -1171,7 +1174,7 @@ config PAGE_OFFSET
13294 hex
13295 default 0xB0000000 if VMSPLIT_3G_OPT
13296 default 0x80000000 if VMSPLIT_2G
13297- default 0x78000000 if VMSPLIT_2G_OPT
13298+ default 0x70000000 if VMSPLIT_2G_OPT
13299 default 0x40000000 if VMSPLIT_1G
13300 default 0xC0000000
13301 depends on X86_32
13302@@ -1586,6 +1589,7 @@ source kernel/Kconfig.hz
13303
13304 config KEXEC
13305 bool "kexec system call"
13306+ depends on !GRKERNSEC_KMEM
13307 ---help---
13308 kexec is a system call that implements the ability to shutdown your
13309 current kernel, and to start another kernel. It is like a reboot
13310@@ -1771,7 +1775,9 @@ config X86_NEED_RELOCS
13311
13312 config PHYSICAL_ALIGN
13313 hex "Alignment value to which kernel should be aligned"
13314- default "0x200000"
13315+ default "0x1000000"
13316+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
13317+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
13318 range 0x2000 0x1000000 if X86_32
13319 range 0x200000 0x1000000 if X86_64
13320 ---help---
13321@@ -1854,6 +1860,7 @@ config COMPAT_VDSO
13322 def_bool n
13323 prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
13324 depends on X86_32 || IA32_EMULATION
13325+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
13326 ---help---
13327 Certain buggy versions of glibc will crash if they are
13328 presented with a 32-bit vDSO that is not mapped at the address
13329diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
13330index 6983314..54ad7e8 100644
13331--- a/arch/x86/Kconfig.cpu
13332+++ b/arch/x86/Kconfig.cpu
13333@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
13334
13335 config X86_F00F_BUG
13336 def_bool y
13337- depends on M586MMX || M586TSC || M586 || M486
13338+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
13339
13340 config X86_INVD_BUG
13341 def_bool y
13342@@ -327,7 +327,7 @@ config X86_INVD_BUG
13343
13344 config X86_ALIGNMENT_16
13345 def_bool y
13346- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
13347+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
13348
13349 config X86_INTEL_USERCOPY
13350 def_bool y
13351@@ -369,7 +369,7 @@ config X86_CMPXCHG64
13352 # generates cmov.
13353 config X86_CMOV
13354 def_bool y
13355- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
13356+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
13357
13358 config X86_MINIMUM_CPU_FAMILY
13359 int
13360diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
13361index 61bd2ad..50b625d 100644
13362--- a/arch/x86/Kconfig.debug
13363+++ b/arch/x86/Kconfig.debug
13364@@ -93,7 +93,7 @@ config EFI_PGT_DUMP
13365 config DEBUG_RODATA
13366 bool "Write protect kernel read-only data structures"
13367 default y
13368- depends on DEBUG_KERNEL
13369+ depends on DEBUG_KERNEL && BROKEN
13370 ---help---
13371 Mark the kernel read-only data as write-protected in the pagetables,
13372 in order to catch accidental (and incorrect) writes to such const
13373@@ -111,7 +111,7 @@ config DEBUG_RODATA_TEST
13374
13375 config DEBUG_SET_MODULE_RONX
13376 bool "Set loadable kernel module data as NX and text as RO"
13377- depends on MODULES
13378+ depends on MODULES && BROKEN
13379 ---help---
13380 This option helps catch unintended modifications to loadable
13381 kernel module's text and read-only data. It also prevents execution
13382diff --git a/arch/x86/Makefile b/arch/x86/Makefile
13383index 60087ca..9d9500e 100644
13384--- a/arch/x86/Makefile
13385+++ b/arch/x86/Makefile
13386@@ -68,9 +68,6 @@ ifeq ($(CONFIG_X86_32),y)
13387 # CPU-specific tuning. Anything which can be shared with UML should go here.
13388 include $(srctree)/arch/x86/Makefile_32.cpu
13389 KBUILD_CFLAGS += $(cflags-y)
13390-
13391- # temporary until string.h is fixed
13392- KBUILD_CFLAGS += -ffreestanding
13393 else
13394 BITS := 64
13395 UTS_MACHINE := x86_64
13396@@ -111,6 +108,9 @@ else
13397 KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
13398 endif
13399
13400+# temporary until string.h is fixed
13401+KBUILD_CFLAGS += -ffreestanding
13402+
13403 # Make sure compiler does not have buggy stack-protector support.
13404 ifdef CONFIG_CC_STACKPROTECTOR
13405 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
13406@@ -184,6 +184,7 @@ archheaders:
13407 $(Q)$(MAKE) $(build)=arch/x86/syscalls all
13408
13409 archprepare:
13410+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
13411 ifeq ($(CONFIG_KEXEC_FILE),y)
13412 $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
13413 endif
13414@@ -274,3 +275,9 @@ define archhelp
13415 echo ' FDINITRD=file initrd for the booted kernel'
13416 echo ' kvmconfig - Enable additional options for guest kernel support'
13417 endef
13418+
13419+define OLD_LD
13420+
13421+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
13422+*** Please upgrade your binutils to 2.18 or newer
13423+endef
13424diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
13425index dbe8dd2..2f0a98f 100644
13426--- a/arch/x86/boot/Makefile
13427+++ b/arch/x86/boot/Makefile
13428@@ -52,6 +52,9 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
13429 # ---------------------------------------------------------------------------
13430
13431 KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
13432+ifdef CONSTIFY_PLUGIN
13433+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13434+endif
13435 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13436 GCOV_PROFILE := n
13437
13438diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
13439index 878e4b9..20537ab 100644
13440--- a/arch/x86/boot/bitops.h
13441+++ b/arch/x86/boot/bitops.h
13442@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
13443 u8 v;
13444 const u32 *p = (const u32 *)addr;
13445
13446- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
13447+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
13448 return v;
13449 }
13450
13451@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
13452
13453 static inline void set_bit(int nr, void *addr)
13454 {
13455- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
13456+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
13457 }
13458
13459 #endif /* BOOT_BITOPS_H */
13460diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
13461index bd49ec6..94c7f58 100644
13462--- a/arch/x86/boot/boot.h
13463+++ b/arch/x86/boot/boot.h
13464@@ -84,7 +84,7 @@ static inline void io_delay(void)
13465 static inline u16 ds(void)
13466 {
13467 u16 seg;
13468- asm("movw %%ds,%0" : "=rm" (seg));
13469+ asm volatile("movw %%ds,%0" : "=rm" (seg));
13470 return seg;
13471 }
13472
13473diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
13474index 14fe7cb..829b962 100644
13475--- a/arch/x86/boot/compressed/Makefile
13476+++ b/arch/x86/boot/compressed/Makefile
13477@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
13478 KBUILD_CFLAGS += -mno-mmx -mno-sse
13479 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
13480 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
13481+ifdef CONSTIFY_PLUGIN
13482+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
13483+endif
13484
13485 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13486 GCOV_PROFILE := n
13487diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
13488index a53440e..c3dbf1e 100644
13489--- a/arch/x86/boot/compressed/efi_stub_32.S
13490+++ b/arch/x86/boot/compressed/efi_stub_32.S
13491@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
13492 * parameter 2, ..., param n. To make things easy, we save the return
13493 * address of efi_call_phys in a global variable.
13494 */
13495- popl %ecx
13496- movl %ecx, saved_return_addr(%edx)
13497- /* get the function pointer into ECX*/
13498- popl %ecx
13499- movl %ecx, efi_rt_function_ptr(%edx)
13500+ popl saved_return_addr(%edx)
13501+ popl efi_rt_function_ptr(%edx)
13502
13503 /*
13504 * 3. Call the physical function.
13505 */
13506- call *%ecx
13507+ call *efi_rt_function_ptr(%edx)
13508
13509 /*
13510 * 4. Balance the stack. And because EAX contain the return value,
13511@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
13512 1: popl %edx
13513 subl $1b, %edx
13514
13515- movl efi_rt_function_ptr(%edx), %ecx
13516- pushl %ecx
13517+ pushl efi_rt_function_ptr(%edx)
13518
13519 /*
13520 * 10. Push the saved return address onto the stack and return.
13521 */
13522- movl saved_return_addr(%edx), %ecx
13523- pushl %ecx
13524- ret
13525+ jmpl *saved_return_addr(%edx)
13526 ENDPROC(efi_call_phys)
13527 .previous
13528
13529diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
13530index 1d7fbbc..36ecd58 100644
13531--- a/arch/x86/boot/compressed/head_32.S
13532+++ b/arch/x86/boot/compressed/head_32.S
13533@@ -140,10 +140,10 @@ preferred_addr:
13534 addl %eax, %ebx
13535 notl %eax
13536 andl %eax, %ebx
13537- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13538+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13539 jge 1f
13540 #endif
13541- movl $LOAD_PHYSICAL_ADDR, %ebx
13542+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13543 1:
13544
13545 /* Target address to relocate to for decompression */
13546diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
13547index 6b1766c..ad465c9 100644
13548--- a/arch/x86/boot/compressed/head_64.S
13549+++ b/arch/x86/boot/compressed/head_64.S
13550@@ -94,10 +94,10 @@ ENTRY(startup_32)
13551 addl %eax, %ebx
13552 notl %eax
13553 andl %eax, %ebx
13554- cmpl $LOAD_PHYSICAL_ADDR, %ebx
13555+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
13556 jge 1f
13557 #endif
13558- movl $LOAD_PHYSICAL_ADDR, %ebx
13559+ movl $____LOAD_PHYSICAL_ADDR, %ebx
13560 1:
13561
13562 /* Target address to relocate to for decompression */
13563@@ -322,10 +322,10 @@ preferred_addr:
13564 addq %rax, %rbp
13565 notq %rax
13566 andq %rax, %rbp
13567- cmpq $LOAD_PHYSICAL_ADDR, %rbp
13568+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
13569 jge 1f
13570 #endif
13571- movq $LOAD_PHYSICAL_ADDR, %rbp
13572+ movq $____LOAD_PHYSICAL_ADDR, %rbp
13573 1:
13574
13575 /* Target address to relocate to for decompression */
13576@@ -434,8 +434,8 @@ gdt:
13577 .long gdt
13578 .word 0
13579 .quad 0x0000000000000000 /* NULL descriptor */
13580- .quad 0x00af9a000000ffff /* __KERNEL_CS */
13581- .quad 0x00cf92000000ffff /* __KERNEL_DS */
13582+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
13583+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
13584 .quad 0x0080890000000000 /* TS descriptor */
13585 .quad 0x0000000000000000 /* TS continued */
13586 gdt_end:
13587diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
13588index 30dd59a..cd9edc3 100644
13589--- a/arch/x86/boot/compressed/misc.c
13590+++ b/arch/x86/boot/compressed/misc.c
13591@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
13592 * Calculate the delta between where vmlinux was linked to load
13593 * and where it was actually loaded.
13594 */
13595- delta = min_addr - LOAD_PHYSICAL_ADDR;
13596+ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
13597 if (!delta) {
13598 debug_putstr("No relocation needed... ");
13599 return;
13600@@ -312,7 +312,7 @@ static void parse_elf(void *output)
13601 Elf32_Ehdr ehdr;
13602 Elf32_Phdr *phdrs, *phdr;
13603 #endif
13604- void *dest;
13605+ void *dest, *prev;
13606 int i;
13607
13608 memcpy(&ehdr, output, sizeof(ehdr));
13609@@ -339,13 +339,16 @@ static void parse_elf(void *output)
13610 case PT_LOAD:
13611 #ifdef CONFIG_RELOCATABLE
13612 dest = output;
13613- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
13614+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
13615 #else
13616 dest = (void *)(phdr->p_paddr);
13617 #endif
13618 memcpy(dest,
13619 output + phdr->p_offset,
13620 phdr->p_filesz);
13621+ if (i)
13622+ memset(prev, 0xff, dest - prev);
13623+ prev = dest + phdr->p_filesz;
13624 break;
13625 default: /* Ignore other PT_* */ break;
13626 }
13627@@ -402,7 +405,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
13628 error("Destination address too large");
13629 #endif
13630 #ifndef CONFIG_RELOCATABLE
13631- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
13632+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
13633 error("Wrong destination address");
13634 #endif
13635
13636diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
13637index 1fd7d57..0f7d096 100644
13638--- a/arch/x86/boot/cpucheck.c
13639+++ b/arch/x86/boot/cpucheck.c
13640@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13641 u32 ecx = MSR_K7_HWCR;
13642 u32 eax, edx;
13643
13644- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13645+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13646 eax &= ~(1 << 15);
13647- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13648+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13649
13650 get_cpuflags(); /* Make sure it really did something */
13651 err = check_cpuflags();
13652@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13653 u32 ecx = MSR_VIA_FCR;
13654 u32 eax, edx;
13655
13656- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13657+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13658 eax |= (1<<1)|(1<<7);
13659- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13660+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13661
13662 set_bit(X86_FEATURE_CX8, cpu.flags);
13663 err = check_cpuflags();
13664@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
13665 u32 eax, edx;
13666 u32 level = 1;
13667
13668- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13669- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13670- asm("cpuid"
13671+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
13672+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
13673+ asm volatile("cpuid"
13674 : "+a" (level), "=d" (cpu.flags[0])
13675 : : "ecx", "ebx");
13676- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13677+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
13678
13679 err = check_cpuflags();
13680 } else if (err == 0x01 &&
13681diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
13682index 16ef025..91e033b 100644
13683--- a/arch/x86/boot/header.S
13684+++ b/arch/x86/boot/header.S
13685@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
13686 # single linked list of
13687 # struct setup_data
13688
13689-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
13690+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
13691
13692 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
13693+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13694+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
13695+#else
13696 #define VO_INIT_SIZE (VO__end - VO__text)
13697+#endif
13698 #if ZO_INIT_SIZE > VO_INIT_SIZE
13699 #define INIT_SIZE ZO_INIT_SIZE
13700 #else
13701diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
13702index db75d07..8e6d0af 100644
13703--- a/arch/x86/boot/memory.c
13704+++ b/arch/x86/boot/memory.c
13705@@ -19,7 +19,7 @@
13706
13707 static int detect_memory_e820(void)
13708 {
13709- int count = 0;
13710+ unsigned int count = 0;
13711 struct biosregs ireg, oreg;
13712 struct e820entry *desc = boot_params.e820_map;
13713 static struct e820entry buf; /* static so it is zeroed */
13714diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
13715index ba3e100..6501b8f 100644
13716--- a/arch/x86/boot/video-vesa.c
13717+++ b/arch/x86/boot/video-vesa.c
13718@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
13719
13720 boot_params.screen_info.vesapm_seg = oreg.es;
13721 boot_params.screen_info.vesapm_off = oreg.di;
13722+ boot_params.screen_info.vesapm_size = oreg.cx;
13723 }
13724
13725 /*
13726diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
13727index 43eda28..5ab5fdb 100644
13728--- a/arch/x86/boot/video.c
13729+++ b/arch/x86/boot/video.c
13730@@ -96,7 +96,7 @@ static void store_mode_params(void)
13731 static unsigned int get_entry(void)
13732 {
13733 char entry_buf[4];
13734- int i, len = 0;
13735+ unsigned int i, len = 0;
13736 int key;
13737 unsigned int v;
13738
13739diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
13740index 9105655..41779c1 100644
13741--- a/arch/x86/crypto/aes-x86_64-asm_64.S
13742+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
13743@@ -8,6 +8,8 @@
13744 * including this sentence is retained in full.
13745 */
13746
13747+#include <asm/alternative-asm.h>
13748+
13749 .extern crypto_ft_tab
13750 .extern crypto_it_tab
13751 .extern crypto_fl_tab
13752@@ -70,6 +72,8 @@
13753 je B192; \
13754 leaq 32(r9),r9;
13755
13756+#define ret pax_force_retaddr; ret
13757+
13758 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
13759 movq r1,r2; \
13760 movq r3,r4; \
13761diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
13762index 477e9d7..c92c7d8 100644
13763--- a/arch/x86/crypto/aesni-intel_asm.S
13764+++ b/arch/x86/crypto/aesni-intel_asm.S
13765@@ -31,6 +31,7 @@
13766
13767 #include <linux/linkage.h>
13768 #include <asm/inst.h>
13769+#include <asm/alternative-asm.h>
13770
13771 #ifdef __x86_64__
13772 .data
13773@@ -205,7 +206,7 @@ enc: .octa 0x2
13774 * num_initial_blocks = b mod 4
13775 * encrypt the initial num_initial_blocks blocks and apply ghash on
13776 * the ciphertext
13777-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13778+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13779 * are clobbered
13780 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13781 */
13782@@ -214,8 +215,8 @@ enc: .octa 0x2
13783 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13784 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13785 mov arg7, %r10 # %r10 = AAD
13786- mov arg8, %r12 # %r12 = aadLen
13787- mov %r12, %r11
13788+ mov arg8, %r15 # %r15 = aadLen
13789+ mov %r15, %r11
13790 pxor %xmm\i, %xmm\i
13791 _get_AAD_loop\num_initial_blocks\operation:
13792 movd (%r10), \TMP1
13793@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13794 psrldq $4, %xmm\i
13795 pxor \TMP1, %xmm\i
13796 add $4, %r10
13797- sub $4, %r12
13798+ sub $4, %r15
13799 jne _get_AAD_loop\num_initial_blocks\operation
13800 cmp $16, %r11
13801 je _get_AAD_loop2_done\num_initial_blocks\operation
13802- mov $16, %r12
13803+ mov $16, %r15
13804 _get_AAD_loop2\num_initial_blocks\operation:
13805 psrldq $4, %xmm\i
13806- sub $4, %r12
13807- cmp %r11, %r12
13808+ sub $4, %r15
13809+ cmp %r11, %r15
13810 jne _get_AAD_loop2\num_initial_blocks\operation
13811 _get_AAD_loop2_done\num_initial_blocks\operation:
13812 movdqa SHUF_MASK(%rip), %xmm14
13813@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
13814 * num_initial_blocks = b mod 4
13815 * encrypt the initial num_initial_blocks blocks and apply ghash on
13816 * the ciphertext
13817-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13818+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
13819 * are clobbered
13820 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
13821 */
13822@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
13823 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
13824 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
13825 mov arg7, %r10 # %r10 = AAD
13826- mov arg8, %r12 # %r12 = aadLen
13827- mov %r12, %r11
13828+ mov arg8, %r15 # %r15 = aadLen
13829+ mov %r15, %r11
13830 pxor %xmm\i, %xmm\i
13831 _get_AAD_loop\num_initial_blocks\operation:
13832 movd (%r10), \TMP1
13833@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
13834 psrldq $4, %xmm\i
13835 pxor \TMP1, %xmm\i
13836 add $4, %r10
13837- sub $4, %r12
13838+ sub $4, %r15
13839 jne _get_AAD_loop\num_initial_blocks\operation
13840 cmp $16, %r11
13841 je _get_AAD_loop2_done\num_initial_blocks\operation
13842- mov $16, %r12
13843+ mov $16, %r15
13844 _get_AAD_loop2\num_initial_blocks\operation:
13845 psrldq $4, %xmm\i
13846- sub $4, %r12
13847- cmp %r11, %r12
13848+ sub $4, %r15
13849+ cmp %r11, %r15
13850 jne _get_AAD_loop2\num_initial_blocks\operation
13851 _get_AAD_loop2_done\num_initial_blocks\operation:
13852 movdqa SHUF_MASK(%rip), %xmm14
13853@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
13854 *
13855 *****************************************************************************/
13856 ENTRY(aesni_gcm_dec)
13857- push %r12
13858+ push %r15
13859 push %r13
13860 push %r14
13861 mov %rsp, %r14
13862@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
13863 */
13864 sub $VARIABLE_OFFSET, %rsp
13865 and $~63, %rsp # align rsp to 64 bytes
13866- mov %arg6, %r12
13867- movdqu (%r12), %xmm13 # %xmm13 = HashKey
13868+ mov %arg6, %r15
13869+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
13870 movdqa SHUF_MASK(%rip), %xmm2
13871 PSHUFB_XMM %xmm2, %xmm13
13872
13873@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
13874 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
13875 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
13876 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
13877- mov %r13, %r12
13878- and $(3<<4), %r12
13879+ mov %r13, %r15
13880+ and $(3<<4), %r15
13881 jz _initial_num_blocks_is_0_decrypt
13882- cmp $(2<<4), %r12
13883+ cmp $(2<<4), %r15
13884 jb _initial_num_blocks_is_1_decrypt
13885 je _initial_num_blocks_is_2_decrypt
13886 _initial_num_blocks_is_3_decrypt:
13887@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
13888 sub $16, %r11
13889 add %r13, %r11
13890 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
13891- lea SHIFT_MASK+16(%rip), %r12
13892- sub %r13, %r12
13893+ lea SHIFT_MASK+16(%rip), %r15
13894+ sub %r13, %r15
13895 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
13896 # (%r13 is the number of bytes in plaintext mod 16)
13897- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13898+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13899 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
13900
13901 movdqa %xmm1, %xmm2
13902 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
13903- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13904+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13905 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
13906 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
13907 pand %xmm1, %xmm2
13908@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
13909 sub $1, %r13
13910 jne _less_than_8_bytes_left_decrypt
13911 _multiple_of_16_bytes_decrypt:
13912- mov arg8, %r12 # %r13 = aadLen (number of bytes)
13913- shl $3, %r12 # convert into number of bits
13914- movd %r12d, %xmm15 # len(A) in %xmm15
13915+ mov arg8, %r15 # %r13 = aadLen (number of bytes)
13916+ shl $3, %r15 # convert into number of bits
13917+ movd %r15d, %xmm15 # len(A) in %xmm15
13918 shl $3, %arg4 # len(C) in bits (*128)
13919 MOVQ_R64_XMM %arg4, %xmm1
13920 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
13921@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
13922 mov %r14, %rsp
13923 pop %r14
13924 pop %r13
13925- pop %r12
13926+ pop %r15
13927+ pax_force_retaddr
13928 ret
13929 ENDPROC(aesni_gcm_dec)
13930
13931@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
13932 * poly = x^128 + x^127 + x^126 + x^121 + 1
13933 ***************************************************************************/
13934 ENTRY(aesni_gcm_enc)
13935- push %r12
13936+ push %r15
13937 push %r13
13938 push %r14
13939 mov %rsp, %r14
13940@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
13941 #
13942 sub $VARIABLE_OFFSET, %rsp
13943 and $~63, %rsp
13944- mov %arg6, %r12
13945- movdqu (%r12), %xmm13
13946+ mov %arg6, %r15
13947+ movdqu (%r15), %xmm13
13948 movdqa SHUF_MASK(%rip), %xmm2
13949 PSHUFB_XMM %xmm2, %xmm13
13950
13951@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
13952 movdqa %xmm13, HashKey(%rsp)
13953 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
13954 and $-16, %r13
13955- mov %r13, %r12
13956+ mov %r13, %r15
13957
13958 # Encrypt first few blocks
13959
13960- and $(3<<4), %r12
13961+ and $(3<<4), %r15
13962 jz _initial_num_blocks_is_0_encrypt
13963- cmp $(2<<4), %r12
13964+ cmp $(2<<4), %r15
13965 jb _initial_num_blocks_is_1_encrypt
13966 je _initial_num_blocks_is_2_encrypt
13967 _initial_num_blocks_is_3_encrypt:
13968@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
13969 sub $16, %r11
13970 add %r13, %r11
13971 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
13972- lea SHIFT_MASK+16(%rip), %r12
13973- sub %r13, %r12
13974+ lea SHIFT_MASK+16(%rip), %r15
13975+ sub %r13, %r15
13976 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
13977 # (%r13 is the number of bytes in plaintext mod 16)
13978- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
13979+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
13980 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
13981 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
13982- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
13983+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
13984 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
13985 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
13986 movdqa SHUF_MASK(%rip), %xmm10
13987@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
13988 sub $1, %r13
13989 jne _less_than_8_bytes_left_encrypt
13990 _multiple_of_16_bytes_encrypt:
13991- mov arg8, %r12 # %r12 = addLen (number of bytes)
13992- shl $3, %r12
13993- movd %r12d, %xmm15 # len(A) in %xmm15
13994+ mov arg8, %r15 # %r15 = addLen (number of bytes)
13995+ shl $3, %r15
13996+ movd %r15d, %xmm15 # len(A) in %xmm15
13997 shl $3, %arg4 # len(C) in bits (*128)
13998 MOVQ_R64_XMM %arg4, %xmm1
13999 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
14000@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
14001 mov %r14, %rsp
14002 pop %r14
14003 pop %r13
14004- pop %r12
14005+ pop %r15
14006+ pax_force_retaddr
14007 ret
14008 ENDPROC(aesni_gcm_enc)
14009
14010@@ -1722,6 +1725,7 @@ _key_expansion_256a:
14011 pxor %xmm1, %xmm0
14012 movaps %xmm0, (TKEYP)
14013 add $0x10, TKEYP
14014+ pax_force_retaddr
14015 ret
14016 ENDPROC(_key_expansion_128)
14017 ENDPROC(_key_expansion_256a)
14018@@ -1748,6 +1752,7 @@ _key_expansion_192a:
14019 shufps $0b01001110, %xmm2, %xmm1
14020 movaps %xmm1, 0x10(TKEYP)
14021 add $0x20, TKEYP
14022+ pax_force_retaddr
14023 ret
14024 ENDPROC(_key_expansion_192a)
14025
14026@@ -1768,6 +1773,7 @@ _key_expansion_192b:
14027
14028 movaps %xmm0, (TKEYP)
14029 add $0x10, TKEYP
14030+ pax_force_retaddr
14031 ret
14032 ENDPROC(_key_expansion_192b)
14033
14034@@ -1781,6 +1787,7 @@ _key_expansion_256b:
14035 pxor %xmm1, %xmm2
14036 movaps %xmm2, (TKEYP)
14037 add $0x10, TKEYP
14038+ pax_force_retaddr
14039 ret
14040 ENDPROC(_key_expansion_256b)
14041
14042@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
14043 #ifndef __x86_64__
14044 popl KEYP
14045 #endif
14046+ pax_force_retaddr
14047 ret
14048 ENDPROC(aesni_set_key)
14049
14050@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
14051 popl KLEN
14052 popl KEYP
14053 #endif
14054+ pax_force_retaddr
14055 ret
14056 ENDPROC(aesni_enc)
14057
14058@@ -1974,6 +1983,7 @@ _aesni_enc1:
14059 AESENC KEY STATE
14060 movaps 0x70(TKEYP), KEY
14061 AESENCLAST KEY STATE
14062+ pax_force_retaddr
14063 ret
14064 ENDPROC(_aesni_enc1)
14065
14066@@ -2083,6 +2093,7 @@ _aesni_enc4:
14067 AESENCLAST KEY STATE2
14068 AESENCLAST KEY STATE3
14069 AESENCLAST KEY STATE4
14070+ pax_force_retaddr
14071 ret
14072 ENDPROC(_aesni_enc4)
14073
14074@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
14075 popl KLEN
14076 popl KEYP
14077 #endif
14078+ pax_force_retaddr
14079 ret
14080 ENDPROC(aesni_dec)
14081
14082@@ -2164,6 +2176,7 @@ _aesni_dec1:
14083 AESDEC KEY STATE
14084 movaps 0x70(TKEYP), KEY
14085 AESDECLAST KEY STATE
14086+ pax_force_retaddr
14087 ret
14088 ENDPROC(_aesni_dec1)
14089
14090@@ -2273,6 +2286,7 @@ _aesni_dec4:
14091 AESDECLAST KEY STATE2
14092 AESDECLAST KEY STATE3
14093 AESDECLAST KEY STATE4
14094+ pax_force_retaddr
14095 ret
14096 ENDPROC(_aesni_dec4)
14097
14098@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
14099 popl KEYP
14100 popl LEN
14101 #endif
14102+ pax_force_retaddr
14103 ret
14104 ENDPROC(aesni_ecb_enc)
14105
14106@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
14107 popl KEYP
14108 popl LEN
14109 #endif
14110+ pax_force_retaddr
14111 ret
14112 ENDPROC(aesni_ecb_dec)
14113
14114@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
14115 popl LEN
14116 popl IVP
14117 #endif
14118+ pax_force_retaddr
14119 ret
14120 ENDPROC(aesni_cbc_enc)
14121
14122@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
14123 popl LEN
14124 popl IVP
14125 #endif
14126+ pax_force_retaddr
14127 ret
14128 ENDPROC(aesni_cbc_dec)
14129
14130@@ -2550,6 +2568,7 @@ _aesni_inc_init:
14131 mov $1, TCTR_LOW
14132 MOVQ_R64_XMM TCTR_LOW INC
14133 MOVQ_R64_XMM CTR TCTR_LOW
14134+ pax_force_retaddr
14135 ret
14136 ENDPROC(_aesni_inc_init)
14137
14138@@ -2579,6 +2598,7 @@ _aesni_inc:
14139 .Linc_low:
14140 movaps CTR, IV
14141 PSHUFB_XMM BSWAP_MASK IV
14142+ pax_force_retaddr
14143 ret
14144 ENDPROC(_aesni_inc)
14145
14146@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
14147 .Lctr_enc_ret:
14148 movups IV, (IVP)
14149 .Lctr_enc_just_ret:
14150+ pax_force_retaddr
14151 ret
14152 ENDPROC(aesni_ctr_enc)
14153
14154@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
14155 pxor INC, STATE4
14156 movdqu STATE4, 0x70(OUTP)
14157
14158+ pax_force_retaddr
14159 ret
14160 ENDPROC(aesni_xts_crypt8)
14161
14162diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
14163index 246c670..466e2d6 100644
14164--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
14165+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
14166@@ -21,6 +21,7 @@
14167 */
14168
14169 #include <linux/linkage.h>
14170+#include <asm/alternative-asm.h>
14171
14172 .file "blowfish-x86_64-asm.S"
14173 .text
14174@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
14175 jnz .L__enc_xor;
14176
14177 write_block();
14178+ pax_force_retaddr
14179 ret;
14180 .L__enc_xor:
14181 xor_block();
14182+ pax_force_retaddr
14183 ret;
14184 ENDPROC(__blowfish_enc_blk)
14185
14186@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
14187
14188 movq %r11, %rbp;
14189
14190+ pax_force_retaddr
14191 ret;
14192 ENDPROC(blowfish_dec_blk)
14193
14194@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
14195
14196 popq %rbx;
14197 popq %rbp;
14198+ pax_force_retaddr
14199 ret;
14200
14201 .L__enc_xor4:
14202@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
14203
14204 popq %rbx;
14205 popq %rbp;
14206+ pax_force_retaddr
14207 ret;
14208 ENDPROC(__blowfish_enc_blk_4way)
14209
14210@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
14211 popq %rbx;
14212 popq %rbp;
14213
14214+ pax_force_retaddr
14215 ret;
14216 ENDPROC(blowfish_dec_blk_4way)
14217diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14218index ce71f92..1dce7ec 100644
14219--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14220+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
14221@@ -16,6 +16,7 @@
14222 */
14223
14224 #include <linux/linkage.h>
14225+#include <asm/alternative-asm.h>
14226
14227 #define CAMELLIA_TABLE_BYTE_LEN 272
14228
14229@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
14230 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
14231 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
14232 %rcx, (%r9));
14233+ pax_force_retaddr
14234 ret;
14235 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
14236
14237@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
14238 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
14239 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
14240 %rax, (%r9));
14241+ pax_force_retaddr
14242 ret;
14243 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
14244
14245@@ -780,6 +783,7 @@ __camellia_enc_blk16:
14246 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
14247 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
14248
14249+ pax_force_retaddr
14250 ret;
14251
14252 .align 8
14253@@ -865,6 +869,7 @@ __camellia_dec_blk16:
14254 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
14255 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
14256
14257+ pax_force_retaddr
14258 ret;
14259
14260 .align 8
14261@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
14262 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14263 %xmm8, %rsi);
14264
14265+ pax_force_retaddr
14266 ret;
14267 ENDPROC(camellia_ecb_enc_16way)
14268
14269@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
14270 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14271 %xmm8, %rsi);
14272
14273+ pax_force_retaddr
14274 ret;
14275 ENDPROC(camellia_ecb_dec_16way)
14276
14277@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
14278 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14279 %xmm8, %rsi);
14280
14281+ pax_force_retaddr
14282 ret;
14283 ENDPROC(camellia_cbc_dec_16way)
14284
14285@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
14286 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14287 %xmm8, %rsi);
14288
14289+ pax_force_retaddr
14290 ret;
14291 ENDPROC(camellia_ctr_16way)
14292
14293@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
14294 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
14295 %xmm8, %rsi);
14296
14297+ pax_force_retaddr
14298 ret;
14299 ENDPROC(camellia_xts_crypt_16way)
14300
14301diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14302index 0e0b886..5a3123c 100644
14303--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14304+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
14305@@ -11,6 +11,7 @@
14306 */
14307
14308 #include <linux/linkage.h>
14309+#include <asm/alternative-asm.h>
14310
14311 #define CAMELLIA_TABLE_BYTE_LEN 272
14312
14313@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
14314 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
14315 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
14316 %rcx, (%r9));
14317+ pax_force_retaddr
14318 ret;
14319 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
14320
14321@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
14322 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
14323 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
14324 %rax, (%r9));
14325+ pax_force_retaddr
14326 ret;
14327 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
14328
14329@@ -820,6 +823,7 @@ __camellia_enc_blk32:
14330 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
14331 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
14332
14333+ pax_force_retaddr
14334 ret;
14335
14336 .align 8
14337@@ -905,6 +909,7 @@ __camellia_dec_blk32:
14338 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
14339 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
14340
14341+ pax_force_retaddr
14342 ret;
14343
14344 .align 8
14345@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
14346
14347 vzeroupper;
14348
14349+ pax_force_retaddr
14350 ret;
14351 ENDPROC(camellia_ecb_enc_32way)
14352
14353@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
14354
14355 vzeroupper;
14356
14357+ pax_force_retaddr
14358 ret;
14359 ENDPROC(camellia_ecb_dec_32way)
14360
14361@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
14362
14363 vzeroupper;
14364
14365+ pax_force_retaddr
14366 ret;
14367 ENDPROC(camellia_cbc_dec_32way)
14368
14369@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
14370
14371 vzeroupper;
14372
14373+ pax_force_retaddr
14374 ret;
14375 ENDPROC(camellia_ctr_32way)
14376
14377@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
14378
14379 vzeroupper;
14380
14381+ pax_force_retaddr
14382 ret;
14383 ENDPROC(camellia_xts_crypt_32way)
14384
14385diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
14386index 310319c..db3d7b5 100644
14387--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
14388+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
14389@@ -21,6 +21,7 @@
14390 */
14391
14392 #include <linux/linkage.h>
14393+#include <asm/alternative-asm.h>
14394
14395 .file "camellia-x86_64-asm_64.S"
14396 .text
14397@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
14398 enc_outunpack(mov, RT1);
14399
14400 movq RRBP, %rbp;
14401+ pax_force_retaddr
14402 ret;
14403
14404 .L__enc_xor:
14405 enc_outunpack(xor, RT1);
14406
14407 movq RRBP, %rbp;
14408+ pax_force_retaddr
14409 ret;
14410 ENDPROC(__camellia_enc_blk)
14411
14412@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
14413 dec_outunpack();
14414
14415 movq RRBP, %rbp;
14416+ pax_force_retaddr
14417 ret;
14418 ENDPROC(camellia_dec_blk)
14419
14420@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
14421
14422 movq RRBP, %rbp;
14423 popq %rbx;
14424+ pax_force_retaddr
14425 ret;
14426
14427 .L__enc2_xor:
14428@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
14429
14430 movq RRBP, %rbp;
14431 popq %rbx;
14432+ pax_force_retaddr
14433 ret;
14434 ENDPROC(__camellia_enc_blk_2way)
14435
14436@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
14437
14438 movq RRBP, %rbp;
14439 movq RXOR, %rbx;
14440+ pax_force_retaddr
14441 ret;
14442 ENDPROC(camellia_dec_blk_2way)
14443diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14444index c35fd5d..2d8c7db 100644
14445--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14446+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
14447@@ -24,6 +24,7 @@
14448 */
14449
14450 #include <linux/linkage.h>
14451+#include <asm/alternative-asm.h>
14452
14453 .file "cast5-avx-x86_64-asm_64.S"
14454
14455@@ -281,6 +282,7 @@ __cast5_enc_blk16:
14456 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
14457 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
14458
14459+ pax_force_retaddr
14460 ret;
14461 ENDPROC(__cast5_enc_blk16)
14462
14463@@ -352,6 +354,7 @@ __cast5_dec_blk16:
14464 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
14465 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
14466
14467+ pax_force_retaddr
14468 ret;
14469
14470 .L__skip_dec:
14471@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
14472 vmovdqu RR4, (6*4*4)(%r11);
14473 vmovdqu RL4, (7*4*4)(%r11);
14474
14475+ pax_force_retaddr
14476 ret;
14477 ENDPROC(cast5_ecb_enc_16way)
14478
14479@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
14480 vmovdqu RR4, (6*4*4)(%r11);
14481 vmovdqu RL4, (7*4*4)(%r11);
14482
14483+ pax_force_retaddr
14484 ret;
14485 ENDPROC(cast5_ecb_dec_16way)
14486
14487@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
14488 * %rdx: src
14489 */
14490
14491- pushq %r12;
14492+ pushq %r14;
14493
14494 movq %rsi, %r11;
14495- movq %rdx, %r12;
14496+ movq %rdx, %r14;
14497
14498 vmovdqu (0*16)(%rdx), RL1;
14499 vmovdqu (1*16)(%rdx), RR1;
14500@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
14501 call __cast5_dec_blk16;
14502
14503 /* xor with src */
14504- vmovq (%r12), RX;
14505+ vmovq (%r14), RX;
14506 vpshufd $0x4f, RX, RX;
14507 vpxor RX, RR1, RR1;
14508- vpxor 0*16+8(%r12), RL1, RL1;
14509- vpxor 1*16+8(%r12), RR2, RR2;
14510- vpxor 2*16+8(%r12), RL2, RL2;
14511- vpxor 3*16+8(%r12), RR3, RR3;
14512- vpxor 4*16+8(%r12), RL3, RL3;
14513- vpxor 5*16+8(%r12), RR4, RR4;
14514- vpxor 6*16+8(%r12), RL4, RL4;
14515+ vpxor 0*16+8(%r14), RL1, RL1;
14516+ vpxor 1*16+8(%r14), RR2, RR2;
14517+ vpxor 2*16+8(%r14), RL2, RL2;
14518+ vpxor 3*16+8(%r14), RR3, RR3;
14519+ vpxor 4*16+8(%r14), RL3, RL3;
14520+ vpxor 5*16+8(%r14), RR4, RR4;
14521+ vpxor 6*16+8(%r14), RL4, RL4;
14522
14523 vmovdqu RR1, (0*16)(%r11);
14524 vmovdqu RL1, (1*16)(%r11);
14525@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
14526 vmovdqu RR4, (6*16)(%r11);
14527 vmovdqu RL4, (7*16)(%r11);
14528
14529- popq %r12;
14530+ popq %r14;
14531
14532+ pax_force_retaddr
14533 ret;
14534 ENDPROC(cast5_cbc_dec_16way)
14535
14536@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
14537 * %rcx: iv (big endian, 64bit)
14538 */
14539
14540- pushq %r12;
14541+ pushq %r14;
14542
14543 movq %rsi, %r11;
14544- movq %rdx, %r12;
14545+ movq %rdx, %r14;
14546
14547 vpcmpeqd RTMP, RTMP, RTMP;
14548 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
14549@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
14550 call __cast5_enc_blk16;
14551
14552 /* dst = src ^ iv */
14553- vpxor (0*16)(%r12), RR1, RR1;
14554- vpxor (1*16)(%r12), RL1, RL1;
14555- vpxor (2*16)(%r12), RR2, RR2;
14556- vpxor (3*16)(%r12), RL2, RL2;
14557- vpxor (4*16)(%r12), RR3, RR3;
14558- vpxor (5*16)(%r12), RL3, RL3;
14559- vpxor (6*16)(%r12), RR4, RR4;
14560- vpxor (7*16)(%r12), RL4, RL4;
14561+ vpxor (0*16)(%r14), RR1, RR1;
14562+ vpxor (1*16)(%r14), RL1, RL1;
14563+ vpxor (2*16)(%r14), RR2, RR2;
14564+ vpxor (3*16)(%r14), RL2, RL2;
14565+ vpxor (4*16)(%r14), RR3, RR3;
14566+ vpxor (5*16)(%r14), RL3, RL3;
14567+ vpxor (6*16)(%r14), RR4, RR4;
14568+ vpxor (7*16)(%r14), RL4, RL4;
14569 vmovdqu RR1, (0*16)(%r11);
14570 vmovdqu RL1, (1*16)(%r11);
14571 vmovdqu RR2, (2*16)(%r11);
14572@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
14573 vmovdqu RR4, (6*16)(%r11);
14574 vmovdqu RL4, (7*16)(%r11);
14575
14576- popq %r12;
14577+ popq %r14;
14578
14579+ pax_force_retaddr
14580 ret;
14581 ENDPROC(cast5_ctr_16way)
14582diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14583index e3531f8..e123f35 100644
14584--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14585+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
14586@@ -24,6 +24,7 @@
14587 */
14588
14589 #include <linux/linkage.h>
14590+#include <asm/alternative-asm.h>
14591 #include "glue_helper-asm-avx.S"
14592
14593 .file "cast6-avx-x86_64-asm_64.S"
14594@@ -295,6 +296,7 @@ __cast6_enc_blk8:
14595 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14596 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14597
14598+ pax_force_retaddr
14599 ret;
14600 ENDPROC(__cast6_enc_blk8)
14601
14602@@ -340,6 +342,7 @@ __cast6_dec_blk8:
14603 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
14604 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
14605
14606+ pax_force_retaddr
14607 ret;
14608 ENDPROC(__cast6_dec_blk8)
14609
14610@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
14611
14612 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14613
14614+ pax_force_retaddr
14615 ret;
14616 ENDPROC(cast6_ecb_enc_8way)
14617
14618@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
14619
14620 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14621
14622+ pax_force_retaddr
14623 ret;
14624 ENDPROC(cast6_ecb_dec_8way)
14625
14626@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
14627 * %rdx: src
14628 */
14629
14630- pushq %r12;
14631+ pushq %r14;
14632
14633 movq %rsi, %r11;
14634- movq %rdx, %r12;
14635+ movq %rdx, %r14;
14636
14637 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14638
14639 call __cast6_dec_blk8;
14640
14641- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14642+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14643
14644- popq %r12;
14645+ popq %r14;
14646
14647+ pax_force_retaddr
14648 ret;
14649 ENDPROC(cast6_cbc_dec_8way)
14650
14651@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
14652 * %rcx: iv (little endian, 128bit)
14653 */
14654
14655- pushq %r12;
14656+ pushq %r14;
14657
14658 movq %rsi, %r11;
14659- movq %rdx, %r12;
14660+ movq %rdx, %r14;
14661
14662 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
14663 RD2, RX, RKR, RKM);
14664
14665 call __cast6_enc_blk8;
14666
14667- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14668+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14669
14670- popq %r12;
14671+ popq %r14;
14672
14673+ pax_force_retaddr
14674 ret;
14675 ENDPROC(cast6_ctr_8way)
14676
14677@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
14678 /* dst <= regs xor IVs(in dst) */
14679 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14680
14681+ pax_force_retaddr
14682 ret;
14683 ENDPROC(cast6_xts_enc_8way)
14684
14685@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
14686 /* dst <= regs xor IVs(in dst) */
14687 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14688
14689+ pax_force_retaddr
14690 ret;
14691 ENDPROC(cast6_xts_dec_8way)
14692diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14693index 26d49eb..c0a8c84 100644
14694--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14695+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
14696@@ -45,6 +45,7 @@
14697
14698 #include <asm/inst.h>
14699 #include <linux/linkage.h>
14700+#include <asm/alternative-asm.h>
14701
14702 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
14703
14704@@ -309,6 +310,7 @@ do_return:
14705 popq %rsi
14706 popq %rdi
14707 popq %rbx
14708+ pax_force_retaddr
14709 ret
14710
14711 ################################################################
14712diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14713index 5d1e007..098cb4f 100644
14714--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
14715+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
14716@@ -18,6 +18,7 @@
14717
14718 #include <linux/linkage.h>
14719 #include <asm/inst.h>
14720+#include <asm/alternative-asm.h>
14721
14722 .data
14723
14724@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
14725 psrlq $1, T2
14726 pxor T2, T1
14727 pxor T1, DATA
14728+ pax_force_retaddr
14729 ret
14730 ENDPROC(__clmul_gf128mul_ble)
14731
14732@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
14733 call __clmul_gf128mul_ble
14734 PSHUFB_XMM BSWAP DATA
14735 movups DATA, (%rdi)
14736+ pax_force_retaddr
14737 ret
14738 ENDPROC(clmul_ghash_mul)
14739
14740@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
14741 PSHUFB_XMM BSWAP DATA
14742 movups DATA, (%rdi)
14743 .Lupdate_just_ret:
14744+ pax_force_retaddr
14745 ret
14746 ENDPROC(clmul_ghash_update)
14747diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14748index 9279e0b..c4b3d2c 100644
14749--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
14750+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
14751@@ -1,4 +1,5 @@
14752 #include <linux/linkage.h>
14753+#include <asm/alternative-asm.h>
14754
14755 # enter salsa20_encrypt_bytes
14756 ENTRY(salsa20_encrypt_bytes)
14757@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
14758 add %r11,%rsp
14759 mov %rdi,%rax
14760 mov %rsi,%rdx
14761+ pax_force_retaddr
14762 ret
14763 # bytesatleast65:
14764 ._bytesatleast65:
14765@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
14766 add %r11,%rsp
14767 mov %rdi,%rax
14768 mov %rsi,%rdx
14769+ pax_force_retaddr
14770 ret
14771 ENDPROC(salsa20_keysetup)
14772
14773@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
14774 add %r11,%rsp
14775 mov %rdi,%rax
14776 mov %rsi,%rdx
14777+ pax_force_retaddr
14778 ret
14779 ENDPROC(salsa20_ivsetup)
14780diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14781index 2f202f4..d9164d6 100644
14782--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14783+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
14784@@ -24,6 +24,7 @@
14785 */
14786
14787 #include <linux/linkage.h>
14788+#include <asm/alternative-asm.h>
14789 #include "glue_helper-asm-avx.S"
14790
14791 .file "serpent-avx-x86_64-asm_64.S"
14792@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
14793 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14794 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14795
14796+ pax_force_retaddr
14797 ret;
14798 ENDPROC(__serpent_enc_blk8_avx)
14799
14800@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
14801 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14802 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14803
14804+ pax_force_retaddr
14805 ret;
14806 ENDPROC(__serpent_dec_blk8_avx)
14807
14808@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
14809
14810 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14811
14812+ pax_force_retaddr
14813 ret;
14814 ENDPROC(serpent_ecb_enc_8way_avx)
14815
14816@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
14817
14818 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14819
14820+ pax_force_retaddr
14821 ret;
14822 ENDPROC(serpent_ecb_dec_8way_avx)
14823
14824@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
14825
14826 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14827
14828+ pax_force_retaddr
14829 ret;
14830 ENDPROC(serpent_cbc_dec_8way_avx)
14831
14832@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
14833
14834 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14835
14836+ pax_force_retaddr
14837 ret;
14838 ENDPROC(serpent_ctr_8way_avx)
14839
14840@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
14841 /* dst <= regs xor IVs(in dst) */
14842 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
14843
14844+ pax_force_retaddr
14845 ret;
14846 ENDPROC(serpent_xts_enc_8way_avx)
14847
14848@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
14849 /* dst <= regs xor IVs(in dst) */
14850 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
14851
14852+ pax_force_retaddr
14853 ret;
14854 ENDPROC(serpent_xts_dec_8way_avx)
14855diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
14856index b222085..abd483c 100644
14857--- a/arch/x86/crypto/serpent-avx2-asm_64.S
14858+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
14859@@ -15,6 +15,7 @@
14860 */
14861
14862 #include <linux/linkage.h>
14863+#include <asm/alternative-asm.h>
14864 #include "glue_helper-asm-avx2.S"
14865
14866 .file "serpent-avx2-asm_64.S"
14867@@ -610,6 +611,7 @@ __serpent_enc_blk16:
14868 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14869 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14870
14871+ pax_force_retaddr
14872 ret;
14873 ENDPROC(__serpent_enc_blk16)
14874
14875@@ -664,6 +666,7 @@ __serpent_dec_blk16:
14876 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14877 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14878
14879+ pax_force_retaddr
14880 ret;
14881 ENDPROC(__serpent_dec_blk16)
14882
14883@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
14884
14885 vzeroupper;
14886
14887+ pax_force_retaddr
14888 ret;
14889 ENDPROC(serpent_ecb_enc_16way)
14890
14891@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
14892
14893 vzeroupper;
14894
14895+ pax_force_retaddr
14896 ret;
14897 ENDPROC(serpent_ecb_dec_16way)
14898
14899@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
14900
14901 vzeroupper;
14902
14903+ pax_force_retaddr
14904 ret;
14905 ENDPROC(serpent_cbc_dec_16way)
14906
14907@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
14908
14909 vzeroupper;
14910
14911+ pax_force_retaddr
14912 ret;
14913 ENDPROC(serpent_ctr_16way)
14914
14915@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
14916
14917 vzeroupper;
14918
14919+ pax_force_retaddr
14920 ret;
14921 ENDPROC(serpent_xts_enc_16way)
14922
14923@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
14924
14925 vzeroupper;
14926
14927+ pax_force_retaddr
14928 ret;
14929 ENDPROC(serpent_xts_dec_16way)
14930diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14931index acc066c..1559cc4 100644
14932--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14933+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
14934@@ -25,6 +25,7 @@
14935 */
14936
14937 #include <linux/linkage.h>
14938+#include <asm/alternative-asm.h>
14939
14940 .file "serpent-sse2-x86_64-asm_64.S"
14941 .text
14942@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
14943 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14944 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14945
14946+ pax_force_retaddr
14947 ret;
14948
14949 .L__enc_xor8:
14950 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
14951 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
14952
14953+ pax_force_retaddr
14954 ret;
14955 ENDPROC(__serpent_enc_blk_8way)
14956
14957@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
14958 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
14959 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
14960
14961+ pax_force_retaddr
14962 ret;
14963 ENDPROC(serpent_dec_blk_8way)
14964diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
14965index a410950..9dfe7ad 100644
14966--- a/arch/x86/crypto/sha1_ssse3_asm.S
14967+++ b/arch/x86/crypto/sha1_ssse3_asm.S
14968@@ -29,6 +29,7 @@
14969 */
14970
14971 #include <linux/linkage.h>
14972+#include <asm/alternative-asm.h>
14973
14974 #define CTX %rdi // arg1
14975 #define BUF %rsi // arg2
14976@@ -75,9 +76,9 @@
14977
14978 push %rbx
14979 push %rbp
14980- push %r12
14981+ push %r14
14982
14983- mov %rsp, %r12
14984+ mov %rsp, %r14
14985 sub $64, %rsp # allocate workspace
14986 and $~15, %rsp # align stack
14987
14988@@ -99,11 +100,12 @@
14989 xor %rax, %rax
14990 rep stosq
14991
14992- mov %r12, %rsp # deallocate workspace
14993+ mov %r14, %rsp # deallocate workspace
14994
14995- pop %r12
14996+ pop %r14
14997 pop %rbp
14998 pop %rbx
14999+ pax_force_retaddr
15000 ret
15001
15002 ENDPROC(\name)
15003diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
15004index 642f156..51a513c 100644
15005--- a/arch/x86/crypto/sha256-avx-asm.S
15006+++ b/arch/x86/crypto/sha256-avx-asm.S
15007@@ -49,6 +49,7 @@
15008
15009 #ifdef CONFIG_AS_AVX
15010 #include <linux/linkage.h>
15011+#include <asm/alternative-asm.h>
15012
15013 ## assume buffers not aligned
15014 #define VMOVDQ vmovdqu
15015@@ -460,6 +461,7 @@ done_hash:
15016 popq %r13
15017 popq %rbp
15018 popq %rbx
15019+ pax_force_retaddr
15020 ret
15021 ENDPROC(sha256_transform_avx)
15022
15023diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
15024index 9e86944..3795e6a 100644
15025--- a/arch/x86/crypto/sha256-avx2-asm.S
15026+++ b/arch/x86/crypto/sha256-avx2-asm.S
15027@@ -50,6 +50,7 @@
15028
15029 #ifdef CONFIG_AS_AVX2
15030 #include <linux/linkage.h>
15031+#include <asm/alternative-asm.h>
15032
15033 ## assume buffers not aligned
15034 #define VMOVDQ vmovdqu
15035@@ -720,6 +721,7 @@ done_hash:
15036 popq %r12
15037 popq %rbp
15038 popq %rbx
15039+ pax_force_retaddr
15040 ret
15041 ENDPROC(sha256_transform_rorx)
15042
15043diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
15044index f833b74..8c62a9e 100644
15045--- a/arch/x86/crypto/sha256-ssse3-asm.S
15046+++ b/arch/x86/crypto/sha256-ssse3-asm.S
15047@@ -47,6 +47,7 @@
15048 ########################################################################
15049
15050 #include <linux/linkage.h>
15051+#include <asm/alternative-asm.h>
15052
15053 ## assume buffers not aligned
15054 #define MOVDQ movdqu
15055@@ -471,6 +472,7 @@ done_hash:
15056 popq %rbp
15057 popq %rbx
15058
15059+ pax_force_retaddr
15060 ret
15061 ENDPROC(sha256_transform_ssse3)
15062
15063diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
15064index 974dde9..a823ff9 100644
15065--- a/arch/x86/crypto/sha512-avx-asm.S
15066+++ b/arch/x86/crypto/sha512-avx-asm.S
15067@@ -49,6 +49,7 @@
15068
15069 #ifdef CONFIG_AS_AVX
15070 #include <linux/linkage.h>
15071+#include <asm/alternative-asm.h>
15072
15073 .text
15074
15075@@ -364,6 +365,7 @@ updateblock:
15076 mov frame_RSPSAVE(%rsp), %rsp
15077
15078 nowork:
15079+ pax_force_retaddr
15080 ret
15081 ENDPROC(sha512_transform_avx)
15082
15083diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
15084index 568b961..ed20c37 100644
15085--- a/arch/x86/crypto/sha512-avx2-asm.S
15086+++ b/arch/x86/crypto/sha512-avx2-asm.S
15087@@ -51,6 +51,7 @@
15088
15089 #ifdef CONFIG_AS_AVX2
15090 #include <linux/linkage.h>
15091+#include <asm/alternative-asm.h>
15092
15093 .text
15094
15095@@ -678,6 +679,7 @@ done_hash:
15096
15097 # Restore Stack Pointer
15098 mov frame_RSPSAVE(%rsp), %rsp
15099+ pax_force_retaddr
15100 ret
15101 ENDPROC(sha512_transform_rorx)
15102
15103diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
15104index fb56855..6edd768 100644
15105--- a/arch/x86/crypto/sha512-ssse3-asm.S
15106+++ b/arch/x86/crypto/sha512-ssse3-asm.S
15107@@ -48,6 +48,7 @@
15108 ########################################################################
15109
15110 #include <linux/linkage.h>
15111+#include <asm/alternative-asm.h>
15112
15113 .text
15114
15115@@ -363,6 +364,7 @@ updateblock:
15116 mov frame_RSPSAVE(%rsp), %rsp
15117
15118 nowork:
15119+ pax_force_retaddr
15120 ret
15121 ENDPROC(sha512_transform_ssse3)
15122
15123diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15124index 0505813..b067311 100644
15125--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15126+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
15127@@ -24,6 +24,7 @@
15128 */
15129
15130 #include <linux/linkage.h>
15131+#include <asm/alternative-asm.h>
15132 #include "glue_helper-asm-avx.S"
15133
15134 .file "twofish-avx-x86_64-asm_64.S"
15135@@ -284,6 +285,7 @@ __twofish_enc_blk8:
15136 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
15137 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
15138
15139+ pax_force_retaddr
15140 ret;
15141 ENDPROC(__twofish_enc_blk8)
15142
15143@@ -324,6 +326,7 @@ __twofish_dec_blk8:
15144 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
15145 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
15146
15147+ pax_force_retaddr
15148 ret;
15149 ENDPROC(__twofish_dec_blk8)
15150
15151@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
15152
15153 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15154
15155+ pax_force_retaddr
15156 ret;
15157 ENDPROC(twofish_ecb_enc_8way)
15158
15159@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
15160
15161 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15162
15163+ pax_force_retaddr
15164 ret;
15165 ENDPROC(twofish_ecb_dec_8way)
15166
15167@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
15168 * %rdx: src
15169 */
15170
15171- pushq %r12;
15172+ pushq %r14;
15173
15174 movq %rsi, %r11;
15175- movq %rdx, %r12;
15176+ movq %rdx, %r14;
15177
15178 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15179
15180 call __twofish_dec_blk8;
15181
15182- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15183+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15184
15185- popq %r12;
15186+ popq %r14;
15187
15188+ pax_force_retaddr
15189 ret;
15190 ENDPROC(twofish_cbc_dec_8way)
15191
15192@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
15193 * %rcx: iv (little endian, 128bit)
15194 */
15195
15196- pushq %r12;
15197+ pushq %r14;
15198
15199 movq %rsi, %r11;
15200- movq %rdx, %r12;
15201+ movq %rdx, %r14;
15202
15203 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
15204 RD2, RX0, RX1, RY0);
15205
15206 call __twofish_enc_blk8;
15207
15208- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15209+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15210
15211- popq %r12;
15212+ popq %r14;
15213
15214+ pax_force_retaddr
15215 ret;
15216 ENDPROC(twofish_ctr_8way)
15217
15218@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
15219 /* dst <= regs xor IVs(in dst) */
15220 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
15221
15222+ pax_force_retaddr
15223 ret;
15224 ENDPROC(twofish_xts_enc_8way)
15225
15226@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
15227 /* dst <= regs xor IVs(in dst) */
15228 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
15229
15230+ pax_force_retaddr
15231 ret;
15232 ENDPROC(twofish_xts_dec_8way)
15233diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15234index 1c3b7ce..02f578d 100644
15235--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15236+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
15237@@ -21,6 +21,7 @@
15238 */
15239
15240 #include <linux/linkage.h>
15241+#include <asm/alternative-asm.h>
15242
15243 .file "twofish-x86_64-asm-3way.S"
15244 .text
15245@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
15246 popq %r13;
15247 popq %r14;
15248 popq %r15;
15249+ pax_force_retaddr
15250 ret;
15251
15252 .L__enc_xor3:
15253@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
15254 popq %r13;
15255 popq %r14;
15256 popq %r15;
15257+ pax_force_retaddr
15258 ret;
15259 ENDPROC(__twofish_enc_blk_3way)
15260
15261@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
15262 popq %r13;
15263 popq %r14;
15264 popq %r15;
15265+ pax_force_retaddr
15266 ret;
15267 ENDPROC(twofish_dec_blk_3way)
15268diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
15269index a039d21..524b8b2 100644
15270--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
15271+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
15272@@ -22,6 +22,7 @@
15273
15274 #include <linux/linkage.h>
15275 #include <asm/asm-offsets.h>
15276+#include <asm/alternative-asm.h>
15277
15278 #define a_offset 0
15279 #define b_offset 4
15280@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
15281
15282 popq R1
15283 movq $1,%rax
15284+ pax_force_retaddr
15285 ret
15286 ENDPROC(twofish_enc_blk)
15287
15288@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
15289
15290 popq R1
15291 movq $1,%rax
15292+ pax_force_retaddr
15293 ret
15294 ENDPROC(twofish_dec_blk)
15295diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
15296index d21ff89..6da8e6e 100644
15297--- a/arch/x86/ia32/ia32_aout.c
15298+++ b/arch/x86/ia32/ia32_aout.c
15299@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
15300 unsigned long dump_start, dump_size;
15301 struct user32 dump;
15302
15303+ memset(&dump, 0, sizeof(dump));
15304+
15305 fs = get_fs();
15306 set_fs(KERNEL_DS);
15307 has_dumped = 1;
15308diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
15309index f9e181a..300544c 100644
15310--- a/arch/x86/ia32/ia32_signal.c
15311+++ b/arch/x86/ia32/ia32_signal.c
15312@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void)
15313 if (__get_user(set.sig[0], &frame->sc.oldmask)
15314 || (_COMPAT_NSIG_WORDS > 1
15315 && __copy_from_user((((char *) &set.sig) + 4),
15316- &frame->extramask,
15317+ frame->extramask,
15318 sizeof(frame->extramask))))
15319 goto badframe;
15320
15321@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
15322 sp -= frame_size;
15323 /* Align the stack pointer according to the i386 ABI,
15324 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
15325- sp = ((sp + 4) & -16ul) - 4;
15326+ sp = ((sp - 12) & -16ul) - 4;
15327 return (void __user *) sp;
15328 }
15329
15330@@ -383,10 +383,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
15331 } else {
15332 /* Return stub is in 32bit vsyscall page */
15333 if (current->mm->context.vdso)
15334- restorer = current->mm->context.vdso +
15335- selected_vdso32->sym___kernel_sigreturn;
15336+ restorer = (void __force_user *)(current->mm->context.vdso +
15337+ selected_vdso32->sym___kernel_sigreturn);
15338 else
15339- restorer = &frame->retcode;
15340+ restorer = frame->retcode;
15341 }
15342
15343 put_user_try {
15344@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
15345 * These are actually not used anymore, but left because some
15346 * gdb versions depend on them as a marker.
15347 */
15348- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
15349+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
15350 } put_user_catch(err);
15351
15352 if (err)
15353@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
15354 0xb8,
15355 __NR_ia32_rt_sigreturn,
15356 0x80cd,
15357- 0,
15358+ 0
15359 };
15360
15361 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
15362@@ -461,16 +461,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
15363
15364 if (ksig->ka.sa.sa_flags & SA_RESTORER)
15365 restorer = ksig->ka.sa.sa_restorer;
15366+ else if (current->mm->context.vdso)
15367+ /* Return stub is in 32bit vsyscall page */
15368+ restorer = (void __force_user *)(current->mm->context.vdso +
15369+ selected_vdso32->sym___kernel_rt_sigreturn);
15370 else
15371- restorer = current->mm->context.vdso +
15372- selected_vdso32->sym___kernel_rt_sigreturn;
15373+ restorer = frame->retcode;
15374 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
15375
15376 /*
15377 * Not actually used anymore, but left because some gdb
15378 * versions need it.
15379 */
15380- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
15381+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
15382 } put_user_catch(err);
15383
15384 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
15385diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
15386index 92a2e93..cd4d95f 100644
15387--- a/arch/x86/ia32/ia32entry.S
15388+++ b/arch/x86/ia32/ia32entry.S
15389@@ -15,8 +15,10 @@
15390 #include <asm/irqflags.h>
15391 #include <asm/asm.h>
15392 #include <asm/smap.h>
15393+#include <asm/pgtable.h>
15394 #include <linux/linkage.h>
15395 #include <linux/err.h>
15396+#include <asm/alternative-asm.h>
15397
15398 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15399 #include <linux/elf-em.h>
15400@@ -62,12 +64,12 @@
15401 */
15402 .macro LOAD_ARGS32 offset, _r9=0
15403 .if \_r9
15404- movl \offset+16(%rsp),%r9d
15405+ movl \offset+R9(%rsp),%r9d
15406 .endif
15407- movl \offset+40(%rsp),%ecx
15408- movl \offset+48(%rsp),%edx
15409- movl \offset+56(%rsp),%esi
15410- movl \offset+64(%rsp),%edi
15411+ movl \offset+RCX(%rsp),%ecx
15412+ movl \offset+RDX(%rsp),%edx
15413+ movl \offset+RSI(%rsp),%esi
15414+ movl \offset+RDI(%rsp),%edi
15415 movl %eax,%eax /* zero extension */
15416 .endm
15417
15418@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
15419 ENDPROC(native_irq_enable_sysexit)
15420 #endif
15421
15422+ .macro pax_enter_kernel_user
15423+ pax_set_fptr_mask
15424+#ifdef CONFIG_PAX_MEMORY_UDEREF
15425+ call pax_enter_kernel_user
15426+#endif
15427+ .endm
15428+
15429+ .macro pax_exit_kernel_user
15430+#ifdef CONFIG_PAX_MEMORY_UDEREF
15431+ call pax_exit_kernel_user
15432+#endif
15433+#ifdef CONFIG_PAX_RANDKSTACK
15434+ pushq %rax
15435+ pushq %r11
15436+ call pax_randomize_kstack
15437+ popq %r11
15438+ popq %rax
15439+#endif
15440+ .endm
15441+
15442+ .macro pax_erase_kstack
15443+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15444+ call pax_erase_kstack
15445+#endif
15446+ .endm
15447+
15448 /*
15449 * 32bit SYSENTER instruction entry.
15450 *
15451@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
15452 CFI_REGISTER rsp,rbp
15453 SWAPGS_UNSAFE_STACK
15454 movq PER_CPU_VAR(kernel_stack), %rsp
15455- addq $(KERNEL_STACK_OFFSET),%rsp
15456- /*
15457- * No need to follow this irqs on/off section: the syscall
15458- * disabled irqs, here we enable it straight after entry:
15459- */
15460- ENABLE_INTERRUPTS(CLBR_NONE)
15461 movl %ebp,%ebp /* zero extension */
15462 pushq_cfi $__USER32_DS
15463 /*CFI_REL_OFFSET ss,0*/
15464@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target)
15465 CFI_REL_OFFSET rsp,0
15466 pushfq_cfi
15467 /*CFI_REL_OFFSET rflags,0*/
15468- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
15469- CFI_REGISTER rip,r10
15470+ orl $X86_EFLAGS_IF,(%rsp)
15471+ GET_THREAD_INFO(%r11)
15472+ movl TI_sysenter_return(%r11), %r11d
15473+ CFI_REGISTER rip,r11
15474 pushq_cfi $__USER32_CS
15475 /*CFI_REL_OFFSET cs,0*/
15476 movl %eax, %eax
15477- pushq_cfi %r10
15478+ pushq_cfi %r11
15479 CFI_REL_OFFSET rip,0
15480 pushq_cfi %rax
15481 cld
15482 SAVE_ARGS 0,1,0
15483+ pax_enter_kernel_user
15484+
15485+#ifdef CONFIG_PAX_RANDKSTACK
15486+ pax_erase_kstack
15487+#endif
15488+
15489+ /*
15490+ * No need to follow this irqs on/off section: the syscall
15491+ * disabled irqs, here we enable it straight after entry:
15492+ */
15493+ ENABLE_INTERRUPTS(CLBR_NONE)
15494 /* no need to do an access_ok check here because rbp has been
15495 32bit zero extended */
15496+
15497+#ifdef CONFIG_PAX_MEMORY_UDEREF
15498+ addq pax_user_shadow_base,%rbp
15499+ ASM_PAX_OPEN_USERLAND
15500+#endif
15501+
15502 ASM_STAC
15503 1: movl (%rbp),%ebp
15504 _ASM_EXTABLE(1b,ia32_badarg)
15505 ASM_CLAC
15506
15507+#ifdef CONFIG_PAX_MEMORY_UDEREF
15508+ ASM_PAX_CLOSE_USERLAND
15509+#endif
15510+
15511 /*
15512 * Sysenter doesn't filter flags, so we need to clear NT
15513 * ourselves. To save a few cycles, we can check whether
15514@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target)
15515 jnz sysenter_fix_flags
15516 sysenter_flags_fixed:
15517
15518- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15519- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15520+ GET_THREAD_INFO(%r11)
15521+ orl $TS_COMPAT,TI_status(%r11)
15522+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15523 CFI_REMEMBER_STATE
15524 jnz sysenter_tracesys
15525 cmpq $(IA32_NR_syscalls-1),%rax
15526@@ -172,15 +218,18 @@ sysenter_do_call:
15527 sysenter_dispatch:
15528 call *ia32_sys_call_table(,%rax,8)
15529 movq %rax,RAX-ARGOFFSET(%rsp)
15530+ GET_THREAD_INFO(%r11)
15531 DISABLE_INTERRUPTS(CLBR_NONE)
15532 TRACE_IRQS_OFF
15533- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15534+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15535 jnz sysexit_audit
15536 sysexit_from_sys_call:
15537- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15538+ pax_exit_kernel_user
15539+ pax_erase_kstack
15540+ andl $~TS_COMPAT,TI_status(%r11)
15541 /* clear IF, that popfq doesn't enable interrupts early */
15542- andl $~0x200,EFLAGS-R11(%rsp)
15543- movl RIP-R11(%rsp),%edx /* User %eip */
15544+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
15545+ movl RIP(%rsp),%edx /* User %eip */
15546 CFI_REGISTER rip,rdx
15547 RESTORE_ARGS 0,24,0,0,0,0
15548 xorq %r8,%r8
15549@@ -205,6 +254,9 @@ sysexit_from_sys_call:
15550 movl %eax,%esi /* 2nd arg: syscall number */
15551 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
15552 call __audit_syscall_entry
15553+
15554+ pax_erase_kstack
15555+
15556 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
15557 cmpq $(IA32_NR_syscalls-1),%rax
15558 ja ia32_badsys
15559@@ -216,7 +268,7 @@ sysexit_from_sys_call:
15560 .endm
15561
15562 .macro auditsys_exit exit
15563- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15564+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15565 jnz ia32_ret_from_sys_call
15566 TRACE_IRQS_ON
15567 ENABLE_INTERRUPTS(CLBR_NONE)
15568@@ -227,11 +279,12 @@ sysexit_from_sys_call:
15569 1: setbe %al /* 1 if error, 0 if not */
15570 movzbl %al,%edi /* zero-extend that into %edi */
15571 call __audit_syscall_exit
15572+ GET_THREAD_INFO(%r11)
15573 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
15574 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
15575 DISABLE_INTERRUPTS(CLBR_NONE)
15576 TRACE_IRQS_OFF
15577- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15578+ testl %edi,TI_flags(%r11)
15579 jz \exit
15580 CLEAR_RREGS -ARGOFFSET
15581 jmp int_with_check
15582@@ -253,7 +306,7 @@ sysenter_fix_flags:
15583
15584 sysenter_tracesys:
15585 #ifdef CONFIG_AUDITSYSCALL
15586- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15587+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15588 jz sysenter_auditsys
15589 #endif
15590 SAVE_REST
15591@@ -265,6 +318,9 @@ sysenter_tracesys:
15592 RESTORE_REST
15593 cmpq $(IA32_NR_syscalls-1),%rax
15594 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
15595+
15596+ pax_erase_kstack
15597+
15598 jmp sysenter_do_call
15599 CFI_ENDPROC
15600 ENDPROC(ia32_sysenter_target)
15601@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target)
15602 ENTRY(ia32_cstar_target)
15603 CFI_STARTPROC32 simple
15604 CFI_SIGNAL_FRAME
15605- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15606+ CFI_DEF_CFA rsp,0
15607 CFI_REGISTER rip,rcx
15608 /*CFI_REGISTER rflags,r11*/
15609 SWAPGS_UNSAFE_STACK
15610 movl %esp,%r8d
15611 CFI_REGISTER rsp,r8
15612 movq PER_CPU_VAR(kernel_stack),%rsp
15613+ SAVE_ARGS 8*6,0,0
15614+ pax_enter_kernel_user
15615+
15616+#ifdef CONFIG_PAX_RANDKSTACK
15617+ pax_erase_kstack
15618+#endif
15619+
15620 /*
15621 * No need to follow this irqs on/off section: the syscall
15622 * disabled irqs and here we enable it straight after entry:
15623 */
15624 ENABLE_INTERRUPTS(CLBR_NONE)
15625- SAVE_ARGS 8,0,0
15626 movl %eax,%eax /* zero extension */
15627 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15628 movq %rcx,RIP-ARGOFFSET(%rsp)
15629@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target)
15630 /* no need to do an access_ok check here because r8 has been
15631 32bit zero extended */
15632 /* hardware stack frame is complete now */
15633+
15634+#ifdef CONFIG_PAX_MEMORY_UDEREF
15635+ ASM_PAX_OPEN_USERLAND
15636+ movq pax_user_shadow_base,%r8
15637+ addq RSP-ARGOFFSET(%rsp),%r8
15638+#endif
15639+
15640 ASM_STAC
15641 1: movl (%r8),%r9d
15642 _ASM_EXTABLE(1b,ia32_badarg)
15643 ASM_CLAC
15644- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15645- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15646+
15647+#ifdef CONFIG_PAX_MEMORY_UDEREF
15648+ ASM_PAX_CLOSE_USERLAND
15649+#endif
15650+
15651+ GET_THREAD_INFO(%r11)
15652+ orl $TS_COMPAT,TI_status(%r11)
15653+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15654 CFI_REMEMBER_STATE
15655 jnz cstar_tracesys
15656 cmpq $IA32_NR_syscalls-1,%rax
15657@@ -335,13 +410,16 @@ cstar_do_call:
15658 cstar_dispatch:
15659 call *ia32_sys_call_table(,%rax,8)
15660 movq %rax,RAX-ARGOFFSET(%rsp)
15661+ GET_THREAD_INFO(%r11)
15662 DISABLE_INTERRUPTS(CLBR_NONE)
15663 TRACE_IRQS_OFF
15664- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15665+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
15666 jnz sysretl_audit
15667 sysretl_from_sys_call:
15668- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15669- RESTORE_ARGS 0,-ARG_SKIP,0,0,0
15670+ pax_exit_kernel_user
15671+ pax_erase_kstack
15672+ andl $~TS_COMPAT,TI_status(%r11)
15673+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0
15674 movl RIP-ARGOFFSET(%rsp),%ecx
15675 CFI_REGISTER rip,rcx
15676 movl EFLAGS-ARGOFFSET(%rsp),%r11d
15677@@ -368,7 +446,7 @@ sysretl_audit:
15678
15679 cstar_tracesys:
15680 #ifdef CONFIG_AUDITSYSCALL
15681- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15682+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
15683 jz cstar_auditsys
15684 #endif
15685 xchgl %r9d,%ebp
15686@@ -382,11 +460,19 @@ cstar_tracesys:
15687 xchgl %ebp,%r9d
15688 cmpq $(IA32_NR_syscalls-1),%rax
15689 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
15690+
15691+ pax_erase_kstack
15692+
15693 jmp cstar_do_call
15694 END(ia32_cstar_target)
15695
15696 ia32_badarg:
15697 ASM_CLAC
15698+
15699+#ifdef CONFIG_PAX_MEMORY_UDEREF
15700+ ASM_PAX_CLOSE_USERLAND
15701+#endif
15702+
15703 movq $-EFAULT,%rax
15704 jmp ia32_sysret
15705 CFI_ENDPROC
15706@@ -423,19 +509,26 @@ ENTRY(ia32_syscall)
15707 CFI_REL_OFFSET rip,RIP-RIP
15708 PARAVIRT_ADJUST_EXCEPTION_FRAME
15709 SWAPGS
15710- /*
15711- * No need to follow this irqs on/off section: the syscall
15712- * disabled irqs and here we enable it straight after entry:
15713- */
15714- ENABLE_INTERRUPTS(CLBR_NONE)
15715 movl %eax,%eax
15716 pushq_cfi %rax
15717 cld
15718 /* note the registers are not zero extended to the sf.
15719 this could be a problem. */
15720 SAVE_ARGS 0,1,0
15721- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15722- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15723+ pax_enter_kernel_user
15724+
15725+#ifdef CONFIG_PAX_RANDKSTACK
15726+ pax_erase_kstack
15727+#endif
15728+
15729+ /*
15730+ * No need to follow this irqs on/off section: the syscall
15731+ * disabled irqs and here we enable it straight after entry:
15732+ */
15733+ ENABLE_INTERRUPTS(CLBR_NONE)
15734+ GET_THREAD_INFO(%r11)
15735+ orl $TS_COMPAT,TI_status(%r11)
15736+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
15737 jnz ia32_tracesys
15738 cmpq $(IA32_NR_syscalls-1),%rax
15739 ja ia32_badsys
15740@@ -458,6 +551,9 @@ ia32_tracesys:
15741 RESTORE_REST
15742 cmpq $(IA32_NR_syscalls-1),%rax
15743 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
15744+
15745+ pax_erase_kstack
15746+
15747 jmp ia32_do_call
15748 END(ia32_syscall)
15749
15750diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
15751index 8e0ceec..af13504 100644
15752--- a/arch/x86/ia32/sys_ia32.c
15753+++ b/arch/x86/ia32/sys_ia32.c
15754@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
15755 */
15756 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
15757 {
15758- typeof(ubuf->st_uid) uid = 0;
15759- typeof(ubuf->st_gid) gid = 0;
15760+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
15761+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
15762 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
15763 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
15764 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
15765diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
15766index 372231c..51b537d 100644
15767--- a/arch/x86/include/asm/alternative-asm.h
15768+++ b/arch/x86/include/asm/alternative-asm.h
15769@@ -18,6 +18,45 @@
15770 .endm
15771 #endif
15772
15773+#ifdef KERNEXEC_PLUGIN
15774+ .macro pax_force_retaddr_bts rip=0
15775+ btsq $63,\rip(%rsp)
15776+ .endm
15777+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
15778+ .macro pax_force_retaddr rip=0, reload=0
15779+ btsq $63,\rip(%rsp)
15780+ .endm
15781+ .macro pax_force_fptr ptr
15782+ btsq $63,\ptr
15783+ .endm
15784+ .macro pax_set_fptr_mask
15785+ .endm
15786+#endif
15787+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15788+ .macro pax_force_retaddr rip=0, reload=0
15789+ .if \reload
15790+ pax_set_fptr_mask
15791+ .endif
15792+ orq %r12,\rip(%rsp)
15793+ .endm
15794+ .macro pax_force_fptr ptr
15795+ orq %r12,\ptr
15796+ .endm
15797+ .macro pax_set_fptr_mask
15798+ movabs $0x8000000000000000,%r12
15799+ .endm
15800+#endif
15801+#else
15802+ .macro pax_force_retaddr rip=0, reload=0
15803+ .endm
15804+ .macro pax_force_fptr ptr
15805+ .endm
15806+ .macro pax_force_retaddr_bts rip=0
15807+ .endm
15808+ .macro pax_set_fptr_mask
15809+ .endm
15810+#endif
15811+
15812 .macro altinstruction_entry orig alt feature orig_len alt_len
15813 .long \orig - .
15814 .long \alt - .
15815diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
15816index 473bdbe..b1e3377 100644
15817--- a/arch/x86/include/asm/alternative.h
15818+++ b/arch/x86/include/asm/alternative.h
15819@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15820 ".pushsection .discard,\"aw\",@progbits\n" \
15821 DISCARD_ENTRY(1) \
15822 ".popsection\n" \
15823- ".pushsection .altinstr_replacement, \"ax\"\n" \
15824+ ".pushsection .altinstr_replacement, \"a\"\n" \
15825 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
15826 ".popsection"
15827
15828@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
15829 DISCARD_ENTRY(1) \
15830 DISCARD_ENTRY(2) \
15831 ".popsection\n" \
15832- ".pushsection .altinstr_replacement, \"ax\"\n" \
15833+ ".pushsection .altinstr_replacement, \"a\"\n" \
15834 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
15835 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
15836 ".popsection"
15837diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
15838index 465b309..ab7e51f 100644
15839--- a/arch/x86/include/asm/apic.h
15840+++ b/arch/x86/include/asm/apic.h
15841@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
15842
15843 #ifdef CONFIG_X86_LOCAL_APIC
15844
15845-extern unsigned int apic_verbosity;
15846+extern int apic_verbosity;
15847 extern int local_apic_timer_c2_ok;
15848
15849 extern int disable_apic;
15850diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
15851index 20370c6..a2eb9b0 100644
15852--- a/arch/x86/include/asm/apm.h
15853+++ b/arch/x86/include/asm/apm.h
15854@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
15855 __asm__ __volatile__(APM_DO_ZERO_SEGS
15856 "pushl %%edi\n\t"
15857 "pushl %%ebp\n\t"
15858- "lcall *%%cs:apm_bios_entry\n\t"
15859+ "lcall *%%ss:apm_bios_entry\n\t"
15860 "setc %%al\n\t"
15861 "popl %%ebp\n\t"
15862 "popl %%edi\n\t"
15863@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
15864 __asm__ __volatile__(APM_DO_ZERO_SEGS
15865 "pushl %%edi\n\t"
15866 "pushl %%ebp\n\t"
15867- "lcall *%%cs:apm_bios_entry\n\t"
15868+ "lcall *%%ss:apm_bios_entry\n\t"
15869 "setc %%bl\n\t"
15870 "popl %%ebp\n\t"
15871 "popl %%edi\n\t"
15872diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
15873index 6dd1c7dd..5a85bf2 100644
15874--- a/arch/x86/include/asm/atomic.h
15875+++ b/arch/x86/include/asm/atomic.h
15876@@ -24,7 +24,18 @@
15877 */
15878 static inline int atomic_read(const atomic_t *v)
15879 {
15880- return (*(volatile int *)&(v)->counter);
15881+ return (*(volatile const int *)&(v)->counter);
15882+}
15883+
15884+/**
15885+ * atomic_read_unchecked - read atomic variable
15886+ * @v: pointer of type atomic_unchecked_t
15887+ *
15888+ * Atomically reads the value of @v.
15889+ */
15890+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
15891+{
15892+ return (*(volatile const int *)&(v)->counter);
15893 }
15894
15895 /**
15896@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
15897 }
15898
15899 /**
15900+ * atomic_set_unchecked - set atomic variable
15901+ * @v: pointer of type atomic_unchecked_t
15902+ * @i: required value
15903+ *
15904+ * Atomically sets the value of @v to @i.
15905+ */
15906+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
15907+{
15908+ v->counter = i;
15909+}
15910+
15911+/**
15912 * atomic_add - add integer to atomic variable
15913 * @i: integer value to add
15914 * @v: pointer of type atomic_t
15915@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
15916 */
15917 static inline void atomic_add(int i, atomic_t *v)
15918 {
15919- asm volatile(LOCK_PREFIX "addl %1,%0"
15920+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15921+
15922+#ifdef CONFIG_PAX_REFCOUNT
15923+ "jno 0f\n"
15924+ LOCK_PREFIX "subl %1,%0\n"
15925+ "int $4\n0:\n"
15926+ _ASM_EXTABLE(0b, 0b)
15927+#endif
15928+
15929+ : "+m" (v->counter)
15930+ : "ir" (i));
15931+}
15932+
15933+/**
15934+ * atomic_add_unchecked - add integer to atomic variable
15935+ * @i: integer value to add
15936+ * @v: pointer of type atomic_unchecked_t
15937+ *
15938+ * Atomically adds @i to @v.
15939+ */
15940+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
15941+{
15942+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
15943 : "+m" (v->counter)
15944 : "ir" (i));
15945 }
15946@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
15947 */
15948 static inline void atomic_sub(int i, atomic_t *v)
15949 {
15950- asm volatile(LOCK_PREFIX "subl %1,%0"
15951+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15952+
15953+#ifdef CONFIG_PAX_REFCOUNT
15954+ "jno 0f\n"
15955+ LOCK_PREFIX "addl %1,%0\n"
15956+ "int $4\n0:\n"
15957+ _ASM_EXTABLE(0b, 0b)
15958+#endif
15959+
15960+ : "+m" (v->counter)
15961+ : "ir" (i));
15962+}
15963+
15964+/**
15965+ * atomic_sub_unchecked - subtract integer from atomic variable
15966+ * @i: integer value to subtract
15967+ * @v: pointer of type atomic_unchecked_t
15968+ *
15969+ * Atomically subtracts @i from @v.
15970+ */
15971+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
15972+{
15973+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
15974 : "+m" (v->counter)
15975 : "ir" (i));
15976 }
15977@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
15978 */
15979 static inline int atomic_sub_and_test(int i, atomic_t *v)
15980 {
15981- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
15982+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
15983 }
15984
15985 /**
15986@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
15987 */
15988 static inline void atomic_inc(atomic_t *v)
15989 {
15990- asm volatile(LOCK_PREFIX "incl %0"
15991+ asm volatile(LOCK_PREFIX "incl %0\n"
15992+
15993+#ifdef CONFIG_PAX_REFCOUNT
15994+ "jno 0f\n"
15995+ LOCK_PREFIX "decl %0\n"
15996+ "int $4\n0:\n"
15997+ _ASM_EXTABLE(0b, 0b)
15998+#endif
15999+
16000+ : "+m" (v->counter));
16001+}
16002+
16003+/**
16004+ * atomic_inc_unchecked - increment atomic variable
16005+ * @v: pointer of type atomic_unchecked_t
16006+ *
16007+ * Atomically increments @v by 1.
16008+ */
16009+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
16010+{
16011+ asm volatile(LOCK_PREFIX "incl %0\n"
16012 : "+m" (v->counter));
16013 }
16014
16015@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
16016 */
16017 static inline void atomic_dec(atomic_t *v)
16018 {
16019- asm volatile(LOCK_PREFIX "decl %0"
16020+ asm volatile(LOCK_PREFIX "decl %0\n"
16021+
16022+#ifdef CONFIG_PAX_REFCOUNT
16023+ "jno 0f\n"
16024+ LOCK_PREFIX "incl %0\n"
16025+ "int $4\n0:\n"
16026+ _ASM_EXTABLE(0b, 0b)
16027+#endif
16028+
16029+ : "+m" (v->counter));
16030+}
16031+
16032+/**
16033+ * atomic_dec_unchecked - decrement atomic variable
16034+ * @v: pointer of type atomic_unchecked_t
16035+ *
16036+ * Atomically decrements @v by 1.
16037+ */
16038+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
16039+{
16040+ asm volatile(LOCK_PREFIX "decl %0\n"
16041 : "+m" (v->counter));
16042 }
16043
16044@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
16045 */
16046 static inline int atomic_dec_and_test(atomic_t *v)
16047 {
16048- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
16049+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
16050 }
16051
16052 /**
16053@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
16054 */
16055 static inline int atomic_inc_and_test(atomic_t *v)
16056 {
16057- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
16058+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
16059+}
16060+
16061+/**
16062+ * atomic_inc_and_test_unchecked - increment and test
16063+ * @v: pointer of type atomic_unchecked_t
16064+ *
16065+ * Atomically increments @v by 1
16066+ * and returns true if the result is zero, or false for all
16067+ * other cases.
16068+ */
16069+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
16070+{
16071+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
16072 }
16073
16074 /**
16075@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
16076 */
16077 static inline int atomic_add_negative(int i, atomic_t *v)
16078 {
16079- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
16080+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
16081 }
16082
16083 /**
16084@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
16085 *
16086 * Atomically adds @i to @v and returns @i + @v
16087 */
16088-static inline int atomic_add_return(int i, atomic_t *v)
16089+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
16090+{
16091+ return i + xadd_check_overflow(&v->counter, i);
16092+}
16093+
16094+/**
16095+ * atomic_add_return_unchecked - add integer and return
16096+ * @i: integer value to add
16097+ * @v: pointer of type atomic_unchecked_t
16098+ *
16099+ * Atomically adds @i to @v and returns @i + @v
16100+ */
16101+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
16102 {
16103 return i + xadd(&v->counter, i);
16104 }
16105@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
16106 *
16107 * Atomically subtracts @i from @v and returns @v - @i
16108 */
16109-static inline int atomic_sub_return(int i, atomic_t *v)
16110+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
16111 {
16112 return atomic_add_return(-i, v);
16113 }
16114
16115 #define atomic_inc_return(v) (atomic_add_return(1, v))
16116+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
16117+{
16118+ return atomic_add_return_unchecked(1, v);
16119+}
16120 #define atomic_dec_return(v) (atomic_sub_return(1, v))
16121
16122-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
16123+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
16124+{
16125+ return cmpxchg(&v->counter, old, new);
16126+}
16127+
16128+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
16129 {
16130 return cmpxchg(&v->counter, old, new);
16131 }
16132@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
16133 return xchg(&v->counter, new);
16134 }
16135
16136+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
16137+{
16138+ return xchg(&v->counter, new);
16139+}
16140+
16141 /**
16142 * __atomic_add_unless - add unless the number is already a given value
16143 * @v: pointer of type atomic_t
16144@@ -191,14 +337,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
16145 * Atomically adds @a to @v, so long as @v was not already @u.
16146 * Returns the old value of @v.
16147 */
16148-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
16149+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
16150 {
16151- int c, old;
16152+ int c, old, new;
16153 c = atomic_read(v);
16154 for (;;) {
16155- if (unlikely(c == (u)))
16156+ if (unlikely(c == u))
16157 break;
16158- old = atomic_cmpxchg((v), c, c + (a));
16159+
16160+ asm volatile("addl %2,%0\n"
16161+
16162+#ifdef CONFIG_PAX_REFCOUNT
16163+ "jno 0f\n"
16164+ "subl %2,%0\n"
16165+ "int $4\n0:\n"
16166+ _ASM_EXTABLE(0b, 0b)
16167+#endif
16168+
16169+ : "=r" (new)
16170+ : "0" (c), "ir" (a));
16171+
16172+ old = atomic_cmpxchg(v, c, new);
16173 if (likely(old == c))
16174 break;
16175 c = old;
16176@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
16177 }
16178
16179 /**
16180+ * atomic_inc_not_zero_hint - increment if not null
16181+ * @v: pointer of type atomic_t
16182+ * @hint: probable value of the atomic before the increment
16183+ *
16184+ * This version of atomic_inc_not_zero() gives a hint of probable
16185+ * value of the atomic. This helps processor to not read the memory
16186+ * before doing the atomic read/modify/write cycle, lowering
16187+ * number of bus transactions on some arches.
16188+ *
16189+ * Returns: 0 if increment was not done, 1 otherwise.
16190+ */
16191+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
16192+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
16193+{
16194+ int val, c = hint, new;
16195+
16196+ /* sanity test, should be removed by compiler if hint is a constant */
16197+ if (!hint)
16198+ return __atomic_add_unless(v, 1, 0);
16199+
16200+ do {
16201+ asm volatile("incl %0\n"
16202+
16203+#ifdef CONFIG_PAX_REFCOUNT
16204+ "jno 0f\n"
16205+ "decl %0\n"
16206+ "int $4\n0:\n"
16207+ _ASM_EXTABLE(0b, 0b)
16208+#endif
16209+
16210+ : "=r" (new)
16211+ : "0" (c));
16212+
16213+ val = atomic_cmpxchg(v, c, new);
16214+ if (val == c)
16215+ return 1;
16216+ c = val;
16217+ } while (c);
16218+
16219+ return 0;
16220+}
16221+
16222+/**
16223 * atomic_inc_short - increment of a short integer
16224 * @v: pointer to type int
16225 *
16226@@ -235,14 +437,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
16227 #endif
16228
16229 /* These are x86-specific, used by some header files */
16230-#define atomic_clear_mask(mask, addr) \
16231- asm volatile(LOCK_PREFIX "andl %0,%1" \
16232- : : "r" (~(mask)), "m" (*(addr)) : "memory")
16233+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
16234+{
16235+ asm volatile(LOCK_PREFIX "andl %1,%0"
16236+ : "+m" (v->counter)
16237+ : "r" (~(mask))
16238+ : "memory");
16239+}
16240
16241-#define atomic_set_mask(mask, addr) \
16242- asm volatile(LOCK_PREFIX "orl %0,%1" \
16243- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
16244- : "memory")
16245+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
16246+{
16247+ asm volatile(LOCK_PREFIX "andl %1,%0"
16248+ : "+m" (v->counter)
16249+ : "r" (~(mask))
16250+ : "memory");
16251+}
16252+
16253+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
16254+{
16255+ asm volatile(LOCK_PREFIX "orl %1,%0"
16256+ : "+m" (v->counter)
16257+ : "r" (mask)
16258+ : "memory");
16259+}
16260+
16261+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
16262+{
16263+ asm volatile(LOCK_PREFIX "orl %1,%0"
16264+ : "+m" (v->counter)
16265+ : "r" (mask)
16266+ : "memory");
16267+}
16268
16269 #ifdef CONFIG_X86_32
16270 # include <asm/atomic64_32.h>
16271diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
16272index b154de7..bf18a5a 100644
16273--- a/arch/x86/include/asm/atomic64_32.h
16274+++ b/arch/x86/include/asm/atomic64_32.h
16275@@ -12,6 +12,14 @@ typedef struct {
16276 u64 __aligned(8) counter;
16277 } atomic64_t;
16278
16279+#ifdef CONFIG_PAX_REFCOUNT
16280+typedef struct {
16281+ u64 __aligned(8) counter;
16282+} atomic64_unchecked_t;
16283+#else
16284+typedef atomic64_t atomic64_unchecked_t;
16285+#endif
16286+
16287 #define ATOMIC64_INIT(val) { (val) }
16288
16289 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
16290@@ -37,21 +45,31 @@ typedef struct {
16291 ATOMIC64_DECL_ONE(sym##_386)
16292
16293 ATOMIC64_DECL_ONE(add_386);
16294+ATOMIC64_DECL_ONE(add_unchecked_386);
16295 ATOMIC64_DECL_ONE(sub_386);
16296+ATOMIC64_DECL_ONE(sub_unchecked_386);
16297 ATOMIC64_DECL_ONE(inc_386);
16298+ATOMIC64_DECL_ONE(inc_unchecked_386);
16299 ATOMIC64_DECL_ONE(dec_386);
16300+ATOMIC64_DECL_ONE(dec_unchecked_386);
16301 #endif
16302
16303 #define alternative_atomic64(f, out, in...) \
16304 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
16305
16306 ATOMIC64_DECL(read);
16307+ATOMIC64_DECL(read_unchecked);
16308 ATOMIC64_DECL(set);
16309+ATOMIC64_DECL(set_unchecked);
16310 ATOMIC64_DECL(xchg);
16311 ATOMIC64_DECL(add_return);
16312+ATOMIC64_DECL(add_return_unchecked);
16313 ATOMIC64_DECL(sub_return);
16314+ATOMIC64_DECL(sub_return_unchecked);
16315 ATOMIC64_DECL(inc_return);
16316+ATOMIC64_DECL(inc_return_unchecked);
16317 ATOMIC64_DECL(dec_return);
16318+ATOMIC64_DECL(dec_return_unchecked);
16319 ATOMIC64_DECL(dec_if_positive);
16320 ATOMIC64_DECL(inc_not_zero);
16321 ATOMIC64_DECL(add_unless);
16322@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
16323 }
16324
16325 /**
16326+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
16327+ * @p: pointer to type atomic64_unchecked_t
16328+ * @o: expected value
16329+ * @n: new value
16330+ *
16331+ * Atomically sets @v to @n if it was equal to @o and returns
16332+ * the old value.
16333+ */
16334+
16335+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
16336+{
16337+ return cmpxchg64(&v->counter, o, n);
16338+}
16339+
16340+/**
16341 * atomic64_xchg - xchg atomic64 variable
16342 * @v: pointer to type atomic64_t
16343 * @n: value to assign
16344@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
16345 }
16346
16347 /**
16348+ * atomic64_set_unchecked - set atomic64 variable
16349+ * @v: pointer to type atomic64_unchecked_t
16350+ * @n: value to assign
16351+ *
16352+ * Atomically sets the value of @v to @n.
16353+ */
16354+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
16355+{
16356+ unsigned high = (unsigned)(i >> 32);
16357+ unsigned low = (unsigned)i;
16358+ alternative_atomic64(set, /* no output */,
16359+ "S" (v), "b" (low), "c" (high)
16360+ : "eax", "edx", "memory");
16361+}
16362+
16363+/**
16364 * atomic64_read - read atomic64 variable
16365 * @v: pointer to type atomic64_t
16366 *
16367@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
16368 }
16369
16370 /**
16371+ * atomic64_read_unchecked - read atomic64 variable
16372+ * @v: pointer to type atomic64_unchecked_t
16373+ *
16374+ * Atomically reads the value of @v and returns it.
16375+ */
16376+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
16377+{
16378+ long long r;
16379+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
16380+ return r;
16381+ }
16382+
16383+/**
16384 * atomic64_add_return - add and return
16385 * @i: integer value to add
16386 * @v: pointer to type atomic64_t
16387@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
16388 return i;
16389 }
16390
16391+/**
16392+ * atomic64_add_return_unchecked - add and return
16393+ * @i: integer value to add
16394+ * @v: pointer to type atomic64_unchecked_t
16395+ *
16396+ * Atomically adds @i to @v and returns @i + *@v
16397+ */
16398+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
16399+{
16400+ alternative_atomic64(add_return_unchecked,
16401+ ASM_OUTPUT2("+A" (i), "+c" (v)),
16402+ ASM_NO_INPUT_CLOBBER("memory"));
16403+ return i;
16404+}
16405+
16406 /*
16407 * Other variants with different arithmetic operators:
16408 */
16409@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
16410 return a;
16411 }
16412
16413+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16414+{
16415+ long long a;
16416+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
16417+ "S" (v) : "memory", "ecx");
16418+ return a;
16419+}
16420+
16421 static inline long long atomic64_dec_return(atomic64_t *v)
16422 {
16423 long long a;
16424@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
16425 }
16426
16427 /**
16428+ * atomic64_add_unchecked - add integer to atomic64 variable
16429+ * @i: integer value to add
16430+ * @v: pointer to type atomic64_unchecked_t
16431+ *
16432+ * Atomically adds @i to @v.
16433+ */
16434+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
16435+{
16436+ __alternative_atomic64(add_unchecked, add_return_unchecked,
16437+ ASM_OUTPUT2("+A" (i), "+c" (v)),
16438+ ASM_NO_INPUT_CLOBBER("memory"));
16439+ return i;
16440+}
16441+
16442+/**
16443 * atomic64_sub - subtract the atomic64 variable
16444 * @i: integer value to subtract
16445 * @v: pointer to type atomic64_t
16446diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
16447index 46e9052..ae45136 100644
16448--- a/arch/x86/include/asm/atomic64_64.h
16449+++ b/arch/x86/include/asm/atomic64_64.h
16450@@ -18,7 +18,19 @@
16451 */
16452 static inline long atomic64_read(const atomic64_t *v)
16453 {
16454- return (*(volatile long *)&(v)->counter);
16455+ return (*(volatile const long *)&(v)->counter);
16456+}
16457+
16458+/**
16459+ * atomic64_read_unchecked - read atomic64 variable
16460+ * @v: pointer of type atomic64_unchecked_t
16461+ *
16462+ * Atomically reads the value of @v.
16463+ * Doesn't imply a read memory barrier.
16464+ */
16465+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
16466+{
16467+ return (*(volatile const long *)&(v)->counter);
16468 }
16469
16470 /**
16471@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
16472 }
16473
16474 /**
16475+ * atomic64_set_unchecked - set atomic64 variable
16476+ * @v: pointer to type atomic64_unchecked_t
16477+ * @i: required value
16478+ *
16479+ * Atomically sets the value of @v to @i.
16480+ */
16481+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
16482+{
16483+ v->counter = i;
16484+}
16485+
16486+/**
16487 * atomic64_add - add integer to atomic64 variable
16488 * @i: integer value to add
16489 * @v: pointer to type atomic64_t
16490@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
16491 */
16492 static inline void atomic64_add(long i, atomic64_t *v)
16493 {
16494+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
16495+
16496+#ifdef CONFIG_PAX_REFCOUNT
16497+ "jno 0f\n"
16498+ LOCK_PREFIX "subq %1,%0\n"
16499+ "int $4\n0:\n"
16500+ _ASM_EXTABLE(0b, 0b)
16501+#endif
16502+
16503+ : "=m" (v->counter)
16504+ : "er" (i), "m" (v->counter));
16505+}
16506+
16507+/**
16508+ * atomic64_add_unchecked - add integer to atomic64 variable
16509+ * @i: integer value to add
16510+ * @v: pointer to type atomic64_unchecked_t
16511+ *
16512+ * Atomically adds @i to @v.
16513+ */
16514+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
16515+{
16516 asm volatile(LOCK_PREFIX "addq %1,%0"
16517 : "=m" (v->counter)
16518 : "er" (i), "m" (v->counter));
16519@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
16520 */
16521 static inline void atomic64_sub(long i, atomic64_t *v)
16522 {
16523- asm volatile(LOCK_PREFIX "subq %1,%0"
16524+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16525+
16526+#ifdef CONFIG_PAX_REFCOUNT
16527+ "jno 0f\n"
16528+ LOCK_PREFIX "addq %1,%0\n"
16529+ "int $4\n0:\n"
16530+ _ASM_EXTABLE(0b, 0b)
16531+#endif
16532+
16533+ : "=m" (v->counter)
16534+ : "er" (i), "m" (v->counter));
16535+}
16536+
16537+/**
16538+ * atomic64_sub_unchecked - subtract the atomic64 variable
16539+ * @i: integer value to subtract
16540+ * @v: pointer to type atomic64_unchecked_t
16541+ *
16542+ * Atomically subtracts @i from @v.
16543+ */
16544+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
16545+{
16546+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
16547 : "=m" (v->counter)
16548 : "er" (i), "m" (v->counter));
16549 }
16550@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
16551 */
16552 static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16553 {
16554- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
16555+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
16556 }
16557
16558 /**
16559@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
16560 */
16561 static inline void atomic64_inc(atomic64_t *v)
16562 {
16563+ asm volatile(LOCK_PREFIX "incq %0\n"
16564+
16565+#ifdef CONFIG_PAX_REFCOUNT
16566+ "jno 0f\n"
16567+ LOCK_PREFIX "decq %0\n"
16568+ "int $4\n0:\n"
16569+ _ASM_EXTABLE(0b, 0b)
16570+#endif
16571+
16572+ : "=m" (v->counter)
16573+ : "m" (v->counter));
16574+}
16575+
16576+/**
16577+ * atomic64_inc_unchecked - increment atomic64 variable
16578+ * @v: pointer to type atomic64_unchecked_t
16579+ *
16580+ * Atomically increments @v by 1.
16581+ */
16582+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
16583+{
16584 asm volatile(LOCK_PREFIX "incq %0"
16585 : "=m" (v->counter)
16586 : "m" (v->counter));
16587@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
16588 */
16589 static inline void atomic64_dec(atomic64_t *v)
16590 {
16591- asm volatile(LOCK_PREFIX "decq %0"
16592+ asm volatile(LOCK_PREFIX "decq %0\n"
16593+
16594+#ifdef CONFIG_PAX_REFCOUNT
16595+ "jno 0f\n"
16596+ LOCK_PREFIX "incq %0\n"
16597+ "int $4\n0:\n"
16598+ _ASM_EXTABLE(0b, 0b)
16599+#endif
16600+
16601+ : "=m" (v->counter)
16602+ : "m" (v->counter));
16603+}
16604+
16605+/**
16606+ * atomic64_dec_unchecked - decrement atomic64 variable
16607+ * @v: pointer to type atomic64_t
16608+ *
16609+ * Atomically decrements @v by 1.
16610+ */
16611+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
16612+{
16613+ asm volatile(LOCK_PREFIX "decq %0\n"
16614 : "=m" (v->counter)
16615 : "m" (v->counter));
16616 }
16617@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
16618 */
16619 static inline int atomic64_dec_and_test(atomic64_t *v)
16620 {
16621- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
16622+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
16623 }
16624
16625 /**
16626@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
16627 */
16628 static inline int atomic64_inc_and_test(atomic64_t *v)
16629 {
16630- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
16631+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
16632 }
16633
16634 /**
16635@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
16636 */
16637 static inline int atomic64_add_negative(long i, atomic64_t *v)
16638 {
16639- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
16640+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
16641 }
16642
16643 /**
16644@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
16645 */
16646 static inline long atomic64_add_return(long i, atomic64_t *v)
16647 {
16648+ return i + xadd_check_overflow(&v->counter, i);
16649+}
16650+
16651+/**
16652+ * atomic64_add_return_unchecked - add and return
16653+ * @i: integer value to add
16654+ * @v: pointer to type atomic64_unchecked_t
16655+ *
16656+ * Atomically adds @i to @v and returns @i + @v
16657+ */
16658+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
16659+{
16660 return i + xadd(&v->counter, i);
16661 }
16662
16663@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
16664 }
16665
16666 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
16667+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
16668+{
16669+ return atomic64_add_return_unchecked(1, v);
16670+}
16671 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
16672
16673 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16674@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
16675 return cmpxchg(&v->counter, old, new);
16676 }
16677
16678+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
16679+{
16680+ return cmpxchg(&v->counter, old, new);
16681+}
16682+
16683 static inline long atomic64_xchg(atomic64_t *v, long new)
16684 {
16685 return xchg(&v->counter, new);
16686@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
16687 */
16688 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
16689 {
16690- long c, old;
16691+ long c, old, new;
16692 c = atomic64_read(v);
16693 for (;;) {
16694- if (unlikely(c == (u)))
16695+ if (unlikely(c == u))
16696 break;
16697- old = atomic64_cmpxchg((v), c, c + (a));
16698+
16699+ asm volatile("add %2,%0\n"
16700+
16701+#ifdef CONFIG_PAX_REFCOUNT
16702+ "jno 0f\n"
16703+ "sub %2,%0\n"
16704+ "int $4\n0:\n"
16705+ _ASM_EXTABLE(0b, 0b)
16706+#endif
16707+
16708+ : "=r" (new)
16709+ : "0" (c), "ir" (a));
16710+
16711+ old = atomic64_cmpxchg(v, c, new);
16712 if (likely(old == c))
16713 break;
16714 c = old;
16715 }
16716- return c != (u);
16717+ return c != u;
16718 }
16719
16720 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
16721diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
16722index 0f4460b..fa1ee19 100644
16723--- a/arch/x86/include/asm/barrier.h
16724+++ b/arch/x86/include/asm/barrier.h
16725@@ -107,7 +107,7 @@
16726 do { \
16727 compiletime_assert_atomic_type(*p); \
16728 smp_mb(); \
16729- ACCESS_ONCE(*p) = (v); \
16730+ ACCESS_ONCE_RW(*p) = (v); \
16731 } while (0)
16732
16733 #define smp_load_acquire(p) \
16734@@ -124,7 +124,7 @@ do { \
16735 do { \
16736 compiletime_assert_atomic_type(*p); \
16737 barrier(); \
16738- ACCESS_ONCE(*p) = (v); \
16739+ ACCESS_ONCE_RW(*p) = (v); \
16740 } while (0)
16741
16742 #define smp_load_acquire(p) \
16743diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
16744index cfe3b95..d01b118 100644
16745--- a/arch/x86/include/asm/bitops.h
16746+++ b/arch/x86/include/asm/bitops.h
16747@@ -50,7 +50,7 @@
16748 * a mask operation on a byte.
16749 */
16750 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
16751-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
16752+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
16753 #define CONST_MASK(nr) (1 << ((nr) & 7))
16754
16755 /**
16756@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
16757 */
16758 static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
16759 {
16760- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16761+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
16762 }
16763
16764 /**
16765@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
16766 */
16767 static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
16768 {
16769- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16770+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
16771 }
16772
16773 /**
16774@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
16775 */
16776 static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
16777 {
16778- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16779+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
16780 }
16781
16782 static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
16783@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
16784 *
16785 * Undefined if no bit exists, so code should check against 0 first.
16786 */
16787-static inline unsigned long __ffs(unsigned long word)
16788+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
16789 {
16790 asm("rep; bsf %1,%0"
16791 : "=r" (word)
16792@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
16793 *
16794 * Undefined if no zero exists, so code should check against ~0UL first.
16795 */
16796-static inline unsigned long ffz(unsigned long word)
16797+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
16798 {
16799 asm("rep; bsf %1,%0"
16800 : "=r" (word)
16801@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
16802 *
16803 * Undefined if no set bit exists, so code should check against 0 first.
16804 */
16805-static inline unsigned long __fls(unsigned long word)
16806+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
16807 {
16808 asm("bsr %1,%0"
16809 : "=r" (word)
16810@@ -434,7 +434,7 @@ static inline int ffs(int x)
16811 * set bit if value is nonzero. The last (most significant) bit is
16812 * at position 32.
16813 */
16814-static inline int fls(int x)
16815+static inline int __intentional_overflow(-1) fls(int x)
16816 {
16817 int r;
16818
16819@@ -476,7 +476,7 @@ static inline int fls(int x)
16820 * at position 64.
16821 */
16822 #ifdef CONFIG_X86_64
16823-static __always_inline int fls64(__u64 x)
16824+static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
16825 {
16826 int bitpos = -1;
16827 /*
16828diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
16829index 4fa687a..60f2d39 100644
16830--- a/arch/x86/include/asm/boot.h
16831+++ b/arch/x86/include/asm/boot.h
16832@@ -6,10 +6,15 @@
16833 #include <uapi/asm/boot.h>
16834
16835 /* Physical address where kernel should be loaded. */
16836-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16837+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
16838 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16839 & ~(CONFIG_PHYSICAL_ALIGN - 1))
16840
16841+#ifndef __ASSEMBLY__
16842+extern unsigned char __LOAD_PHYSICAL_ADDR[];
16843+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
16844+#endif
16845+
16846 /* Minimum kernel alignment, as a power of two */
16847 #ifdef CONFIG_X86_64
16848 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
16849diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
16850index 48f99f1..d78ebf9 100644
16851--- a/arch/x86/include/asm/cache.h
16852+++ b/arch/x86/include/asm/cache.h
16853@@ -5,12 +5,13 @@
16854
16855 /* L1 cache line size */
16856 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
16857-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
16858+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
16859
16860 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
16861+#define __read_only __attribute__((__section__(".data..read_only")))
16862
16863 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
16864-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
16865+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
16866
16867 #ifdef CONFIG_X86_VSMP
16868 #ifdef CONFIG_SMP
16869diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
16870index 9863ee3..4a1f8e1 100644
16871--- a/arch/x86/include/asm/cacheflush.h
16872+++ b/arch/x86/include/asm/cacheflush.h
16873@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
16874 unsigned long pg_flags = pg->flags & _PGMT_MASK;
16875
16876 if (pg_flags == _PGMT_DEFAULT)
16877- return -1;
16878+ return ~0UL;
16879 else if (pg_flags == _PGMT_WC)
16880 return _PAGE_CACHE_WC;
16881 else if (pg_flags == _PGMT_UC_MINUS)
16882diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
16883index cb4c73b..c473c29 100644
16884--- a/arch/x86/include/asm/calling.h
16885+++ b/arch/x86/include/asm/calling.h
16886@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with
16887 #define RSP 152
16888 #define SS 160
16889
16890-#define ARGOFFSET R11
16891-#define SWFRAME ORIG_RAX
16892+#define ARGOFFSET R15
16893
16894 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
16895- subq $9*8+\addskip, %rsp
16896- CFI_ADJUST_CFA_OFFSET 9*8+\addskip
16897- movq_cfi rdi, 8*8
16898- movq_cfi rsi, 7*8
16899- movq_cfi rdx, 6*8
16900+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
16901+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
16902+ movq_cfi rdi, RDI
16903+ movq_cfi rsi, RSI
16904+ movq_cfi rdx, RDX
16905
16906 .if \save_rcx
16907- movq_cfi rcx, 5*8
16908+ movq_cfi rcx, RCX
16909 .endif
16910
16911- movq_cfi rax, 4*8
16912+ movq_cfi rax, RAX
16913
16914 .if \save_r891011
16915- movq_cfi r8, 3*8
16916- movq_cfi r9, 2*8
16917- movq_cfi r10, 1*8
16918- movq_cfi r11, 0*8
16919+ movq_cfi r8, R8
16920+ movq_cfi r9, R9
16921+ movq_cfi r10, R10
16922+ movq_cfi r11, R11
16923 .endif
16924
16925+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16926+ movq_cfi r12, R12
16927+#endif
16928+
16929 .endm
16930
16931-#define ARG_SKIP (9*8)
16932+#define ARG_SKIP ORIG_RAX
16933
16934 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
16935 rstor_r8910=1, rstor_rdx=1
16936+
16937+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
16938+ movq_cfi_restore R12, r12
16939+#endif
16940+
16941 .if \rstor_r11
16942- movq_cfi_restore 0*8, r11
16943+ movq_cfi_restore R11, r11
16944 .endif
16945
16946 .if \rstor_r8910
16947- movq_cfi_restore 1*8, r10
16948- movq_cfi_restore 2*8, r9
16949- movq_cfi_restore 3*8, r8
16950+ movq_cfi_restore R10, r10
16951+ movq_cfi_restore R9, r9
16952+ movq_cfi_restore R8, r8
16953 .endif
16954
16955 .if \rstor_rax
16956- movq_cfi_restore 4*8, rax
16957+ movq_cfi_restore RAX, rax
16958 .endif
16959
16960 .if \rstor_rcx
16961- movq_cfi_restore 5*8, rcx
16962+ movq_cfi_restore RCX, rcx
16963 .endif
16964
16965 .if \rstor_rdx
16966- movq_cfi_restore 6*8, rdx
16967+ movq_cfi_restore RDX, rdx
16968 .endif
16969
16970- movq_cfi_restore 7*8, rsi
16971- movq_cfi_restore 8*8, rdi
16972+ movq_cfi_restore RSI, rsi
16973+ movq_cfi_restore RDI, rdi
16974
16975- .if ARG_SKIP+\addskip > 0
16976- addq $ARG_SKIP+\addskip, %rsp
16977- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
16978+ .if ORIG_RAX+\addskip > 0
16979+ addq $ORIG_RAX+\addskip, %rsp
16980+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
16981 .endif
16982 .endm
16983
16984- .macro LOAD_ARGS offset, skiprax=0
16985- movq \offset(%rsp), %r11
16986- movq \offset+8(%rsp), %r10
16987- movq \offset+16(%rsp), %r9
16988- movq \offset+24(%rsp), %r8
16989- movq \offset+40(%rsp), %rcx
16990- movq \offset+48(%rsp), %rdx
16991- movq \offset+56(%rsp), %rsi
16992- movq \offset+64(%rsp), %rdi
16993+ .macro LOAD_ARGS skiprax=0
16994+ movq R11(%rsp), %r11
16995+ movq R10(%rsp), %r10
16996+ movq R9(%rsp), %r9
16997+ movq R8(%rsp), %r8
16998+ movq RCX(%rsp), %rcx
16999+ movq RDX(%rsp), %rdx
17000+ movq RSI(%rsp), %rsi
17001+ movq RDI(%rsp), %rdi
17002 .if \skiprax
17003 .else
17004- movq \offset+72(%rsp), %rax
17005+ movq RAX(%rsp), %rax
17006 .endif
17007 .endm
17008
17009-#define REST_SKIP (6*8)
17010-
17011 .macro SAVE_REST
17012- subq $REST_SKIP, %rsp
17013- CFI_ADJUST_CFA_OFFSET REST_SKIP
17014- movq_cfi rbx, 5*8
17015- movq_cfi rbp, 4*8
17016- movq_cfi r12, 3*8
17017- movq_cfi r13, 2*8
17018- movq_cfi r14, 1*8
17019- movq_cfi r15, 0*8
17020+ movq_cfi rbx, RBX
17021+ movq_cfi rbp, RBP
17022+
17023+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
17024+ movq_cfi r12, R12
17025+#endif
17026+
17027+ movq_cfi r13, R13
17028+ movq_cfi r14, R14
17029+ movq_cfi r15, R15
17030 .endm
17031
17032 .macro RESTORE_REST
17033- movq_cfi_restore 0*8, r15
17034- movq_cfi_restore 1*8, r14
17035- movq_cfi_restore 2*8, r13
17036- movq_cfi_restore 3*8, r12
17037- movq_cfi_restore 4*8, rbp
17038- movq_cfi_restore 5*8, rbx
17039- addq $REST_SKIP, %rsp
17040- CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
17041+ movq_cfi_restore R15, r15
17042+ movq_cfi_restore R14, r14
17043+ movq_cfi_restore R13, r13
17044+
17045+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
17046+ movq_cfi_restore R12, r12
17047+#endif
17048+
17049+ movq_cfi_restore RBP, rbp
17050+ movq_cfi_restore RBX, rbx
17051 .endm
17052
17053 .macro SAVE_ALL
17054diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
17055index f50de69..2b0a458 100644
17056--- a/arch/x86/include/asm/checksum_32.h
17057+++ b/arch/x86/include/asm/checksum_32.h
17058@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
17059 int len, __wsum sum,
17060 int *src_err_ptr, int *dst_err_ptr);
17061
17062+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
17063+ int len, __wsum sum,
17064+ int *src_err_ptr, int *dst_err_ptr);
17065+
17066+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
17067+ int len, __wsum sum,
17068+ int *src_err_ptr, int *dst_err_ptr);
17069+
17070 /*
17071 * Note: when you get a NULL pointer exception here this means someone
17072 * passed in an incorrect kernel address to one of these functions.
17073@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
17074
17075 might_sleep();
17076 stac();
17077- ret = csum_partial_copy_generic((__force void *)src, dst,
17078+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
17079 len, sum, err_ptr, NULL);
17080 clac();
17081
17082@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
17083 might_sleep();
17084 if (access_ok(VERIFY_WRITE, dst, len)) {
17085 stac();
17086- ret = csum_partial_copy_generic(src, (__force void *)dst,
17087+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
17088 len, sum, NULL, err_ptr);
17089 clac();
17090 return ret;
17091diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
17092index 99c105d7..2f667ac 100644
17093--- a/arch/x86/include/asm/cmpxchg.h
17094+++ b/arch/x86/include/asm/cmpxchg.h
17095@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
17096 __compiletime_error("Bad argument size for cmpxchg");
17097 extern void __xadd_wrong_size(void)
17098 __compiletime_error("Bad argument size for xadd");
17099+extern void __xadd_check_overflow_wrong_size(void)
17100+ __compiletime_error("Bad argument size for xadd_check_overflow");
17101 extern void __add_wrong_size(void)
17102 __compiletime_error("Bad argument size for add");
17103+extern void __add_check_overflow_wrong_size(void)
17104+ __compiletime_error("Bad argument size for add_check_overflow");
17105
17106 /*
17107 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
17108@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
17109 __ret; \
17110 })
17111
17112+#ifdef CONFIG_PAX_REFCOUNT
17113+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
17114+ ({ \
17115+ __typeof__ (*(ptr)) __ret = (arg); \
17116+ switch (sizeof(*(ptr))) { \
17117+ case __X86_CASE_L: \
17118+ asm volatile (lock #op "l %0, %1\n" \
17119+ "jno 0f\n" \
17120+ "mov %0,%1\n" \
17121+ "int $4\n0:\n" \
17122+ _ASM_EXTABLE(0b, 0b) \
17123+ : "+r" (__ret), "+m" (*(ptr)) \
17124+ : : "memory", "cc"); \
17125+ break; \
17126+ case __X86_CASE_Q: \
17127+ asm volatile (lock #op "q %q0, %1\n" \
17128+ "jno 0f\n" \
17129+ "mov %0,%1\n" \
17130+ "int $4\n0:\n" \
17131+ _ASM_EXTABLE(0b, 0b) \
17132+ : "+r" (__ret), "+m" (*(ptr)) \
17133+ : : "memory", "cc"); \
17134+ break; \
17135+ default: \
17136+ __ ## op ## _check_overflow_wrong_size(); \
17137+ } \
17138+ __ret; \
17139+ })
17140+#else
17141+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
17142+#endif
17143+
17144 /*
17145 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
17146 * Since this is generally used to protect other memory information, we
17147@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
17148 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
17149 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
17150
17151+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
17152+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
17153+
17154 #define __add(ptr, inc, lock) \
17155 ({ \
17156 __typeof__ (*(ptr)) __ret = (inc); \
17157diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
17158index 59c6c40..5e0b22c 100644
17159--- a/arch/x86/include/asm/compat.h
17160+++ b/arch/x86/include/asm/compat.h
17161@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
17162 typedef u32 compat_uint_t;
17163 typedef u32 compat_ulong_t;
17164 typedef u64 __attribute__((aligned(4))) compat_u64;
17165-typedef u32 compat_uptr_t;
17166+typedef u32 __user compat_uptr_t;
17167
17168 struct compat_timespec {
17169 compat_time_t tv_sec;
17170diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
17171index 2075e6c..d65aa96 100644
17172--- a/arch/x86/include/asm/cpufeature.h
17173+++ b/arch/x86/include/asm/cpufeature.h
17174@@ -204,14 +204,14 @@
17175 #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
17176 #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
17177
17178-
17179+#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
17180 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
17181 #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
17182 #define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
17183 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
17184 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
17185 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
17186-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
17187+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
17188 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
17189 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
17190 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
17191@@ -371,6 +371,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
17192 #undef cpu_has_centaur_mcr
17193 #define cpu_has_centaur_mcr 0
17194
17195+#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
17196 #endif /* CONFIG_X86_64 */
17197
17198 #if __GNUC__ >= 4
17199@@ -423,7 +424,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
17200
17201 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
17202 t_warn:
17203- warn_pre_alternatives();
17204+ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
17205+ warn_pre_alternatives();
17206 return false;
17207 #endif
17208
17209@@ -443,7 +445,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
17210 ".section .discard,\"aw\",@progbits\n"
17211 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
17212 ".previous\n"
17213- ".section .altinstr_replacement,\"ax\"\n"
17214+ ".section .altinstr_replacement,\"a\"\n"
17215 "3: movb $1,%0\n"
17216 "4:\n"
17217 ".previous\n"
17218@@ -480,7 +482,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17219 " .byte 2b - 1b\n" /* src len */
17220 " .byte 4f - 3f\n" /* repl len */
17221 ".previous\n"
17222- ".section .altinstr_replacement,\"ax\"\n"
17223+ ".section .altinstr_replacement,\"a\"\n"
17224 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
17225 "4:\n"
17226 ".previous\n"
17227@@ -513,7 +515,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17228 ".section .discard,\"aw\",@progbits\n"
17229 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
17230 ".previous\n"
17231- ".section .altinstr_replacement,\"ax\"\n"
17232+ ".section .altinstr_replacement,\"a\"\n"
17233 "3: movb $0,%0\n"
17234 "4:\n"
17235 ".previous\n"
17236@@ -527,7 +529,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
17237 ".section .discard,\"aw\",@progbits\n"
17238 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
17239 ".previous\n"
17240- ".section .altinstr_replacement,\"ax\"\n"
17241+ ".section .altinstr_replacement,\"a\"\n"
17242 "5: movb $1,%0\n"
17243 "6:\n"
17244 ".previous\n"
17245diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
17246index 50d033a..37deb26 100644
17247--- a/arch/x86/include/asm/desc.h
17248+++ b/arch/x86/include/asm/desc.h
17249@@ -4,6 +4,7 @@
17250 #include <asm/desc_defs.h>
17251 #include <asm/ldt.h>
17252 #include <asm/mmu.h>
17253+#include <asm/pgtable.h>
17254
17255 #include <linux/smp.h>
17256 #include <linux/percpu.h>
17257@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
17258
17259 desc->type = (info->read_exec_only ^ 1) << 1;
17260 desc->type |= info->contents << 2;
17261+ desc->type |= info->seg_not_present ^ 1;
17262
17263 desc->s = 1;
17264 desc->dpl = 0x3;
17265@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
17266 }
17267
17268 extern struct desc_ptr idt_descr;
17269-extern gate_desc idt_table[];
17270-extern struct desc_ptr debug_idt_descr;
17271-extern gate_desc debug_idt_table[];
17272-
17273-struct gdt_page {
17274- struct desc_struct gdt[GDT_ENTRIES];
17275-} __attribute__((aligned(PAGE_SIZE)));
17276-
17277-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
17278+extern gate_desc idt_table[IDT_ENTRIES];
17279+extern const struct desc_ptr debug_idt_descr;
17280+extern gate_desc debug_idt_table[IDT_ENTRIES];
17281
17282+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
17283 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
17284 {
17285- return per_cpu(gdt_page, cpu).gdt;
17286+ return cpu_gdt_table[cpu];
17287 }
17288
17289 #ifdef CONFIG_X86_64
17290@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
17291 unsigned long base, unsigned dpl, unsigned flags,
17292 unsigned short seg)
17293 {
17294- gate->a = (seg << 16) | (base & 0xffff);
17295- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
17296+ gate->gate.offset_low = base;
17297+ gate->gate.seg = seg;
17298+ gate->gate.reserved = 0;
17299+ gate->gate.type = type;
17300+ gate->gate.s = 0;
17301+ gate->gate.dpl = dpl;
17302+ gate->gate.p = 1;
17303+ gate->gate.offset_high = base >> 16;
17304 }
17305
17306 #endif
17307@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
17308
17309 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
17310 {
17311+ pax_open_kernel();
17312 memcpy(&idt[entry], gate, sizeof(*gate));
17313+ pax_close_kernel();
17314 }
17315
17316 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
17317 {
17318+ pax_open_kernel();
17319 memcpy(&ldt[entry], desc, 8);
17320+ pax_close_kernel();
17321 }
17322
17323 static inline void
17324@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
17325 default: size = sizeof(*gdt); break;
17326 }
17327
17328+ pax_open_kernel();
17329 memcpy(&gdt[entry], desc, size);
17330+ pax_close_kernel();
17331 }
17332
17333 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
17334@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
17335
17336 static inline void native_load_tr_desc(void)
17337 {
17338+ pax_open_kernel();
17339 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
17340+ pax_close_kernel();
17341 }
17342
17343 static inline void native_load_gdt(const struct desc_ptr *dtr)
17344@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
17345 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
17346 unsigned int i;
17347
17348+ pax_open_kernel();
17349 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
17350 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
17351+ pax_close_kernel();
17352 }
17353
17354 #define _LDT_empty(info) \
17355@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
17356 preempt_enable();
17357 }
17358
17359-static inline unsigned long get_desc_base(const struct desc_struct *desc)
17360+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
17361 {
17362 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
17363 }
17364@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
17365 }
17366
17367 #ifdef CONFIG_X86_64
17368-static inline void set_nmi_gate(int gate, void *addr)
17369+static inline void set_nmi_gate(int gate, const void *addr)
17370 {
17371 gate_desc s;
17372
17373@@ -321,14 +334,14 @@ static inline void set_nmi_gate(int gate, void *addr)
17374 #endif
17375
17376 #ifdef CONFIG_TRACING
17377-extern struct desc_ptr trace_idt_descr;
17378-extern gate_desc trace_idt_table[];
17379+extern const struct desc_ptr trace_idt_descr;
17380+extern gate_desc trace_idt_table[IDT_ENTRIES];
17381 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
17382 {
17383 write_idt_entry(trace_idt_table, entry, gate);
17384 }
17385
17386-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
17387+static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
17388 unsigned dpl, unsigned ist, unsigned seg)
17389 {
17390 gate_desc s;
17391@@ -348,7 +361,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
17392 #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
17393 #endif
17394
17395-static inline void _set_gate(int gate, unsigned type, void *addr,
17396+static inline void _set_gate(int gate, unsigned type, const void *addr,
17397 unsigned dpl, unsigned ist, unsigned seg)
17398 {
17399 gate_desc s;
17400@@ -371,9 +384,9 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
17401 #define set_intr_gate(n, addr) \
17402 do { \
17403 BUG_ON((unsigned)n > 0xFF); \
17404- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
17405+ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
17406 __KERNEL_CS); \
17407- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
17408+ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
17409 0, 0, __KERNEL_CS); \
17410 } while (0)
17411
17412@@ -401,19 +414,19 @@ static inline void alloc_system_vector(int vector)
17413 /*
17414 * This routine sets up an interrupt gate at directory privilege level 3.
17415 */
17416-static inline void set_system_intr_gate(unsigned int n, void *addr)
17417+static inline void set_system_intr_gate(unsigned int n, const void *addr)
17418 {
17419 BUG_ON((unsigned)n > 0xFF);
17420 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
17421 }
17422
17423-static inline void set_system_trap_gate(unsigned int n, void *addr)
17424+static inline void set_system_trap_gate(unsigned int n, const void *addr)
17425 {
17426 BUG_ON((unsigned)n > 0xFF);
17427 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
17428 }
17429
17430-static inline void set_trap_gate(unsigned int n, void *addr)
17431+static inline void set_trap_gate(unsigned int n, const void *addr)
17432 {
17433 BUG_ON((unsigned)n > 0xFF);
17434 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
17435@@ -422,16 +435,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
17436 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
17437 {
17438 BUG_ON((unsigned)n > 0xFF);
17439- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
17440+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
17441 }
17442
17443-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
17444+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
17445 {
17446 BUG_ON((unsigned)n > 0xFF);
17447 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
17448 }
17449
17450-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
17451+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
17452 {
17453 BUG_ON((unsigned)n > 0xFF);
17454 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
17455@@ -503,4 +516,17 @@ static inline void load_current_idt(void)
17456 else
17457 load_idt((const struct desc_ptr *)&idt_descr);
17458 }
17459+
17460+#ifdef CONFIG_X86_32
17461+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
17462+{
17463+ struct desc_struct d;
17464+
17465+ if (likely(limit))
17466+ limit = (limit - 1UL) >> PAGE_SHIFT;
17467+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
17468+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
17469+}
17470+#endif
17471+
17472 #endif /* _ASM_X86_DESC_H */
17473diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
17474index 278441f..b95a174 100644
17475--- a/arch/x86/include/asm/desc_defs.h
17476+++ b/arch/x86/include/asm/desc_defs.h
17477@@ -31,6 +31,12 @@ struct desc_struct {
17478 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
17479 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
17480 };
17481+ struct {
17482+ u16 offset_low;
17483+ u16 seg;
17484+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
17485+ unsigned offset_high: 16;
17486+ } gate;
17487 };
17488 } __attribute__((packed));
17489
17490diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
17491index ced283a..ffe04cc 100644
17492--- a/arch/x86/include/asm/div64.h
17493+++ b/arch/x86/include/asm/div64.h
17494@@ -39,7 +39,7 @@
17495 __mod; \
17496 })
17497
17498-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
17499+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
17500 {
17501 union {
17502 u64 v64;
17503diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
17504index ca3347a..1a5082a 100644
17505--- a/arch/x86/include/asm/elf.h
17506+++ b/arch/x86/include/asm/elf.h
17507@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
17508
17509 #include <asm/vdso.h>
17510
17511-#ifdef CONFIG_X86_64
17512-extern unsigned int vdso64_enabled;
17513-#endif
17514 #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
17515 extern unsigned int vdso32_enabled;
17516 #endif
17517@@ -249,7 +246,25 @@ extern int force_personality32;
17518 the loader. We need to make sure that it is out of the way of the program
17519 that it will "exec", and that there is sufficient room for the brk. */
17520
17521+#ifdef CONFIG_PAX_SEGMEXEC
17522+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
17523+#else
17524 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
17525+#endif
17526+
17527+#ifdef CONFIG_PAX_ASLR
17528+#ifdef CONFIG_X86_32
17529+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
17530+
17531+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17532+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
17533+#else
17534+#define PAX_ELF_ET_DYN_BASE 0x400000UL
17535+
17536+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17537+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
17538+#endif
17539+#endif
17540
17541 /* This yields a mask that user programs can use to figure out what
17542 instruction set this CPU supports. This could be done in user space,
17543@@ -298,17 +313,13 @@ do { \
17544
17545 #define ARCH_DLINFO \
17546 do { \
17547- if (vdso64_enabled) \
17548- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17549- (unsigned long __force)current->mm->context.vdso); \
17550+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17551 } while (0)
17552
17553 /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
17554 #define ARCH_DLINFO_X32 \
17555 do { \
17556- if (vdso64_enabled) \
17557- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
17558- (unsigned long __force)current->mm->context.vdso); \
17559+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
17560 } while (0)
17561
17562 #define AT_SYSINFO 32
17563@@ -323,10 +334,10 @@ else \
17564
17565 #endif /* !CONFIG_X86_32 */
17566
17567-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
17568+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
17569
17570 #define VDSO_ENTRY \
17571- ((unsigned long)current->mm->context.vdso + \
17572+ (current->mm->context.vdso + \
17573 selected_vdso32->sym___kernel_vsyscall)
17574
17575 struct linux_binprm;
17576@@ -338,9 +349,6 @@ extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
17577 int uses_interp);
17578 #define compat_arch_setup_additional_pages compat_arch_setup_additional_pages
17579
17580-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
17581-#define arch_randomize_brk arch_randomize_brk
17582-
17583 /*
17584 * True on X86_32 or when emulating IA32 on X86_64
17585 */
17586diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
17587index 77a99ac..39ff7f5 100644
17588--- a/arch/x86/include/asm/emergency-restart.h
17589+++ b/arch/x86/include/asm/emergency-restart.h
17590@@ -1,6 +1,6 @@
17591 #ifndef _ASM_X86_EMERGENCY_RESTART_H
17592 #define _ASM_X86_EMERGENCY_RESTART_H
17593
17594-extern void machine_emergency_restart(void);
17595+extern void machine_emergency_restart(void) __noreturn;
17596
17597 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
17598diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
17599index 1c7eefe..d0e4702 100644
17600--- a/arch/x86/include/asm/floppy.h
17601+++ b/arch/x86/include/asm/floppy.h
17602@@ -229,18 +229,18 @@ static struct fd_routine_l {
17603 int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
17604 } fd_routine[] = {
17605 {
17606- request_dma,
17607- free_dma,
17608- get_dma_residue,
17609- dma_mem_alloc,
17610- hard_dma_setup
17611+ ._request_dma = request_dma,
17612+ ._free_dma = free_dma,
17613+ ._get_dma_residue = get_dma_residue,
17614+ ._dma_mem_alloc = dma_mem_alloc,
17615+ ._dma_setup = hard_dma_setup
17616 },
17617 {
17618- vdma_request_dma,
17619- vdma_nop,
17620- vdma_get_dma_residue,
17621- vdma_mem_alloc,
17622- vdma_dma_setup
17623+ ._request_dma = vdma_request_dma,
17624+ ._free_dma = vdma_nop,
17625+ ._get_dma_residue = vdma_get_dma_residue,
17626+ ._dma_mem_alloc = vdma_mem_alloc,
17627+ ._dma_setup = vdma_dma_setup
17628 }
17629 };
17630
17631diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
17632index 412ecec..c1ea43a 100644
17633--- a/arch/x86/include/asm/fpu-internal.h
17634+++ b/arch/x86/include/asm/fpu-internal.h
17635@@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17636 #define user_insn(insn, output, input...) \
17637 ({ \
17638 int err; \
17639+ pax_open_userland(); \
17640 asm volatile(ASM_STAC "\n" \
17641- "1:" #insn "\n\t" \
17642+ "1:" \
17643+ __copyuser_seg \
17644+ #insn "\n\t" \
17645 "2: " ASM_CLAC "\n" \
17646 ".section .fixup,\"ax\"\n" \
17647 "3: movl $-1,%[err]\n" \
17648@@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
17649 _ASM_EXTABLE(1b, 3b) \
17650 : [err] "=r" (err), output \
17651 : "0"(0), input); \
17652+ pax_close_userland(); \
17653 err; \
17654 })
17655
17656@@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
17657 "fnclex\n\t"
17658 "emms\n\t"
17659 "fildl %P[addr]" /* set F?P to defined value */
17660- : : [addr] "m" (tsk->thread.fpu.has_fpu));
17661+ : : [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
17662 }
17663
17664 return fpu_restore_checking(&tsk->thread.fpu);
17665diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
17666index b4c1f54..e290c08 100644
17667--- a/arch/x86/include/asm/futex.h
17668+++ b/arch/x86/include/asm/futex.h
17669@@ -12,6 +12,7 @@
17670 #include <asm/smap.h>
17671
17672 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
17673+ typecheck(u32 __user *, uaddr); \
17674 asm volatile("\t" ASM_STAC "\n" \
17675 "1:\t" insn "\n" \
17676 "2:\t" ASM_CLAC "\n" \
17677@@ -20,15 +21,16 @@
17678 "\tjmp\t2b\n" \
17679 "\t.previous\n" \
17680 _ASM_EXTABLE(1b, 3b) \
17681- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
17682+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
17683 : "i" (-EFAULT), "0" (oparg), "1" (0))
17684
17685 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
17686+ typecheck(u32 __user *, uaddr); \
17687 asm volatile("\t" ASM_STAC "\n" \
17688 "1:\tmovl %2, %0\n" \
17689 "\tmovl\t%0, %3\n" \
17690 "\t" insn "\n" \
17691- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
17692+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
17693 "\tjnz\t1b\n" \
17694 "3:\t" ASM_CLAC "\n" \
17695 "\t.section .fixup,\"ax\"\n" \
17696@@ -38,7 +40,7 @@
17697 _ASM_EXTABLE(1b, 4b) \
17698 _ASM_EXTABLE(2b, 4b) \
17699 : "=&a" (oldval), "=&r" (ret), \
17700- "+m" (*uaddr), "=&r" (tem) \
17701+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
17702 : "r" (oparg), "i" (-EFAULT), "1" (0))
17703
17704 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17705@@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17706
17707 pagefault_disable();
17708
17709+ pax_open_userland();
17710 switch (op) {
17711 case FUTEX_OP_SET:
17712- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
17713+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
17714 break;
17715 case FUTEX_OP_ADD:
17716- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
17717+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
17718 uaddr, oparg);
17719 break;
17720 case FUTEX_OP_OR:
17721@@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
17722 default:
17723 ret = -ENOSYS;
17724 }
17725+ pax_close_userland();
17726
17727 pagefault_enable();
17728
17729diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
17730index 4615906..788c817 100644
17731--- a/arch/x86/include/asm/hw_irq.h
17732+++ b/arch/x86/include/asm/hw_irq.h
17733@@ -164,8 +164,8 @@ extern void setup_ioapic_dest(void);
17734 extern void enable_IO_APIC(void);
17735
17736 /* Statistics */
17737-extern atomic_t irq_err_count;
17738-extern atomic_t irq_mis_count;
17739+extern atomic_unchecked_t irq_err_count;
17740+extern atomic_unchecked_t irq_mis_count;
17741
17742 /* EISA */
17743 extern void eisa_set_level_irq(unsigned int irq);
17744diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
17745index ccffa53..3c90c87 100644
17746--- a/arch/x86/include/asm/i8259.h
17747+++ b/arch/x86/include/asm/i8259.h
17748@@ -62,7 +62,7 @@ struct legacy_pic {
17749 void (*init)(int auto_eoi);
17750 int (*irq_pending)(unsigned int irq);
17751 void (*make_irq)(unsigned int irq);
17752-};
17753+} __do_const;
17754
17755 extern struct legacy_pic *legacy_pic;
17756 extern struct legacy_pic null_legacy_pic;
17757diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
17758index b8237d8..3e8864e 100644
17759--- a/arch/x86/include/asm/io.h
17760+++ b/arch/x86/include/asm/io.h
17761@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
17762 "m" (*(volatile type __force *)addr) barrier); }
17763
17764 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
17765-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
17766-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
17767+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
17768+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
17769
17770 build_mmio_read(__readb, "b", unsigned char, "=q", )
17771-build_mmio_read(__readw, "w", unsigned short, "=r", )
17772-build_mmio_read(__readl, "l", unsigned int, "=r", )
17773+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
17774+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
17775
17776 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
17777 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
17778@@ -109,7 +109,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
17779 * this function
17780 */
17781
17782-static inline phys_addr_t virt_to_phys(volatile void *address)
17783+static inline phys_addr_t __intentional_overflow(-1) virt_to_phys(volatile void *address)
17784 {
17785 return __pa(address);
17786 }
17787@@ -185,7 +185,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
17788 return ioremap_nocache(offset, size);
17789 }
17790
17791-extern void iounmap(volatile void __iomem *addr);
17792+extern void iounmap(const volatile void __iomem *addr);
17793
17794 extern void set_iounmap_nonlazy(void);
17795
17796@@ -195,6 +195,17 @@ extern void set_iounmap_nonlazy(void);
17797
17798 #include <linux/vmalloc.h>
17799
17800+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
17801+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
17802+{
17803+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17804+}
17805+
17806+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
17807+{
17808+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
17809+}
17810+
17811 /*
17812 * Convert a virtual cached pointer to an uncached pointer
17813 */
17814diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
17815index 0a8b519..80e7d5b 100644
17816--- a/arch/x86/include/asm/irqflags.h
17817+++ b/arch/x86/include/asm/irqflags.h
17818@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
17819 sti; \
17820 sysexit
17821
17822+#define GET_CR0_INTO_RDI mov %cr0, %rdi
17823+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
17824+#define GET_CR3_INTO_RDI mov %cr3, %rdi
17825+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
17826+
17827 #else
17828 #define INTERRUPT_RETURN iret
17829 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
17830diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
17831index 53cdfb2..d1369e6 100644
17832--- a/arch/x86/include/asm/kprobes.h
17833+++ b/arch/x86/include/asm/kprobes.h
17834@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
17835 #define RELATIVEJUMP_SIZE 5
17836 #define RELATIVECALL_OPCODE 0xe8
17837 #define RELATIVE_ADDR_SIZE 4
17838-#define MAX_STACK_SIZE 64
17839-#define MIN_STACK_SIZE(ADDR) \
17840- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
17841- THREAD_SIZE - (unsigned long)(ADDR))) \
17842- ? (MAX_STACK_SIZE) \
17843- : (((unsigned long)current_thread_info()) + \
17844- THREAD_SIZE - (unsigned long)(ADDR)))
17845+#define MAX_STACK_SIZE 64UL
17846+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
17847
17848 #define flush_insn_slot(p) do { } while (0)
17849
17850diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
17851index 4ad6560..75c7bdd 100644
17852--- a/arch/x86/include/asm/local.h
17853+++ b/arch/x86/include/asm/local.h
17854@@ -10,33 +10,97 @@ typedef struct {
17855 atomic_long_t a;
17856 } local_t;
17857
17858+typedef struct {
17859+ atomic_long_unchecked_t a;
17860+} local_unchecked_t;
17861+
17862 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
17863
17864 #define local_read(l) atomic_long_read(&(l)->a)
17865+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
17866 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
17867+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
17868
17869 static inline void local_inc(local_t *l)
17870 {
17871- asm volatile(_ASM_INC "%0"
17872+ asm volatile(_ASM_INC "%0\n"
17873+
17874+#ifdef CONFIG_PAX_REFCOUNT
17875+ "jno 0f\n"
17876+ _ASM_DEC "%0\n"
17877+ "int $4\n0:\n"
17878+ _ASM_EXTABLE(0b, 0b)
17879+#endif
17880+
17881+ : "+m" (l->a.counter));
17882+}
17883+
17884+static inline void local_inc_unchecked(local_unchecked_t *l)
17885+{
17886+ asm volatile(_ASM_INC "%0\n"
17887 : "+m" (l->a.counter));
17888 }
17889
17890 static inline void local_dec(local_t *l)
17891 {
17892- asm volatile(_ASM_DEC "%0"
17893+ asm volatile(_ASM_DEC "%0\n"
17894+
17895+#ifdef CONFIG_PAX_REFCOUNT
17896+ "jno 0f\n"
17897+ _ASM_INC "%0\n"
17898+ "int $4\n0:\n"
17899+ _ASM_EXTABLE(0b, 0b)
17900+#endif
17901+
17902+ : "+m" (l->a.counter));
17903+}
17904+
17905+static inline void local_dec_unchecked(local_unchecked_t *l)
17906+{
17907+ asm volatile(_ASM_DEC "%0\n"
17908 : "+m" (l->a.counter));
17909 }
17910
17911 static inline void local_add(long i, local_t *l)
17912 {
17913- asm volatile(_ASM_ADD "%1,%0"
17914+ asm volatile(_ASM_ADD "%1,%0\n"
17915+
17916+#ifdef CONFIG_PAX_REFCOUNT
17917+ "jno 0f\n"
17918+ _ASM_SUB "%1,%0\n"
17919+ "int $4\n0:\n"
17920+ _ASM_EXTABLE(0b, 0b)
17921+#endif
17922+
17923+ : "+m" (l->a.counter)
17924+ : "ir" (i));
17925+}
17926+
17927+static inline void local_add_unchecked(long i, local_unchecked_t *l)
17928+{
17929+ asm volatile(_ASM_ADD "%1,%0\n"
17930 : "+m" (l->a.counter)
17931 : "ir" (i));
17932 }
17933
17934 static inline void local_sub(long i, local_t *l)
17935 {
17936- asm volatile(_ASM_SUB "%1,%0"
17937+ asm volatile(_ASM_SUB "%1,%0\n"
17938+
17939+#ifdef CONFIG_PAX_REFCOUNT
17940+ "jno 0f\n"
17941+ _ASM_ADD "%1,%0\n"
17942+ "int $4\n0:\n"
17943+ _ASM_EXTABLE(0b, 0b)
17944+#endif
17945+
17946+ : "+m" (l->a.counter)
17947+ : "ir" (i));
17948+}
17949+
17950+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
17951+{
17952+ asm volatile(_ASM_SUB "%1,%0\n"
17953 : "+m" (l->a.counter)
17954 : "ir" (i));
17955 }
17956@@ -52,7 +116,7 @@ static inline void local_sub(long i, local_t *l)
17957 */
17958 static inline int local_sub_and_test(long i, local_t *l)
17959 {
17960- GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
17961+ GEN_BINARY_RMWcc(_ASM_SUB, _ASM_ADD, l->a.counter, "er", i, "%0", "e");
17962 }
17963
17964 /**
17965@@ -65,7 +129,7 @@ static inline int local_sub_and_test(long i, local_t *l)
17966 */
17967 static inline int local_dec_and_test(local_t *l)
17968 {
17969- GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
17970+ GEN_UNARY_RMWcc(_ASM_DEC, _ASM_INC, l->a.counter, "%0", "e");
17971 }
17972
17973 /**
17974@@ -78,7 +142,7 @@ static inline int local_dec_and_test(local_t *l)
17975 */
17976 static inline int local_inc_and_test(local_t *l)
17977 {
17978- GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
17979+ GEN_UNARY_RMWcc(_ASM_INC, _ASM_DEC, l->a.counter, "%0", "e");
17980 }
17981
17982 /**
17983@@ -92,7 +156,7 @@ static inline int local_inc_and_test(local_t *l)
17984 */
17985 static inline int local_add_negative(long i, local_t *l)
17986 {
17987- GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
17988+ GEN_BINARY_RMWcc(_ASM_ADD, _ASM_SUB, l->a.counter, "er", i, "%0", "s");
17989 }
17990
17991 /**
17992@@ -105,6 +169,30 @@ static inline int local_add_negative(long i, local_t *l)
17993 static inline long local_add_return(long i, local_t *l)
17994 {
17995 long __i = i;
17996+ asm volatile(_ASM_XADD "%0, %1\n"
17997+
17998+#ifdef CONFIG_PAX_REFCOUNT
17999+ "jno 0f\n"
18000+ _ASM_MOV "%0,%1\n"
18001+ "int $4\n0:\n"
18002+ _ASM_EXTABLE(0b, 0b)
18003+#endif
18004+
18005+ : "+r" (i), "+m" (l->a.counter)
18006+ : : "memory");
18007+ return i + __i;
18008+}
18009+
18010+/**
18011+ * local_add_return_unchecked - add and return
18012+ * @i: integer value to add
18013+ * @l: pointer to type local_unchecked_t
18014+ *
18015+ * Atomically adds @i to @l and returns @i + @l
18016+ */
18017+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
18018+{
18019+ long __i = i;
18020 asm volatile(_ASM_XADD "%0, %1;"
18021 : "+r" (i), "+m" (l->a.counter)
18022 : : "memory");
18023@@ -121,6 +209,8 @@ static inline long local_sub_return(long i, local_t *l)
18024
18025 #define local_cmpxchg(l, o, n) \
18026 (cmpxchg_local(&((l)->a.counter), (o), (n)))
18027+#define local_cmpxchg_unchecked(l, o, n) \
18028+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
18029 /* Always has a lock prefix */
18030 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
18031
18032diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
18033new file mode 100644
18034index 0000000..2bfd3ba
18035--- /dev/null
18036+++ b/arch/x86/include/asm/mman.h
18037@@ -0,0 +1,15 @@
18038+#ifndef _X86_MMAN_H
18039+#define _X86_MMAN_H
18040+
18041+#include <uapi/asm/mman.h>
18042+
18043+#ifdef __KERNEL__
18044+#ifndef __ASSEMBLY__
18045+#ifdef CONFIG_X86_32
18046+#define arch_mmap_check i386_mmap_check
18047+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
18048+#endif
18049+#endif
18050+#endif
18051+
18052+#endif /* X86_MMAN_H */
18053diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
18054index 876e74e..e20bfb1 100644
18055--- a/arch/x86/include/asm/mmu.h
18056+++ b/arch/x86/include/asm/mmu.h
18057@@ -9,7 +9,7 @@
18058 * we put the segment information here.
18059 */
18060 typedef struct {
18061- void *ldt;
18062+ struct desc_struct *ldt;
18063 int size;
18064
18065 #ifdef CONFIG_X86_64
18066@@ -18,7 +18,19 @@ typedef struct {
18067 #endif
18068
18069 struct mutex lock;
18070- void __user *vdso;
18071+ unsigned long vdso;
18072+
18073+#ifdef CONFIG_X86_32
18074+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18075+ unsigned long user_cs_base;
18076+ unsigned long user_cs_limit;
18077+
18078+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18079+ cpumask_t cpu_user_cs_mask;
18080+#endif
18081+
18082+#endif
18083+#endif
18084 } mm_context_t;
18085
18086 #ifdef CONFIG_SMP
18087diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
18088index 166af2a..648c200 100644
18089--- a/arch/x86/include/asm/mmu_context.h
18090+++ b/arch/x86/include/asm/mmu_context.h
18091@@ -28,6 +28,20 @@ void destroy_context(struct mm_struct *mm);
18092
18093 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
18094 {
18095+
18096+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18097+ if (!(static_cpu_has(X86_FEATURE_PCID))) {
18098+ unsigned int i;
18099+ pgd_t *pgd;
18100+
18101+ pax_open_kernel();
18102+ pgd = get_cpu_pgd(smp_processor_id(), kernel);
18103+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
18104+ set_pgd_batched(pgd+i, native_make_pgd(0));
18105+ pax_close_kernel();
18106+ }
18107+#endif
18108+
18109 #ifdef CONFIG_SMP
18110 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
18111 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
18112@@ -38,16 +52,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18113 struct task_struct *tsk)
18114 {
18115 unsigned cpu = smp_processor_id();
18116+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18117+ int tlbstate = TLBSTATE_OK;
18118+#endif
18119
18120 if (likely(prev != next)) {
18121 #ifdef CONFIG_SMP
18122+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18123+ tlbstate = this_cpu_read(cpu_tlbstate.state);
18124+#endif
18125 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
18126 this_cpu_write(cpu_tlbstate.active_mm, next);
18127 #endif
18128 cpumask_set_cpu(cpu, mm_cpumask(next));
18129
18130 /* Re-load page tables */
18131+#ifdef CONFIG_PAX_PER_CPU_PGD
18132+ pax_open_kernel();
18133+
18134+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18135+ if (static_cpu_has(X86_FEATURE_PCID))
18136+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
18137+ else
18138+#endif
18139+
18140+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
18141+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
18142+ pax_close_kernel();
18143+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
18144+
18145+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18146+ if (static_cpu_has(X86_FEATURE_PCID)) {
18147+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18148+ u64 descriptor[2];
18149+ descriptor[0] = PCID_USER;
18150+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18151+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
18152+ descriptor[0] = PCID_KERNEL;
18153+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18154+ }
18155+ } else {
18156+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18157+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
18158+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18159+ else
18160+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18161+ }
18162+ } else
18163+#endif
18164+
18165+ load_cr3(get_cpu_pgd(cpu, kernel));
18166+#else
18167 load_cr3(next->pgd);
18168+#endif
18169 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
18170
18171 /* Stop flush ipis for the previous mm */
18172@@ -56,9 +113,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18173 /* Load the LDT, if the LDT is different: */
18174 if (unlikely(prev->context.ldt != next->context.ldt))
18175 load_LDT_nolock(&next->context);
18176+
18177+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18178+ if (!(__supported_pte_mask & _PAGE_NX)) {
18179+ smp_mb__before_atomic();
18180+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
18181+ smp_mb__after_atomic();
18182+ cpu_set(cpu, next->context.cpu_user_cs_mask);
18183+ }
18184+#endif
18185+
18186+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18187+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
18188+ prev->context.user_cs_limit != next->context.user_cs_limit))
18189+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18190+#ifdef CONFIG_SMP
18191+ else if (unlikely(tlbstate != TLBSTATE_OK))
18192+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18193+#endif
18194+#endif
18195+
18196 }
18197+ else {
18198+
18199+#ifdef CONFIG_PAX_PER_CPU_PGD
18200+ pax_open_kernel();
18201+
18202+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18203+ if (static_cpu_has(X86_FEATURE_PCID))
18204+ __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
18205+ else
18206+#endif
18207+
18208+ __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
18209+ __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
18210+ pax_close_kernel();
18211+ BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
18212+
18213+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18214+ if (static_cpu_has(X86_FEATURE_PCID)) {
18215+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
18216+ u64 descriptor[2];
18217+ descriptor[0] = PCID_USER;
18218+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18219+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
18220+ descriptor[0] = PCID_KERNEL;
18221+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
18222+ }
18223+ } else {
18224+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18225+ if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
18226+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18227+ else
18228+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18229+ }
18230+ } else
18231+#endif
18232+
18233+ load_cr3(get_cpu_pgd(cpu, kernel));
18234+#endif
18235+
18236 #ifdef CONFIG_SMP
18237- else {
18238 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
18239 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
18240
18241@@ -75,12 +190,29 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
18242 * tlb flush IPI delivery. We must reload CR3
18243 * to make sure to use no freed page tables.
18244 */
18245+
18246+#ifndef CONFIG_PAX_PER_CPU_PGD
18247 load_cr3(next->pgd);
18248 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
18249+#endif
18250+
18251 load_LDT_nolock(&next->context);
18252+
18253+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18254+ if (!(__supported_pte_mask & _PAGE_NX))
18255+ cpu_set(cpu, next->context.cpu_user_cs_mask);
18256+#endif
18257+
18258+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
18259+#ifdef CONFIG_PAX_PAGEEXEC
18260+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
18261+#endif
18262+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
18263+#endif
18264+
18265 }
18266+#endif
18267 }
18268-#endif
18269 }
18270
18271 #define activate_mm(prev, next) \
18272diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
18273index e3b7819..b257c64 100644
18274--- a/arch/x86/include/asm/module.h
18275+++ b/arch/x86/include/asm/module.h
18276@@ -5,6 +5,7 @@
18277
18278 #ifdef CONFIG_X86_64
18279 /* X86_64 does not define MODULE_PROC_FAMILY */
18280+#define MODULE_PROC_FAMILY ""
18281 #elif defined CONFIG_M486
18282 #define MODULE_PROC_FAMILY "486 "
18283 #elif defined CONFIG_M586
18284@@ -57,8 +58,20 @@
18285 #error unknown processor family
18286 #endif
18287
18288-#ifdef CONFIG_X86_32
18289-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
18290+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
18291+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
18292+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
18293+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
18294+#else
18295+#define MODULE_PAX_KERNEXEC ""
18296 #endif
18297
18298+#ifdef CONFIG_PAX_MEMORY_UDEREF
18299+#define MODULE_PAX_UDEREF "UDEREF "
18300+#else
18301+#define MODULE_PAX_UDEREF ""
18302+#endif
18303+
18304+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
18305+
18306 #endif /* _ASM_X86_MODULE_H */
18307diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
18308index 5f2fc44..106caa6 100644
18309--- a/arch/x86/include/asm/nmi.h
18310+++ b/arch/x86/include/asm/nmi.h
18311@@ -36,26 +36,35 @@ enum {
18312
18313 typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
18314
18315+struct nmiaction;
18316+
18317+struct nmiwork {
18318+ const struct nmiaction *action;
18319+ u64 max_duration;
18320+ struct irq_work irq_work;
18321+};
18322+
18323 struct nmiaction {
18324 struct list_head list;
18325 nmi_handler_t handler;
18326- u64 max_duration;
18327- struct irq_work irq_work;
18328 unsigned long flags;
18329 const char *name;
18330-};
18331+ struct nmiwork *work;
18332+} __do_const;
18333
18334 #define register_nmi_handler(t, fn, fg, n, init...) \
18335 ({ \
18336- static struct nmiaction init fn##_na = { \
18337+ static struct nmiwork fn##_nw; \
18338+ static const struct nmiaction init fn##_na = { \
18339 .handler = (fn), \
18340 .name = (n), \
18341 .flags = (fg), \
18342+ .work = &fn##_nw, \
18343 }; \
18344 __register_nmi_handler((t), &fn##_na); \
18345 })
18346
18347-int __register_nmi_handler(unsigned int, struct nmiaction *);
18348+int __register_nmi_handler(unsigned int, const struct nmiaction *);
18349
18350 void unregister_nmi_handler(unsigned int, const char *);
18351
18352diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
18353index 802dde3..9183e68 100644
18354--- a/arch/x86/include/asm/page.h
18355+++ b/arch/x86/include/asm/page.h
18356@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
18357 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
18358
18359 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
18360+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
18361
18362 #define __boot_va(x) __va(x)
18363 #define __boot_pa(x) __pa(x)
18364@@ -60,11 +61,21 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
18365 * virt_to_page(kaddr) returns a valid pointer if and only if
18366 * virt_addr_valid(kaddr) returns true.
18367 */
18368-#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
18369 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
18370 extern bool __virt_addr_valid(unsigned long kaddr);
18371 #define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
18372
18373+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
18374+#define virt_to_page(kaddr) \
18375+ ({ \
18376+ const void *__kaddr = (const void *)(kaddr); \
18377+ BUG_ON(!virt_addr_valid(__kaddr)); \
18378+ pfn_to_page(__pa(__kaddr) >> PAGE_SHIFT); \
18379+ })
18380+#else
18381+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
18382+#endif
18383+
18384 #endif /* __ASSEMBLY__ */
18385
18386 #include <asm-generic/memory_model.h>
18387diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
18388index f408caf..4a0455e 100644
18389--- a/arch/x86/include/asm/page_64.h
18390+++ b/arch/x86/include/asm/page_64.h
18391@@ -7,9 +7,9 @@
18392
18393 /* duplicated to the one in bootmem.h */
18394 extern unsigned long max_pfn;
18395-extern unsigned long phys_base;
18396+extern const unsigned long phys_base;
18397
18398-static inline unsigned long __phys_addr_nodebug(unsigned long x)
18399+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
18400 {
18401 unsigned long y = x - __START_KERNEL_map;
18402
18403diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
18404index cd6e1610..70f4418 100644
18405--- a/arch/x86/include/asm/paravirt.h
18406+++ b/arch/x86/include/asm/paravirt.h
18407@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
18408 return (pmd_t) { ret };
18409 }
18410
18411-static inline pmdval_t pmd_val(pmd_t pmd)
18412+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
18413 {
18414 pmdval_t ret;
18415
18416@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
18417 val);
18418 }
18419
18420+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
18421+{
18422+ pgdval_t val = native_pgd_val(pgd);
18423+
18424+ if (sizeof(pgdval_t) > sizeof(long))
18425+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
18426+ val, (u64)val >> 32);
18427+ else
18428+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
18429+ val);
18430+}
18431+
18432 static inline void pgd_clear(pgd_t *pgdp)
18433 {
18434 set_pgd(pgdp, __pgd(0));
18435@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
18436 pv_mmu_ops.set_fixmap(idx, phys, flags);
18437 }
18438
18439+#ifdef CONFIG_PAX_KERNEXEC
18440+static inline unsigned long pax_open_kernel(void)
18441+{
18442+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
18443+}
18444+
18445+static inline unsigned long pax_close_kernel(void)
18446+{
18447+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
18448+}
18449+#else
18450+static inline unsigned long pax_open_kernel(void) { return 0; }
18451+static inline unsigned long pax_close_kernel(void) { return 0; }
18452+#endif
18453+
18454 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
18455
18456 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
18457@@ -906,7 +933,7 @@ extern void default_banner(void);
18458
18459 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
18460 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
18461-#define PARA_INDIRECT(addr) *%cs:addr
18462+#define PARA_INDIRECT(addr) *%ss:addr
18463 #endif
18464
18465 #define INTERRUPT_RETURN \
18466@@ -981,6 +1008,21 @@ extern void default_banner(void);
18467 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
18468 CLBR_NONE, \
18469 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
18470+
18471+#define GET_CR0_INTO_RDI \
18472+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
18473+ mov %rax,%rdi
18474+
18475+#define SET_RDI_INTO_CR0 \
18476+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18477+
18478+#define GET_CR3_INTO_RDI \
18479+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
18480+ mov %rax,%rdi
18481+
18482+#define SET_RDI_INTO_CR3 \
18483+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
18484+
18485 #endif /* CONFIG_X86_32 */
18486
18487 #endif /* __ASSEMBLY__ */
18488diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
18489index 7549b8b..f0edfda 100644
18490--- a/arch/x86/include/asm/paravirt_types.h
18491+++ b/arch/x86/include/asm/paravirt_types.h
18492@@ -84,7 +84,7 @@ struct pv_init_ops {
18493 */
18494 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
18495 unsigned long addr, unsigned len);
18496-};
18497+} __no_const __no_randomize_layout;
18498
18499
18500 struct pv_lazy_ops {
18501@@ -92,13 +92,13 @@ struct pv_lazy_ops {
18502 void (*enter)(void);
18503 void (*leave)(void);
18504 void (*flush)(void);
18505-};
18506+} __no_randomize_layout;
18507
18508 struct pv_time_ops {
18509 unsigned long long (*sched_clock)(void);
18510 unsigned long long (*steal_clock)(int cpu);
18511 unsigned long (*get_tsc_khz)(void);
18512-};
18513+} __no_const __no_randomize_layout;
18514
18515 struct pv_cpu_ops {
18516 /* hooks for various privileged instructions */
18517@@ -192,7 +192,7 @@ struct pv_cpu_ops {
18518
18519 void (*start_context_switch)(struct task_struct *prev);
18520 void (*end_context_switch)(struct task_struct *next);
18521-};
18522+} __no_const __no_randomize_layout;
18523
18524 struct pv_irq_ops {
18525 /*
18526@@ -215,7 +215,7 @@ struct pv_irq_ops {
18527 #ifdef CONFIG_X86_64
18528 void (*adjust_exception_frame)(void);
18529 #endif
18530-};
18531+} __no_randomize_layout;
18532
18533 struct pv_apic_ops {
18534 #ifdef CONFIG_X86_LOCAL_APIC
18535@@ -223,7 +223,7 @@ struct pv_apic_ops {
18536 unsigned long start_eip,
18537 unsigned long start_esp);
18538 #endif
18539-};
18540+} __no_const __no_randomize_layout;
18541
18542 struct pv_mmu_ops {
18543 unsigned long (*read_cr2)(void);
18544@@ -313,6 +313,7 @@ struct pv_mmu_ops {
18545 struct paravirt_callee_save make_pud;
18546
18547 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
18548+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
18549 #endif /* PAGETABLE_LEVELS == 4 */
18550 #endif /* PAGETABLE_LEVELS >= 3 */
18551
18552@@ -324,7 +325,13 @@ struct pv_mmu_ops {
18553 an mfn. We can tell which is which from the index. */
18554 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
18555 phys_addr_t phys, pgprot_t flags);
18556-};
18557+
18558+#ifdef CONFIG_PAX_KERNEXEC
18559+ unsigned long (*pax_open_kernel)(void);
18560+ unsigned long (*pax_close_kernel)(void);
18561+#endif
18562+
18563+} __no_randomize_layout;
18564
18565 struct arch_spinlock;
18566 #ifdef CONFIG_SMP
18567@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
18568 struct pv_lock_ops {
18569 struct paravirt_callee_save lock_spinning;
18570 void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
18571-};
18572+} __no_randomize_layout;
18573
18574 /* This contains all the paravirt structures: we get a convenient
18575 * number for each function using the offset which we use to indicate
18576- * what to patch. */
18577+ * what to patch.
18578+ * shouldn't be randomized due to the "NEAT TRICK" in paravirt.c
18579+ */
18580+
18581 struct paravirt_patch_template {
18582 struct pv_init_ops pv_init_ops;
18583 struct pv_time_ops pv_time_ops;
18584@@ -349,7 +359,7 @@ struct paravirt_patch_template {
18585 struct pv_apic_ops pv_apic_ops;
18586 struct pv_mmu_ops pv_mmu_ops;
18587 struct pv_lock_ops pv_lock_ops;
18588-};
18589+} __no_randomize_layout;
18590
18591 extern struct pv_info pv_info;
18592 extern struct pv_init_ops pv_init_ops;
18593diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
18594index c4412e9..90e88c5 100644
18595--- a/arch/x86/include/asm/pgalloc.h
18596+++ b/arch/x86/include/asm/pgalloc.h
18597@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
18598 pmd_t *pmd, pte_t *pte)
18599 {
18600 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18601+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
18602+}
18603+
18604+static inline void pmd_populate_user(struct mm_struct *mm,
18605+ pmd_t *pmd, pte_t *pte)
18606+{
18607+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
18608 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
18609 }
18610
18611@@ -108,12 +115,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
18612
18613 #ifdef CONFIG_X86_PAE
18614 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
18615+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
18616+{
18617+ pud_populate(mm, pudp, pmd);
18618+}
18619 #else /* !CONFIG_X86_PAE */
18620 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18621 {
18622 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18623 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
18624 }
18625+
18626+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
18627+{
18628+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
18629+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
18630+}
18631 #endif /* CONFIG_X86_PAE */
18632
18633 #if PAGETABLE_LEVELS > 3
18634@@ -123,6 +140,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18635 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
18636 }
18637
18638+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
18639+{
18640+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
18641+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
18642+}
18643+
18644 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
18645 {
18646 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
18647diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
18648index 206a87f..1623b06 100644
18649--- a/arch/x86/include/asm/pgtable-2level.h
18650+++ b/arch/x86/include/asm/pgtable-2level.h
18651@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
18652
18653 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18654 {
18655+ pax_open_kernel();
18656 *pmdp = pmd;
18657+ pax_close_kernel();
18658 }
18659
18660 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18661diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
18662index 81bb91b..9392125 100644
18663--- a/arch/x86/include/asm/pgtable-3level.h
18664+++ b/arch/x86/include/asm/pgtable-3level.h
18665@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
18666
18667 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
18668 {
18669+ pax_open_kernel();
18670 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
18671+ pax_close_kernel();
18672 }
18673
18674 static inline void native_set_pud(pud_t *pudp, pud_t pud)
18675 {
18676+ pax_open_kernel();
18677 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
18678+ pax_close_kernel();
18679 }
18680
18681 /*
18682diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
18683index aa97a07..5c53c32 100644
18684--- a/arch/x86/include/asm/pgtable.h
18685+++ b/arch/x86/include/asm/pgtable.h
18686@@ -46,6 +46,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18687
18688 #ifndef __PAGETABLE_PUD_FOLDED
18689 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
18690+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
18691 #define pgd_clear(pgd) native_pgd_clear(pgd)
18692 #endif
18693
18694@@ -83,12 +84,53 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
18695
18696 #define arch_end_context_switch(prev) do {} while(0)
18697
18698+#define pax_open_kernel() native_pax_open_kernel()
18699+#define pax_close_kernel() native_pax_close_kernel()
18700 #endif /* CONFIG_PARAVIRT */
18701
18702+#define __HAVE_ARCH_PAX_OPEN_KERNEL
18703+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
18704+
18705+#ifdef CONFIG_PAX_KERNEXEC
18706+static inline unsigned long native_pax_open_kernel(void)
18707+{
18708+ unsigned long cr0;
18709+
18710+ preempt_disable();
18711+ barrier();
18712+ cr0 = read_cr0() ^ X86_CR0_WP;
18713+ BUG_ON(cr0 & X86_CR0_WP);
18714+ write_cr0(cr0);
18715+ barrier();
18716+ return cr0 ^ X86_CR0_WP;
18717+}
18718+
18719+static inline unsigned long native_pax_close_kernel(void)
18720+{
18721+ unsigned long cr0;
18722+
18723+ barrier();
18724+ cr0 = read_cr0() ^ X86_CR0_WP;
18725+ BUG_ON(!(cr0 & X86_CR0_WP));
18726+ write_cr0(cr0);
18727+ barrier();
18728+ preempt_enable_no_resched();
18729+ return cr0 ^ X86_CR0_WP;
18730+}
18731+#else
18732+static inline unsigned long native_pax_open_kernel(void) { return 0; }
18733+static inline unsigned long native_pax_close_kernel(void) { return 0; }
18734+#endif
18735+
18736 /*
18737 * The following only work if pte_present() is true.
18738 * Undefined behaviour if not..
18739 */
18740+static inline int pte_user(pte_t pte)
18741+{
18742+ return pte_val(pte) & _PAGE_USER;
18743+}
18744+
18745 static inline int pte_dirty(pte_t pte)
18746 {
18747 return pte_flags(pte) & _PAGE_DIRTY;
18748@@ -155,6 +197,11 @@ static inline unsigned long pud_pfn(pud_t pud)
18749 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
18750 }
18751
18752+static inline unsigned long pgd_pfn(pgd_t pgd)
18753+{
18754+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
18755+}
18756+
18757 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
18758
18759 static inline int pmd_large(pmd_t pte)
18760@@ -208,9 +255,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
18761 return pte_clear_flags(pte, _PAGE_RW);
18762 }
18763
18764+static inline pte_t pte_mkread(pte_t pte)
18765+{
18766+ return __pte(pte_val(pte) | _PAGE_USER);
18767+}
18768+
18769 static inline pte_t pte_mkexec(pte_t pte)
18770 {
18771- return pte_clear_flags(pte, _PAGE_NX);
18772+#ifdef CONFIG_X86_PAE
18773+ if (__supported_pte_mask & _PAGE_NX)
18774+ return pte_clear_flags(pte, _PAGE_NX);
18775+ else
18776+#endif
18777+ return pte_set_flags(pte, _PAGE_USER);
18778+}
18779+
18780+static inline pte_t pte_exprotect(pte_t pte)
18781+{
18782+#ifdef CONFIG_X86_PAE
18783+ if (__supported_pte_mask & _PAGE_NX)
18784+ return pte_set_flags(pte, _PAGE_NX);
18785+ else
18786+#endif
18787+ return pte_clear_flags(pte, _PAGE_USER);
18788 }
18789
18790 static inline pte_t pte_mkdirty(pte_t pte)
18791@@ -440,6 +507,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
18792 #endif
18793
18794 #ifndef __ASSEMBLY__
18795+
18796+#ifdef CONFIG_PAX_PER_CPU_PGD
18797+extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
18798+enum cpu_pgd_type {kernel = 0, user = 1};
18799+static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
18800+{
18801+ return cpu_pgd[cpu][type];
18802+}
18803+#endif
18804+
18805 #include <linux/mm_types.h>
18806 #include <linux/mmdebug.h>
18807 #include <linux/log2.h>
18808@@ -586,7 +663,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
18809 * Currently stuck as a macro due to indirect forward reference to
18810 * linux/mmzone.h's __section_mem_map_addr() definition:
18811 */
18812-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
18813+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
18814
18815 /* Find an entry in the second-level page table.. */
18816 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
18817@@ -626,7 +703,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
18818 * Currently stuck as a macro due to indirect forward reference to
18819 * linux/mmzone.h's __section_mem_map_addr() definition:
18820 */
18821-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
18822+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
18823
18824 /* to find an entry in a page-table-directory. */
18825 static inline unsigned long pud_index(unsigned long address)
18826@@ -641,7 +718,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
18827
18828 static inline int pgd_bad(pgd_t pgd)
18829 {
18830- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
18831+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
18832 }
18833
18834 static inline int pgd_none(pgd_t pgd)
18835@@ -664,7 +741,12 @@ static inline int pgd_none(pgd_t pgd)
18836 * pgd_offset() returns a (pgd_t *)
18837 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
18838 */
18839-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
18840+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
18841+
18842+#ifdef CONFIG_PAX_PER_CPU_PGD
18843+#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
18844+#endif
18845+
18846 /*
18847 * a shortcut which implies the use of the kernel's pgd, instead
18848 * of a process's
18849@@ -675,6 +757,23 @@ static inline int pgd_none(pgd_t pgd)
18850 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
18851 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
18852
18853+#ifdef CONFIG_X86_32
18854+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
18855+#else
18856+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
18857+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
18858+
18859+#ifdef CONFIG_PAX_MEMORY_UDEREF
18860+#ifdef __ASSEMBLY__
18861+#define pax_user_shadow_base pax_user_shadow_base(%rip)
18862+#else
18863+extern unsigned long pax_user_shadow_base;
18864+extern pgdval_t clone_pgd_mask;
18865+#endif
18866+#endif
18867+
18868+#endif
18869+
18870 #ifndef __ASSEMBLY__
18871
18872 extern int direct_gbpages;
18873@@ -841,11 +940,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
18874 * dst and src can be on the same page, but the range must not overlap,
18875 * and must not cross a page boundary.
18876 */
18877-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
18878+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
18879 {
18880- memcpy(dst, src, count * sizeof(pgd_t));
18881+ pax_open_kernel();
18882+ while (count--)
18883+ *dst++ = *src++;
18884+ pax_close_kernel();
18885 }
18886
18887+#ifdef CONFIG_PAX_PER_CPU_PGD
18888+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
18889+#endif
18890+
18891+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18892+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
18893+#else
18894+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
18895+#endif
18896+
18897 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
18898 static inline int page_level_shift(enum pg_level level)
18899 {
18900diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
18901index 9ee3221..b979c6b 100644
18902--- a/arch/x86/include/asm/pgtable_32.h
18903+++ b/arch/x86/include/asm/pgtable_32.h
18904@@ -25,9 +25,6 @@
18905 struct mm_struct;
18906 struct vm_area_struct;
18907
18908-extern pgd_t swapper_pg_dir[1024];
18909-extern pgd_t initial_page_table[1024];
18910-
18911 static inline void pgtable_cache_init(void) { }
18912 static inline void check_pgt_cache(void) { }
18913 void paging_init(void);
18914@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18915 # include <asm/pgtable-2level.h>
18916 #endif
18917
18918+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
18919+extern pgd_t initial_page_table[PTRS_PER_PGD];
18920+#ifdef CONFIG_X86_PAE
18921+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
18922+#endif
18923+
18924 #if defined(CONFIG_HIGHPTE)
18925 #define pte_offset_map(dir, address) \
18926 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
18927@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
18928 /* Clear a kernel PTE and flush it from the TLB */
18929 #define kpte_clear_flush(ptep, vaddr) \
18930 do { \
18931+ pax_open_kernel(); \
18932 pte_clear(&init_mm, (vaddr), (ptep)); \
18933+ pax_close_kernel(); \
18934 __flush_tlb_one((vaddr)); \
18935 } while (0)
18936
18937 #endif /* !__ASSEMBLY__ */
18938
18939+#define HAVE_ARCH_UNMAPPED_AREA
18940+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
18941+
18942 /*
18943 * kern_addr_valid() is (1) for FLATMEM and (0) for
18944 * SPARSEMEM and DISCONTIGMEM
18945diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
18946index ed5903b..c7fe163 100644
18947--- a/arch/x86/include/asm/pgtable_32_types.h
18948+++ b/arch/x86/include/asm/pgtable_32_types.h
18949@@ -8,7 +8,7 @@
18950 */
18951 #ifdef CONFIG_X86_PAE
18952 # include <asm/pgtable-3level_types.h>
18953-# define PMD_SIZE (1UL << PMD_SHIFT)
18954+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
18955 # define PMD_MASK (~(PMD_SIZE - 1))
18956 #else
18957 # include <asm/pgtable-2level_types.h>
18958@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
18959 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
18960 #endif
18961
18962+#ifdef CONFIG_PAX_KERNEXEC
18963+#ifndef __ASSEMBLY__
18964+extern unsigned char MODULES_EXEC_VADDR[];
18965+extern unsigned char MODULES_EXEC_END[];
18966+#endif
18967+#include <asm/boot.h>
18968+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
18969+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
18970+#else
18971+#define ktla_ktva(addr) (addr)
18972+#define ktva_ktla(addr) (addr)
18973+#endif
18974+
18975 #define MODULES_VADDR VMALLOC_START
18976 #define MODULES_END VMALLOC_END
18977 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
18978diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
18979index 3874693..d7906ac 100644
18980--- a/arch/x86/include/asm/pgtable_64.h
18981+++ b/arch/x86/include/asm/pgtable_64.h
18982@@ -16,11 +16,16 @@
18983
18984 extern pud_t level3_kernel_pgt[512];
18985 extern pud_t level3_ident_pgt[512];
18986+extern pud_t level3_vmalloc_start_pgt[512];
18987+extern pud_t level3_vmalloc_end_pgt[512];
18988+extern pud_t level3_vmemmap_pgt[512];
18989+extern pud_t level2_vmemmap_pgt[512];
18990 extern pmd_t level2_kernel_pgt[512];
18991 extern pmd_t level2_fixmap_pgt[512];
18992-extern pmd_t level2_ident_pgt[512];
18993+extern pmd_t level2_ident_pgt[512*2];
18994 extern pte_t level1_fixmap_pgt[512];
18995-extern pgd_t init_level4_pgt[];
18996+extern pte_t level1_vsyscall_pgt[512];
18997+extern pgd_t init_level4_pgt[512];
18998
18999 #define swapper_pg_dir init_level4_pgt
19000
19001@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
19002
19003 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
19004 {
19005+ pax_open_kernel();
19006 *pmdp = pmd;
19007+ pax_close_kernel();
19008 }
19009
19010 static inline void native_pmd_clear(pmd_t *pmd)
19011@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
19012
19013 static inline void native_set_pud(pud_t *pudp, pud_t pud)
19014 {
19015+ pax_open_kernel();
19016 *pudp = pud;
19017+ pax_close_kernel();
19018 }
19019
19020 static inline void native_pud_clear(pud_t *pud)
19021@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
19022
19023 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
19024 {
19025+ pax_open_kernel();
19026+ *pgdp = pgd;
19027+ pax_close_kernel();
19028+}
19029+
19030+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
19031+{
19032 *pgdp = pgd;
19033 }
19034
19035diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
19036index 7166e25..baaa6fe 100644
19037--- a/arch/x86/include/asm/pgtable_64_types.h
19038+++ b/arch/x86/include/asm/pgtable_64_types.h
19039@@ -61,9 +61,14 @@ typedef struct { pteval_t pte; } pte_t;
19040 #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
19041 #define MODULES_END _AC(0xffffffffff000000, UL)
19042 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
19043+#define MODULES_EXEC_VADDR MODULES_VADDR
19044+#define MODULES_EXEC_END MODULES_END
19045 #define ESPFIX_PGD_ENTRY _AC(-2, UL)
19046 #define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << PGDIR_SHIFT)
19047
19048+#define ktla_ktva(addr) (addr)
19049+#define ktva_ktla(addr) (addr)
19050+
19051 #define EARLY_DYNAMIC_PAGE_TABLES 64
19052
19053 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
19054diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
19055index f216963..6bd7c21 100644
19056--- a/arch/x86/include/asm/pgtable_types.h
19057+++ b/arch/x86/include/asm/pgtable_types.h
19058@@ -111,8 +111,10 @@
19059
19060 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
19061 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
19062-#else
19063+#elif defined(CONFIG_KMEMCHECK) || defined(CONFIG_MEM_SOFT_DIRTY)
19064 #define _PAGE_NX (_AT(pteval_t, 0))
19065+#else
19066+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
19067 #endif
19068
19069 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
19070@@ -151,6 +153,9 @@
19071 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
19072 _PAGE_ACCESSED)
19073
19074+#define PAGE_READONLY_NOEXEC PAGE_READONLY
19075+#define PAGE_SHARED_NOEXEC PAGE_SHARED
19076+
19077 #define __PAGE_KERNEL_EXEC \
19078 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
19079 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
19080@@ -161,7 +166,7 @@
19081 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
19082 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
19083 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
19084-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
19085+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
19086 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
19087 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
19088 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
19089@@ -218,7 +223,7 @@
19090 #ifdef CONFIG_X86_64
19091 #define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
19092 #else
19093-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
19094+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
19095 #define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
19096 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
19097 #endif
19098@@ -257,7 +262,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
19099 {
19100 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
19101 }
19102+#endif
19103
19104+#if PAGETABLE_LEVELS == 3
19105+#include <asm-generic/pgtable-nopud.h>
19106+#endif
19107+
19108+#if PAGETABLE_LEVELS == 2
19109+#include <asm-generic/pgtable-nopmd.h>
19110+#endif
19111+
19112+#ifndef __ASSEMBLY__
19113 #if PAGETABLE_LEVELS > 3
19114 typedef struct { pudval_t pud; } pud_t;
19115
19116@@ -271,8 +286,6 @@ static inline pudval_t native_pud_val(pud_t pud)
19117 return pud.pud;
19118 }
19119 #else
19120-#include <asm-generic/pgtable-nopud.h>
19121-
19122 static inline pudval_t native_pud_val(pud_t pud)
19123 {
19124 return native_pgd_val(pud.pgd);
19125@@ -292,8 +305,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
19126 return pmd.pmd;
19127 }
19128 #else
19129-#include <asm-generic/pgtable-nopmd.h>
19130-
19131 static inline pmdval_t native_pmd_val(pmd_t pmd)
19132 {
19133 return native_pgd_val(pmd.pud.pgd);
19134@@ -333,7 +344,6 @@ typedef struct page *pgtable_t;
19135
19136 extern pteval_t __supported_pte_mask;
19137 extern void set_nx(void);
19138-extern int nx_enabled;
19139
19140 #define pgprot_writecombine pgprot_writecombine
19141 extern pgprot_t pgprot_writecombine(pgprot_t prot);
19142diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
19143index 7024c12..71c46b9 100644
19144--- a/arch/x86/include/asm/preempt.h
19145+++ b/arch/x86/include/asm/preempt.h
19146@@ -87,7 +87,7 @@ static __always_inline void __preempt_count_sub(int val)
19147 */
19148 static __always_inline bool __preempt_count_dec_and_test(void)
19149 {
19150- GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
19151+ GEN_UNARY_RMWcc("decl", "incl", __preempt_count, __percpu_arg(0), "e");
19152 }
19153
19154 /*
19155diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
19156index eb71ec7..f06532a 100644
19157--- a/arch/x86/include/asm/processor.h
19158+++ b/arch/x86/include/asm/processor.h
19159@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
19160 /* Index into per_cpu list: */
19161 u16 cpu_index;
19162 u32 microcode;
19163-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
19164+} __attribute__((__aligned__(SMP_CACHE_BYTES))) __randomize_layout;
19165
19166 #define X86_VENDOR_INTEL 0
19167 #define X86_VENDOR_CYRIX 1
19168@@ -198,9 +198,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
19169 : "memory");
19170 }
19171
19172+/* invpcid (%rdx),%rax */
19173+#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
19174+
19175+#define INVPCID_SINGLE_ADDRESS 0UL
19176+#define INVPCID_SINGLE_CONTEXT 1UL
19177+#define INVPCID_ALL_GLOBAL 2UL
19178+#define INVPCID_ALL_NONGLOBAL 3UL
19179+
19180+#define PCID_KERNEL 0UL
19181+#define PCID_USER 1UL
19182+#define PCID_NOFLUSH (1UL << 63)
19183+
19184 static inline void load_cr3(pgd_t *pgdir)
19185 {
19186- write_cr3(__pa(pgdir));
19187+ write_cr3(__pa(pgdir) | PCID_KERNEL);
19188 }
19189
19190 #ifdef CONFIG_X86_32
19191@@ -282,7 +294,7 @@ struct tss_struct {
19192
19193 } ____cacheline_aligned;
19194
19195-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
19196+extern struct tss_struct init_tss[NR_CPUS];
19197
19198 /*
19199 * Save the original ist values for checking stack pointers during debugging
19200@@ -478,6 +490,7 @@ struct thread_struct {
19201 unsigned short ds;
19202 unsigned short fsindex;
19203 unsigned short gsindex;
19204+ unsigned short ss;
19205 #endif
19206 #ifdef CONFIG_X86_32
19207 unsigned long ip;
19208@@ -587,29 +600,8 @@ static inline void load_sp0(struct tss_struct *tss,
19209 extern unsigned long mmu_cr4_features;
19210 extern u32 *trampoline_cr4_features;
19211
19212-static inline void set_in_cr4(unsigned long mask)
19213-{
19214- unsigned long cr4;
19215-
19216- mmu_cr4_features |= mask;
19217- if (trampoline_cr4_features)
19218- *trampoline_cr4_features = mmu_cr4_features;
19219- cr4 = read_cr4();
19220- cr4 |= mask;
19221- write_cr4(cr4);
19222-}
19223-
19224-static inline void clear_in_cr4(unsigned long mask)
19225-{
19226- unsigned long cr4;
19227-
19228- mmu_cr4_features &= ~mask;
19229- if (trampoline_cr4_features)
19230- *trampoline_cr4_features = mmu_cr4_features;
19231- cr4 = read_cr4();
19232- cr4 &= ~mask;
19233- write_cr4(cr4);
19234-}
19235+extern void set_in_cr4(unsigned long mask);
19236+extern void clear_in_cr4(unsigned long mask);
19237
19238 typedef struct {
19239 unsigned long seg;
19240@@ -837,11 +829,18 @@ static inline void spin_lock_prefetch(const void *x)
19241 */
19242 #define TASK_SIZE PAGE_OFFSET
19243 #define TASK_SIZE_MAX TASK_SIZE
19244+
19245+#ifdef CONFIG_PAX_SEGMEXEC
19246+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
19247+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
19248+#else
19249 #define STACK_TOP TASK_SIZE
19250-#define STACK_TOP_MAX STACK_TOP
19251+#endif
19252+
19253+#define STACK_TOP_MAX TASK_SIZE
19254
19255 #define INIT_THREAD { \
19256- .sp0 = sizeof(init_stack) + (long)&init_stack, \
19257+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
19258 .vm86_info = NULL, \
19259 .sysenter_cs = __KERNEL_CS, \
19260 .io_bitmap_ptr = NULL, \
19261@@ -855,7 +854,7 @@ static inline void spin_lock_prefetch(const void *x)
19262 */
19263 #define INIT_TSS { \
19264 .x86_tss = { \
19265- .sp0 = sizeof(init_stack) + (long)&init_stack, \
19266+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
19267 .ss0 = __KERNEL_DS, \
19268 .ss1 = __KERNEL_CS, \
19269 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
19270@@ -866,11 +865,7 @@ static inline void spin_lock_prefetch(const void *x)
19271 extern unsigned long thread_saved_pc(struct task_struct *tsk);
19272
19273 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
19274-#define KSTK_TOP(info) \
19275-({ \
19276- unsigned long *__ptr = (unsigned long *)(info); \
19277- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
19278-})
19279+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
19280
19281 /*
19282 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
19283@@ -885,7 +880,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19284 #define task_pt_regs(task) \
19285 ({ \
19286 struct pt_regs *__regs__; \
19287- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
19288+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
19289 __regs__ - 1; \
19290 })
19291
19292@@ -895,13 +890,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19293 /*
19294 * User space process size. 47bits minus one guard page.
19295 */
19296-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
19297+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
19298
19299 /* This decides where the kernel will search for a free chunk of vm
19300 * space during mmap's.
19301 */
19302 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
19303- 0xc0000000 : 0xFFFFe000)
19304+ 0xc0000000 : 0xFFFFf000)
19305
19306 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
19307 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
19308@@ -912,11 +907,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
19309 #define STACK_TOP_MAX TASK_SIZE_MAX
19310
19311 #define INIT_THREAD { \
19312- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
19313+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
19314 }
19315
19316 #define INIT_TSS { \
19317- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
19318+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
19319 }
19320
19321 /*
19322@@ -944,6 +939,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
19323 */
19324 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
19325
19326+#ifdef CONFIG_PAX_SEGMEXEC
19327+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
19328+#endif
19329+
19330 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
19331
19332 /* Get/set a process' ability to use the timestamp counter instruction */
19333@@ -970,7 +969,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
19334 return 0;
19335 }
19336
19337-extern unsigned long arch_align_stack(unsigned long sp);
19338+#define arch_align_stack(x) ((x) & ~0xfUL)
19339 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
19340
19341 void default_idle(void);
19342@@ -980,6 +979,6 @@ bool xen_set_default_idle(void);
19343 #define xen_set_default_idle 0
19344 #endif
19345
19346-void stop_this_cpu(void *dummy);
19347+void stop_this_cpu(void *dummy) __noreturn;
19348 void df_debug(struct pt_regs *regs, long error_code);
19349 #endif /* _ASM_X86_PROCESSOR_H */
19350diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
19351index 6205f0c..688a3a9 100644
19352--- a/arch/x86/include/asm/ptrace.h
19353+++ b/arch/x86/include/asm/ptrace.h
19354@@ -84,28 +84,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
19355 }
19356
19357 /*
19358- * user_mode_vm(regs) determines whether a register set came from user mode.
19359+ * user_mode(regs) determines whether a register set came from user mode.
19360 * This is true if V8086 mode was enabled OR if the register set was from
19361 * protected mode with RPL-3 CS value. This tricky test checks that with
19362 * one comparison. Many places in the kernel can bypass this full check
19363- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
19364+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
19365+ * be used.
19366 */
19367-static inline int user_mode(struct pt_regs *regs)
19368+static inline int user_mode_novm(struct pt_regs *regs)
19369 {
19370 #ifdef CONFIG_X86_32
19371 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
19372 #else
19373- return !!(regs->cs & 3);
19374+ return !!(regs->cs & SEGMENT_RPL_MASK);
19375 #endif
19376 }
19377
19378-static inline int user_mode_vm(struct pt_regs *regs)
19379+static inline int user_mode(struct pt_regs *regs)
19380 {
19381 #ifdef CONFIG_X86_32
19382 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
19383 USER_RPL;
19384 #else
19385- return user_mode(regs);
19386+ return user_mode_novm(regs);
19387 #endif
19388 }
19389
19390@@ -121,15 +122,16 @@ static inline int v8086_mode(struct pt_regs *regs)
19391 #ifdef CONFIG_X86_64
19392 static inline bool user_64bit_mode(struct pt_regs *regs)
19393 {
19394+ unsigned long cs = regs->cs & 0xffff;
19395 #ifndef CONFIG_PARAVIRT
19396 /*
19397 * On non-paravirt systems, this is the only long mode CPL 3
19398 * selector. We do not allow long mode selectors in the LDT.
19399 */
19400- return regs->cs == __USER_CS;
19401+ return cs == __USER_CS;
19402 #else
19403 /* Headers are too twisted for this to go in paravirt.h. */
19404- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
19405+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
19406 #endif
19407 }
19408
19409@@ -180,9 +182,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
19410 * Traps from the kernel do not save sp and ss.
19411 * Use the helper function to retrieve sp.
19412 */
19413- if (offset == offsetof(struct pt_regs, sp) &&
19414- regs->cs == __KERNEL_CS)
19415- return kernel_stack_pointer(regs);
19416+ if (offset == offsetof(struct pt_regs, sp)) {
19417+ unsigned long cs = regs->cs & 0xffff;
19418+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
19419+ return kernel_stack_pointer(regs);
19420+ }
19421 #endif
19422 return *(unsigned long *)((unsigned long)regs + offset);
19423 }
19424diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
19425index ae0e241..e80b10b 100644
19426--- a/arch/x86/include/asm/qrwlock.h
19427+++ b/arch/x86/include/asm/qrwlock.h
19428@@ -7,8 +7,8 @@
19429 #define queue_write_unlock queue_write_unlock
19430 static inline void queue_write_unlock(struct qrwlock *lock)
19431 {
19432- barrier();
19433- ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
19434+ barrier();
19435+ ACCESS_ONCE_RW(*(u8 *)&lock->cnts) = 0;
19436 }
19437 #endif
19438
19439diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
19440index 9c6b890..5305f53 100644
19441--- a/arch/x86/include/asm/realmode.h
19442+++ b/arch/x86/include/asm/realmode.h
19443@@ -22,16 +22,14 @@ struct real_mode_header {
19444 #endif
19445 /* APM/BIOS reboot */
19446 u32 machine_real_restart_asm;
19447-#ifdef CONFIG_X86_64
19448 u32 machine_real_restart_seg;
19449-#endif
19450 };
19451
19452 /* This must match data at trampoline_32/64.S */
19453 struct trampoline_header {
19454 #ifdef CONFIG_X86_32
19455 u32 start;
19456- u16 gdt_pad;
19457+ u16 boot_cs;
19458 u16 gdt_limit;
19459 u32 gdt_base;
19460 #else
19461diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
19462index a82c4f1..ac45053 100644
19463--- a/arch/x86/include/asm/reboot.h
19464+++ b/arch/x86/include/asm/reboot.h
19465@@ -6,13 +6,13 @@
19466 struct pt_regs;
19467
19468 struct machine_ops {
19469- void (*restart)(char *cmd);
19470- void (*halt)(void);
19471- void (*power_off)(void);
19472+ void (* __noreturn restart)(char *cmd);
19473+ void (* __noreturn halt)(void);
19474+ void (* __noreturn power_off)(void);
19475 void (*shutdown)(void);
19476 void (*crash_shutdown)(struct pt_regs *);
19477- void (*emergency_restart)(void);
19478-};
19479+ void (* __noreturn emergency_restart)(void);
19480+} __no_const;
19481
19482 extern struct machine_ops machine_ops;
19483
19484diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h
19485index 8f7866a..e442f20 100644
19486--- a/arch/x86/include/asm/rmwcc.h
19487+++ b/arch/x86/include/asm/rmwcc.h
19488@@ -3,7 +3,34 @@
19489
19490 #ifdef CC_HAVE_ASM_GOTO
19491
19492-#define __GEN_RMWcc(fullop, var, cc, ...) \
19493+#ifdef CONFIG_PAX_REFCOUNT
19494+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19495+do { \
19496+ asm_volatile_goto (fullop \
19497+ ";jno 0f\n" \
19498+ fullantiop \
19499+ ";int $4\n0:\n" \
19500+ _ASM_EXTABLE(0b, 0b) \
19501+ ";j" cc " %l[cc_label]" \
19502+ : : "m" (var), ## __VA_ARGS__ \
19503+ : "memory" : cc_label); \
19504+ return 0; \
19505+cc_label: \
19506+ return 1; \
19507+} while (0)
19508+#else
19509+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19510+do { \
19511+ asm_volatile_goto (fullop ";j" cc " %l[cc_label]" \
19512+ : : "m" (var), ## __VA_ARGS__ \
19513+ : "memory" : cc_label); \
19514+ return 0; \
19515+cc_label: \
19516+ return 1; \
19517+} while (0)
19518+#endif
19519+
19520+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19521 do { \
19522 asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
19523 : : "m" (var), ## __VA_ARGS__ \
19524@@ -13,15 +40,46 @@ cc_label: \
19525 return 1; \
19526 } while (0)
19527
19528-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19529- __GEN_RMWcc(op " " arg0, var, cc)
19530+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19531+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19532
19533-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19534- __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
19535+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19536+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19537+
19538+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19539+ __GEN_RMWcc(op " %1, " arg0, antiop " %1, " arg0, var, cc, vcon (val))
19540+
19541+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19542+ __GEN_RMWcc_unchecked(op " %1, " arg0, var, cc, vcon (val))
19543
19544 #else /* !CC_HAVE_ASM_GOTO */
19545
19546-#define __GEN_RMWcc(fullop, var, cc, ...) \
19547+#ifdef CONFIG_PAX_REFCOUNT
19548+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19549+do { \
19550+ char c; \
19551+ asm volatile (fullop \
19552+ ";jno 0f\n" \
19553+ fullantiop \
19554+ ";int $4\n0:\n" \
19555+ _ASM_EXTABLE(0b, 0b) \
19556+ "; set" cc " %1" \
19557+ : "+m" (var), "=qm" (c) \
19558+ : __VA_ARGS__ : "memory"); \
19559+ return c != 0; \
19560+} while (0)
19561+#else
19562+#define __GEN_RMWcc(fullop, fullantiop, var, cc, ...) \
19563+do { \
19564+ char c; \
19565+ asm volatile (fullop "; set" cc " %1" \
19566+ : "+m" (var), "=qm" (c) \
19567+ : __VA_ARGS__ : "memory"); \
19568+ return c != 0; \
19569+} while (0)
19570+#endif
19571+
19572+#define __GEN_RMWcc_unchecked(fullop, var, cc, ...) \
19573 do { \
19574 char c; \
19575 asm volatile (fullop "; set" cc " %1" \
19576@@ -30,11 +88,17 @@ do { \
19577 return c != 0; \
19578 } while (0)
19579
19580-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
19581- __GEN_RMWcc(op " " arg0, var, cc)
19582+#define GEN_UNARY_RMWcc(op, antiop, var, arg0, cc) \
19583+ __GEN_RMWcc(op " " arg0, antiop " " arg0, var, cc)
19584+
19585+#define GEN_UNARY_RMWcc_unchecked(op, var, arg0, cc) \
19586+ __GEN_RMWcc_unchecked(op " " arg0, var, cc)
19587+
19588+#define GEN_BINARY_RMWcc(op, antiop, var, vcon, val, arg0, cc) \
19589+ __GEN_RMWcc(op " %2, " arg0, antiop " %2, " arg0, var, cc, vcon (val))
19590
19591-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
19592- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
19593+#define GEN_BINARY_RMWcc_unchecked(op, var, vcon, val, arg0, cc) \
19594+ __GEN_RMWcc_unchecked(op " %2, " arg0, var, cc, vcon (val))
19595
19596 #endif /* CC_HAVE_ASM_GOTO */
19597
19598diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
19599index cad82c9..2e5c5c1 100644
19600--- a/arch/x86/include/asm/rwsem.h
19601+++ b/arch/x86/include/asm/rwsem.h
19602@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
19603 {
19604 asm volatile("# beginning down_read\n\t"
19605 LOCK_PREFIX _ASM_INC "(%1)\n\t"
19606+
19607+#ifdef CONFIG_PAX_REFCOUNT
19608+ "jno 0f\n"
19609+ LOCK_PREFIX _ASM_DEC "(%1)\n"
19610+ "int $4\n0:\n"
19611+ _ASM_EXTABLE(0b, 0b)
19612+#endif
19613+
19614 /* adds 0x00000001 */
19615 " jns 1f\n"
19616 " call call_rwsem_down_read_failed\n"
19617@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
19618 "1:\n\t"
19619 " mov %1,%2\n\t"
19620 " add %3,%2\n\t"
19621+
19622+#ifdef CONFIG_PAX_REFCOUNT
19623+ "jno 0f\n"
19624+ "sub %3,%2\n"
19625+ "int $4\n0:\n"
19626+ _ASM_EXTABLE(0b, 0b)
19627+#endif
19628+
19629 " jle 2f\n\t"
19630 LOCK_PREFIX " cmpxchg %2,%0\n\t"
19631 " jnz 1b\n\t"
19632@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
19633 long tmp;
19634 asm volatile("# beginning down_write\n\t"
19635 LOCK_PREFIX " xadd %1,(%2)\n\t"
19636+
19637+#ifdef CONFIG_PAX_REFCOUNT
19638+ "jno 0f\n"
19639+ "mov %1,(%2)\n"
19640+ "int $4\n0:\n"
19641+ _ASM_EXTABLE(0b, 0b)
19642+#endif
19643+
19644 /* adds 0xffff0001, returns the old value */
19645 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
19646 /* was the active mask 0 before? */
19647@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
19648 long tmp;
19649 asm volatile("# beginning __up_read\n\t"
19650 LOCK_PREFIX " xadd %1,(%2)\n\t"
19651+
19652+#ifdef CONFIG_PAX_REFCOUNT
19653+ "jno 0f\n"
19654+ "mov %1,(%2)\n"
19655+ "int $4\n0:\n"
19656+ _ASM_EXTABLE(0b, 0b)
19657+#endif
19658+
19659 /* subtracts 1, returns the old value */
19660 " jns 1f\n\t"
19661 " call call_rwsem_wake\n" /* expects old value in %edx */
19662@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
19663 long tmp;
19664 asm volatile("# beginning __up_write\n\t"
19665 LOCK_PREFIX " xadd %1,(%2)\n\t"
19666+
19667+#ifdef CONFIG_PAX_REFCOUNT
19668+ "jno 0f\n"
19669+ "mov %1,(%2)\n"
19670+ "int $4\n0:\n"
19671+ _ASM_EXTABLE(0b, 0b)
19672+#endif
19673+
19674 /* subtracts 0xffff0001, returns the old value */
19675 " jns 1f\n\t"
19676 " call call_rwsem_wake\n" /* expects old value in %edx */
19677@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19678 {
19679 asm volatile("# beginning __downgrade_write\n\t"
19680 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
19681+
19682+#ifdef CONFIG_PAX_REFCOUNT
19683+ "jno 0f\n"
19684+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
19685+ "int $4\n0:\n"
19686+ _ASM_EXTABLE(0b, 0b)
19687+#endif
19688+
19689 /*
19690 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
19691 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
19692@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
19693 */
19694 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19695 {
19696- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
19697+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
19698+
19699+#ifdef CONFIG_PAX_REFCOUNT
19700+ "jno 0f\n"
19701+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
19702+ "int $4\n0:\n"
19703+ _ASM_EXTABLE(0b, 0b)
19704+#endif
19705+
19706 : "+m" (sem->count)
19707 : "er" (delta));
19708 }
19709@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
19710 */
19711 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
19712 {
19713- return delta + xadd(&sem->count, delta);
19714+ return delta + xadd_check_overflow(&sem->count, delta);
19715 }
19716
19717 #endif /* __KERNEL__ */
19718diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
19719index 6f1c3a8..7744f19 100644
19720--- a/arch/x86/include/asm/segment.h
19721+++ b/arch/x86/include/asm/segment.h
19722@@ -64,10 +64,15 @@
19723 * 26 - ESPFIX small SS
19724 * 27 - per-cpu [ offset to per-cpu data area ]
19725 * 28 - stack_canary-20 [ for stack protector ]
19726- * 29 - unused
19727- * 30 - unused
19728+ * 29 - PCI BIOS CS
19729+ * 30 - PCI BIOS DS
19730 * 31 - TSS for double fault handler
19731 */
19732+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
19733+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
19734+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
19735+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
19736+
19737 #define GDT_ENTRY_TLS_MIN 6
19738 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
19739
19740@@ -79,6 +84,8 @@
19741
19742 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
19743
19744+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
19745+
19746 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
19747
19748 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
19749@@ -104,6 +111,12 @@
19750 #define __KERNEL_STACK_CANARY 0
19751 #endif
19752
19753+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
19754+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
19755+
19756+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
19757+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
19758+
19759 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
19760
19761 /*
19762@@ -141,7 +154,7 @@
19763 */
19764
19765 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
19766-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
19767+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
19768
19769
19770 #else
19771@@ -165,6 +178,8 @@
19772 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
19773 #define __USER32_DS __USER_DS
19774
19775+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
19776+
19777 #define GDT_ENTRY_TSS 8 /* needs two entries */
19778 #define GDT_ENTRY_LDT 10 /* needs two entries */
19779 #define GDT_ENTRY_TLS_MIN 12
19780@@ -173,6 +188,8 @@
19781 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
19782 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
19783
19784+#define GDT_ENTRY_UDEREF_KERNEL_DS 16
19785+
19786 /* TLS indexes for 64bit - hardcoded in arch_prctl */
19787 #define FS_TLS 0
19788 #define GS_TLS 1
19789@@ -180,12 +197,14 @@
19790 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
19791 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
19792
19793-#define GDT_ENTRIES 16
19794+#define GDT_ENTRIES 17
19795
19796 #endif
19797
19798 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
19799+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
19800 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
19801+#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
19802 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
19803 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
19804 #ifndef CONFIG_PARAVIRT
19805@@ -268,7 +287,7 @@ static inline unsigned long get_limit(unsigned long segment)
19806 {
19807 unsigned long __limit;
19808 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
19809- return __limit + 1;
19810+ return __limit;
19811 }
19812
19813 #endif /* !__ASSEMBLY__ */
19814diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
19815index 8d3120f..352b440 100644
19816--- a/arch/x86/include/asm/smap.h
19817+++ b/arch/x86/include/asm/smap.h
19818@@ -25,11 +25,40 @@
19819
19820 #include <asm/alternative-asm.h>
19821
19822+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19823+#define ASM_PAX_OPEN_USERLAND \
19824+ 661: jmp 663f; \
19825+ .pushsection .altinstr_replacement, "a" ; \
19826+ 662: pushq %rax; nop; \
19827+ .popsection ; \
19828+ .pushsection .altinstructions, "a" ; \
19829+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19830+ .popsection ; \
19831+ call __pax_open_userland; \
19832+ popq %rax; \
19833+ 663:
19834+
19835+#define ASM_PAX_CLOSE_USERLAND \
19836+ 661: jmp 663f; \
19837+ .pushsection .altinstr_replacement, "a" ; \
19838+ 662: pushq %rax; nop; \
19839+ .popsection; \
19840+ .pushsection .altinstructions, "a" ; \
19841+ altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
19842+ .popsection; \
19843+ call __pax_close_userland; \
19844+ popq %rax; \
19845+ 663:
19846+#else
19847+#define ASM_PAX_OPEN_USERLAND
19848+#define ASM_PAX_CLOSE_USERLAND
19849+#endif
19850+
19851 #ifdef CONFIG_X86_SMAP
19852
19853 #define ASM_CLAC \
19854 661: ASM_NOP3 ; \
19855- .pushsection .altinstr_replacement, "ax" ; \
19856+ .pushsection .altinstr_replacement, "a" ; \
19857 662: __ASM_CLAC ; \
19858 .popsection ; \
19859 .pushsection .altinstructions, "a" ; \
19860@@ -38,7 +67,7 @@
19861
19862 #define ASM_STAC \
19863 661: ASM_NOP3 ; \
19864- .pushsection .altinstr_replacement, "ax" ; \
19865+ .pushsection .altinstr_replacement, "a" ; \
19866 662: __ASM_STAC ; \
19867 .popsection ; \
19868 .pushsection .altinstructions, "a" ; \
19869@@ -56,6 +85,37 @@
19870
19871 #include <asm/alternative.h>
19872
19873+#define __HAVE_ARCH_PAX_OPEN_USERLAND
19874+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
19875+
19876+extern void __pax_open_userland(void);
19877+static __always_inline unsigned long pax_open_userland(void)
19878+{
19879+
19880+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19881+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
19882+ :
19883+ : [open] "i" (__pax_open_userland)
19884+ : "memory", "rax");
19885+#endif
19886+
19887+ return 0;
19888+}
19889+
19890+extern void __pax_close_userland(void);
19891+static __always_inline unsigned long pax_close_userland(void)
19892+{
19893+
19894+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19895+ asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
19896+ :
19897+ : [close] "i" (__pax_close_userland)
19898+ : "memory", "rax");
19899+#endif
19900+
19901+ return 0;
19902+}
19903+
19904 #ifdef CONFIG_X86_SMAP
19905
19906 static __always_inline void clac(void)
19907diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
19908index 8cd27e0..7f05ec8 100644
19909--- a/arch/x86/include/asm/smp.h
19910+++ b/arch/x86/include/asm/smp.h
19911@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
19912 /* cpus sharing the last level cache: */
19913 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
19914 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
19915-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
19916+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
19917
19918 static inline struct cpumask *cpu_sibling_mask(int cpu)
19919 {
19920@@ -78,7 +78,7 @@ struct smp_ops {
19921
19922 void (*send_call_func_ipi)(const struct cpumask *mask);
19923 void (*send_call_func_single_ipi)(int cpu);
19924-};
19925+} __no_const;
19926
19927 /* Globals due to paravirt */
19928 extern void set_cpu_sibling_map(int cpu);
19929@@ -190,14 +190,8 @@ extern unsigned disabled_cpus;
19930 extern int safe_smp_processor_id(void);
19931
19932 #elif defined(CONFIG_X86_64_SMP)
19933-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19934-
19935-#define stack_smp_processor_id() \
19936-({ \
19937- struct thread_info *ti; \
19938- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
19939- ti->cpu; \
19940-})
19941+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
19942+#define stack_smp_processor_id() raw_smp_processor_id()
19943 #define safe_smp_processor_id() smp_processor_id()
19944
19945 #endif
19946diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
19947index 54f1c80..39362a5 100644
19948--- a/arch/x86/include/asm/spinlock.h
19949+++ b/arch/x86/include/asm/spinlock.h
19950@@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
19951 static inline void arch_read_lock(arch_rwlock_t *rw)
19952 {
19953 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
19954+
19955+#ifdef CONFIG_PAX_REFCOUNT
19956+ "jno 0f\n"
19957+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
19958+ "int $4\n0:\n"
19959+ _ASM_EXTABLE(0b, 0b)
19960+#endif
19961+
19962 "jns 1f\n"
19963 "call __read_lock_failed\n\t"
19964 "1:\n"
19965@@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
19966 static inline void arch_write_lock(arch_rwlock_t *rw)
19967 {
19968 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
19969+
19970+#ifdef CONFIG_PAX_REFCOUNT
19971+ "jno 0f\n"
19972+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
19973+ "int $4\n0:\n"
19974+ _ASM_EXTABLE(0b, 0b)
19975+#endif
19976+
19977 "jz 1f\n"
19978 "call __write_lock_failed\n\t"
19979 "1:\n"
19980@@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
19981
19982 static inline void arch_read_unlock(arch_rwlock_t *rw)
19983 {
19984- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
19985+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
19986+
19987+#ifdef CONFIG_PAX_REFCOUNT
19988+ "jno 0f\n"
19989+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
19990+ "int $4\n0:\n"
19991+ _ASM_EXTABLE(0b, 0b)
19992+#endif
19993+
19994 :"+m" (rw->lock) : : "memory");
19995 }
19996
19997 static inline void arch_write_unlock(arch_rwlock_t *rw)
19998 {
19999- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
20000+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
20001+
20002+#ifdef CONFIG_PAX_REFCOUNT
20003+ "jno 0f\n"
20004+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
20005+ "int $4\n0:\n"
20006+ _ASM_EXTABLE(0b, 0b)
20007+#endif
20008+
20009 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
20010 }
20011 #else
20012diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
20013index 6a99859..03cb807 100644
20014--- a/arch/x86/include/asm/stackprotector.h
20015+++ b/arch/x86/include/asm/stackprotector.h
20016@@ -47,7 +47,7 @@
20017 * head_32 for boot CPU and setup_per_cpu_areas() for others.
20018 */
20019 #define GDT_STACK_CANARY_INIT \
20020- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
20021+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
20022
20023 /*
20024 * Initialize the stackprotector canary value.
20025@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
20026
20027 static inline void load_stack_canary_segment(void)
20028 {
20029-#ifdef CONFIG_X86_32
20030+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
20031 asm volatile ("mov %0, %%gs" : : "r" (0));
20032 #endif
20033 }
20034diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
20035index 70bbe39..4ae2bd4 100644
20036--- a/arch/x86/include/asm/stacktrace.h
20037+++ b/arch/x86/include/asm/stacktrace.h
20038@@ -11,28 +11,20 @@
20039
20040 extern int kstack_depth_to_print;
20041
20042-struct thread_info;
20043+struct task_struct;
20044 struct stacktrace_ops;
20045
20046-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
20047- unsigned long *stack,
20048- unsigned long bp,
20049- const struct stacktrace_ops *ops,
20050- void *data,
20051- unsigned long *end,
20052- int *graph);
20053+typedef unsigned long walk_stack_t(struct task_struct *task,
20054+ void *stack_start,
20055+ unsigned long *stack,
20056+ unsigned long bp,
20057+ const struct stacktrace_ops *ops,
20058+ void *data,
20059+ unsigned long *end,
20060+ int *graph);
20061
20062-extern unsigned long
20063-print_context_stack(struct thread_info *tinfo,
20064- unsigned long *stack, unsigned long bp,
20065- const struct stacktrace_ops *ops, void *data,
20066- unsigned long *end, int *graph);
20067-
20068-extern unsigned long
20069-print_context_stack_bp(struct thread_info *tinfo,
20070- unsigned long *stack, unsigned long bp,
20071- const struct stacktrace_ops *ops, void *data,
20072- unsigned long *end, int *graph);
20073+extern walk_stack_t print_context_stack;
20074+extern walk_stack_t print_context_stack_bp;
20075
20076 /* Generic stack tracer with callbacks */
20077
20078@@ -40,7 +32,7 @@ struct stacktrace_ops {
20079 void (*address)(void *data, unsigned long address, int reliable);
20080 /* On negative return stop dumping */
20081 int (*stack)(void *data, char *name);
20082- walk_stack_t walk_stack;
20083+ walk_stack_t *walk_stack;
20084 };
20085
20086 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
20087diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
20088index d7f3b3b..3cc39f1 100644
20089--- a/arch/x86/include/asm/switch_to.h
20090+++ b/arch/x86/include/asm/switch_to.h
20091@@ -108,7 +108,7 @@ do { \
20092 "call __switch_to\n\t" \
20093 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
20094 __switch_canary \
20095- "movq %P[thread_info](%%rsi),%%r8\n\t" \
20096+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
20097 "movq %%rax,%%rdi\n\t" \
20098 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
20099 "jnz ret_from_fork\n\t" \
20100@@ -119,7 +119,7 @@ do { \
20101 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
20102 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
20103 [_tif_fork] "i" (_TIF_FORK), \
20104- [thread_info] "i" (offsetof(struct task_struct, stack)), \
20105+ [thread_info] "m" (current_tinfo), \
20106 [current_task] "m" (current_task) \
20107 __switch_canary_iparam \
20108 : "memory", "cc" __EXTRA_CLOBBER)
20109diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
20110index 547e344..6be1175 100644
20111--- a/arch/x86/include/asm/thread_info.h
20112+++ b/arch/x86/include/asm/thread_info.h
20113@@ -24,7 +24,6 @@ struct exec_domain;
20114 #include <linux/atomic.h>
20115
20116 struct thread_info {
20117- struct task_struct *task; /* main task structure */
20118 struct exec_domain *exec_domain; /* execution domain */
20119 __u32 flags; /* low level flags */
20120 __u32 status; /* thread synchronous flags */
20121@@ -33,13 +32,13 @@ struct thread_info {
20122 mm_segment_t addr_limit;
20123 struct restart_block restart_block;
20124 void __user *sysenter_return;
20125+ unsigned long lowest_stack;
20126 unsigned int sig_on_uaccess_error:1;
20127 unsigned int uaccess_err:1; /* uaccess failed */
20128 };
20129
20130-#define INIT_THREAD_INFO(tsk) \
20131+#define INIT_THREAD_INFO \
20132 { \
20133- .task = &tsk, \
20134 .exec_domain = &default_exec_domain, \
20135 .flags = 0, \
20136 .cpu = 0, \
20137@@ -50,7 +49,7 @@ struct thread_info {
20138 }, \
20139 }
20140
20141-#define init_thread_info (init_thread_union.thread_info)
20142+#define init_thread_info (init_thread_union.stack)
20143 #define init_stack (init_thread_union.stack)
20144
20145 #else /* !__ASSEMBLY__ */
20146@@ -91,6 +90,7 @@ struct thread_info {
20147 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
20148 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
20149 #define TIF_X32 30 /* 32-bit native x86-64 binary */
20150+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
20151
20152 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
20153 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
20154@@ -115,17 +115,18 @@ struct thread_info {
20155 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
20156 #define _TIF_ADDR32 (1 << TIF_ADDR32)
20157 #define _TIF_X32 (1 << TIF_X32)
20158+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
20159
20160 /* work to do in syscall_trace_enter() */
20161 #define _TIF_WORK_SYSCALL_ENTRY \
20162 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
20163 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
20164- _TIF_NOHZ)
20165+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
20166
20167 /* work to do in syscall_trace_leave() */
20168 #define _TIF_WORK_SYSCALL_EXIT \
20169 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
20170- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
20171+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
20172
20173 /* work to do on interrupt/exception return */
20174 #define _TIF_WORK_MASK \
20175@@ -136,7 +137,7 @@ struct thread_info {
20176 /* work to do on any return to user space */
20177 #define _TIF_ALLWORK_MASK \
20178 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
20179- _TIF_NOHZ)
20180+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
20181
20182 /* Only used for 64 bit */
20183 #define _TIF_DO_NOTIFY_MASK \
20184@@ -151,7 +152,6 @@ struct thread_info {
20185 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
20186
20187 #define STACK_WARN (THREAD_SIZE/8)
20188-#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
20189
20190 /*
20191 * macros/functions for gaining access to the thread information structure
20192@@ -162,26 +162,18 @@ struct thread_info {
20193
20194 DECLARE_PER_CPU(unsigned long, kernel_stack);
20195
20196+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
20197+
20198 static inline struct thread_info *current_thread_info(void)
20199 {
20200- struct thread_info *ti;
20201- ti = (void *)(this_cpu_read_stable(kernel_stack) +
20202- KERNEL_STACK_OFFSET - THREAD_SIZE);
20203- return ti;
20204+ return this_cpu_read_stable(current_tinfo);
20205 }
20206
20207 #else /* !__ASSEMBLY__ */
20208
20209 /* how to get the thread information struct from ASM */
20210 #define GET_THREAD_INFO(reg) \
20211- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
20212- _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
20213-
20214-/*
20215- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
20216- * a certain register (to be used in assembler memory operands).
20217- */
20218-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
20219+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
20220
20221 #endif
20222
20223@@ -237,5 +229,12 @@ static inline bool is_ia32_task(void)
20224 extern void arch_task_cache_init(void);
20225 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
20226 extern void arch_release_task_struct(struct task_struct *tsk);
20227+
20228+#define __HAVE_THREAD_FUNCTIONS
20229+#define task_thread_info(task) (&(task)->tinfo)
20230+#define task_stack_page(task) ((task)->stack)
20231+#define setup_thread_stack(p, org) do {} while (0)
20232+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
20233+
20234 #endif
20235 #endif /* _ASM_X86_THREAD_INFO_H */
20236diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
20237index 04905bf..1178cdf 100644
20238--- a/arch/x86/include/asm/tlbflush.h
20239+++ b/arch/x86/include/asm/tlbflush.h
20240@@ -17,18 +17,44 @@
20241
20242 static inline void __native_flush_tlb(void)
20243 {
20244+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20245+ u64 descriptor[2];
20246+
20247+ descriptor[0] = PCID_KERNEL;
20248+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_NONGLOBAL) : "memory");
20249+ return;
20250+ }
20251+
20252+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20253+ if (static_cpu_has(X86_FEATURE_PCID)) {
20254+ unsigned int cpu = raw_get_cpu();
20255+
20256+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
20257+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
20258+ raw_put_cpu_no_resched();
20259+ return;
20260+ }
20261+#endif
20262+
20263 native_write_cr3(native_read_cr3());
20264 }
20265
20266 static inline void __native_flush_tlb_global_irq_disabled(void)
20267 {
20268- unsigned long cr4;
20269+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20270+ u64 descriptor[2];
20271
20272- cr4 = native_read_cr4();
20273- /* clear PGE */
20274- native_write_cr4(cr4 & ~X86_CR4_PGE);
20275- /* write old PGE again and flush TLBs */
20276- native_write_cr4(cr4);
20277+ descriptor[0] = PCID_KERNEL;
20278+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
20279+ } else {
20280+ unsigned long cr4;
20281+
20282+ cr4 = native_read_cr4();
20283+ /* clear PGE */
20284+ native_write_cr4(cr4 & ~X86_CR4_PGE);
20285+ /* write old PGE again and flush TLBs */
20286+ native_write_cr4(cr4);
20287+ }
20288 }
20289
20290 static inline void __native_flush_tlb_global(void)
20291@@ -49,6 +75,41 @@ static inline void __native_flush_tlb_global(void)
20292
20293 static inline void __native_flush_tlb_single(unsigned long addr)
20294 {
20295+ if (static_cpu_has(X86_FEATURE_INVPCID)) {
20296+ u64 descriptor[2];
20297+
20298+ descriptor[0] = PCID_KERNEL;
20299+ descriptor[1] = addr;
20300+
20301+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20302+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
20303+ if (addr < TASK_SIZE_MAX)
20304+ descriptor[1] += pax_user_shadow_base;
20305+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
20306+ }
20307+
20308+ descriptor[0] = PCID_USER;
20309+ descriptor[1] = addr;
20310+#endif
20311+
20312+ asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
20313+ return;
20314+ }
20315+
20316+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20317+ if (static_cpu_has(X86_FEATURE_PCID)) {
20318+ unsigned int cpu = raw_get_cpu();
20319+
20320+ native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
20321+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
20322+ native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
20323+ raw_put_cpu_no_resched();
20324+
20325+ if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
20326+ addr += pax_user_shadow_base;
20327+ }
20328+#endif
20329+
20330 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
20331 }
20332
20333diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
20334index 0d592e0..526f797 100644
20335--- a/arch/x86/include/asm/uaccess.h
20336+++ b/arch/x86/include/asm/uaccess.h
20337@@ -7,6 +7,7 @@
20338 #include <linux/compiler.h>
20339 #include <linux/thread_info.h>
20340 #include <linux/string.h>
20341+#include <linux/spinlock.h>
20342 #include <asm/asm.h>
20343 #include <asm/page.h>
20344 #include <asm/smap.h>
20345@@ -29,7 +30,12 @@
20346
20347 #define get_ds() (KERNEL_DS)
20348 #define get_fs() (current_thread_info()->addr_limit)
20349+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20350+void __set_fs(mm_segment_t x);
20351+void set_fs(mm_segment_t x);
20352+#else
20353 #define set_fs(x) (current_thread_info()->addr_limit = (x))
20354+#endif
20355
20356 #define segment_eq(a, b) ((a).seg == (b).seg)
20357
20358@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
20359 * checks that the pointer is in the user space range - after calling
20360 * this function, memory access functions may still return -EFAULT.
20361 */
20362-#define access_ok(type, addr, size) \
20363- likely(!__range_not_ok(addr, size, user_addr_max()))
20364+extern int _cond_resched(void);
20365+#define access_ok_noprefault(type, addr, size) (likely(!__range_not_ok(addr, size, user_addr_max())))
20366+#define access_ok(type, addr, size) \
20367+({ \
20368+ unsigned long __size = size; \
20369+ unsigned long __addr = (unsigned long)addr; \
20370+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
20371+ if (__ret_ao && __size) { \
20372+ unsigned long __addr_ao = __addr & PAGE_MASK; \
20373+ unsigned long __end_ao = __addr + __size - 1; \
20374+ if (unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
20375+ while (__addr_ao <= __end_ao) { \
20376+ char __c_ao; \
20377+ __addr_ao += PAGE_SIZE; \
20378+ if (__size > PAGE_SIZE) \
20379+ _cond_resched(); \
20380+ if (__get_user(__c_ao, (char __user *)__addr)) \
20381+ break; \
20382+ if (type != VERIFY_WRITE) { \
20383+ __addr = __addr_ao; \
20384+ continue; \
20385+ } \
20386+ if (__put_user(__c_ao, (char __user *)__addr)) \
20387+ break; \
20388+ __addr = __addr_ao; \
20389+ } \
20390+ } \
20391+ } \
20392+ __ret_ao; \
20393+})
20394
20395 /*
20396 * The exception table consists of pairs of addresses relative to the
20397@@ -176,10 +210,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20398 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
20399 __chk_user_ptr(ptr); \
20400 might_fault(); \
20401+ pax_open_userland(); \
20402 asm volatile("call __get_user_%P3" \
20403 : "=a" (__ret_gu), "=r" (__val_gu) \
20404 : "0" (ptr), "i" (sizeof(*(ptr)))); \
20405 (x) = (__typeof__(*(ptr))) __val_gu; \
20406+ pax_close_userland(); \
20407 __ret_gu; \
20408 })
20409
20410@@ -187,13 +223,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20411 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
20412 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
20413
20414-
20415+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20416+#define __copyuser_seg "gs;"
20417+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
20418+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
20419+#else
20420+#define __copyuser_seg
20421+#define __COPYUSER_SET_ES
20422+#define __COPYUSER_RESTORE_ES
20423+#endif
20424
20425 #ifdef CONFIG_X86_32
20426 #define __put_user_asm_u64(x, addr, err, errret) \
20427 asm volatile(ASM_STAC "\n" \
20428- "1: movl %%eax,0(%2)\n" \
20429- "2: movl %%edx,4(%2)\n" \
20430+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
20431+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
20432 "3: " ASM_CLAC "\n" \
20433 ".section .fixup,\"ax\"\n" \
20434 "4: movl %3,%0\n" \
20435@@ -206,8 +250,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
20436
20437 #define __put_user_asm_ex_u64(x, addr) \
20438 asm volatile(ASM_STAC "\n" \
20439- "1: movl %%eax,0(%1)\n" \
20440- "2: movl %%edx,4(%1)\n" \
20441+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
20442+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
20443 "3: " ASM_CLAC "\n" \
20444 _ASM_EXTABLE_EX(1b, 2b) \
20445 _ASM_EXTABLE_EX(2b, 3b) \
20446@@ -257,7 +301,8 @@ extern void __put_user_8(void);
20447 __typeof__(*(ptr)) __pu_val; \
20448 __chk_user_ptr(ptr); \
20449 might_fault(); \
20450- __pu_val = x; \
20451+ __pu_val = (x); \
20452+ pax_open_userland(); \
20453 switch (sizeof(*(ptr))) { \
20454 case 1: \
20455 __put_user_x(1, __pu_val, ptr, __ret_pu); \
20456@@ -275,6 +320,7 @@ extern void __put_user_8(void);
20457 __put_user_x(X, __pu_val, ptr, __ret_pu); \
20458 break; \
20459 } \
20460+ pax_close_userland(); \
20461 __ret_pu; \
20462 })
20463
20464@@ -355,8 +401,10 @@ do { \
20465 } while (0)
20466
20467 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
20468+do { \
20469+ pax_open_userland(); \
20470 asm volatile(ASM_STAC "\n" \
20471- "1: mov"itype" %2,%"rtype"1\n" \
20472+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
20473 "2: " ASM_CLAC "\n" \
20474 ".section .fixup,\"ax\"\n" \
20475 "3: mov %3,%0\n" \
20476@@ -364,8 +412,10 @@ do { \
20477 " jmp 2b\n" \
20478 ".previous\n" \
20479 _ASM_EXTABLE(1b, 3b) \
20480- : "=r" (err), ltype(x) \
20481- : "m" (__m(addr)), "i" (errret), "0" (err))
20482+ : "=r" (err), ltype (x) \
20483+ : "m" (__m(addr)), "i" (errret), "0" (err)); \
20484+ pax_close_userland(); \
20485+} while (0)
20486
20487 #define __get_user_size_ex(x, ptr, size) \
20488 do { \
20489@@ -389,7 +439,7 @@ do { \
20490 } while (0)
20491
20492 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
20493- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
20494+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
20495 "2:\n" \
20496 _ASM_EXTABLE_EX(1b, 2b) \
20497 : ltype(x) : "m" (__m(addr)))
20498@@ -406,13 +456,24 @@ do { \
20499 int __gu_err; \
20500 unsigned long __gu_val; \
20501 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
20502- (x) = (__force __typeof__(*(ptr)))__gu_val; \
20503+ (x) = (__typeof__(*(ptr)))__gu_val; \
20504 __gu_err; \
20505 })
20506
20507 /* FIXME: this hack is definitely wrong -AK */
20508 struct __large_struct { unsigned long buf[100]; };
20509-#define __m(x) (*(struct __large_struct __user *)(x))
20510+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20511+#define ____m(x) \
20512+({ \
20513+ unsigned long ____x = (unsigned long)(x); \
20514+ if (____x < pax_user_shadow_base) \
20515+ ____x += pax_user_shadow_base; \
20516+ (typeof(x))____x; \
20517+})
20518+#else
20519+#define ____m(x) (x)
20520+#endif
20521+#define __m(x) (*(struct __large_struct __user *)____m(x))
20522
20523 /*
20524 * Tell gcc we read from memory instead of writing: this is because
20525@@ -420,8 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
20526 * aliasing issues.
20527 */
20528 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
20529+do { \
20530+ pax_open_userland(); \
20531 asm volatile(ASM_STAC "\n" \
20532- "1: mov"itype" %"rtype"1,%2\n" \
20533+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
20534 "2: " ASM_CLAC "\n" \
20535 ".section .fixup,\"ax\"\n" \
20536 "3: mov %3,%0\n" \
20537@@ -429,10 +492,12 @@ struct __large_struct { unsigned long buf[100]; };
20538 ".previous\n" \
20539 _ASM_EXTABLE(1b, 3b) \
20540 : "=r"(err) \
20541- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
20542+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
20543+ pax_close_userland(); \
20544+} while (0)
20545
20546 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
20547- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
20548+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
20549 "2:\n" \
20550 _ASM_EXTABLE_EX(1b, 2b) \
20551 : : ltype(x), "m" (__m(addr)))
20552@@ -442,11 +507,13 @@ struct __large_struct { unsigned long buf[100]; };
20553 */
20554 #define uaccess_try do { \
20555 current_thread_info()->uaccess_err = 0; \
20556+ pax_open_userland(); \
20557 stac(); \
20558 barrier();
20559
20560 #define uaccess_catch(err) \
20561 clac(); \
20562+ pax_close_userland(); \
20563 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
20564 } while (0)
20565
20566@@ -471,8 +538,12 @@ struct __large_struct { unsigned long buf[100]; };
20567 * On error, the variable @x is set to zero.
20568 */
20569
20570+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20571+#define __get_user(x, ptr) get_user((x), (ptr))
20572+#else
20573 #define __get_user(x, ptr) \
20574 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
20575+#endif
20576
20577 /**
20578 * __put_user: - Write a simple value into user space, with less checking.
20579@@ -494,8 +565,12 @@ struct __large_struct { unsigned long buf[100]; };
20580 * Returns zero on success, or -EFAULT on error.
20581 */
20582
20583+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20584+#define __put_user(x, ptr) put_user((x), (ptr))
20585+#else
20586 #define __put_user(x, ptr) \
20587 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
20588+#endif
20589
20590 #define __get_user_unaligned __get_user
20591 #define __put_user_unaligned __put_user
20592@@ -513,7 +588,7 @@ struct __large_struct { unsigned long buf[100]; };
20593 #define get_user_ex(x, ptr) do { \
20594 unsigned long __gue_val; \
20595 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
20596- (x) = (__force __typeof__(*(ptr)))__gue_val; \
20597+ (x) = (__typeof__(*(ptr)))__gue_val; \
20598 } while (0)
20599
20600 #define put_user_try uaccess_try
20601@@ -542,18 +617,19 @@ extern void __cmpxchg_wrong_size(void)
20602 __typeof__(ptr) __uval = (uval); \
20603 __typeof__(*(ptr)) __old = (old); \
20604 __typeof__(*(ptr)) __new = (new); \
20605+ pax_open_userland(); \
20606 switch (size) { \
20607 case 1: \
20608 { \
20609 asm volatile("\t" ASM_STAC "\n" \
20610- "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
20611+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgb %4, %2\n"\
20612 "2:\t" ASM_CLAC "\n" \
20613 "\t.section .fixup, \"ax\"\n" \
20614 "3:\tmov %3, %0\n" \
20615 "\tjmp 2b\n" \
20616 "\t.previous\n" \
20617 _ASM_EXTABLE(1b, 3b) \
20618- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20619+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20620 : "i" (-EFAULT), "q" (__new), "1" (__old) \
20621 : "memory" \
20622 ); \
20623@@ -562,14 +638,14 @@ extern void __cmpxchg_wrong_size(void)
20624 case 2: \
20625 { \
20626 asm volatile("\t" ASM_STAC "\n" \
20627- "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
20628+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgw %4, %2\n"\
20629 "2:\t" ASM_CLAC "\n" \
20630 "\t.section .fixup, \"ax\"\n" \
20631 "3:\tmov %3, %0\n" \
20632 "\tjmp 2b\n" \
20633 "\t.previous\n" \
20634 _ASM_EXTABLE(1b, 3b) \
20635- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20636+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20637 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20638 : "memory" \
20639 ); \
20640@@ -578,14 +654,14 @@ extern void __cmpxchg_wrong_size(void)
20641 case 4: \
20642 { \
20643 asm volatile("\t" ASM_STAC "\n" \
20644- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
20645+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"\
20646 "2:\t" ASM_CLAC "\n" \
20647 "\t.section .fixup, \"ax\"\n" \
20648 "3:\tmov %3, %0\n" \
20649 "\tjmp 2b\n" \
20650 "\t.previous\n" \
20651 _ASM_EXTABLE(1b, 3b) \
20652- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20653+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20654 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20655 : "memory" \
20656 ); \
20657@@ -597,14 +673,14 @@ extern void __cmpxchg_wrong_size(void)
20658 __cmpxchg_wrong_size(); \
20659 \
20660 asm volatile("\t" ASM_STAC "\n" \
20661- "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
20662+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgq %4, %2\n"\
20663 "2:\t" ASM_CLAC "\n" \
20664 "\t.section .fixup, \"ax\"\n" \
20665 "3:\tmov %3, %0\n" \
20666 "\tjmp 2b\n" \
20667 "\t.previous\n" \
20668 _ASM_EXTABLE(1b, 3b) \
20669- : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
20670+ : "+r" (__ret), "=a" (__old), "+m" (*____m(ptr))\
20671 : "i" (-EFAULT), "r" (__new), "1" (__old) \
20672 : "memory" \
20673 ); \
20674@@ -613,6 +689,7 @@ extern void __cmpxchg_wrong_size(void)
20675 default: \
20676 __cmpxchg_wrong_size(); \
20677 } \
20678+ pax_close_userland(); \
20679 *__uval = __old; \
20680 __ret; \
20681 })
20682@@ -636,17 +713,6 @@ extern struct movsl_mask {
20683
20684 #define ARCH_HAS_NOCACHE_UACCESS 1
20685
20686-#ifdef CONFIG_X86_32
20687-# include <asm/uaccess_32.h>
20688-#else
20689-# include <asm/uaccess_64.h>
20690-#endif
20691-
20692-unsigned long __must_check _copy_from_user(void *to, const void __user *from,
20693- unsigned n);
20694-unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20695- unsigned n);
20696-
20697 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
20698 # define copy_user_diag __compiletime_error
20699 #else
20700@@ -656,7 +722,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
20701 extern void copy_user_diag("copy_from_user() buffer size is too small")
20702 copy_from_user_overflow(void);
20703 extern void copy_user_diag("copy_to_user() buffer size is too small")
20704-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20705+copy_to_user_overflow(void);
20706
20707 #undef copy_user_diag
20708
20709@@ -669,7 +735,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
20710
20711 extern void
20712 __compiletime_warning("copy_to_user() buffer size is not provably correct")
20713-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
20714+__copy_to_user_overflow(void) __asm__("copy_to_user_overflow");
20715 #define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
20716
20717 #else
20718@@ -684,10 +750,16 @@ __copy_from_user_overflow(int size, unsigned long count)
20719
20720 #endif
20721
20722+#ifdef CONFIG_X86_32
20723+# include <asm/uaccess_32.h>
20724+#else
20725+# include <asm/uaccess_64.h>
20726+#endif
20727+
20728 static inline unsigned long __must_check
20729 copy_from_user(void *to, const void __user *from, unsigned long n)
20730 {
20731- int sz = __compiletime_object_size(to);
20732+ size_t sz = __compiletime_object_size(to);
20733
20734 might_fault();
20735
20736@@ -709,12 +781,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20737 * case, and do only runtime checking for non-constant sizes.
20738 */
20739
20740- if (likely(sz < 0 || sz >= n))
20741- n = _copy_from_user(to, from, n);
20742- else if(__builtin_constant_p(n))
20743- copy_from_user_overflow();
20744- else
20745- __copy_from_user_overflow(sz, n);
20746+ if (likely(sz != (size_t)-1 && sz < n)) {
20747+ if(__builtin_constant_p(n))
20748+ copy_from_user_overflow();
20749+ else
20750+ __copy_from_user_overflow(sz, n);
20751+ } else if (access_ok(VERIFY_READ, from, n))
20752+ n = __copy_from_user(to, from, n);
20753+ else if ((long)n > 0)
20754+ memset(to, 0, n);
20755
20756 return n;
20757 }
20758@@ -722,17 +797,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
20759 static inline unsigned long __must_check
20760 copy_to_user(void __user *to, const void *from, unsigned long n)
20761 {
20762- int sz = __compiletime_object_size(from);
20763+ size_t sz = __compiletime_object_size(from);
20764
20765 might_fault();
20766
20767 /* See the comment in copy_from_user() above. */
20768- if (likely(sz < 0 || sz >= n))
20769- n = _copy_to_user(to, from, n);
20770- else if(__builtin_constant_p(n))
20771- copy_to_user_overflow();
20772- else
20773- __copy_to_user_overflow(sz, n);
20774+ if (likely(sz != (size_t)-1 && sz < n)) {
20775+ if(__builtin_constant_p(n))
20776+ copy_to_user_overflow();
20777+ else
20778+ __copy_to_user_overflow(sz, n);
20779+ } else if (access_ok(VERIFY_WRITE, to, n))
20780+ n = __copy_to_user(to, from, n);
20781
20782 return n;
20783 }
20784diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
20785index 3c03a5d..1071638 100644
20786--- a/arch/x86/include/asm/uaccess_32.h
20787+++ b/arch/x86/include/asm/uaccess_32.h
20788@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
20789 static __always_inline unsigned long __must_check
20790 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
20791 {
20792+ if ((long)n < 0)
20793+ return n;
20794+
20795+ check_object_size(from, n, true);
20796+
20797 if (__builtin_constant_p(n)) {
20798 unsigned long ret;
20799
20800@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
20801 __copy_to_user(void __user *to, const void *from, unsigned long n)
20802 {
20803 might_fault();
20804+
20805 return __copy_to_user_inatomic(to, from, n);
20806 }
20807
20808 static __always_inline unsigned long
20809 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
20810 {
20811+ if ((long)n < 0)
20812+ return n;
20813+
20814 /* Avoid zeroing the tail if the copy fails..
20815 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
20816 * but as the zeroing behaviour is only significant when n is not
20817@@ -137,6 +146,12 @@ static __always_inline unsigned long
20818 __copy_from_user(void *to, const void __user *from, unsigned long n)
20819 {
20820 might_fault();
20821+
20822+ if ((long)n < 0)
20823+ return n;
20824+
20825+ check_object_size(to, n, false);
20826+
20827 if (__builtin_constant_p(n)) {
20828 unsigned long ret;
20829
20830@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
20831 const void __user *from, unsigned long n)
20832 {
20833 might_fault();
20834+
20835+ if ((long)n < 0)
20836+ return n;
20837+
20838 if (__builtin_constant_p(n)) {
20839 unsigned long ret;
20840
20841@@ -181,7 +200,10 @@ static __always_inline unsigned long
20842 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
20843 unsigned long n)
20844 {
20845- return __copy_from_user_ll_nocache_nozero(to, from, n);
20846+ if ((long)n < 0)
20847+ return n;
20848+
20849+ return __copy_from_user_ll_nocache_nozero(to, from, n);
20850 }
20851
20852 #endif /* _ASM_X86_UACCESS_32_H */
20853diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
20854index 12a26b9..206c200 100644
20855--- a/arch/x86/include/asm/uaccess_64.h
20856+++ b/arch/x86/include/asm/uaccess_64.h
20857@@ -10,6 +10,9 @@
20858 #include <asm/alternative.h>
20859 #include <asm/cpufeature.h>
20860 #include <asm/page.h>
20861+#include <asm/pgtable.h>
20862+
20863+#define set_fs(x) (current_thread_info()->addr_limit = (x))
20864
20865 /*
20866 * Copy To/From Userspace
20867@@ -17,14 +20,14 @@
20868
20869 /* Handles exceptions in both to and from, but doesn't do access_ok */
20870 __must_check unsigned long
20871-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
20872+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
20873 __must_check unsigned long
20874-copy_user_generic_string(void *to, const void *from, unsigned len);
20875+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
20876 __must_check unsigned long
20877-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
20878+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
20879
20880 static __always_inline __must_check unsigned long
20881-copy_user_generic(void *to, const void *from, unsigned len)
20882+copy_user_generic(void *to, const void *from, unsigned long len)
20883 {
20884 unsigned ret;
20885
20886@@ -46,121 +49,170 @@ copy_user_generic(void *to, const void *from, unsigned len)
20887 }
20888
20889 __must_check unsigned long
20890-copy_in_user(void __user *to, const void __user *from, unsigned len);
20891+copy_in_user(void __user *to, const void __user *from, unsigned long len);
20892
20893 static __always_inline __must_check
20894-int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
20895+unsigned long __copy_from_user_nocheck(void *dst, const void __user *src, unsigned long size)
20896 {
20897- int ret = 0;
20898+ size_t sz = __compiletime_object_size(dst);
20899+ unsigned ret = 0;
20900+
20901+ if (size > INT_MAX)
20902+ return size;
20903+
20904+ check_object_size(dst, size, false);
20905+
20906+#ifdef CONFIG_PAX_MEMORY_UDEREF
20907+ if (!access_ok_noprefault(VERIFY_READ, src, size))
20908+ return size;
20909+#endif
20910+
20911+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20912+ if(__builtin_constant_p(size))
20913+ copy_from_user_overflow();
20914+ else
20915+ __copy_from_user_overflow(sz, size);
20916+ return size;
20917+ }
20918
20919 if (!__builtin_constant_p(size))
20920- return copy_user_generic(dst, (__force void *)src, size);
20921+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20922 switch (size) {
20923- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
20924+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
20925 ret, "b", "b", "=q", 1);
20926 return ret;
20927- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
20928+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
20929 ret, "w", "w", "=r", 2);
20930 return ret;
20931- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
20932+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
20933 ret, "l", "k", "=r", 4);
20934 return ret;
20935- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
20936+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20937 ret, "q", "", "=r", 8);
20938 return ret;
20939 case 10:
20940- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20941+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20942 ret, "q", "", "=r", 10);
20943 if (unlikely(ret))
20944 return ret;
20945 __get_user_asm(*(u16 *)(8 + (char *)dst),
20946- (u16 __user *)(8 + (char __user *)src),
20947+ (const u16 __user *)(8 + (const char __user *)src),
20948 ret, "w", "w", "=r", 2);
20949 return ret;
20950 case 16:
20951- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
20952+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
20953 ret, "q", "", "=r", 16);
20954 if (unlikely(ret))
20955 return ret;
20956 __get_user_asm(*(u64 *)(8 + (char *)dst),
20957- (u64 __user *)(8 + (char __user *)src),
20958+ (const u64 __user *)(8 + (const char __user *)src),
20959 ret, "q", "", "=r", 8);
20960 return ret;
20961 default:
20962- return copy_user_generic(dst, (__force void *)src, size);
20963+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
20964 }
20965 }
20966
20967 static __always_inline __must_check
20968-int __copy_from_user(void *dst, const void __user *src, unsigned size)
20969+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
20970 {
20971 might_fault();
20972 return __copy_from_user_nocheck(dst, src, size);
20973 }
20974
20975 static __always_inline __must_check
20976-int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
20977+unsigned long __copy_to_user_nocheck(void __user *dst, const void *src, unsigned long size)
20978 {
20979- int ret = 0;
20980+ size_t sz = __compiletime_object_size(src);
20981+ unsigned ret = 0;
20982+
20983+ if (size > INT_MAX)
20984+ return size;
20985+
20986+ check_object_size(src, size, true);
20987+
20988+#ifdef CONFIG_PAX_MEMORY_UDEREF
20989+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
20990+ return size;
20991+#endif
20992+
20993+ if (unlikely(sz != (size_t)-1 && sz < size)) {
20994+ if(__builtin_constant_p(size))
20995+ copy_to_user_overflow();
20996+ else
20997+ __copy_to_user_overflow(sz, size);
20998+ return size;
20999+ }
21000
21001 if (!__builtin_constant_p(size))
21002- return copy_user_generic((__force void *)dst, src, size);
21003+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
21004 switch (size) {
21005- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
21006+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
21007 ret, "b", "b", "iq", 1);
21008 return ret;
21009- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
21010+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
21011 ret, "w", "w", "ir", 2);
21012 return ret;
21013- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
21014+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
21015 ret, "l", "k", "ir", 4);
21016 return ret;
21017- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
21018+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21019 ret, "q", "", "er", 8);
21020 return ret;
21021 case 10:
21022- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
21023+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21024 ret, "q", "", "er", 10);
21025 if (unlikely(ret))
21026 return ret;
21027 asm("":::"memory");
21028- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
21029+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
21030 ret, "w", "w", "ir", 2);
21031 return ret;
21032 case 16:
21033- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
21034+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
21035 ret, "q", "", "er", 16);
21036 if (unlikely(ret))
21037 return ret;
21038 asm("":::"memory");
21039- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
21040+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
21041 ret, "q", "", "er", 8);
21042 return ret;
21043 default:
21044- return copy_user_generic((__force void *)dst, src, size);
21045+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
21046 }
21047 }
21048
21049 static __always_inline __must_check
21050-int __copy_to_user(void __user *dst, const void *src, unsigned size)
21051+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
21052 {
21053 might_fault();
21054 return __copy_to_user_nocheck(dst, src, size);
21055 }
21056
21057 static __always_inline __must_check
21058-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21059+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21060 {
21061- int ret = 0;
21062+ unsigned ret = 0;
21063
21064 might_fault();
21065+
21066+ if (size > INT_MAX)
21067+ return size;
21068+
21069+#ifdef CONFIG_PAX_MEMORY_UDEREF
21070+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21071+ return size;
21072+ if (!access_ok_noprefault(VERIFY_WRITE, dst, size))
21073+ return size;
21074+#endif
21075+
21076 if (!__builtin_constant_p(size))
21077- return copy_user_generic((__force void *)dst,
21078- (__force void *)src, size);
21079+ return copy_user_generic((__force_kernel void *)____m(dst),
21080+ (__force_kernel const void *)____m(src), size);
21081 switch (size) {
21082 case 1: {
21083 u8 tmp;
21084- __get_user_asm(tmp, (u8 __user *)src,
21085+ __get_user_asm(tmp, (const u8 __user *)src,
21086 ret, "b", "b", "=q", 1);
21087 if (likely(!ret))
21088 __put_user_asm(tmp, (u8 __user *)dst,
21089@@ -169,7 +221,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21090 }
21091 case 2: {
21092 u16 tmp;
21093- __get_user_asm(tmp, (u16 __user *)src,
21094+ __get_user_asm(tmp, (const u16 __user *)src,
21095 ret, "w", "w", "=r", 2);
21096 if (likely(!ret))
21097 __put_user_asm(tmp, (u16 __user *)dst,
21098@@ -179,7 +231,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21099
21100 case 4: {
21101 u32 tmp;
21102- __get_user_asm(tmp, (u32 __user *)src,
21103+ __get_user_asm(tmp, (const u32 __user *)src,
21104 ret, "l", "k", "=r", 4);
21105 if (likely(!ret))
21106 __put_user_asm(tmp, (u32 __user *)dst,
21107@@ -188,7 +240,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21108 }
21109 case 8: {
21110 u64 tmp;
21111- __get_user_asm(tmp, (u64 __user *)src,
21112+ __get_user_asm(tmp, (const u64 __user *)src,
21113 ret, "q", "", "=r", 8);
21114 if (likely(!ret))
21115 __put_user_asm(tmp, (u64 __user *)dst,
21116@@ -196,41 +248,58 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
21117 return ret;
21118 }
21119 default:
21120- return copy_user_generic((__force void *)dst,
21121- (__force void *)src, size);
21122+ return copy_user_generic((__force_kernel void *)____m(dst),
21123+ (__force_kernel const void *)____m(src), size);
21124 }
21125 }
21126
21127-static __must_check __always_inline int
21128-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
21129+static __must_check __always_inline unsigned long
21130+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
21131 {
21132 return __copy_from_user_nocheck(dst, src, size);
21133 }
21134
21135-static __must_check __always_inline int
21136-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
21137+static __must_check __always_inline unsigned long
21138+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
21139 {
21140 return __copy_to_user_nocheck(dst, src, size);
21141 }
21142
21143-extern long __copy_user_nocache(void *dst, const void __user *src,
21144- unsigned size, int zerorest);
21145+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
21146+ unsigned long size, int zerorest);
21147
21148-static inline int
21149-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
21150+static inline unsigned long
21151+__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
21152 {
21153 might_fault();
21154+
21155+ if (size > INT_MAX)
21156+ return size;
21157+
21158+#ifdef CONFIG_PAX_MEMORY_UDEREF
21159+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21160+ return size;
21161+#endif
21162+
21163 return __copy_user_nocache(dst, src, size, 1);
21164 }
21165
21166-static inline int
21167+static inline unsigned long
21168 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
21169- unsigned size)
21170+ unsigned long size)
21171 {
21172+ if (size > INT_MAX)
21173+ return size;
21174+
21175+#ifdef CONFIG_PAX_MEMORY_UDEREF
21176+ if (!access_ok_noprefault(VERIFY_READ, src, size))
21177+ return size;
21178+#endif
21179+
21180 return __copy_user_nocache(dst, src, size, 0);
21181 }
21182
21183 unsigned long
21184-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
21185+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
21186
21187 #endif /* _ASM_X86_UACCESS_64_H */
21188diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
21189index 5b238981..77fdd78 100644
21190--- a/arch/x86/include/asm/word-at-a-time.h
21191+++ b/arch/x86/include/asm/word-at-a-time.h
21192@@ -11,7 +11,7 @@
21193 * and shift, for example.
21194 */
21195 struct word_at_a_time {
21196- const unsigned long one_bits, high_bits;
21197+ unsigned long one_bits, high_bits;
21198 };
21199
21200 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
21201diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
21202index e45e4da..44e8572 100644
21203--- a/arch/x86/include/asm/x86_init.h
21204+++ b/arch/x86/include/asm/x86_init.h
21205@@ -129,7 +129,7 @@ struct x86_init_ops {
21206 struct x86_init_timers timers;
21207 struct x86_init_iommu iommu;
21208 struct x86_init_pci pci;
21209-};
21210+} __no_const;
21211
21212 /**
21213 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
21214@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
21215 void (*setup_percpu_clockev)(void);
21216 void (*early_percpu_clock_init)(void);
21217 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
21218-};
21219+} __no_const;
21220
21221 struct timespec;
21222
21223@@ -168,7 +168,7 @@ struct x86_platform_ops {
21224 void (*save_sched_clock_state)(void);
21225 void (*restore_sched_clock_state)(void);
21226 void (*apic_post_init)(void);
21227-};
21228+} __no_const;
21229
21230 struct pci_dev;
21231 struct msi_msg;
21232@@ -185,7 +185,7 @@ struct x86_msi_ops {
21233 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
21234 u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
21235 u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
21236-};
21237+} __no_const;
21238
21239 struct IO_APIC_route_entry;
21240 struct io_apic_irq_attr;
21241@@ -206,7 +206,7 @@ struct x86_io_apic_ops {
21242 unsigned int destination, int vector,
21243 struct io_apic_irq_attr *attr);
21244 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
21245-};
21246+} __no_const;
21247
21248 extern struct x86_init_ops x86_init;
21249 extern struct x86_cpuinit_ops x86_cpuinit;
21250diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
21251index c949923..c22bfa4 100644
21252--- a/arch/x86/include/asm/xen/page.h
21253+++ b/arch/x86/include/asm/xen/page.h
21254@@ -63,7 +63,7 @@ extern int m2p_remove_override(struct page *page,
21255 extern struct page *m2p_find_override(unsigned long mfn);
21256 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
21257
21258-static inline unsigned long pfn_to_mfn(unsigned long pfn)
21259+static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
21260 {
21261 unsigned long mfn;
21262
21263diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
21264index 7e7a79a..0824666 100644
21265--- a/arch/x86/include/asm/xsave.h
21266+++ b/arch/x86/include/asm/xsave.h
21267@@ -228,12 +228,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
21268 if (unlikely(err))
21269 return -EFAULT;
21270
21271+ pax_open_userland();
21272 __asm__ __volatile__(ASM_STAC "\n"
21273- "1:"XSAVE"\n"
21274+ "1:"
21275+ __copyuser_seg
21276+ XSAVE"\n"
21277 "2: " ASM_CLAC "\n"
21278 xstate_fault
21279 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
21280 : "memory");
21281+ pax_close_userland();
21282 return err;
21283 }
21284
21285@@ -243,16 +247,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
21286 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
21287 {
21288 int err = 0;
21289- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
21290+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
21291 u32 lmask = mask;
21292 u32 hmask = mask >> 32;
21293
21294+ pax_open_userland();
21295 __asm__ __volatile__(ASM_STAC "\n"
21296- "1:"XRSTOR"\n"
21297+ "1:"
21298+ __copyuser_seg
21299+ XRSTOR"\n"
21300 "2: " ASM_CLAC "\n"
21301 xstate_fault
21302 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
21303 : "memory"); /* memory required? */
21304+ pax_close_userland();
21305 return err;
21306 }
21307
21308diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
21309index bbae024..e1528f9 100644
21310--- a/arch/x86/include/uapi/asm/e820.h
21311+++ b/arch/x86/include/uapi/asm/e820.h
21312@@ -63,7 +63,7 @@ struct e820map {
21313 #define ISA_START_ADDRESS 0xa0000
21314 #define ISA_END_ADDRESS 0x100000
21315
21316-#define BIOS_BEGIN 0x000a0000
21317+#define BIOS_BEGIN 0x000c0000
21318 #define BIOS_END 0x00100000
21319
21320 #define BIOS_ROM_BASE 0xffe00000
21321diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
21322index 7b0a55a..ad115bf 100644
21323--- a/arch/x86/include/uapi/asm/ptrace-abi.h
21324+++ b/arch/x86/include/uapi/asm/ptrace-abi.h
21325@@ -49,7 +49,6 @@
21326 #define EFLAGS 144
21327 #define RSP 152
21328 #define SS 160
21329-#define ARGOFFSET R11
21330 #endif /* __ASSEMBLY__ */
21331
21332 /* top of stack page */
21333diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
21334index ada2e2d..ca69e16 100644
21335--- a/arch/x86/kernel/Makefile
21336+++ b/arch/x86/kernel/Makefile
21337@@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
21338 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
21339 obj-$(CONFIG_IRQ_WORK) += irq_work.o
21340 obj-y += probe_roms.o
21341-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
21342+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
21343 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
21344 obj-$(CONFIG_X86_64) += mcount_64.o
21345 obj-y += syscall_$(BITS).o vsyscall_gtod.o
21346diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
21347index a142e77..6222cdd 100644
21348--- a/arch/x86/kernel/acpi/boot.c
21349+++ b/arch/x86/kernel/acpi/boot.c
21350@@ -1276,7 +1276,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
21351 * If your system is blacklisted here, but you find that acpi=force
21352 * works for you, please contact linux-acpi@vger.kernel.org
21353 */
21354-static struct dmi_system_id __initdata acpi_dmi_table[] = {
21355+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
21356 /*
21357 * Boxes that need ACPI disabled
21358 */
21359@@ -1351,7 +1351,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
21360 };
21361
21362 /* second table for DMI checks that should run after early-quirks */
21363-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
21364+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
21365 /*
21366 * HP laptops which use a DSDT reporting as HP/SB400/10000,
21367 * which includes some code which overrides all temperature
21368diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
21369index 3136820..e2c6577 100644
21370--- a/arch/x86/kernel/acpi/sleep.c
21371+++ b/arch/x86/kernel/acpi/sleep.c
21372@@ -99,8 +99,12 @@ int x86_acpi_suspend_lowlevel(void)
21373 #else /* CONFIG_64BIT */
21374 #ifdef CONFIG_SMP
21375 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
21376+
21377+ pax_open_kernel();
21378 early_gdt_descr.address =
21379 (unsigned long)get_cpu_gdt_table(smp_processor_id());
21380+ pax_close_kernel();
21381+
21382 initial_gs = per_cpu_offset(smp_processor_id());
21383 #endif
21384 initial_code = (unsigned long)wakeup_long64;
21385diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
21386index 665c6b7..eae4d56 100644
21387--- a/arch/x86/kernel/acpi/wakeup_32.S
21388+++ b/arch/x86/kernel/acpi/wakeup_32.S
21389@@ -29,13 +29,11 @@ wakeup_pmode_return:
21390 # and restore the stack ... but you need gdt for this to work
21391 movl saved_context_esp, %esp
21392
21393- movl %cs:saved_magic, %eax
21394- cmpl $0x12345678, %eax
21395+ cmpl $0x12345678, saved_magic
21396 jne bogus_magic
21397
21398 # jump to place where we left off
21399- movl saved_eip, %eax
21400- jmp *%eax
21401+ jmp *(saved_eip)
21402
21403 bogus_magic:
21404 jmp bogus_magic
21405diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
21406index 703130f..27a155d 100644
21407--- a/arch/x86/kernel/alternative.c
21408+++ b/arch/x86/kernel/alternative.c
21409@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
21410 */
21411 for (a = start; a < end; a++) {
21412 instr = (u8 *)&a->instr_offset + a->instr_offset;
21413+
21414+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21415+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21416+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
21417+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21418+#endif
21419+
21420 replacement = (u8 *)&a->repl_offset + a->repl_offset;
21421 BUG_ON(a->replacementlen > a->instrlen);
21422 BUG_ON(a->instrlen > sizeof(insnbuf));
21423@@ -284,6 +291,11 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
21424 add_nops(insnbuf + a->replacementlen,
21425 a->instrlen - a->replacementlen);
21426
21427+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21428+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
21429+ instr = ktva_ktla(instr);
21430+#endif
21431+
21432 text_poke_early(instr, insnbuf, a->instrlen);
21433 }
21434 }
21435@@ -299,10 +311,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
21436 for (poff = start; poff < end; poff++) {
21437 u8 *ptr = (u8 *)poff + *poff;
21438
21439+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21440+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21441+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
21442+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21443+#endif
21444+
21445 if (!*poff || ptr < text || ptr >= text_end)
21446 continue;
21447 /* turn DS segment override prefix into lock prefix */
21448- if (*ptr == 0x3e)
21449+ if (*ktla_ktva(ptr) == 0x3e)
21450 text_poke(ptr, ((unsigned char []){0xf0}), 1);
21451 }
21452 mutex_unlock(&text_mutex);
21453@@ -317,10 +335,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
21454 for (poff = start; poff < end; poff++) {
21455 u8 *ptr = (u8 *)poff + *poff;
21456
21457+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21458+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21459+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
21460+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
21461+#endif
21462+
21463 if (!*poff || ptr < text || ptr >= text_end)
21464 continue;
21465 /* turn lock prefix into DS segment override prefix */
21466- if (*ptr == 0xf0)
21467+ if (*ktla_ktva(ptr) == 0xf0)
21468 text_poke(ptr, ((unsigned char []){0x3E}), 1);
21469 }
21470 mutex_unlock(&text_mutex);
21471@@ -457,7 +481,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
21472
21473 BUG_ON(p->len > MAX_PATCH_LEN);
21474 /* prep the buffer with the original instructions */
21475- memcpy(insnbuf, p->instr, p->len);
21476+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
21477 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
21478 (unsigned long)p->instr, p->len);
21479
21480@@ -504,7 +528,7 @@ void __init alternative_instructions(void)
21481 if (!uniproc_patched || num_possible_cpus() == 1)
21482 free_init_pages("SMP alternatives",
21483 (unsigned long)__smp_locks,
21484- (unsigned long)__smp_locks_end);
21485+ PAGE_ALIGN((unsigned long)__smp_locks_end));
21486 #endif
21487
21488 apply_paravirt(__parainstructions, __parainstructions_end);
21489@@ -524,13 +548,17 @@ void __init alternative_instructions(void)
21490 * instructions. And on the local CPU you need to be protected again NMI or MCE
21491 * handlers seeing an inconsistent instruction while you patch.
21492 */
21493-void *__init_or_module text_poke_early(void *addr, const void *opcode,
21494+void *__kprobes text_poke_early(void *addr, const void *opcode,
21495 size_t len)
21496 {
21497 unsigned long flags;
21498 local_irq_save(flags);
21499- memcpy(addr, opcode, len);
21500+
21501+ pax_open_kernel();
21502+ memcpy(ktla_ktva(addr), opcode, len);
21503 sync_core();
21504+ pax_close_kernel();
21505+
21506 local_irq_restore(flags);
21507 /* Could also do a CLFLUSH here to speed up CPU recovery; but
21508 that causes hangs on some VIA CPUs. */
21509@@ -552,36 +580,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
21510 */
21511 void *text_poke(void *addr, const void *opcode, size_t len)
21512 {
21513- unsigned long flags;
21514- char *vaddr;
21515+ unsigned char *vaddr = ktla_ktva(addr);
21516 struct page *pages[2];
21517- int i;
21518+ size_t i;
21519
21520 if (!core_kernel_text((unsigned long)addr)) {
21521- pages[0] = vmalloc_to_page(addr);
21522- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
21523+ pages[0] = vmalloc_to_page(vaddr);
21524+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
21525 } else {
21526- pages[0] = virt_to_page(addr);
21527+ pages[0] = virt_to_page(vaddr);
21528 WARN_ON(!PageReserved(pages[0]));
21529- pages[1] = virt_to_page(addr + PAGE_SIZE);
21530+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
21531 }
21532 BUG_ON(!pages[0]);
21533- local_irq_save(flags);
21534- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
21535- if (pages[1])
21536- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
21537- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
21538- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
21539- clear_fixmap(FIX_TEXT_POKE0);
21540- if (pages[1])
21541- clear_fixmap(FIX_TEXT_POKE1);
21542- local_flush_tlb();
21543- sync_core();
21544- /* Could also do a CLFLUSH here to speed up CPU recovery; but
21545- that causes hangs on some VIA CPUs. */
21546+ text_poke_early(addr, opcode, len);
21547 for (i = 0; i < len; i++)
21548- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
21549- local_irq_restore(flags);
21550+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
21551 return addr;
21552 }
21553
21554@@ -601,7 +615,7 @@ int poke_int3_handler(struct pt_regs *regs)
21555 if (likely(!bp_patching_in_progress))
21556 return 0;
21557
21558- if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
21559+ if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
21560 return 0;
21561
21562 /* set up the specified breakpoint handler */
21563@@ -635,7 +649,7 @@ int poke_int3_handler(struct pt_regs *regs)
21564 */
21565 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
21566 {
21567- unsigned char int3 = 0xcc;
21568+ const unsigned char int3 = 0xcc;
21569
21570 bp_int3_handler = handler;
21571 bp_int3_addr = (u8 *)addr + sizeof(int3);
21572diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
21573index 24b5894..6d9701b 100644
21574--- a/arch/x86/kernel/apic/apic.c
21575+++ b/arch/x86/kernel/apic/apic.c
21576@@ -201,7 +201,7 @@ int first_system_vector = 0xfe;
21577 /*
21578 * Debug level, exported for io_apic.c
21579 */
21580-unsigned int apic_verbosity;
21581+int apic_verbosity;
21582
21583 int pic_mode;
21584
21585@@ -1989,7 +1989,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
21586 apic_write(APIC_ESR, 0);
21587 v = apic_read(APIC_ESR);
21588 ack_APIC_irq();
21589- atomic_inc(&irq_err_count);
21590+ atomic_inc_unchecked(&irq_err_count);
21591
21592 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
21593 smp_processor_id(), v);
21594diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
21595index de918c4..32eed23 100644
21596--- a/arch/x86/kernel/apic/apic_flat_64.c
21597+++ b/arch/x86/kernel/apic/apic_flat_64.c
21598@@ -154,7 +154,7 @@ static int flat_probe(void)
21599 return 1;
21600 }
21601
21602-static struct apic apic_flat = {
21603+static struct apic apic_flat __read_only = {
21604 .name = "flat",
21605 .probe = flat_probe,
21606 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
21607@@ -260,7 +260,7 @@ static int physflat_probe(void)
21608 return 0;
21609 }
21610
21611-static struct apic apic_physflat = {
21612+static struct apic apic_physflat __read_only = {
21613
21614 .name = "physical flat",
21615 .probe = physflat_probe,
21616diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
21617index b205cdb..d8503ff 100644
21618--- a/arch/x86/kernel/apic/apic_noop.c
21619+++ b/arch/x86/kernel/apic/apic_noop.c
21620@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
21621 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
21622 }
21623
21624-struct apic apic_noop = {
21625+struct apic apic_noop __read_only = {
21626 .name = "noop",
21627 .probe = noop_probe,
21628 .acpi_madt_oem_check = NULL,
21629diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
21630index c4a8d63..fe893ac 100644
21631--- a/arch/x86/kernel/apic/bigsmp_32.c
21632+++ b/arch/x86/kernel/apic/bigsmp_32.c
21633@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
21634 return dmi_bigsmp;
21635 }
21636
21637-static struct apic apic_bigsmp = {
21638+static struct apic apic_bigsmp __read_only = {
21639
21640 .name = "bigsmp",
21641 .probe = probe_bigsmp,
21642diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
21643index 337ce5a..c8d98b4 100644
21644--- a/arch/x86/kernel/apic/io_apic.c
21645+++ b/arch/x86/kernel/apic/io_apic.c
21646@@ -1230,7 +1230,7 @@ out:
21647 }
21648 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
21649
21650-void lock_vector_lock(void)
21651+void lock_vector_lock(void) __acquires(vector_lock)
21652 {
21653 /* Used to the online set of cpus does not change
21654 * during assign_irq_vector.
21655@@ -1238,7 +1238,7 @@ void lock_vector_lock(void)
21656 raw_spin_lock(&vector_lock);
21657 }
21658
21659-void unlock_vector_lock(void)
21660+void unlock_vector_lock(void) __releases(vector_lock)
21661 {
21662 raw_spin_unlock(&vector_lock);
21663 }
21664@@ -2465,7 +2465,7 @@ static void ack_apic_edge(struct irq_data *data)
21665 ack_APIC_irq();
21666 }
21667
21668-atomic_t irq_mis_count;
21669+atomic_unchecked_t irq_mis_count;
21670
21671 #ifdef CONFIG_GENERIC_PENDING_IRQ
21672 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
21673@@ -2606,7 +2606,7 @@ static void ack_apic_level(struct irq_data *data)
21674 * at the cpu.
21675 */
21676 if (!(v & (1 << (i & 0x1f)))) {
21677- atomic_inc(&irq_mis_count);
21678+ atomic_inc_unchecked(&irq_mis_count);
21679
21680 eoi_ioapic_irq(irq, cfg);
21681 }
21682diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
21683index bda4886..f9c7195 100644
21684--- a/arch/x86/kernel/apic/probe_32.c
21685+++ b/arch/x86/kernel/apic/probe_32.c
21686@@ -72,7 +72,7 @@ static int probe_default(void)
21687 return 1;
21688 }
21689
21690-static struct apic apic_default = {
21691+static struct apic apic_default __read_only = {
21692
21693 .name = "default",
21694 .probe = probe_default,
21695diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
21696index 6ce600f..cb44af8 100644
21697--- a/arch/x86/kernel/apic/x2apic_cluster.c
21698+++ b/arch/x86/kernel/apic/x2apic_cluster.c
21699@@ -182,7 +182,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
21700 return notifier_from_errno(err);
21701 }
21702
21703-static struct notifier_block __refdata x2apic_cpu_notifier = {
21704+static struct notifier_block x2apic_cpu_notifier = {
21705 .notifier_call = update_clusterinfo,
21706 };
21707
21708@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
21709 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
21710 }
21711
21712-static struct apic apic_x2apic_cluster = {
21713+static struct apic apic_x2apic_cluster __read_only = {
21714
21715 .name = "cluster x2apic",
21716 .probe = x2apic_cluster_probe,
21717diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
21718index 6fae733..5ca17af 100644
21719--- a/arch/x86/kernel/apic/x2apic_phys.c
21720+++ b/arch/x86/kernel/apic/x2apic_phys.c
21721@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
21722 return apic == &apic_x2apic_phys;
21723 }
21724
21725-static struct apic apic_x2apic_phys = {
21726+static struct apic apic_x2apic_phys __read_only = {
21727
21728 .name = "physical x2apic",
21729 .probe = x2apic_phys_probe,
21730diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
21731index 004f017..8fbc8b5 100644
21732--- a/arch/x86/kernel/apic/x2apic_uv_x.c
21733+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
21734@@ -350,7 +350,7 @@ static int uv_probe(void)
21735 return apic == &apic_x2apic_uv_x;
21736 }
21737
21738-static struct apic __refdata apic_x2apic_uv_x = {
21739+static struct apic apic_x2apic_uv_x __read_only = {
21740
21741 .name = "UV large system",
21742 .probe = uv_probe,
21743diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
21744index 5848744..56cb598 100644
21745--- a/arch/x86/kernel/apm_32.c
21746+++ b/arch/x86/kernel/apm_32.c
21747@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
21748 * This is for buggy BIOS's that refer to (real mode) segment 0x40
21749 * even though they are called in protected mode.
21750 */
21751-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
21752+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
21753 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
21754
21755 static const char driver_version[] = "1.16ac"; /* no spaces */
21756@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
21757 BUG_ON(cpu != 0);
21758 gdt = get_cpu_gdt_table(cpu);
21759 save_desc_40 = gdt[0x40 / 8];
21760+
21761+ pax_open_kernel();
21762 gdt[0x40 / 8] = bad_bios_desc;
21763+ pax_close_kernel();
21764
21765 apm_irq_save(flags);
21766 APM_DO_SAVE_SEGS;
21767@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
21768 &call->esi);
21769 APM_DO_RESTORE_SEGS;
21770 apm_irq_restore(flags);
21771+
21772+ pax_open_kernel();
21773 gdt[0x40 / 8] = save_desc_40;
21774+ pax_close_kernel();
21775+
21776 put_cpu();
21777
21778 return call->eax & 0xff;
21779@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
21780 BUG_ON(cpu != 0);
21781 gdt = get_cpu_gdt_table(cpu);
21782 save_desc_40 = gdt[0x40 / 8];
21783+
21784+ pax_open_kernel();
21785 gdt[0x40 / 8] = bad_bios_desc;
21786+ pax_close_kernel();
21787
21788 apm_irq_save(flags);
21789 APM_DO_SAVE_SEGS;
21790@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
21791 &call->eax);
21792 APM_DO_RESTORE_SEGS;
21793 apm_irq_restore(flags);
21794+
21795+ pax_open_kernel();
21796 gdt[0x40 / 8] = save_desc_40;
21797+ pax_close_kernel();
21798+
21799 put_cpu();
21800 return error;
21801 }
21802@@ -2350,12 +2364,15 @@ static int __init apm_init(void)
21803 * code to that CPU.
21804 */
21805 gdt = get_cpu_gdt_table(0);
21806+
21807+ pax_open_kernel();
21808 set_desc_base(&gdt[APM_CS >> 3],
21809 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
21810 set_desc_base(&gdt[APM_CS_16 >> 3],
21811 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
21812 set_desc_base(&gdt[APM_DS >> 3],
21813 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
21814+ pax_close_kernel();
21815
21816 proc_create("apm", 0, NULL, &apm_file_ops);
21817
21818diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
21819index 9f6b934..cf5ffb3 100644
21820--- a/arch/x86/kernel/asm-offsets.c
21821+++ b/arch/x86/kernel/asm-offsets.c
21822@@ -32,6 +32,8 @@ void common(void) {
21823 OFFSET(TI_flags, thread_info, flags);
21824 OFFSET(TI_status, thread_info, status);
21825 OFFSET(TI_addr_limit, thread_info, addr_limit);
21826+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
21827+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
21828
21829 BLANK();
21830 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
21831@@ -52,8 +54,26 @@ void common(void) {
21832 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
21833 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
21834 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
21835+
21836+#ifdef CONFIG_PAX_KERNEXEC
21837+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
21838 #endif
21839
21840+#ifdef CONFIG_PAX_MEMORY_UDEREF
21841+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
21842+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
21843+#ifdef CONFIG_X86_64
21844+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
21845+#endif
21846+#endif
21847+
21848+#endif
21849+
21850+ BLANK();
21851+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
21852+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
21853+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
21854+
21855 #ifdef CONFIG_XEN
21856 BLANK();
21857 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
21858diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
21859index e7c798b..2b2019b 100644
21860--- a/arch/x86/kernel/asm-offsets_64.c
21861+++ b/arch/x86/kernel/asm-offsets_64.c
21862@@ -77,6 +77,7 @@ int main(void)
21863 BLANK();
21864 #undef ENTRY
21865
21866+ DEFINE(TSS_size, sizeof(struct tss_struct));
21867 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
21868 BLANK();
21869
21870diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
21871index 7fd54f0..0691410 100644
21872--- a/arch/x86/kernel/cpu/Makefile
21873+++ b/arch/x86/kernel/cpu/Makefile
21874@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
21875 CFLAGS_REMOVE_perf_event.o = -pg
21876 endif
21877
21878-# Make sure load_percpu_segment has no stackprotector
21879-nostackp := $(call cc-option, -fno-stack-protector)
21880-CFLAGS_common.o := $(nostackp)
21881-
21882 obj-y := intel_cacheinfo.o scattered.o topology.o
21883 obj-y += proc.o capflags.o powerflags.o common.o
21884 obj-y += rdrand.o
21885diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
21886index 813d29d..6e542d4 100644
21887--- a/arch/x86/kernel/cpu/amd.c
21888+++ b/arch/x86/kernel/cpu/amd.c
21889@@ -718,7 +718,7 @@ static void init_amd(struct cpuinfo_x86 *c)
21890 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
21891 {
21892 /* AMD errata T13 (order #21922) */
21893- if ((c->x86 == 6)) {
21894+ if (c->x86 == 6) {
21895 /* Duron Rev A0 */
21896 if (c->x86_model == 3 && c->x86_mask == 0)
21897 size = 64;
21898diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
21899index 35db56b..256e87c 100644
21900--- a/arch/x86/kernel/cpu/common.c
21901+++ b/arch/x86/kernel/cpu/common.c
21902@@ -90,60 +90,6 @@ static const struct cpu_dev default_cpu = {
21903
21904 static const struct cpu_dev *this_cpu = &default_cpu;
21905
21906-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
21907-#ifdef CONFIG_X86_64
21908- /*
21909- * We need valid kernel segments for data and code in long mode too
21910- * IRET will check the segment types kkeil 2000/10/28
21911- * Also sysret mandates a special GDT layout
21912- *
21913- * TLS descriptors are currently at a different place compared to i386.
21914- * Hopefully nobody expects them at a fixed place (Wine?)
21915- */
21916- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
21917- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
21918- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
21919- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
21920- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
21921- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
21922-#else
21923- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
21924- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21925- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
21926- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
21927- /*
21928- * Segments used for calling PnP BIOS have byte granularity.
21929- * They code segments and data segments have fixed 64k limits,
21930- * the transfer segment sizes are set at run time.
21931- */
21932- /* 32-bit code */
21933- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21934- /* 16-bit code */
21935- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21936- /* 16-bit data */
21937- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
21938- /* 16-bit data */
21939- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
21940- /* 16-bit data */
21941- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
21942- /*
21943- * The APM segments have byte granularity and their bases
21944- * are set at run time. All have 64k limits.
21945- */
21946- /* 32-bit code */
21947- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
21948- /* 16-bit code */
21949- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
21950- /* data */
21951- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
21952-
21953- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21954- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
21955- GDT_STACK_CANARY_INIT
21956-#endif
21957-} };
21958-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
21959-
21960 static int __init x86_xsave_setup(char *s)
21961 {
21962 if (strlen(s))
21963@@ -305,6 +251,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
21964 }
21965 }
21966
21967+#ifdef CONFIG_X86_64
21968+static __init int setup_disable_pcid(char *arg)
21969+{
21970+ setup_clear_cpu_cap(X86_FEATURE_PCID);
21971+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
21972+
21973+#ifdef CONFIG_PAX_MEMORY_UDEREF
21974+ if (clone_pgd_mask != ~(pgdval_t)0UL)
21975+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21976+#endif
21977+
21978+ return 1;
21979+}
21980+__setup("nopcid", setup_disable_pcid);
21981+
21982+static void setup_pcid(struct cpuinfo_x86 *c)
21983+{
21984+ if (!cpu_has(c, X86_FEATURE_PCID)) {
21985+ clear_cpu_cap(c, X86_FEATURE_INVPCID);
21986+
21987+#ifdef CONFIG_PAX_MEMORY_UDEREF
21988+ if (clone_pgd_mask != ~(pgdval_t)0UL) {
21989+ pax_open_kernel();
21990+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
21991+ pax_close_kernel();
21992+ printk("PAX: slow and weak UDEREF enabled\n");
21993+ } else
21994+ printk("PAX: UDEREF disabled\n");
21995+#endif
21996+
21997+ return;
21998+ }
21999+
22000+ printk("PAX: PCID detected\n");
22001+ set_in_cr4(X86_CR4_PCIDE);
22002+
22003+#ifdef CONFIG_PAX_MEMORY_UDEREF
22004+ pax_open_kernel();
22005+ clone_pgd_mask = ~(pgdval_t)0UL;
22006+ pax_close_kernel();
22007+ if (pax_user_shadow_base)
22008+ printk("PAX: weak UDEREF enabled\n");
22009+ else {
22010+ set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
22011+ printk("PAX: strong UDEREF enabled\n");
22012+ }
22013+#endif
22014+
22015+ if (cpu_has(c, X86_FEATURE_INVPCID))
22016+ printk("PAX: INVPCID detected\n");
22017+}
22018+#endif
22019+
22020 /*
22021 * Some CPU features depend on higher CPUID levels, which may not always
22022 * be available due to CPUID level capping or broken virtualization
22023@@ -405,7 +404,7 @@ void switch_to_new_gdt(int cpu)
22024 {
22025 struct desc_ptr gdt_descr;
22026
22027- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
22028+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22029 gdt_descr.size = GDT_SIZE - 1;
22030 load_gdt(&gdt_descr);
22031 /* Reload the per-cpu base */
22032@@ -895,6 +894,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
22033 setup_smep(c);
22034 setup_smap(c);
22035
22036+#ifdef CONFIG_X86_64
22037+ setup_pcid(c);
22038+#endif
22039+
22040 /*
22041 * The vendor-specific functions might have changed features.
22042 * Now we do "generic changes."
22043@@ -903,6 +906,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
22044 /* Filter out anything that depends on CPUID levels we don't have */
22045 filter_cpuid_features(c, true);
22046
22047+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22048+ setup_clear_cpu_cap(X86_FEATURE_SEP);
22049+#endif
22050+
22051 /* If the model name is still unset, do table lookup. */
22052 if (!c->x86_model_id[0]) {
22053 const char *p;
22054@@ -983,7 +990,7 @@ static void syscall32_cpu_init(void)
22055 void enable_sep_cpu(void)
22056 {
22057 int cpu = get_cpu();
22058- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22059+ struct tss_struct *tss = init_tss + cpu;
22060
22061 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22062 put_cpu();
22063@@ -1123,14 +1130,16 @@ static __init int setup_disablecpuid(char *arg)
22064 }
22065 __setup("clearcpuid=", setup_disablecpuid);
22066
22067+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
22068+EXPORT_PER_CPU_SYMBOL(current_tinfo);
22069+
22070 DEFINE_PER_CPU(unsigned long, kernel_stack) =
22071- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
22072+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
22073 EXPORT_PER_CPU_SYMBOL(kernel_stack);
22074
22075 #ifdef CONFIG_X86_64
22076-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
22077-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
22078- (unsigned long) debug_idt_table };
22079+struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
22080+const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
22081
22082 DEFINE_PER_CPU_FIRST(union irq_stack_union,
22083 irq_stack_union) __aligned(PAGE_SIZE) __visible;
22084@@ -1293,7 +1302,7 @@ void cpu_init(void)
22085 load_ucode_ap();
22086
22087 cpu = stack_smp_processor_id();
22088- t = &per_cpu(init_tss, cpu);
22089+ t = init_tss + cpu;
22090 oist = &per_cpu(orig_ist, cpu);
22091
22092 #ifdef CONFIG_NUMA
22093@@ -1328,7 +1337,6 @@ void cpu_init(void)
22094 wrmsrl(MSR_KERNEL_GS_BASE, 0);
22095 barrier();
22096
22097- x86_configure_nx();
22098 enable_x2apic();
22099
22100 /*
22101@@ -1380,7 +1388,7 @@ void cpu_init(void)
22102 {
22103 int cpu = smp_processor_id();
22104 struct task_struct *curr = current;
22105- struct tss_struct *t = &per_cpu(init_tss, cpu);
22106+ struct tss_struct *t = init_tss + cpu;
22107 struct thread_struct *thread = &curr->thread;
22108
22109 show_ucode_info_early();
22110diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
22111index c703507..28535e3 100644
22112--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
22113+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
22114@@ -1026,6 +1026,22 @@ static struct attribute *default_attrs[] = {
22115 };
22116
22117 #ifdef CONFIG_AMD_NB
22118+static struct attribute *default_attrs_amd_nb[] = {
22119+ &type.attr,
22120+ &level.attr,
22121+ &coherency_line_size.attr,
22122+ &physical_line_partition.attr,
22123+ &ways_of_associativity.attr,
22124+ &number_of_sets.attr,
22125+ &size.attr,
22126+ &shared_cpu_map.attr,
22127+ &shared_cpu_list.attr,
22128+ NULL,
22129+ NULL,
22130+ NULL,
22131+ NULL
22132+};
22133+
22134 static struct attribute **amd_l3_attrs(void)
22135 {
22136 static struct attribute **attrs;
22137@@ -1036,18 +1052,7 @@ static struct attribute **amd_l3_attrs(void)
22138
22139 n = ARRAY_SIZE(default_attrs);
22140
22141- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
22142- n += 2;
22143-
22144- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
22145- n += 1;
22146-
22147- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
22148- if (attrs == NULL)
22149- return attrs = default_attrs;
22150-
22151- for (n = 0; default_attrs[n]; n++)
22152- attrs[n] = default_attrs[n];
22153+ attrs = default_attrs_amd_nb;
22154
22155 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
22156 attrs[n++] = &cache_disable_0.attr;
22157@@ -1098,6 +1103,13 @@ static struct kobj_type ktype_cache = {
22158 .default_attrs = default_attrs,
22159 };
22160
22161+#ifdef CONFIG_AMD_NB
22162+static struct kobj_type ktype_cache_amd_nb = {
22163+ .sysfs_ops = &sysfs_ops,
22164+ .default_attrs = default_attrs_amd_nb,
22165+};
22166+#endif
22167+
22168 static struct kobj_type ktype_percpu_entry = {
22169 .sysfs_ops = &sysfs_ops,
22170 };
22171@@ -1163,20 +1175,26 @@ static int cache_add_dev(struct device *dev)
22172 return retval;
22173 }
22174
22175+#ifdef CONFIG_AMD_NB
22176+ amd_l3_attrs();
22177+#endif
22178+
22179 for (i = 0; i < num_cache_leaves; i++) {
22180+ struct kobj_type *ktype;
22181+
22182 this_object = INDEX_KOBJECT_PTR(cpu, i);
22183 this_object->cpu = cpu;
22184 this_object->index = i;
22185
22186 this_leaf = CPUID4_INFO_IDX(cpu, i);
22187
22188- ktype_cache.default_attrs = default_attrs;
22189+ ktype = &ktype_cache;
22190 #ifdef CONFIG_AMD_NB
22191 if (this_leaf->base.nb)
22192- ktype_cache.default_attrs = amd_l3_attrs();
22193+ ktype = &ktype_cache_amd_nb;
22194 #endif
22195 retval = kobject_init_and_add(&(this_object->kobj),
22196- &ktype_cache,
22197+ ktype,
22198 per_cpu(ici_cache_kobject, cpu),
22199 "index%1lu", i);
22200 if (unlikely(retval)) {
22201diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
22202index bd9ccda..38314e7 100644
22203--- a/arch/x86/kernel/cpu/mcheck/mce.c
22204+++ b/arch/x86/kernel/cpu/mcheck/mce.c
22205@@ -45,6 +45,7 @@
22206 #include <asm/processor.h>
22207 #include <asm/mce.h>
22208 #include <asm/msr.h>
22209+#include <asm/local.h>
22210
22211 #include "mce-internal.h"
22212
22213@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
22214 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
22215 m->cs, m->ip);
22216
22217- if (m->cs == __KERNEL_CS)
22218+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
22219 print_symbol("{%s}", m->ip);
22220 pr_cont("\n");
22221 }
22222@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
22223
22224 #define PANIC_TIMEOUT 5 /* 5 seconds */
22225
22226-static atomic_t mce_paniced;
22227+static atomic_unchecked_t mce_paniced;
22228
22229 static int fake_panic;
22230-static atomic_t mce_fake_paniced;
22231+static atomic_unchecked_t mce_fake_paniced;
22232
22233 /* Panic in progress. Enable interrupts and wait for final IPI */
22234 static void wait_for_panic(void)
22235@@ -319,7 +320,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22236 /*
22237 * Make sure only one CPU runs in machine check panic
22238 */
22239- if (atomic_inc_return(&mce_paniced) > 1)
22240+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
22241 wait_for_panic();
22242 barrier();
22243
22244@@ -327,7 +328,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22245 console_verbose();
22246 } else {
22247 /* Don't log too much for fake panic */
22248- if (atomic_inc_return(&mce_fake_paniced) > 1)
22249+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
22250 return;
22251 }
22252 /* First print corrected ones that are still unlogged */
22253@@ -366,7 +367,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
22254 if (!fake_panic) {
22255 if (panic_timeout == 0)
22256 panic_timeout = mca_cfg.panic_timeout;
22257- panic(msg);
22258+ panic("%s", msg);
22259 } else
22260 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
22261 }
22262@@ -697,7 +698,7 @@ static int mce_timed_out(u64 *t)
22263 * might have been modified by someone else.
22264 */
22265 rmb();
22266- if (atomic_read(&mce_paniced))
22267+ if (atomic_read_unchecked(&mce_paniced))
22268 wait_for_panic();
22269 if (!mca_cfg.monarch_timeout)
22270 goto out;
22271@@ -1674,7 +1675,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
22272 }
22273
22274 /* Call the installed machine check handler for this CPU setup. */
22275-void (*machine_check_vector)(struct pt_regs *, long error_code) =
22276+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
22277 unexpected_machine_check;
22278
22279 /*
22280@@ -1697,7 +1698,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
22281 return;
22282 }
22283
22284+ pax_open_kernel();
22285 machine_check_vector = do_machine_check;
22286+ pax_close_kernel();
22287
22288 __mcheck_cpu_init_generic();
22289 __mcheck_cpu_init_vendor(c);
22290@@ -1711,7 +1714,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
22291 */
22292
22293 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
22294-static int mce_chrdev_open_count; /* #times opened */
22295+static local_t mce_chrdev_open_count; /* #times opened */
22296 static int mce_chrdev_open_exclu; /* already open exclusive? */
22297
22298 static int mce_chrdev_open(struct inode *inode, struct file *file)
22299@@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
22300 spin_lock(&mce_chrdev_state_lock);
22301
22302 if (mce_chrdev_open_exclu ||
22303- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
22304+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
22305 spin_unlock(&mce_chrdev_state_lock);
22306
22307 return -EBUSY;
22308@@ -1727,7 +1730,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
22309
22310 if (file->f_flags & O_EXCL)
22311 mce_chrdev_open_exclu = 1;
22312- mce_chrdev_open_count++;
22313+ local_inc(&mce_chrdev_open_count);
22314
22315 spin_unlock(&mce_chrdev_state_lock);
22316
22317@@ -1738,7 +1741,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
22318 {
22319 spin_lock(&mce_chrdev_state_lock);
22320
22321- mce_chrdev_open_count--;
22322+ local_dec(&mce_chrdev_open_count);
22323 mce_chrdev_open_exclu = 0;
22324
22325 spin_unlock(&mce_chrdev_state_lock);
22326@@ -2413,7 +2416,7 @@ static __init void mce_init_banks(void)
22327
22328 for (i = 0; i < mca_cfg.banks; i++) {
22329 struct mce_bank *b = &mce_banks[i];
22330- struct device_attribute *a = &b->attr;
22331+ device_attribute_no_const *a = &b->attr;
22332
22333 sysfs_attr_init(&a->attr);
22334 a->attr.name = b->attrname;
22335@@ -2520,7 +2523,7 @@ struct dentry *mce_get_debugfs_dir(void)
22336 static void mce_reset(void)
22337 {
22338 cpu_missing = 0;
22339- atomic_set(&mce_fake_paniced, 0);
22340+ atomic_set_unchecked(&mce_fake_paniced, 0);
22341 atomic_set(&mce_executing, 0);
22342 atomic_set(&mce_callin, 0);
22343 atomic_set(&global_nwo, 0);
22344diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
22345index a304298..49b6d06 100644
22346--- a/arch/x86/kernel/cpu/mcheck/p5.c
22347+++ b/arch/x86/kernel/cpu/mcheck/p5.c
22348@@ -10,6 +10,7 @@
22349 #include <asm/processor.h>
22350 #include <asm/mce.h>
22351 #include <asm/msr.h>
22352+#include <asm/pgtable.h>
22353
22354 /* By default disabled */
22355 int mce_p5_enabled __read_mostly;
22356@@ -48,7 +49,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
22357 if (!cpu_has(c, X86_FEATURE_MCE))
22358 return;
22359
22360+ pax_open_kernel();
22361 machine_check_vector = pentium_machine_check;
22362+ pax_close_kernel();
22363 /* Make sure the vector pointer is visible before we enable MCEs: */
22364 wmb();
22365
22366diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
22367index 7dc5564..1273569 100644
22368--- a/arch/x86/kernel/cpu/mcheck/winchip.c
22369+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
22370@@ -9,6 +9,7 @@
22371 #include <asm/processor.h>
22372 #include <asm/mce.h>
22373 #include <asm/msr.h>
22374+#include <asm/pgtable.h>
22375
22376 /* Machine check handler for WinChip C6: */
22377 static void winchip_machine_check(struct pt_regs *regs, long error_code)
22378@@ -22,7 +23,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
22379 {
22380 u32 lo, hi;
22381
22382+ pax_open_kernel();
22383 machine_check_vector = winchip_machine_check;
22384+ pax_close_kernel();
22385 /* Make sure the vector pointer is visible before we enable MCEs: */
22386 wmb();
22387
22388diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
22389index dd9d619..86e1d81 100644
22390--- a/arch/x86/kernel/cpu/microcode/core.c
22391+++ b/arch/x86/kernel/cpu/microcode/core.c
22392@@ -516,7 +516,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
22393 return NOTIFY_OK;
22394 }
22395
22396-static struct notifier_block __refdata mc_cpu_notifier = {
22397+static struct notifier_block mc_cpu_notifier = {
22398 .notifier_call = mc_cpu_callback,
22399 };
22400
22401diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
22402index a276fa7..e66810f 100644
22403--- a/arch/x86/kernel/cpu/microcode/intel.c
22404+++ b/arch/x86/kernel/cpu/microcode/intel.c
22405@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
22406
22407 static int get_ucode_user(void *to, const void *from, size_t n)
22408 {
22409- return copy_from_user(to, from, n);
22410+ return copy_from_user(to, (const void __force_user *)from, n);
22411 }
22412
22413 static enum ucode_state
22414 request_microcode_user(int cpu, const void __user *buf, size_t size)
22415 {
22416- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
22417+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
22418 }
22419
22420 static void microcode_fini_cpu(int cpu)
22421diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
22422index f961de9..8a9d332 100644
22423--- a/arch/x86/kernel/cpu/mtrr/main.c
22424+++ b/arch/x86/kernel/cpu/mtrr/main.c
22425@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
22426 u64 size_or_mask, size_and_mask;
22427 static bool mtrr_aps_delayed_init;
22428
22429-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
22430+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
22431
22432 const struct mtrr_ops *mtrr_if;
22433
22434diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
22435index df5e41f..816c719 100644
22436--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
22437+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
22438@@ -25,7 +25,7 @@ struct mtrr_ops {
22439 int (*validate_add_page)(unsigned long base, unsigned long size,
22440 unsigned int type);
22441 int (*have_wrcomb)(void);
22442-};
22443+} __do_const;
22444
22445 extern int generic_get_free_region(unsigned long base, unsigned long size,
22446 int replace_reg);
22447diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
22448index 2879ecd..bb8c80b 100644
22449--- a/arch/x86/kernel/cpu/perf_event.c
22450+++ b/arch/x86/kernel/cpu/perf_event.c
22451@@ -1372,7 +1372,7 @@ static void __init pmu_check_apic(void)
22452
22453 }
22454
22455-static struct attribute_group x86_pmu_format_group = {
22456+static attribute_group_no_const x86_pmu_format_group = {
22457 .name = "format",
22458 .attrs = NULL,
22459 };
22460@@ -1471,7 +1471,7 @@ static struct attribute *events_attr[] = {
22461 NULL,
22462 };
22463
22464-static struct attribute_group x86_pmu_events_group = {
22465+static attribute_group_no_const x86_pmu_events_group = {
22466 .name = "events",
22467 .attrs = events_attr,
22468 };
22469@@ -1995,7 +1995,7 @@ static unsigned long get_segment_base(unsigned int segment)
22470 if (idx > GDT_ENTRIES)
22471 return 0;
22472
22473- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
22474+ desc = get_cpu_gdt_table(smp_processor_id());
22475 }
22476
22477 return get_desc_base(desc + idx);
22478@@ -2085,7 +2085,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
22479 break;
22480
22481 perf_callchain_store(entry, frame.return_address);
22482- fp = frame.next_frame;
22483+ fp = (const void __force_user *)frame.next_frame;
22484 }
22485 }
22486
22487diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22488index 639d128..e92d7e5 100644
22489--- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22490+++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
22491@@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
22492 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
22493 {
22494 struct attribute **attrs;
22495- struct attribute_group *attr_group;
22496+ attribute_group_no_const *attr_group;
22497 int i = 0, j;
22498
22499 while (amd_iommu_v2_event_descs[i].attr.attr.name)
22500diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
22501index 2502d0d..e5cc05c 100644
22502--- a/arch/x86/kernel/cpu/perf_event_intel.c
22503+++ b/arch/x86/kernel/cpu/perf_event_intel.c
22504@@ -2353,10 +2353,10 @@ __init int intel_pmu_init(void)
22505 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
22506
22507 if (boot_cpu_has(X86_FEATURE_PDCM)) {
22508- u64 capabilities;
22509+ u64 capabilities = x86_pmu.intel_cap.capabilities;
22510
22511- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
22512- x86_pmu.intel_cap.capabilities = capabilities;
22513+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
22514+ x86_pmu.intel_cap.capabilities = capabilities;
22515 }
22516
22517 intel_ds_init();
22518diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22519index 619f769..d510008 100644
22520--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22521+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
22522@@ -449,7 +449,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
22523 NULL,
22524 };
22525
22526-static struct attribute_group rapl_pmu_events_group = {
22527+static attribute_group_no_const rapl_pmu_events_group __read_only = {
22528 .name = "events",
22529 .attrs = NULL, /* patched at runtime */
22530 };
22531diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22532index 0939f86..69730af 100644
22533--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22534+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
22535@@ -3691,7 +3691,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
22536 static int __init uncore_type_init(struct intel_uncore_type *type)
22537 {
22538 struct intel_uncore_pmu *pmus;
22539- struct attribute_group *attr_group;
22540+ attribute_group_no_const *attr_group;
22541 struct attribute **attrs;
22542 int i, j;
22543
22544diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22545index 90236f0..54cb20d 100644
22546--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22547+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
22548@@ -503,7 +503,7 @@ struct intel_uncore_box {
22549 struct uncore_event_desc {
22550 struct kobj_attribute attr;
22551 const char *config;
22552-};
22553+} __do_const;
22554
22555 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
22556 { \
22557diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
22558index 3225ae6c..ee3c6db 100644
22559--- a/arch/x86/kernel/cpuid.c
22560+++ b/arch/x86/kernel/cpuid.c
22561@@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
22562 return notifier_from_errno(err);
22563 }
22564
22565-static struct notifier_block __refdata cpuid_class_cpu_notifier =
22566+static struct notifier_block cpuid_class_cpu_notifier =
22567 {
22568 .notifier_call = cpuid_class_cpu_callback,
22569 };
22570diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
22571index a618fcd..200e95b 100644
22572--- a/arch/x86/kernel/crash.c
22573+++ b/arch/x86/kernel/crash.c
22574@@ -104,7 +104,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
22575 #ifdef CONFIG_X86_32
22576 struct pt_regs fixed_regs;
22577
22578- if (!user_mode_vm(regs)) {
22579+ if (!user_mode(regs)) {
22580 crash_fixup_ss_esp(&fixed_regs, regs);
22581 regs = &fixed_regs;
22582 }
22583diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
22584index afa64ad..dce67dd 100644
22585--- a/arch/x86/kernel/crash_dump_64.c
22586+++ b/arch/x86/kernel/crash_dump_64.c
22587@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
22588 return -ENOMEM;
22589
22590 if (userbuf) {
22591- if (copy_to_user(buf, vaddr + offset, csize)) {
22592+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
22593 iounmap(vaddr);
22594 return -EFAULT;
22595 }
22596diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
22597index f6dfd93..892ade4 100644
22598--- a/arch/x86/kernel/doublefault.c
22599+++ b/arch/x86/kernel/doublefault.c
22600@@ -12,7 +12,7 @@
22601
22602 #define DOUBLEFAULT_STACKSIZE (1024)
22603 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
22604-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
22605+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
22606
22607 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
22608
22609@@ -22,7 +22,7 @@ static void doublefault_fn(void)
22610 unsigned long gdt, tss;
22611
22612 native_store_gdt(&gdt_desc);
22613- gdt = gdt_desc.address;
22614+ gdt = (unsigned long)gdt_desc.address;
22615
22616 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
22617
22618@@ -59,10 +59,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
22619 /* 0x2 bit is always set */
22620 .flags = X86_EFLAGS_SF | 0x2,
22621 .sp = STACK_START,
22622- .es = __USER_DS,
22623+ .es = __KERNEL_DS,
22624 .cs = __KERNEL_CS,
22625 .ss = __KERNEL_DS,
22626- .ds = __USER_DS,
22627+ .ds = __KERNEL_DS,
22628 .fs = __KERNEL_PERCPU,
22629
22630 .__cr3 = __pa_nodebug(swapper_pg_dir),
22631diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
22632index b74ebc7..2c95874 100644
22633--- a/arch/x86/kernel/dumpstack.c
22634+++ b/arch/x86/kernel/dumpstack.c
22635@@ -2,6 +2,9 @@
22636 * Copyright (C) 1991, 1992 Linus Torvalds
22637 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
22638 */
22639+#ifdef CONFIG_GRKERNSEC_HIDESYM
22640+#define __INCLUDED_BY_HIDESYM 1
22641+#endif
22642 #include <linux/kallsyms.h>
22643 #include <linux/kprobes.h>
22644 #include <linux/uaccess.h>
22645@@ -33,23 +36,21 @@ static void printk_stack_address(unsigned long address, int reliable)
22646
22647 void printk_address(unsigned long address)
22648 {
22649- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
22650+ pr_cont(" [<%p>] %pA\n", (void *)address, (void *)address);
22651 }
22652
22653 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
22654 static void
22655 print_ftrace_graph_addr(unsigned long addr, void *data,
22656 const struct stacktrace_ops *ops,
22657- struct thread_info *tinfo, int *graph)
22658+ struct task_struct *task, int *graph)
22659 {
22660- struct task_struct *task;
22661 unsigned long ret_addr;
22662 int index;
22663
22664 if (addr != (unsigned long)return_to_handler)
22665 return;
22666
22667- task = tinfo->task;
22668 index = task->curr_ret_stack;
22669
22670 if (!task->ret_stack || index < *graph)
22671@@ -66,7 +67,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22672 static inline void
22673 print_ftrace_graph_addr(unsigned long addr, void *data,
22674 const struct stacktrace_ops *ops,
22675- struct thread_info *tinfo, int *graph)
22676+ struct task_struct *task, int *graph)
22677 { }
22678 #endif
22679
22680@@ -77,10 +78,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
22681 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
22682 */
22683
22684-static inline int valid_stack_ptr(struct thread_info *tinfo,
22685- void *p, unsigned int size, void *end)
22686+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
22687 {
22688- void *t = tinfo;
22689 if (end) {
22690 if (p < end && p >= (end-THREAD_SIZE))
22691 return 1;
22692@@ -91,14 +90,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
22693 }
22694
22695 unsigned long
22696-print_context_stack(struct thread_info *tinfo,
22697+print_context_stack(struct task_struct *task, void *stack_start,
22698 unsigned long *stack, unsigned long bp,
22699 const struct stacktrace_ops *ops, void *data,
22700 unsigned long *end, int *graph)
22701 {
22702 struct stack_frame *frame = (struct stack_frame *)bp;
22703
22704- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
22705+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
22706 unsigned long addr;
22707
22708 addr = *stack;
22709@@ -110,7 +109,7 @@ print_context_stack(struct thread_info *tinfo,
22710 } else {
22711 ops->address(data, addr, 0);
22712 }
22713- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22714+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22715 }
22716 stack++;
22717 }
22718@@ -119,7 +118,7 @@ print_context_stack(struct thread_info *tinfo,
22719 EXPORT_SYMBOL_GPL(print_context_stack);
22720
22721 unsigned long
22722-print_context_stack_bp(struct thread_info *tinfo,
22723+print_context_stack_bp(struct task_struct *task, void *stack_start,
22724 unsigned long *stack, unsigned long bp,
22725 const struct stacktrace_ops *ops, void *data,
22726 unsigned long *end, int *graph)
22727@@ -127,7 +126,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22728 struct stack_frame *frame = (struct stack_frame *)bp;
22729 unsigned long *ret_addr = &frame->return_address;
22730
22731- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
22732+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
22733 unsigned long addr = *ret_addr;
22734
22735 if (!__kernel_text_address(addr))
22736@@ -136,7 +135,7 @@ print_context_stack_bp(struct thread_info *tinfo,
22737 ops->address(data, addr, 1);
22738 frame = frame->next_frame;
22739 ret_addr = &frame->return_address;
22740- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
22741+ print_ftrace_graph_addr(addr, data, ops, task, graph);
22742 }
22743
22744 return (unsigned long)frame;
22745@@ -155,7 +154,7 @@ static int print_trace_stack(void *data, char *name)
22746 static void print_trace_address(void *data, unsigned long addr, int reliable)
22747 {
22748 touch_nmi_watchdog();
22749- printk(data);
22750+ printk("%s", (char *)data);
22751 printk_stack_address(addr, reliable);
22752 }
22753
22754@@ -225,6 +224,8 @@ unsigned long oops_begin(void)
22755 EXPORT_SYMBOL_GPL(oops_begin);
22756 NOKPROBE_SYMBOL(oops_begin);
22757
22758+extern void gr_handle_kernel_exploit(void);
22759+
22760 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22761 {
22762 if (regs && kexec_should_crash(current))
22763@@ -246,7 +247,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
22764 panic("Fatal exception in interrupt");
22765 if (panic_on_oops)
22766 panic("Fatal exception");
22767- do_exit(signr);
22768+
22769+ gr_handle_kernel_exploit();
22770+
22771+ do_group_exit(signr);
22772 }
22773 NOKPROBE_SYMBOL(oops_end);
22774
22775@@ -275,7 +279,7 @@ int __die(const char *str, struct pt_regs *regs, long err)
22776 print_modules();
22777 show_regs(regs);
22778 #ifdef CONFIG_X86_32
22779- if (user_mode_vm(regs)) {
22780+ if (user_mode(regs)) {
22781 sp = regs->sp;
22782 ss = regs->ss & 0xffff;
22783 } else {
22784@@ -304,7 +308,7 @@ void die(const char *str, struct pt_regs *regs, long err)
22785 unsigned long flags = oops_begin();
22786 int sig = SIGSEGV;
22787
22788- if (!user_mode_vm(regs))
22789+ if (!user_mode(regs))
22790 report_bug(regs->ip, regs);
22791
22792 if (__die(str, regs, err))
22793diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
22794index 5abd4cd..c65733b 100644
22795--- a/arch/x86/kernel/dumpstack_32.c
22796+++ b/arch/x86/kernel/dumpstack_32.c
22797@@ -61,15 +61,14 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22798 bp = stack_frame(task, regs);
22799
22800 for (;;) {
22801- struct thread_info *context;
22802+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22803 void *end_stack;
22804
22805 end_stack = is_hardirq_stack(stack, cpu);
22806 if (!end_stack)
22807 end_stack = is_softirq_stack(stack, cpu);
22808
22809- context = task_thread_info(task);
22810- bp = ops->walk_stack(context, stack, bp, ops, data,
22811+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data,
22812 end_stack, &graph);
22813
22814 /* Stop if not on irq stack */
22815@@ -123,27 +122,28 @@ void show_regs(struct pt_regs *regs)
22816 int i;
22817
22818 show_regs_print_info(KERN_EMERG);
22819- __show_regs(regs, !user_mode_vm(regs));
22820+ __show_regs(regs, !user_mode(regs));
22821
22822 /*
22823 * When in-kernel, we also print out the stack and code at the
22824 * time of the fault..
22825 */
22826- if (!user_mode_vm(regs)) {
22827+ if (!user_mode(regs)) {
22828 unsigned int code_prologue = code_bytes * 43 / 64;
22829 unsigned int code_len = code_bytes;
22830 unsigned char c;
22831 u8 *ip;
22832+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
22833
22834 pr_emerg("Stack:\n");
22835 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
22836
22837 pr_emerg("Code:");
22838
22839- ip = (u8 *)regs->ip - code_prologue;
22840+ ip = (u8 *)regs->ip - code_prologue + cs_base;
22841 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
22842 /* try starting at IP */
22843- ip = (u8 *)regs->ip;
22844+ ip = (u8 *)regs->ip + cs_base;
22845 code_len = code_len - code_prologue + 1;
22846 }
22847 for (i = 0; i < code_len; i++, ip++) {
22848@@ -152,7 +152,7 @@ void show_regs(struct pt_regs *regs)
22849 pr_cont(" Bad EIP value.");
22850 break;
22851 }
22852- if (ip == (u8 *)regs->ip)
22853+ if (ip == (u8 *)regs->ip + cs_base)
22854 pr_cont(" <%02x>", c);
22855 else
22856 pr_cont(" %02x", c);
22857@@ -165,6 +165,7 @@ int is_valid_bugaddr(unsigned long ip)
22858 {
22859 unsigned short ud2;
22860
22861+ ip = ktla_ktva(ip);
22862 if (ip < PAGE_OFFSET)
22863 return 0;
22864 if (probe_kernel_address((unsigned short *)ip, ud2))
22865@@ -172,3 +173,15 @@ int is_valid_bugaddr(unsigned long ip)
22866
22867 return ud2 == 0x0b0f;
22868 }
22869+
22870+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22871+void pax_check_alloca(unsigned long size)
22872+{
22873+ unsigned long sp = (unsigned long)&sp, stack_left;
22874+
22875+ /* all kernel stacks are of the same size */
22876+ stack_left = sp & (THREAD_SIZE - 1);
22877+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22878+}
22879+EXPORT_SYMBOL(pax_check_alloca);
22880+#endif
22881diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
22882index ff86f19..a20c62c 100644
22883--- a/arch/x86/kernel/dumpstack_64.c
22884+++ b/arch/x86/kernel/dumpstack_64.c
22885@@ -153,12 +153,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22886 const struct stacktrace_ops *ops, void *data)
22887 {
22888 const unsigned cpu = get_cpu();
22889- struct thread_info *tinfo;
22890 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
22891 unsigned long dummy;
22892 unsigned used = 0;
22893 int graph = 0;
22894 int done = 0;
22895+ void *stack_start;
22896
22897 if (!task)
22898 task = current;
22899@@ -179,7 +179,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22900 * current stack address. If the stacks consist of nested
22901 * exceptions
22902 */
22903- tinfo = task_thread_info(task);
22904 while (!done) {
22905 unsigned long *stack_end;
22906 enum stack_type stype;
22907@@ -202,7 +201,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22908 if (ops->stack(data, id) < 0)
22909 break;
22910
22911- bp = ops->walk_stack(tinfo, stack, bp, ops,
22912+ bp = ops->walk_stack(task, stack_end - EXCEPTION_STKSZ, stack, bp, ops,
22913 data, stack_end, &graph);
22914 ops->stack(data, "<EOE>");
22915 /*
22916@@ -210,6 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22917 * second-to-last pointer (index -2 to end) in the
22918 * exception stack:
22919 */
22920+ if ((u16)stack_end[-1] != __KERNEL_DS)
22921+ goto out;
22922 stack = (unsigned long *) stack_end[-2];
22923 done = 0;
22924 break;
22925@@ -218,7 +219,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22926
22927 if (ops->stack(data, "IRQ") < 0)
22928 break;
22929- bp = ops->walk_stack(tinfo, stack, bp,
22930+ bp = ops->walk_stack(task, irq_stack, stack, bp,
22931 ops, data, stack_end, &graph);
22932 /*
22933 * We link to the next stack (which would be
22934@@ -240,7 +241,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
22935 /*
22936 * This handles the process stack:
22937 */
22938- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
22939+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
22940+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
22941+out:
22942 put_cpu();
22943 }
22944 EXPORT_SYMBOL(dump_trace);
22945@@ -349,3 +352,50 @@ int is_valid_bugaddr(unsigned long ip)
22946
22947 return ud2 == 0x0b0f;
22948 }
22949+
22950+#if defined(CONFIG_PAX_MEMORY_STACKLEAK) || defined(CONFIG_PAX_USERCOPY)
22951+void pax_check_alloca(unsigned long size)
22952+{
22953+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
22954+ unsigned cpu, used;
22955+ char *id;
22956+
22957+ /* check the process stack first */
22958+ stack_start = (unsigned long)task_stack_page(current);
22959+ stack_end = stack_start + THREAD_SIZE;
22960+ if (likely(stack_start <= sp && sp < stack_end)) {
22961+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
22962+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22963+ return;
22964+ }
22965+
22966+ cpu = get_cpu();
22967+
22968+ /* check the irq stacks */
22969+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
22970+ stack_start = stack_end - IRQ_STACK_SIZE;
22971+ if (stack_start <= sp && sp < stack_end) {
22972+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
22973+ put_cpu();
22974+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22975+ return;
22976+ }
22977+
22978+ /* check the exception stacks */
22979+ used = 0;
22980+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
22981+ stack_start = stack_end - EXCEPTION_STKSZ;
22982+ if (stack_end && stack_start <= sp && sp < stack_end) {
22983+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
22984+ put_cpu();
22985+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
22986+ return;
22987+ }
22988+
22989+ put_cpu();
22990+
22991+ /* unknown stack */
22992+ BUG();
22993+}
22994+EXPORT_SYMBOL(pax_check_alloca);
22995+#endif
22996diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
22997index 988c00a..4f673b6 100644
22998--- a/arch/x86/kernel/e820.c
22999+++ b/arch/x86/kernel/e820.c
23000@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
23001
23002 static void early_panic(char *msg)
23003 {
23004- early_printk(msg);
23005- panic(msg);
23006+ early_printk("%s", msg);
23007+ panic("%s", msg);
23008 }
23009
23010 static int userdef __initdata;
23011diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
23012index 01d1c18..8073693 100644
23013--- a/arch/x86/kernel/early_printk.c
23014+++ b/arch/x86/kernel/early_printk.c
23015@@ -7,6 +7,7 @@
23016 #include <linux/pci_regs.h>
23017 #include <linux/pci_ids.h>
23018 #include <linux/errno.h>
23019+#include <linux/sched.h>
23020 #include <asm/io.h>
23021 #include <asm/processor.h>
23022 #include <asm/fcntl.h>
23023diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
23024index 4b0e1df..884b67e 100644
23025--- a/arch/x86/kernel/entry_32.S
23026+++ b/arch/x86/kernel/entry_32.S
23027@@ -177,13 +177,153 @@
23028 /*CFI_REL_OFFSET gs, PT_GS*/
23029 .endm
23030 .macro SET_KERNEL_GS reg
23031+
23032+#ifdef CONFIG_CC_STACKPROTECTOR
23033 movl $(__KERNEL_STACK_CANARY), \reg
23034+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
23035+ movl $(__USER_DS), \reg
23036+#else
23037+ xorl \reg, \reg
23038+#endif
23039+
23040 movl \reg, %gs
23041 .endm
23042
23043 #endif /* CONFIG_X86_32_LAZY_GS */
23044
23045-.macro SAVE_ALL
23046+.macro pax_enter_kernel
23047+#ifdef CONFIG_PAX_KERNEXEC
23048+ call pax_enter_kernel
23049+#endif
23050+.endm
23051+
23052+.macro pax_exit_kernel
23053+#ifdef CONFIG_PAX_KERNEXEC
23054+ call pax_exit_kernel
23055+#endif
23056+.endm
23057+
23058+#ifdef CONFIG_PAX_KERNEXEC
23059+ENTRY(pax_enter_kernel)
23060+#ifdef CONFIG_PARAVIRT
23061+ pushl %eax
23062+ pushl %ecx
23063+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
23064+ mov %eax, %esi
23065+#else
23066+ mov %cr0, %esi
23067+#endif
23068+ bts $16, %esi
23069+ jnc 1f
23070+ mov %cs, %esi
23071+ cmp $__KERNEL_CS, %esi
23072+ jz 3f
23073+ ljmp $__KERNEL_CS, $3f
23074+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
23075+2:
23076+#ifdef CONFIG_PARAVIRT
23077+ mov %esi, %eax
23078+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
23079+#else
23080+ mov %esi, %cr0
23081+#endif
23082+3:
23083+#ifdef CONFIG_PARAVIRT
23084+ popl %ecx
23085+ popl %eax
23086+#endif
23087+ ret
23088+ENDPROC(pax_enter_kernel)
23089+
23090+ENTRY(pax_exit_kernel)
23091+#ifdef CONFIG_PARAVIRT
23092+ pushl %eax
23093+ pushl %ecx
23094+#endif
23095+ mov %cs, %esi
23096+ cmp $__KERNEXEC_KERNEL_CS, %esi
23097+ jnz 2f
23098+#ifdef CONFIG_PARAVIRT
23099+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
23100+ mov %eax, %esi
23101+#else
23102+ mov %cr0, %esi
23103+#endif
23104+ btr $16, %esi
23105+ ljmp $__KERNEL_CS, $1f
23106+1:
23107+#ifdef CONFIG_PARAVIRT
23108+ mov %esi, %eax
23109+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
23110+#else
23111+ mov %esi, %cr0
23112+#endif
23113+2:
23114+#ifdef CONFIG_PARAVIRT
23115+ popl %ecx
23116+ popl %eax
23117+#endif
23118+ ret
23119+ENDPROC(pax_exit_kernel)
23120+#endif
23121+
23122+ .macro pax_erase_kstack
23123+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23124+ call pax_erase_kstack
23125+#endif
23126+ .endm
23127+
23128+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
23129+/*
23130+ * ebp: thread_info
23131+ */
23132+ENTRY(pax_erase_kstack)
23133+ pushl %edi
23134+ pushl %ecx
23135+ pushl %eax
23136+
23137+ mov TI_lowest_stack(%ebp), %edi
23138+ mov $-0xBEEF, %eax
23139+ std
23140+
23141+1: mov %edi, %ecx
23142+ and $THREAD_SIZE_asm - 1, %ecx
23143+ shr $2, %ecx
23144+ repne scasl
23145+ jecxz 2f
23146+
23147+ cmp $2*16, %ecx
23148+ jc 2f
23149+
23150+ mov $2*16, %ecx
23151+ repe scasl
23152+ jecxz 2f
23153+ jne 1b
23154+
23155+2: cld
23156+ mov %esp, %ecx
23157+ sub %edi, %ecx
23158+
23159+ cmp $THREAD_SIZE_asm, %ecx
23160+ jb 3f
23161+ ud2
23162+3:
23163+
23164+ shr $2, %ecx
23165+ rep stosl
23166+
23167+ mov TI_task_thread_sp0(%ebp), %edi
23168+ sub $128, %edi
23169+ mov %edi, TI_lowest_stack(%ebp)
23170+
23171+ popl %eax
23172+ popl %ecx
23173+ popl %edi
23174+ ret
23175+ENDPROC(pax_erase_kstack)
23176+#endif
23177+
23178+.macro __SAVE_ALL _DS
23179 cld
23180 PUSH_GS
23181 pushl_cfi %fs
23182@@ -206,7 +346,7 @@
23183 CFI_REL_OFFSET ecx, 0
23184 pushl_cfi %ebx
23185 CFI_REL_OFFSET ebx, 0
23186- movl $(__USER_DS), %edx
23187+ movl $\_DS, %edx
23188 movl %edx, %ds
23189 movl %edx, %es
23190 movl $(__KERNEL_PERCPU), %edx
23191@@ -214,6 +354,15 @@
23192 SET_KERNEL_GS %edx
23193 .endm
23194
23195+.macro SAVE_ALL
23196+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23197+ __SAVE_ALL __KERNEL_DS
23198+ pax_enter_kernel
23199+#else
23200+ __SAVE_ALL __USER_DS
23201+#endif
23202+.endm
23203+
23204 .macro RESTORE_INT_REGS
23205 popl_cfi %ebx
23206 CFI_RESTORE ebx
23207@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
23208 popfl_cfi
23209 jmp syscall_exit
23210 CFI_ENDPROC
23211-END(ret_from_fork)
23212+ENDPROC(ret_from_fork)
23213
23214 ENTRY(ret_from_kernel_thread)
23215 CFI_STARTPROC
23216@@ -340,7 +489,15 @@ ret_from_intr:
23217 andl $SEGMENT_RPL_MASK, %eax
23218 #endif
23219 cmpl $USER_RPL, %eax
23220+
23221+#ifdef CONFIG_PAX_KERNEXEC
23222+ jae resume_userspace
23223+
23224+ pax_exit_kernel
23225+ jmp resume_kernel
23226+#else
23227 jb resume_kernel # not returning to v8086 or userspace
23228+#endif
23229
23230 ENTRY(resume_userspace)
23231 LOCKDEP_SYS_EXIT
23232@@ -352,8 +509,8 @@ ENTRY(resume_userspace)
23233 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
23234 # int/exception return?
23235 jne work_pending
23236- jmp restore_all
23237-END(ret_from_exception)
23238+ jmp restore_all_pax
23239+ENDPROC(ret_from_exception)
23240
23241 #ifdef CONFIG_PREEMPT
23242 ENTRY(resume_kernel)
23243@@ -365,7 +522,7 @@ need_resched:
23244 jz restore_all
23245 call preempt_schedule_irq
23246 jmp need_resched
23247-END(resume_kernel)
23248+ENDPROC(resume_kernel)
23249 #endif
23250 CFI_ENDPROC
23251
23252@@ -395,30 +552,45 @@ sysenter_past_esp:
23253 /*CFI_REL_OFFSET cs, 0*/
23254 /*
23255 * Push current_thread_info()->sysenter_return to the stack.
23256- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
23257- * pushed above; +8 corresponds to copy_thread's esp0 setting.
23258 */
23259- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
23260+ pushl_cfi $0
23261 CFI_REL_OFFSET eip, 0
23262
23263 pushl_cfi %eax
23264 SAVE_ALL
23265+ GET_THREAD_INFO(%ebp)
23266+ movl TI_sysenter_return(%ebp),%ebp
23267+ movl %ebp,PT_EIP(%esp)
23268 ENABLE_INTERRUPTS(CLBR_NONE)
23269
23270 /*
23271 * Load the potential sixth argument from user stack.
23272 * Careful about security.
23273 */
23274+ movl PT_OLDESP(%esp),%ebp
23275+
23276+#ifdef CONFIG_PAX_MEMORY_UDEREF
23277+ mov PT_OLDSS(%esp),%ds
23278+1: movl %ds:(%ebp),%ebp
23279+ push %ss
23280+ pop %ds
23281+#else
23282 cmpl $__PAGE_OFFSET-3,%ebp
23283 jae syscall_fault
23284 ASM_STAC
23285 1: movl (%ebp),%ebp
23286 ASM_CLAC
23287+#endif
23288+
23289 movl %ebp,PT_EBP(%esp)
23290 _ASM_EXTABLE(1b,syscall_fault)
23291
23292 GET_THREAD_INFO(%ebp)
23293
23294+#ifdef CONFIG_PAX_RANDKSTACK
23295+ pax_erase_kstack
23296+#endif
23297+
23298 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
23299 jnz sysenter_audit
23300 sysenter_do_call:
23301@@ -434,12 +606,24 @@ sysenter_after_call:
23302 testl $_TIF_ALLWORK_MASK, %ecx
23303 jne sysexit_audit
23304 sysenter_exit:
23305+
23306+#ifdef CONFIG_PAX_RANDKSTACK
23307+ pushl_cfi %eax
23308+ movl %esp, %eax
23309+ call pax_randomize_kstack
23310+ popl_cfi %eax
23311+#endif
23312+
23313+ pax_erase_kstack
23314+
23315 /* if something modifies registers it must also disable sysexit */
23316 movl PT_EIP(%esp), %edx
23317 movl PT_OLDESP(%esp), %ecx
23318 xorl %ebp,%ebp
23319 TRACE_IRQS_ON
23320 1: mov PT_FS(%esp), %fs
23321+2: mov PT_DS(%esp), %ds
23322+3: mov PT_ES(%esp), %es
23323 PTGS_TO_GS
23324 ENABLE_INTERRUPTS_SYSEXIT
23325
23326@@ -456,6 +640,9 @@ sysenter_audit:
23327 movl %eax,%edx /* 2nd arg: syscall number */
23328 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
23329 call __audit_syscall_entry
23330+
23331+ pax_erase_kstack
23332+
23333 pushl_cfi %ebx
23334 movl PT_EAX(%esp),%eax /* reload syscall number */
23335 jmp sysenter_do_call
23336@@ -481,10 +668,16 @@ sysexit_audit:
23337
23338 CFI_ENDPROC
23339 .pushsection .fixup,"ax"
23340-2: movl $0,PT_FS(%esp)
23341+4: movl $0,PT_FS(%esp)
23342+ jmp 1b
23343+5: movl $0,PT_DS(%esp)
23344+ jmp 1b
23345+6: movl $0,PT_ES(%esp)
23346 jmp 1b
23347 .popsection
23348- _ASM_EXTABLE(1b,2b)
23349+ _ASM_EXTABLE(1b,4b)
23350+ _ASM_EXTABLE(2b,5b)
23351+ _ASM_EXTABLE(3b,6b)
23352 PTGS_TO_GS_EX
23353 ENDPROC(ia32_sysenter_target)
23354
23355@@ -495,6 +688,11 @@ ENTRY(system_call)
23356 pushl_cfi %eax # save orig_eax
23357 SAVE_ALL
23358 GET_THREAD_INFO(%ebp)
23359+
23360+#ifdef CONFIG_PAX_RANDKSTACK
23361+ pax_erase_kstack
23362+#endif
23363+
23364 # system call tracing in operation / emulation
23365 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
23366 jnz syscall_trace_entry
23367@@ -514,6 +712,15 @@ syscall_exit:
23368 testl $_TIF_ALLWORK_MASK, %ecx # current->work
23369 jne syscall_exit_work
23370
23371+restore_all_pax:
23372+
23373+#ifdef CONFIG_PAX_RANDKSTACK
23374+ movl %esp, %eax
23375+ call pax_randomize_kstack
23376+#endif
23377+
23378+ pax_erase_kstack
23379+
23380 restore_all:
23381 TRACE_IRQS_IRET
23382 restore_all_notrace:
23383@@ -568,14 +775,34 @@ ldt_ss:
23384 * compensating for the offset by changing to the ESPFIX segment with
23385 * a base address that matches for the difference.
23386 */
23387-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
23388+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
23389 mov %esp, %edx /* load kernel esp */
23390 mov PT_OLDESP(%esp), %eax /* load userspace esp */
23391 mov %dx, %ax /* eax: new kernel esp */
23392 sub %eax, %edx /* offset (low word is 0) */
23393+#ifdef CONFIG_SMP
23394+ movl PER_CPU_VAR(cpu_number), %ebx
23395+ shll $PAGE_SHIFT_asm, %ebx
23396+ addl $cpu_gdt_table, %ebx
23397+#else
23398+ movl $cpu_gdt_table, %ebx
23399+#endif
23400 shr $16, %edx
23401- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
23402- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
23403+
23404+#ifdef CONFIG_PAX_KERNEXEC
23405+ mov %cr0, %esi
23406+ btr $16, %esi
23407+ mov %esi, %cr0
23408+#endif
23409+
23410+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
23411+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
23412+
23413+#ifdef CONFIG_PAX_KERNEXEC
23414+ bts $16, %esi
23415+ mov %esi, %cr0
23416+#endif
23417+
23418 pushl_cfi $__ESPFIX_SS
23419 pushl_cfi %eax /* new kernel esp */
23420 /* Disable interrupts, but do not irqtrace this section: we
23421@@ -605,20 +832,18 @@ work_resched:
23422 movl TI_flags(%ebp), %ecx
23423 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
23424 # than syscall tracing?
23425- jz restore_all
23426+ jz restore_all_pax
23427 testb $_TIF_NEED_RESCHED, %cl
23428 jnz work_resched
23429
23430 work_notifysig: # deal with pending signals and
23431 # notify-resume requests
23432+ movl %esp, %eax
23433 #ifdef CONFIG_VM86
23434 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
23435- movl %esp, %eax
23436 jne work_notifysig_v86 # returning to kernel-space or
23437 # vm86-space
23438 1:
23439-#else
23440- movl %esp, %eax
23441 #endif
23442 TRACE_IRQS_ON
23443 ENABLE_INTERRUPTS(CLBR_NONE)
23444@@ -639,7 +864,7 @@ work_notifysig_v86:
23445 movl %eax, %esp
23446 jmp 1b
23447 #endif
23448-END(work_pending)
23449+ENDPROC(work_pending)
23450
23451 # perform syscall exit tracing
23452 ALIGN
23453@@ -647,11 +872,14 @@ syscall_trace_entry:
23454 movl $-ENOSYS,PT_EAX(%esp)
23455 movl %esp, %eax
23456 call syscall_trace_enter
23457+
23458+ pax_erase_kstack
23459+
23460 /* What it returned is what we'll actually use. */
23461 cmpl $(NR_syscalls), %eax
23462 jnae syscall_call
23463 jmp syscall_exit
23464-END(syscall_trace_entry)
23465+ENDPROC(syscall_trace_entry)
23466
23467 # perform syscall exit tracing
23468 ALIGN
23469@@ -664,26 +892,30 @@ syscall_exit_work:
23470 movl %esp, %eax
23471 call syscall_trace_leave
23472 jmp resume_userspace
23473-END(syscall_exit_work)
23474+ENDPROC(syscall_exit_work)
23475 CFI_ENDPROC
23476
23477 RING0_INT_FRAME # can't unwind into user space anyway
23478 syscall_fault:
23479+#ifdef CONFIG_PAX_MEMORY_UDEREF
23480+ push %ss
23481+ pop %ds
23482+#endif
23483 ASM_CLAC
23484 GET_THREAD_INFO(%ebp)
23485 movl $-EFAULT,PT_EAX(%esp)
23486 jmp resume_userspace
23487-END(syscall_fault)
23488+ENDPROC(syscall_fault)
23489
23490 syscall_badsys:
23491 movl $-ENOSYS,%eax
23492 jmp syscall_after_call
23493-END(syscall_badsys)
23494+ENDPROC(syscall_badsys)
23495
23496 sysenter_badsys:
23497 movl $-ENOSYS,%eax
23498 jmp sysenter_after_call
23499-END(sysenter_badsys)
23500+ENDPROC(sysenter_badsys)
23501 CFI_ENDPROC
23502
23503 .macro FIXUP_ESPFIX_STACK
23504@@ -696,8 +928,15 @@ END(sysenter_badsys)
23505 */
23506 #ifdef CONFIG_X86_ESPFIX32
23507 /* fixup the stack */
23508- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
23509- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
23510+#ifdef CONFIG_SMP
23511+ movl PER_CPU_VAR(cpu_number), %ebx
23512+ shll $PAGE_SHIFT_asm, %ebx
23513+ addl $cpu_gdt_table, %ebx
23514+#else
23515+ movl $cpu_gdt_table, %ebx
23516+#endif
23517+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
23518+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
23519 shl $16, %eax
23520 addl %esp, %eax /* the adjusted stack pointer */
23521 pushl_cfi $__KERNEL_DS
23522@@ -753,7 +992,7 @@ vector=vector+1
23523 .endr
23524 2: jmp common_interrupt
23525 .endr
23526-END(irq_entries_start)
23527+ENDPROC(irq_entries_start)
23528
23529 .previous
23530 END(interrupt)
23531@@ -810,7 +1049,7 @@ ENTRY(coprocessor_error)
23532 pushl_cfi $do_coprocessor_error
23533 jmp error_code
23534 CFI_ENDPROC
23535-END(coprocessor_error)
23536+ENDPROC(coprocessor_error)
23537
23538 ENTRY(simd_coprocessor_error)
23539 RING0_INT_FRAME
23540@@ -823,7 +1062,7 @@ ENTRY(simd_coprocessor_error)
23541 .section .altinstructions,"a"
23542 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
23543 .previous
23544-.section .altinstr_replacement,"ax"
23545+.section .altinstr_replacement,"a"
23546 663: pushl $do_simd_coprocessor_error
23547 664:
23548 .previous
23549@@ -832,7 +1071,7 @@ ENTRY(simd_coprocessor_error)
23550 #endif
23551 jmp error_code
23552 CFI_ENDPROC
23553-END(simd_coprocessor_error)
23554+ENDPROC(simd_coprocessor_error)
23555
23556 ENTRY(device_not_available)
23557 RING0_INT_FRAME
23558@@ -841,18 +1080,18 @@ ENTRY(device_not_available)
23559 pushl_cfi $do_device_not_available
23560 jmp error_code
23561 CFI_ENDPROC
23562-END(device_not_available)
23563+ENDPROC(device_not_available)
23564
23565 #ifdef CONFIG_PARAVIRT
23566 ENTRY(native_iret)
23567 iret
23568 _ASM_EXTABLE(native_iret, iret_exc)
23569-END(native_iret)
23570+ENDPROC(native_iret)
23571
23572 ENTRY(native_irq_enable_sysexit)
23573 sti
23574 sysexit
23575-END(native_irq_enable_sysexit)
23576+ENDPROC(native_irq_enable_sysexit)
23577 #endif
23578
23579 ENTRY(overflow)
23580@@ -862,7 +1101,7 @@ ENTRY(overflow)
23581 pushl_cfi $do_overflow
23582 jmp error_code
23583 CFI_ENDPROC
23584-END(overflow)
23585+ENDPROC(overflow)
23586
23587 ENTRY(bounds)
23588 RING0_INT_FRAME
23589@@ -871,7 +1110,7 @@ ENTRY(bounds)
23590 pushl_cfi $do_bounds
23591 jmp error_code
23592 CFI_ENDPROC
23593-END(bounds)
23594+ENDPROC(bounds)
23595
23596 ENTRY(invalid_op)
23597 RING0_INT_FRAME
23598@@ -880,7 +1119,7 @@ ENTRY(invalid_op)
23599 pushl_cfi $do_invalid_op
23600 jmp error_code
23601 CFI_ENDPROC
23602-END(invalid_op)
23603+ENDPROC(invalid_op)
23604
23605 ENTRY(coprocessor_segment_overrun)
23606 RING0_INT_FRAME
23607@@ -889,7 +1128,7 @@ ENTRY(coprocessor_segment_overrun)
23608 pushl_cfi $do_coprocessor_segment_overrun
23609 jmp error_code
23610 CFI_ENDPROC
23611-END(coprocessor_segment_overrun)
23612+ENDPROC(coprocessor_segment_overrun)
23613
23614 ENTRY(invalid_TSS)
23615 RING0_EC_FRAME
23616@@ -897,7 +1136,7 @@ ENTRY(invalid_TSS)
23617 pushl_cfi $do_invalid_TSS
23618 jmp error_code
23619 CFI_ENDPROC
23620-END(invalid_TSS)
23621+ENDPROC(invalid_TSS)
23622
23623 ENTRY(segment_not_present)
23624 RING0_EC_FRAME
23625@@ -905,7 +1144,7 @@ ENTRY(segment_not_present)
23626 pushl_cfi $do_segment_not_present
23627 jmp error_code
23628 CFI_ENDPROC
23629-END(segment_not_present)
23630+ENDPROC(segment_not_present)
23631
23632 ENTRY(stack_segment)
23633 RING0_EC_FRAME
23634@@ -913,7 +1152,7 @@ ENTRY(stack_segment)
23635 pushl_cfi $do_stack_segment
23636 jmp error_code
23637 CFI_ENDPROC
23638-END(stack_segment)
23639+ENDPROC(stack_segment)
23640
23641 ENTRY(alignment_check)
23642 RING0_EC_FRAME
23643@@ -921,7 +1160,7 @@ ENTRY(alignment_check)
23644 pushl_cfi $do_alignment_check
23645 jmp error_code
23646 CFI_ENDPROC
23647-END(alignment_check)
23648+ENDPROC(alignment_check)
23649
23650 ENTRY(divide_error)
23651 RING0_INT_FRAME
23652@@ -930,7 +1169,7 @@ ENTRY(divide_error)
23653 pushl_cfi $do_divide_error
23654 jmp error_code
23655 CFI_ENDPROC
23656-END(divide_error)
23657+ENDPROC(divide_error)
23658
23659 #ifdef CONFIG_X86_MCE
23660 ENTRY(machine_check)
23661@@ -940,7 +1179,7 @@ ENTRY(machine_check)
23662 pushl_cfi machine_check_vector
23663 jmp error_code
23664 CFI_ENDPROC
23665-END(machine_check)
23666+ENDPROC(machine_check)
23667 #endif
23668
23669 ENTRY(spurious_interrupt_bug)
23670@@ -950,7 +1189,7 @@ ENTRY(spurious_interrupt_bug)
23671 pushl_cfi $do_spurious_interrupt_bug
23672 jmp error_code
23673 CFI_ENDPROC
23674-END(spurious_interrupt_bug)
23675+ENDPROC(spurious_interrupt_bug)
23676
23677 #ifdef CONFIG_XEN
23678 /* Xen doesn't set %esp to be precisely what the normal sysenter
23679@@ -1056,7 +1295,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
23680
23681 ENTRY(mcount)
23682 ret
23683-END(mcount)
23684+ENDPROC(mcount)
23685
23686 ENTRY(ftrace_caller)
23687 pushl %eax
23688@@ -1086,7 +1325,7 @@ ftrace_graph_call:
23689 .globl ftrace_stub
23690 ftrace_stub:
23691 ret
23692-END(ftrace_caller)
23693+ENDPROC(ftrace_caller)
23694
23695 ENTRY(ftrace_regs_caller)
23696 pushf /* push flags before compare (in cs location) */
23697@@ -1184,7 +1423,7 @@ trace:
23698 popl %ecx
23699 popl %eax
23700 jmp ftrace_stub
23701-END(mcount)
23702+ENDPROC(mcount)
23703 #endif /* CONFIG_DYNAMIC_FTRACE */
23704 #endif /* CONFIG_FUNCTION_TRACER */
23705
23706@@ -1202,7 +1441,7 @@ ENTRY(ftrace_graph_caller)
23707 popl %ecx
23708 popl %eax
23709 ret
23710-END(ftrace_graph_caller)
23711+ENDPROC(ftrace_graph_caller)
23712
23713 .globl return_to_handler
23714 return_to_handler:
23715@@ -1263,15 +1502,18 @@ error_code:
23716 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
23717 REG_TO_PTGS %ecx
23718 SET_KERNEL_GS %ecx
23719- movl $(__USER_DS), %ecx
23720+ movl $(__KERNEL_DS), %ecx
23721 movl %ecx, %ds
23722 movl %ecx, %es
23723+
23724+ pax_enter_kernel
23725+
23726 TRACE_IRQS_OFF
23727 movl %esp,%eax # pt_regs pointer
23728 call *%edi
23729 jmp ret_from_exception
23730 CFI_ENDPROC
23731-END(page_fault)
23732+ENDPROC(page_fault)
23733
23734 /*
23735 * Debug traps and NMI can happen at the one SYSENTER instruction
23736@@ -1314,7 +1556,7 @@ debug_stack_correct:
23737 call do_debug
23738 jmp ret_from_exception
23739 CFI_ENDPROC
23740-END(debug)
23741+ENDPROC(debug)
23742
23743 /*
23744 * NMI is doubly nasty. It can happen _while_ we're handling
23745@@ -1354,6 +1596,9 @@ nmi_stack_correct:
23746 xorl %edx,%edx # zero error code
23747 movl %esp,%eax # pt_regs pointer
23748 call do_nmi
23749+
23750+ pax_exit_kernel
23751+
23752 jmp restore_all_notrace
23753 CFI_ENDPROC
23754
23755@@ -1391,13 +1636,16 @@ nmi_espfix_stack:
23756 FIXUP_ESPFIX_STACK # %eax == %esp
23757 xorl %edx,%edx # zero error code
23758 call do_nmi
23759+
23760+ pax_exit_kernel
23761+
23762 RESTORE_REGS
23763 lss 12+4(%esp), %esp # back to espfix stack
23764 CFI_ADJUST_CFA_OFFSET -24
23765 jmp irq_return
23766 #endif
23767 CFI_ENDPROC
23768-END(nmi)
23769+ENDPROC(nmi)
23770
23771 ENTRY(int3)
23772 RING0_INT_FRAME
23773@@ -1410,14 +1658,14 @@ ENTRY(int3)
23774 call do_int3
23775 jmp ret_from_exception
23776 CFI_ENDPROC
23777-END(int3)
23778+ENDPROC(int3)
23779
23780 ENTRY(general_protection)
23781 RING0_EC_FRAME
23782 pushl_cfi $do_general_protection
23783 jmp error_code
23784 CFI_ENDPROC
23785-END(general_protection)
23786+ENDPROC(general_protection)
23787
23788 #ifdef CONFIG_KVM_GUEST
23789 ENTRY(async_page_fault)
23790@@ -1426,6 +1674,6 @@ ENTRY(async_page_fault)
23791 pushl_cfi $do_async_page_fault
23792 jmp error_code
23793 CFI_ENDPROC
23794-END(async_page_fault)
23795+ENDPROC(async_page_fault)
23796 #endif
23797
23798diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
23799index b9dde27..6e9dc4e 100644
23800--- a/arch/x86/kernel/entry_64.S
23801+++ b/arch/x86/kernel/entry_64.S
23802@@ -59,6 +59,8 @@
23803 #include <asm/smap.h>
23804 #include <asm/pgtable_types.h>
23805 #include <linux/err.h>
23806+#include <asm/pgtable.h>
23807+#include <asm/alternative-asm.h>
23808
23809 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
23810 #include <linux/elf-em.h>
23811@@ -81,6 +83,430 @@ ENTRY(native_usergs_sysret64)
23812 ENDPROC(native_usergs_sysret64)
23813 #endif /* CONFIG_PARAVIRT */
23814
23815+ .macro ljmpq sel, off
23816+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
23817+ .byte 0x48; ljmp *1234f(%rip)
23818+ .pushsection .rodata
23819+ .align 16
23820+ 1234: .quad \off; .word \sel
23821+ .popsection
23822+#else
23823+ pushq $\sel
23824+ pushq $\off
23825+ lretq
23826+#endif
23827+ .endm
23828+
23829+ .macro pax_enter_kernel
23830+ pax_set_fptr_mask
23831+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23832+ call pax_enter_kernel
23833+#endif
23834+ .endm
23835+
23836+ .macro pax_exit_kernel
23837+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23838+ call pax_exit_kernel
23839+#endif
23840+
23841+ .endm
23842+
23843+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
23844+ENTRY(pax_enter_kernel)
23845+ pushq %rdi
23846+
23847+#ifdef CONFIG_PARAVIRT
23848+ PV_SAVE_REGS(CLBR_RDI)
23849+#endif
23850+
23851+#ifdef CONFIG_PAX_KERNEXEC
23852+ GET_CR0_INTO_RDI
23853+ bts $16,%rdi
23854+ jnc 3f
23855+ mov %cs,%edi
23856+ cmp $__KERNEL_CS,%edi
23857+ jnz 2f
23858+1:
23859+#endif
23860+
23861+#ifdef CONFIG_PAX_MEMORY_UDEREF
23862+ 661: jmp 111f
23863+ .pushsection .altinstr_replacement, "a"
23864+ 662: ASM_NOP2
23865+ .popsection
23866+ .pushsection .altinstructions, "a"
23867+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23868+ .popsection
23869+ GET_CR3_INTO_RDI
23870+ cmp $0,%dil
23871+ jnz 112f
23872+ mov $__KERNEL_DS,%edi
23873+ mov %edi,%ss
23874+ jmp 111f
23875+112: cmp $1,%dil
23876+ jz 113f
23877+ ud2
23878+113: sub $4097,%rdi
23879+ bts $63,%rdi
23880+ SET_RDI_INTO_CR3
23881+ mov $__UDEREF_KERNEL_DS,%edi
23882+ mov %edi,%ss
23883+111:
23884+#endif
23885+
23886+#ifdef CONFIG_PARAVIRT
23887+ PV_RESTORE_REGS(CLBR_RDI)
23888+#endif
23889+
23890+ popq %rdi
23891+ pax_force_retaddr
23892+ retq
23893+
23894+#ifdef CONFIG_PAX_KERNEXEC
23895+2: ljmpq __KERNEL_CS,1b
23896+3: ljmpq __KERNEXEC_KERNEL_CS,4f
23897+4: SET_RDI_INTO_CR0
23898+ jmp 1b
23899+#endif
23900+ENDPROC(pax_enter_kernel)
23901+
23902+ENTRY(pax_exit_kernel)
23903+ pushq %rdi
23904+
23905+#ifdef CONFIG_PARAVIRT
23906+ PV_SAVE_REGS(CLBR_RDI)
23907+#endif
23908+
23909+#ifdef CONFIG_PAX_KERNEXEC
23910+ mov %cs,%rdi
23911+ cmp $__KERNEXEC_KERNEL_CS,%edi
23912+ jz 2f
23913+ GET_CR0_INTO_RDI
23914+ bts $16,%rdi
23915+ jnc 4f
23916+1:
23917+#endif
23918+
23919+#ifdef CONFIG_PAX_MEMORY_UDEREF
23920+ 661: jmp 111f
23921+ .pushsection .altinstr_replacement, "a"
23922+ 662: ASM_NOP2
23923+ .popsection
23924+ .pushsection .altinstructions, "a"
23925+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23926+ .popsection
23927+ mov %ss,%edi
23928+ cmp $__UDEREF_KERNEL_DS,%edi
23929+ jnz 111f
23930+ GET_CR3_INTO_RDI
23931+ cmp $0,%dil
23932+ jz 112f
23933+ ud2
23934+112: add $4097,%rdi
23935+ bts $63,%rdi
23936+ SET_RDI_INTO_CR3
23937+ mov $__KERNEL_DS,%edi
23938+ mov %edi,%ss
23939+111:
23940+#endif
23941+
23942+#ifdef CONFIG_PARAVIRT
23943+ PV_RESTORE_REGS(CLBR_RDI);
23944+#endif
23945+
23946+ popq %rdi
23947+ pax_force_retaddr
23948+ retq
23949+
23950+#ifdef CONFIG_PAX_KERNEXEC
23951+2: GET_CR0_INTO_RDI
23952+ btr $16,%rdi
23953+ jnc 4f
23954+ ljmpq __KERNEL_CS,3f
23955+3: SET_RDI_INTO_CR0
23956+ jmp 1b
23957+4: ud2
23958+ jmp 4b
23959+#endif
23960+ENDPROC(pax_exit_kernel)
23961+#endif
23962+
23963+ .macro pax_enter_kernel_user
23964+ pax_set_fptr_mask
23965+#ifdef CONFIG_PAX_MEMORY_UDEREF
23966+ call pax_enter_kernel_user
23967+#endif
23968+ .endm
23969+
23970+ .macro pax_exit_kernel_user
23971+#ifdef CONFIG_PAX_MEMORY_UDEREF
23972+ call pax_exit_kernel_user
23973+#endif
23974+#ifdef CONFIG_PAX_RANDKSTACK
23975+ pushq %rax
23976+ pushq %r11
23977+ call pax_randomize_kstack
23978+ popq %r11
23979+ popq %rax
23980+#endif
23981+ .endm
23982+
23983+#ifdef CONFIG_PAX_MEMORY_UDEREF
23984+ENTRY(pax_enter_kernel_user)
23985+ pushq %rdi
23986+ pushq %rbx
23987+
23988+#ifdef CONFIG_PARAVIRT
23989+ PV_SAVE_REGS(CLBR_RDI)
23990+#endif
23991+
23992+ 661: jmp 111f
23993+ .pushsection .altinstr_replacement, "a"
23994+ 662: ASM_NOP2
23995+ .popsection
23996+ .pushsection .altinstructions, "a"
23997+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
23998+ .popsection
23999+ GET_CR3_INTO_RDI
24000+ cmp $1,%dil
24001+ jnz 4f
24002+ sub $4097,%rdi
24003+ bts $63,%rdi
24004+ SET_RDI_INTO_CR3
24005+ jmp 3f
24006+111:
24007+
24008+ GET_CR3_INTO_RDI
24009+ mov %rdi,%rbx
24010+ add $__START_KERNEL_map,%rbx
24011+ sub phys_base(%rip),%rbx
24012+
24013+#ifdef CONFIG_PARAVIRT
24014+ cmpl $0, pv_info+PARAVIRT_enabled
24015+ jz 1f
24016+ pushq %rdi
24017+ i = 0
24018+ .rept USER_PGD_PTRS
24019+ mov i*8(%rbx),%rsi
24020+ mov $0,%sil
24021+ lea i*8(%rbx),%rdi
24022+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
24023+ i = i + 1
24024+ .endr
24025+ popq %rdi
24026+ jmp 2f
24027+1:
24028+#endif
24029+
24030+ i = 0
24031+ .rept USER_PGD_PTRS
24032+ movb $0,i*8(%rbx)
24033+ i = i + 1
24034+ .endr
24035+
24036+2: SET_RDI_INTO_CR3
24037+
24038+#ifdef CONFIG_PAX_KERNEXEC
24039+ GET_CR0_INTO_RDI
24040+ bts $16,%rdi
24041+ SET_RDI_INTO_CR0
24042+#endif
24043+
24044+3:
24045+
24046+#ifdef CONFIG_PARAVIRT
24047+ PV_RESTORE_REGS(CLBR_RDI)
24048+#endif
24049+
24050+ popq %rbx
24051+ popq %rdi
24052+ pax_force_retaddr
24053+ retq
24054+4: ud2
24055+ENDPROC(pax_enter_kernel_user)
24056+
24057+ENTRY(pax_exit_kernel_user)
24058+ pushq %rdi
24059+ pushq %rbx
24060+
24061+#ifdef CONFIG_PARAVIRT
24062+ PV_SAVE_REGS(CLBR_RDI)
24063+#endif
24064+
24065+ GET_CR3_INTO_RDI
24066+ 661: jmp 1f
24067+ .pushsection .altinstr_replacement, "a"
24068+ 662: ASM_NOP2
24069+ .popsection
24070+ .pushsection .altinstructions, "a"
24071+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
24072+ .popsection
24073+ cmp $0,%dil
24074+ jnz 3f
24075+ add $4097,%rdi
24076+ bts $63,%rdi
24077+ SET_RDI_INTO_CR3
24078+ jmp 2f
24079+1:
24080+
24081+ mov %rdi,%rbx
24082+
24083+#ifdef CONFIG_PAX_KERNEXEC
24084+ GET_CR0_INTO_RDI
24085+ btr $16,%rdi
24086+ jnc 3f
24087+ SET_RDI_INTO_CR0
24088+#endif
24089+
24090+ add $__START_KERNEL_map,%rbx
24091+ sub phys_base(%rip),%rbx
24092+
24093+#ifdef CONFIG_PARAVIRT
24094+ cmpl $0, pv_info+PARAVIRT_enabled
24095+ jz 1f
24096+ i = 0
24097+ .rept USER_PGD_PTRS
24098+ mov i*8(%rbx),%rsi
24099+ mov $0x67,%sil
24100+ lea i*8(%rbx),%rdi
24101+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
24102+ i = i + 1
24103+ .endr
24104+ jmp 2f
24105+1:
24106+#endif
24107+
24108+ i = 0
24109+ .rept USER_PGD_PTRS
24110+ movb $0x67,i*8(%rbx)
24111+ i = i + 1
24112+ .endr
24113+2:
24114+
24115+#ifdef CONFIG_PARAVIRT
24116+ PV_RESTORE_REGS(CLBR_RDI)
24117+#endif
24118+
24119+ popq %rbx
24120+ popq %rdi
24121+ pax_force_retaddr
24122+ retq
24123+3: ud2
24124+ENDPROC(pax_exit_kernel_user)
24125+#endif
24126+
24127+ .macro pax_enter_kernel_nmi
24128+ pax_set_fptr_mask
24129+
24130+#ifdef CONFIG_PAX_KERNEXEC
24131+ GET_CR0_INTO_RDI
24132+ bts $16,%rdi
24133+ jc 110f
24134+ SET_RDI_INTO_CR0
24135+ or $2,%ebx
24136+110:
24137+#endif
24138+
24139+#ifdef CONFIG_PAX_MEMORY_UDEREF
24140+ 661: jmp 111f
24141+ .pushsection .altinstr_replacement, "a"
24142+ 662: ASM_NOP2
24143+ .popsection
24144+ .pushsection .altinstructions, "a"
24145+ altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
24146+ .popsection
24147+ GET_CR3_INTO_RDI
24148+ cmp $0,%dil
24149+ jz 111f
24150+ sub $4097,%rdi
24151+ or $4,%ebx
24152+ bts $63,%rdi
24153+ SET_RDI_INTO_CR3
24154+ mov $__UDEREF_KERNEL_DS,%edi
24155+ mov %edi,%ss
24156+111:
24157+#endif
24158+ .endm
24159+
24160+ .macro pax_exit_kernel_nmi
24161+#ifdef CONFIG_PAX_KERNEXEC
24162+ btr $1,%ebx
24163+ jnc 110f
24164+ GET_CR0_INTO_RDI
24165+ btr $16,%rdi
24166+ SET_RDI_INTO_CR0
24167+110:
24168+#endif
24169+
24170+#ifdef CONFIG_PAX_MEMORY_UDEREF
24171+ btr $2,%ebx
24172+ jnc 111f
24173+ GET_CR3_INTO_RDI
24174+ add $4097,%rdi
24175+ bts $63,%rdi
24176+ SET_RDI_INTO_CR3
24177+ mov $__KERNEL_DS,%edi
24178+ mov %edi,%ss
24179+111:
24180+#endif
24181+ .endm
24182+
24183+ .macro pax_erase_kstack
24184+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
24185+ call pax_erase_kstack
24186+#endif
24187+ .endm
24188+
24189+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
24190+ENTRY(pax_erase_kstack)
24191+ pushq %rdi
24192+ pushq %rcx
24193+ pushq %rax
24194+ pushq %r11
24195+
24196+ GET_THREAD_INFO(%r11)
24197+ mov TI_lowest_stack(%r11), %rdi
24198+ mov $-0xBEEF, %rax
24199+ std
24200+
24201+1: mov %edi, %ecx
24202+ and $THREAD_SIZE_asm - 1, %ecx
24203+ shr $3, %ecx
24204+ repne scasq
24205+ jecxz 2f
24206+
24207+ cmp $2*8, %ecx
24208+ jc 2f
24209+
24210+ mov $2*8, %ecx
24211+ repe scasq
24212+ jecxz 2f
24213+ jne 1b
24214+
24215+2: cld
24216+ mov %esp, %ecx
24217+ sub %edi, %ecx
24218+
24219+ cmp $THREAD_SIZE_asm, %rcx
24220+ jb 3f
24221+ ud2
24222+3:
24223+
24224+ shr $3, %ecx
24225+ rep stosq
24226+
24227+ mov TI_task_thread_sp0(%r11), %rdi
24228+ sub $256, %rdi
24229+ mov %rdi, TI_lowest_stack(%r11)
24230+
24231+ popq %r11
24232+ popq %rax
24233+ popq %rcx
24234+ popq %rdi
24235+ pax_force_retaddr
24236+ ret
24237+ENDPROC(pax_erase_kstack)
24238+#endif
24239
24240 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
24241 #ifdef CONFIG_TRACE_IRQFLAGS
24242@@ -117,7 +543,7 @@ ENDPROC(native_usergs_sysret64)
24243 .endm
24244
24245 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
24246- bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
24247+ bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
24248 jnc 1f
24249 TRACE_IRQS_ON_DEBUG
24250 1:
24251@@ -155,27 +581,6 @@ ENDPROC(native_usergs_sysret64)
24252 movq \tmp,R11+\offset(%rsp)
24253 .endm
24254
24255- .macro FAKE_STACK_FRAME child_rip
24256- /* push in order ss, rsp, eflags, cs, rip */
24257- xorl %eax, %eax
24258- pushq_cfi $__KERNEL_DS /* ss */
24259- /*CFI_REL_OFFSET ss,0*/
24260- pushq_cfi %rax /* rsp */
24261- CFI_REL_OFFSET rsp,0
24262- pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
24263- /*CFI_REL_OFFSET rflags,0*/
24264- pushq_cfi $__KERNEL_CS /* cs */
24265- /*CFI_REL_OFFSET cs,0*/
24266- pushq_cfi \child_rip /* rip */
24267- CFI_REL_OFFSET rip,0
24268- pushq_cfi %rax /* orig rax */
24269- .endm
24270-
24271- .macro UNFAKE_STACK_FRAME
24272- addq $8*6, %rsp
24273- CFI_ADJUST_CFA_OFFSET -(6*8)
24274- .endm
24275-
24276 /*
24277 * initial frame state for interrupts (and exceptions without error code)
24278 */
24279@@ -241,25 +646,26 @@ ENDPROC(native_usergs_sysret64)
24280 /* save partial stack frame */
24281 .macro SAVE_ARGS_IRQ
24282 cld
24283- /* start from rbp in pt_regs and jump over */
24284- movq_cfi rdi, (RDI-RBP)
24285- movq_cfi rsi, (RSI-RBP)
24286- movq_cfi rdx, (RDX-RBP)
24287- movq_cfi rcx, (RCX-RBP)
24288- movq_cfi rax, (RAX-RBP)
24289- movq_cfi r8, (R8-RBP)
24290- movq_cfi r9, (R9-RBP)
24291- movq_cfi r10, (R10-RBP)
24292- movq_cfi r11, (R11-RBP)
24293+ /* start from r15 in pt_regs and jump over */
24294+ movq_cfi rdi, RDI
24295+ movq_cfi rsi, RSI
24296+ movq_cfi rdx, RDX
24297+ movq_cfi rcx, RCX
24298+ movq_cfi rax, RAX
24299+ movq_cfi r8, R8
24300+ movq_cfi r9, R9
24301+ movq_cfi r10, R10
24302+ movq_cfi r11, R11
24303+ movq_cfi r12, R12
24304
24305 /* Save rbp so that we can unwind from get_irq_regs() */
24306- movq_cfi rbp, 0
24307+ movq_cfi rbp, RBP
24308
24309 /* Save previous stack value */
24310 movq %rsp, %rsi
24311
24312- leaq -RBP(%rsp),%rdi /* arg1 for handler */
24313- testl $3, CS-RBP(%rsi)
24314+ movq %rsp,%rdi /* arg1 for handler */
24315+ testb $3, CS(%rsi)
24316 je 1f
24317 SWAPGS
24318 /*
24319@@ -279,6 +685,18 @@ ENDPROC(native_usergs_sysret64)
24320 0x06 /* DW_OP_deref */, \
24321 0x08 /* DW_OP_const1u */, SS+8-RBP, \
24322 0x22 /* DW_OP_plus */
24323+
24324+#ifdef CONFIG_PAX_MEMORY_UDEREF
24325+ testb $3, CS(%rdi)
24326+ jnz 1f
24327+ pax_enter_kernel
24328+ jmp 2f
24329+1: pax_enter_kernel_user
24330+2:
24331+#else
24332+ pax_enter_kernel
24333+#endif
24334+
24335 /* We entered an interrupt context - irqs are off: */
24336 TRACE_IRQS_OFF
24337 .endm
24338@@ -308,9 +726,52 @@ ENTRY(save_paranoid)
24339 js 1f /* negative -> in kernel */
24340 SWAPGS
24341 xorl %ebx,%ebx
24342-1: ret
24343+1:
24344+#ifdef CONFIG_PAX_MEMORY_UDEREF
24345+ testb $3, CS+8(%rsp)
24346+ jnz 1f
24347+ pax_enter_kernel
24348+ jmp 2f
24349+1: pax_enter_kernel_user
24350+2:
24351+#else
24352+ pax_enter_kernel
24353+#endif
24354+ pax_force_retaddr
24355+ ret
24356 CFI_ENDPROC
24357-END(save_paranoid)
24358+ENDPROC(save_paranoid)
24359+
24360+ENTRY(save_paranoid_nmi)
24361+ XCPT_FRAME 1 RDI+8
24362+ cld
24363+ movq_cfi rdi, RDI+8
24364+ movq_cfi rsi, RSI+8
24365+ movq_cfi rdx, RDX+8
24366+ movq_cfi rcx, RCX+8
24367+ movq_cfi rax, RAX+8
24368+ movq_cfi r8, R8+8
24369+ movq_cfi r9, R9+8
24370+ movq_cfi r10, R10+8
24371+ movq_cfi r11, R11+8
24372+ movq_cfi rbx, RBX+8
24373+ movq_cfi rbp, RBP+8
24374+ movq_cfi r12, R12+8
24375+ movq_cfi r13, R13+8
24376+ movq_cfi r14, R14+8
24377+ movq_cfi r15, R15+8
24378+ movl $1,%ebx
24379+ movl $MSR_GS_BASE,%ecx
24380+ rdmsr
24381+ testl %edx,%edx
24382+ js 1f /* negative -> in kernel */
24383+ SWAPGS
24384+ xorl %ebx,%ebx
24385+1: pax_enter_kernel_nmi
24386+ pax_force_retaddr
24387+ ret
24388+ CFI_ENDPROC
24389+ENDPROC(save_paranoid_nmi)
24390
24391 /*
24392 * A newly forked process directly context switches into this address.
24393@@ -331,7 +792,7 @@ ENTRY(ret_from_fork)
24394
24395 RESTORE_REST
24396
24397- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
24398+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
24399 jz 1f
24400
24401 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
24402@@ -341,15 +802,13 @@ ENTRY(ret_from_fork)
24403 jmp ret_from_sys_call # go to the SYSRET fastpath
24404
24405 1:
24406- subq $REST_SKIP, %rsp # leave space for volatiles
24407- CFI_ADJUST_CFA_OFFSET REST_SKIP
24408 movq %rbp, %rdi
24409 call *%rbx
24410 movl $0, RAX(%rsp)
24411 RESTORE_REST
24412 jmp int_ret_from_sys_call
24413 CFI_ENDPROC
24414-END(ret_from_fork)
24415+ENDPROC(ret_from_fork)
24416
24417 /*
24418 * System call entry. Up to 6 arguments in registers are supported.
24419@@ -386,7 +845,7 @@ END(ret_from_fork)
24420 ENTRY(system_call)
24421 CFI_STARTPROC simple
24422 CFI_SIGNAL_FRAME
24423- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
24424+ CFI_DEF_CFA rsp,0
24425 CFI_REGISTER rip,rcx
24426 /*CFI_REGISTER rflags,r11*/
24427 SWAPGS_UNSAFE_STACK
24428@@ -399,16 +858,23 @@ GLOBAL(system_call_after_swapgs)
24429
24430 movq %rsp,PER_CPU_VAR(old_rsp)
24431 movq PER_CPU_VAR(kernel_stack),%rsp
24432+ SAVE_ARGS 8*6,0
24433+ pax_enter_kernel_user
24434+
24435+#ifdef CONFIG_PAX_RANDKSTACK
24436+ pax_erase_kstack
24437+#endif
24438+
24439 /*
24440 * No need to follow this irqs off/on section - it's straight
24441 * and short:
24442 */
24443 ENABLE_INTERRUPTS(CLBR_NONE)
24444- SAVE_ARGS 8,0
24445 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
24446 movq %rcx,RIP-ARGOFFSET(%rsp)
24447 CFI_REL_OFFSET rip,RIP-ARGOFFSET
24448- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
24449+ GET_THREAD_INFO(%rcx)
24450+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
24451 jnz tracesys
24452 system_call_fastpath:
24453 #if __SYSCALL_MASK == ~0
24454@@ -432,10 +898,13 @@ sysret_check:
24455 LOCKDEP_SYS_EXIT
24456 DISABLE_INTERRUPTS(CLBR_NONE)
24457 TRACE_IRQS_OFF
24458- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
24459+ GET_THREAD_INFO(%rcx)
24460+ movl TI_flags(%rcx),%edx
24461 andl %edi,%edx
24462 jnz sysret_careful
24463 CFI_REMEMBER_STATE
24464+ pax_exit_kernel_user
24465+ pax_erase_kstack
24466 /*
24467 * sysretq will re-enable interrupts:
24468 */
24469@@ -494,6 +963,9 @@ auditsys:
24470 movq %rax,%rsi /* 2nd arg: syscall number */
24471 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
24472 call __audit_syscall_entry
24473+
24474+ pax_erase_kstack
24475+
24476 LOAD_ARGS 0 /* reload call-clobbered registers */
24477 jmp system_call_fastpath
24478
24479@@ -515,7 +987,7 @@ sysret_audit:
24480 /* Do syscall tracing */
24481 tracesys:
24482 #ifdef CONFIG_AUDITSYSCALL
24483- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
24484+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
24485 jz auditsys
24486 #endif
24487 SAVE_REST
24488@@ -523,12 +995,15 @@ tracesys:
24489 FIXUP_TOP_OF_STACK %rdi
24490 movq %rsp,%rdi
24491 call syscall_trace_enter
24492+
24493+ pax_erase_kstack
24494+
24495 /*
24496 * Reload arg registers from stack in case ptrace changed them.
24497 * We don't reload %rax because syscall_trace_enter() returned
24498 * the value it wants us to use in the table lookup.
24499 */
24500- LOAD_ARGS ARGOFFSET, 1
24501+ LOAD_ARGS 1
24502 RESTORE_REST
24503 #if __SYSCALL_MASK == ~0
24504 cmpq $__NR_syscall_max,%rax
24505@@ -558,7 +1033,9 @@ GLOBAL(int_with_check)
24506 andl %edi,%edx
24507 jnz int_careful
24508 andl $~TS_COMPAT,TI_status(%rcx)
24509- jmp retint_swapgs
24510+ pax_exit_kernel_user
24511+ pax_erase_kstack
24512+ jmp retint_swapgs_pax
24513
24514 /* Either reschedule or signal or syscall exit tracking needed. */
24515 /* First do a reschedule test. */
24516@@ -604,7 +1081,7 @@ int_restore_rest:
24517 TRACE_IRQS_OFF
24518 jmp int_with_check
24519 CFI_ENDPROC
24520-END(system_call)
24521+ENDPROC(system_call)
24522
24523 .macro FORK_LIKE func
24524 ENTRY(stub_\func)
24525@@ -617,9 +1094,10 @@ ENTRY(stub_\func)
24526 DEFAULT_FRAME 0 8 /* offset 8: return address */
24527 call sys_\func
24528 RESTORE_TOP_OF_STACK %r11, 8
24529- ret $REST_SKIP /* pop extended registers */
24530+ pax_force_retaddr
24531+ ret
24532 CFI_ENDPROC
24533-END(stub_\func)
24534+ENDPROC(stub_\func)
24535 .endm
24536
24537 .macro FIXED_FRAME label,func
24538@@ -629,9 +1107,10 @@ ENTRY(\label)
24539 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
24540 call \func
24541 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
24542+ pax_force_retaddr
24543 ret
24544 CFI_ENDPROC
24545-END(\label)
24546+ENDPROC(\label)
24547 .endm
24548
24549 FORK_LIKE clone
24550@@ -639,19 +1118,6 @@ END(\label)
24551 FORK_LIKE vfork
24552 FIXED_FRAME stub_iopl, sys_iopl
24553
24554-ENTRY(ptregscall_common)
24555- DEFAULT_FRAME 1 8 /* offset 8: return address */
24556- RESTORE_TOP_OF_STACK %r11, 8
24557- movq_cfi_restore R15+8, r15
24558- movq_cfi_restore R14+8, r14
24559- movq_cfi_restore R13+8, r13
24560- movq_cfi_restore R12+8, r12
24561- movq_cfi_restore RBP+8, rbp
24562- movq_cfi_restore RBX+8, rbx
24563- ret $REST_SKIP /* pop extended registers */
24564- CFI_ENDPROC
24565-END(ptregscall_common)
24566-
24567 ENTRY(stub_execve)
24568 CFI_STARTPROC
24569 addq $8, %rsp
24570@@ -663,7 +1129,7 @@ ENTRY(stub_execve)
24571 RESTORE_REST
24572 jmp int_ret_from_sys_call
24573 CFI_ENDPROC
24574-END(stub_execve)
24575+ENDPROC(stub_execve)
24576
24577 /*
24578 * sigreturn is special because it needs to restore all registers on return.
24579@@ -680,7 +1146,7 @@ ENTRY(stub_rt_sigreturn)
24580 RESTORE_REST
24581 jmp int_ret_from_sys_call
24582 CFI_ENDPROC
24583-END(stub_rt_sigreturn)
24584+ENDPROC(stub_rt_sigreturn)
24585
24586 #ifdef CONFIG_X86_X32_ABI
24587 ENTRY(stub_x32_rt_sigreturn)
24588@@ -694,7 +1160,7 @@ ENTRY(stub_x32_rt_sigreturn)
24589 RESTORE_REST
24590 jmp int_ret_from_sys_call
24591 CFI_ENDPROC
24592-END(stub_x32_rt_sigreturn)
24593+ENDPROC(stub_x32_rt_sigreturn)
24594
24595 ENTRY(stub_x32_execve)
24596 CFI_STARTPROC
24597@@ -708,7 +1174,7 @@ ENTRY(stub_x32_execve)
24598 RESTORE_REST
24599 jmp int_ret_from_sys_call
24600 CFI_ENDPROC
24601-END(stub_x32_execve)
24602+ENDPROC(stub_x32_execve)
24603
24604 #endif
24605
24606@@ -745,7 +1211,7 @@ vector=vector+1
24607 2: jmp common_interrupt
24608 .endr
24609 CFI_ENDPROC
24610-END(irq_entries_start)
24611+ENDPROC(irq_entries_start)
24612
24613 .previous
24614 END(interrupt)
24615@@ -762,8 +1228,8 @@ END(interrupt)
24616 /* 0(%rsp): ~(interrupt number) */
24617 .macro interrupt func
24618 /* reserve pt_regs for scratch regs and rbp */
24619- subq $ORIG_RAX-RBP, %rsp
24620- CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
24621+ subq $ORIG_RAX, %rsp
24622+ CFI_ADJUST_CFA_OFFSET ORIG_RAX
24623 SAVE_ARGS_IRQ
24624 call \func
24625 .endm
24626@@ -786,14 +1252,14 @@ ret_from_intr:
24627
24628 /* Restore saved previous stack */
24629 popq %rsi
24630- CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
24631- leaq ARGOFFSET-RBP(%rsi), %rsp
24632+ CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
24633+ movq %rsi, %rsp
24634 CFI_DEF_CFA_REGISTER rsp
24635- CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
24636+ CFI_ADJUST_CFA_OFFSET -ARGOFFSET
24637
24638 exit_intr:
24639 GET_THREAD_INFO(%rcx)
24640- testl $3,CS-ARGOFFSET(%rsp)
24641+ testb $3,CS-ARGOFFSET(%rsp)
24642 je retint_kernel
24643
24644 /* Interrupt came from user space */
24645@@ -815,12 +1281,35 @@ retint_swapgs: /* return to user-space */
24646 * The iretq could re-enable interrupts:
24647 */
24648 DISABLE_INTERRUPTS(CLBR_ANY)
24649+ pax_exit_kernel_user
24650+retint_swapgs_pax:
24651 TRACE_IRQS_IRETQ
24652 SWAPGS
24653 jmp restore_args
24654
24655 retint_restore_args: /* return to kernel space */
24656 DISABLE_INTERRUPTS(CLBR_ANY)
24657+ pax_exit_kernel
24658+
24659+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
24660+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
24661+ * namely calling EFI runtime services with a phys mapping. We're
24662+ * starting off with NOPs and patch in the real instrumentation
24663+ * (BTS/OR) before starting any userland process; even before starting
24664+ * up the APs.
24665+ */
24666+ .pushsection .altinstr_replacement, "a"
24667+ 601: pax_force_retaddr (RIP-ARGOFFSET)
24668+ 602:
24669+ .popsection
24670+ 603: .fill 602b-601b, 1, 0x90
24671+ .pushsection .altinstructions, "a"
24672+ altinstruction_entry 603b, 601b, X86_FEATURE_ALWAYS, 602b-601b, 602b-601b
24673+ .popsection
24674+#else
24675+ pax_force_retaddr (RIP-ARGOFFSET)
24676+#endif
24677+
24678 /*
24679 * The iretq could re-enable interrupts:
24680 */
24681@@ -920,7 +1409,7 @@ ENTRY(retint_kernel)
24682 jmp exit_intr
24683 #endif
24684 CFI_ENDPROC
24685-END(common_interrupt)
24686+ENDPROC(common_interrupt)
24687
24688 /*
24689 * APIC interrupts.
24690@@ -934,7 +1423,7 @@ ENTRY(\sym)
24691 interrupt \do_sym
24692 jmp ret_from_intr
24693 CFI_ENDPROC
24694-END(\sym)
24695+ENDPROC(\sym)
24696 .endm
24697
24698 #ifdef CONFIG_TRACING
24699@@ -1007,7 +1496,7 @@ apicinterrupt IRQ_WORK_VECTOR \
24700 /*
24701 * Exception entry points.
24702 */
24703-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
24704+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
24705
24706 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
24707 ENTRY(\sym)
24708@@ -1058,6 +1547,12 @@ ENTRY(\sym)
24709 .endif
24710
24711 .if \shift_ist != -1
24712+#ifdef CONFIG_SMP
24713+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
24714+ lea init_tss(%r13), %r13
24715+#else
24716+ lea init_tss(%rip), %r13
24717+#endif
24718 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\shift_ist)
24719 .endif
24720
24721@@ -1074,7 +1569,7 @@ ENTRY(\sym)
24722 .endif
24723
24724 CFI_ENDPROC
24725-END(\sym)
24726+ENDPROC(\sym)
24727 .endm
24728
24729 #ifdef CONFIG_TRACING
24730@@ -1115,9 +1610,10 @@ gs_change:
24731 2: mfence /* workaround */
24732 SWAPGS
24733 popfq_cfi
24734+ pax_force_retaddr
24735 ret
24736 CFI_ENDPROC
24737-END(native_load_gs_index)
24738+ENDPROC(native_load_gs_index)
24739
24740 _ASM_EXTABLE(gs_change,bad_gs)
24741 .section .fixup,"ax"
24742@@ -1145,9 +1641,10 @@ ENTRY(do_softirq_own_stack)
24743 CFI_DEF_CFA_REGISTER rsp
24744 CFI_ADJUST_CFA_OFFSET -8
24745 decl PER_CPU_VAR(irq_count)
24746+ pax_force_retaddr
24747 ret
24748 CFI_ENDPROC
24749-END(do_softirq_own_stack)
24750+ENDPROC(do_softirq_own_stack)
24751
24752 #ifdef CONFIG_XEN
24753 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
24754@@ -1185,7 +1682,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
24755 decl PER_CPU_VAR(irq_count)
24756 jmp error_exit
24757 CFI_ENDPROC
24758-END(xen_do_hypervisor_callback)
24759+ENDPROC(xen_do_hypervisor_callback)
24760
24761 /*
24762 * Hypervisor uses this for application faults while it executes.
24763@@ -1244,7 +1741,7 @@ ENTRY(xen_failsafe_callback)
24764 SAVE_ALL
24765 jmp error_exit
24766 CFI_ENDPROC
24767-END(xen_failsafe_callback)
24768+ENDPROC(xen_failsafe_callback)
24769
24770 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
24771 xen_hvm_callback_vector xen_evtchn_do_upcall
24772@@ -1291,18 +1788,33 @@ ENTRY(paranoid_exit)
24773 DEFAULT_FRAME
24774 DISABLE_INTERRUPTS(CLBR_NONE)
24775 TRACE_IRQS_OFF_DEBUG
24776- testl %ebx,%ebx /* swapgs needed? */
24777+ testl $1,%ebx /* swapgs needed? */
24778 jnz paranoid_restore
24779- testl $3,CS(%rsp)
24780+ testb $3,CS(%rsp)
24781 jnz paranoid_userspace
24782+#ifdef CONFIG_PAX_MEMORY_UDEREF
24783+ pax_exit_kernel
24784+ TRACE_IRQS_IRETQ 0
24785+ SWAPGS_UNSAFE_STACK
24786+ RESTORE_ALL 8
24787+ pax_force_retaddr_bts
24788+ jmp irq_return
24789+#endif
24790 paranoid_swapgs:
24791+#ifdef CONFIG_PAX_MEMORY_UDEREF
24792+ pax_exit_kernel_user
24793+#else
24794+ pax_exit_kernel
24795+#endif
24796 TRACE_IRQS_IRETQ 0
24797 SWAPGS_UNSAFE_STACK
24798 RESTORE_ALL 8
24799 jmp irq_return
24800 paranoid_restore:
24801+ pax_exit_kernel
24802 TRACE_IRQS_IRETQ_DEBUG 0
24803 RESTORE_ALL 8
24804+ pax_force_retaddr_bts
24805 jmp irq_return
24806 paranoid_userspace:
24807 GET_THREAD_INFO(%rcx)
24808@@ -1331,7 +1843,7 @@ paranoid_schedule:
24809 TRACE_IRQS_OFF
24810 jmp paranoid_userspace
24811 CFI_ENDPROC
24812-END(paranoid_exit)
24813+ENDPROC(paranoid_exit)
24814
24815 /*
24816 * Exception entry point. This expects an error code/orig_rax on the stack.
24817@@ -1358,12 +1870,23 @@ ENTRY(error_entry)
24818 movq %r14, R14+8(%rsp)
24819 movq %r15, R15+8(%rsp)
24820 xorl %ebx,%ebx
24821- testl $3,CS+8(%rsp)
24822+ testb $3,CS+8(%rsp)
24823 je error_kernelspace
24824 error_swapgs:
24825 SWAPGS
24826 error_sti:
24827+#ifdef CONFIG_PAX_MEMORY_UDEREF
24828+ testb $3, CS+8(%rsp)
24829+ jnz 1f
24830+ pax_enter_kernel
24831+ jmp 2f
24832+1: pax_enter_kernel_user
24833+2:
24834+#else
24835+ pax_enter_kernel
24836+#endif
24837 TRACE_IRQS_OFF
24838+ pax_force_retaddr
24839 ret
24840
24841 /*
24842@@ -1398,7 +1921,7 @@ error_bad_iret:
24843 decl %ebx /* Return to usergs */
24844 jmp error_sti
24845 CFI_ENDPROC
24846-END(error_entry)
24847+ENDPROC(error_entry)
24848
24849
24850 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
24851@@ -1409,7 +1932,7 @@ ENTRY(error_exit)
24852 DISABLE_INTERRUPTS(CLBR_NONE)
24853 TRACE_IRQS_OFF
24854 GET_THREAD_INFO(%rcx)
24855- testl %eax,%eax
24856+ testl $1,%eax
24857 jne retint_kernel
24858 LOCKDEP_SYS_EXIT_IRQ
24859 movl TI_flags(%rcx),%edx
24860@@ -1418,7 +1941,7 @@ ENTRY(error_exit)
24861 jnz retint_careful
24862 jmp retint_swapgs
24863 CFI_ENDPROC
24864-END(error_exit)
24865+ENDPROC(error_exit)
24866
24867 /*
24868 * Test if a given stack is an NMI stack or not.
24869@@ -1476,9 +1999,11 @@ ENTRY(nmi)
24870 * If %cs was not the kernel segment, then the NMI triggered in user
24871 * space, which means it is definitely not nested.
24872 */
24873+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
24874+ je 1f
24875 cmpl $__KERNEL_CS, 16(%rsp)
24876 jne first_nmi
24877-
24878+1:
24879 /*
24880 * Check the special variable on the stack to see if NMIs are
24881 * executing.
24882@@ -1512,8 +2037,7 @@ nested_nmi:
24883
24884 1:
24885 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
24886- leaq -1*8(%rsp), %rdx
24887- movq %rdx, %rsp
24888+ subq $8, %rsp
24889 CFI_ADJUST_CFA_OFFSET 1*8
24890 leaq -10*8(%rsp), %rdx
24891 pushq_cfi $__KERNEL_DS
24892@@ -1531,6 +2055,7 @@ nested_nmi_out:
24893 CFI_RESTORE rdx
24894
24895 /* No need to check faults here */
24896+# pax_force_retaddr_bts
24897 INTERRUPT_RETURN
24898
24899 CFI_RESTORE_STATE
24900@@ -1627,13 +2152,13 @@ end_repeat_nmi:
24901 subq $ORIG_RAX-R15, %rsp
24902 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
24903 /*
24904- * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
24905+ * Use save_paranoid_nmi to handle SWAPGS, but no need to use paranoid_exit
24906 * as we should not be calling schedule in NMI context.
24907 * Even with normal interrupts enabled. An NMI should not be
24908 * setting NEED_RESCHED or anything that normal interrupts and
24909 * exceptions might do.
24910 */
24911- call save_paranoid
24912+ call save_paranoid_nmi
24913 DEFAULT_FRAME 0
24914
24915 /*
24916@@ -1643,9 +2168,9 @@ end_repeat_nmi:
24917 * NMI itself takes a page fault, the page fault that was preempted
24918 * will read the information from the NMI page fault and not the
24919 * origin fault. Save it off and restore it if it changes.
24920- * Use the r12 callee-saved register.
24921+ * Use the r13 callee-saved register.
24922 */
24923- movq %cr2, %r12
24924+ movq %cr2, %r13
24925
24926 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
24927 movq %rsp,%rdi
24928@@ -1654,29 +2179,34 @@ end_repeat_nmi:
24929
24930 /* Did the NMI take a page fault? Restore cr2 if it did */
24931 movq %cr2, %rcx
24932- cmpq %rcx, %r12
24933+ cmpq %rcx, %r13
24934 je 1f
24935- movq %r12, %cr2
24936+ movq %r13, %cr2
24937 1:
24938
24939- testl %ebx,%ebx /* swapgs needed? */
24940+ testl $1,%ebx /* swapgs needed? */
24941 jnz nmi_restore
24942 nmi_swapgs:
24943 SWAPGS_UNSAFE_STACK
24944 nmi_restore:
24945+ pax_exit_kernel_nmi
24946 /* Pop the extra iret frame at once */
24947 RESTORE_ALL 6*8
24948+ testb $3, 8(%rsp)
24949+ jnz 1f
24950+ pax_force_retaddr_bts
24951+1:
24952
24953 /* Clear the NMI executing stack variable */
24954 movq $0, 5*8(%rsp)
24955 jmp irq_return
24956 CFI_ENDPROC
24957-END(nmi)
24958+ENDPROC(nmi)
24959
24960 ENTRY(ignore_sysret)
24961 CFI_STARTPROC
24962 mov $-ENOSYS,%eax
24963 sysret
24964 CFI_ENDPROC
24965-END(ignore_sysret)
24966+ENDPROC(ignore_sysret)
24967
24968diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
24969index 94d857f..bf1f0bf 100644
24970--- a/arch/x86/kernel/espfix_64.c
24971+++ b/arch/x86/kernel/espfix_64.c
24972@@ -197,7 +197,7 @@ void init_espfix_ap(void)
24973 set_pte(&pte_p[n*PTE_STRIDE], pte);
24974
24975 /* Job is done for this CPU and any CPU which shares this page */
24976- ACCESS_ONCE(espfix_pages[page]) = stack_page;
24977+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
24978
24979 unlock_done:
24980 mutex_unlock(&espfix_init_mutex);
24981diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
24982index 3386dc9..28bdf81 100644
24983--- a/arch/x86/kernel/ftrace.c
24984+++ b/arch/x86/kernel/ftrace.c
24985@@ -88,7 +88,7 @@ static unsigned long text_ip_addr(unsigned long ip)
24986 * kernel identity mapping to modify code.
24987 */
24988 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
24989- ip = (unsigned long)__va(__pa_symbol(ip));
24990+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
24991
24992 return ip;
24993 }
24994@@ -104,6 +104,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
24995 {
24996 unsigned char replaced[MCOUNT_INSN_SIZE];
24997
24998+ ip = ktla_ktva(ip);
24999+
25000 /*
25001 * Note: Due to modules and __init, code can
25002 * disappear and change, we need to protect against faulting
25003@@ -229,7 +231,7 @@ static int update_ftrace_func(unsigned long ip, void *new)
25004 unsigned char old[MCOUNT_INSN_SIZE];
25005 int ret;
25006
25007- memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
25008+ memcpy(old, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE);
25009
25010 ftrace_update_func = ip;
25011 /* Make sure the breakpoints see the ftrace_update_func update */
25012@@ -310,7 +312,7 @@ static int add_break(unsigned long ip, const char *old)
25013 unsigned char replaced[MCOUNT_INSN_SIZE];
25014 unsigned char brk = BREAKPOINT_INSTRUCTION;
25015
25016- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
25017+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
25018 return -EFAULT;
25019
25020 /* Make sure it is what we expect it to be */
25021diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
25022index eda1a86..8f6df48 100644
25023--- a/arch/x86/kernel/head64.c
25024+++ b/arch/x86/kernel/head64.c
25025@@ -67,12 +67,12 @@ again:
25026 pgd = *pgd_p;
25027
25028 /*
25029- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
25030- * critical -- __PAGE_OFFSET would point us back into the dynamic
25031+ * The use of __early_va rather than __va here is critical:
25032+ * __va would point us back into the dynamic
25033 * range and we might end up looping forever...
25034 */
25035 if (pgd)
25036- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
25037+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
25038 else {
25039 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
25040 reset_early_page_tables();
25041@@ -82,13 +82,13 @@ again:
25042 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
25043 for (i = 0; i < PTRS_PER_PUD; i++)
25044 pud_p[i] = 0;
25045- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
25046+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
25047 }
25048 pud_p += pud_index(address);
25049 pud = *pud_p;
25050
25051 if (pud)
25052- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
25053+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
25054 else {
25055 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
25056 reset_early_page_tables();
25057@@ -98,7 +98,7 @@ again:
25058 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
25059 for (i = 0; i < PTRS_PER_PMD; i++)
25060 pmd_p[i] = 0;
25061- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
25062+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
25063 }
25064 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
25065 pmd_p[pmd_index(address)] = pmd;
25066@@ -175,7 +175,6 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
25067 if (console_loglevel >= CONSOLE_LOGLEVEL_DEBUG)
25068 early_printk("Kernel alive\n");
25069
25070- clear_page(init_level4_pgt);
25071 /* set init_level4_pgt kernel high mapping*/
25072 init_level4_pgt[511] = early_level4_pgt[511];
25073
25074diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
25075index f36bd42..0ab4474 100644
25076--- a/arch/x86/kernel/head_32.S
25077+++ b/arch/x86/kernel/head_32.S
25078@@ -26,6 +26,12 @@
25079 /* Physical address */
25080 #define pa(X) ((X) - __PAGE_OFFSET)
25081
25082+#ifdef CONFIG_PAX_KERNEXEC
25083+#define ta(X) (X)
25084+#else
25085+#define ta(X) ((X) - __PAGE_OFFSET)
25086+#endif
25087+
25088 /*
25089 * References to members of the new_cpu_data structure.
25090 */
25091@@ -55,11 +61,7 @@
25092 * and small than max_low_pfn, otherwise will waste some page table entries
25093 */
25094
25095-#if PTRS_PER_PMD > 1
25096-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
25097-#else
25098-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
25099-#endif
25100+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
25101
25102 /* Number of possible pages in the lowmem region */
25103 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
25104@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
25105 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
25106
25107 /*
25108+ * Real beginning of normal "text" segment
25109+ */
25110+ENTRY(stext)
25111+ENTRY(_stext)
25112+
25113+/*
25114 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
25115 * %esi points to the real-mode code as a 32-bit pointer.
25116 * CS and DS must be 4 GB flat segments, but we don't depend on
25117@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
25118 * can.
25119 */
25120 __HEAD
25121+
25122+#ifdef CONFIG_PAX_KERNEXEC
25123+ jmp startup_32
25124+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
25125+.fill PAGE_SIZE-5,1,0xcc
25126+#endif
25127+
25128 ENTRY(startup_32)
25129 movl pa(stack_start),%ecx
25130
25131@@ -106,6 +121,59 @@ ENTRY(startup_32)
25132 2:
25133 leal -__PAGE_OFFSET(%ecx),%esp
25134
25135+#ifdef CONFIG_SMP
25136+ movl $pa(cpu_gdt_table),%edi
25137+ movl $__per_cpu_load,%eax
25138+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
25139+ rorl $16,%eax
25140+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
25141+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
25142+ movl $__per_cpu_end - 1,%eax
25143+ subl $__per_cpu_start,%eax
25144+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
25145+#endif
25146+
25147+#ifdef CONFIG_PAX_MEMORY_UDEREF
25148+ movl $NR_CPUS,%ecx
25149+ movl $pa(cpu_gdt_table),%edi
25150+1:
25151+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
25152+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
25153+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
25154+ addl $PAGE_SIZE_asm,%edi
25155+ loop 1b
25156+#endif
25157+
25158+#ifdef CONFIG_PAX_KERNEXEC
25159+ movl $pa(boot_gdt),%edi
25160+ movl $__LOAD_PHYSICAL_ADDR,%eax
25161+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
25162+ rorl $16,%eax
25163+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
25164+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
25165+ rorl $16,%eax
25166+
25167+ ljmp $(__BOOT_CS),$1f
25168+1:
25169+
25170+ movl $NR_CPUS,%ecx
25171+ movl $pa(cpu_gdt_table),%edi
25172+ addl $__PAGE_OFFSET,%eax
25173+1:
25174+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
25175+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
25176+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
25177+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
25178+ rorl $16,%eax
25179+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
25180+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
25181+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
25182+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
25183+ rorl $16,%eax
25184+ addl $PAGE_SIZE_asm,%edi
25185+ loop 1b
25186+#endif
25187+
25188 /*
25189 * Clear BSS first so that there are no surprises...
25190 */
25191@@ -201,8 +269,11 @@ ENTRY(startup_32)
25192 movl %eax, pa(max_pfn_mapped)
25193
25194 /* Do early initialization of the fixmap area */
25195- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
25196- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
25197+#ifdef CONFIG_COMPAT_VDSO
25198+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
25199+#else
25200+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
25201+#endif
25202 #else /* Not PAE */
25203
25204 page_pde_offset = (__PAGE_OFFSET >> 20);
25205@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
25206 movl %eax, pa(max_pfn_mapped)
25207
25208 /* Do early initialization of the fixmap area */
25209- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
25210- movl %eax,pa(initial_page_table+0xffc)
25211+#ifdef CONFIG_COMPAT_VDSO
25212+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
25213+#else
25214+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
25215+#endif
25216 #endif
25217
25218 #ifdef CONFIG_PARAVIRT
25219@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
25220 cmpl $num_subarch_entries, %eax
25221 jae bad_subarch
25222
25223- movl pa(subarch_entries)(,%eax,4), %eax
25224- subl $__PAGE_OFFSET, %eax
25225- jmp *%eax
25226+ jmp *pa(subarch_entries)(,%eax,4)
25227
25228 bad_subarch:
25229 WEAK(lguest_entry)
25230@@ -261,10 +333,10 @@ WEAK(xen_entry)
25231 __INITDATA
25232
25233 subarch_entries:
25234- .long default_entry /* normal x86/PC */
25235- .long lguest_entry /* lguest hypervisor */
25236- .long xen_entry /* Xen hypervisor */
25237- .long default_entry /* Moorestown MID */
25238+ .long ta(default_entry) /* normal x86/PC */
25239+ .long ta(lguest_entry) /* lguest hypervisor */
25240+ .long ta(xen_entry) /* Xen hypervisor */
25241+ .long ta(default_entry) /* Moorestown MID */
25242 num_subarch_entries = (. - subarch_entries) / 4
25243 .previous
25244 #else
25245@@ -354,6 +426,7 @@ default_entry:
25246 movl pa(mmu_cr4_features),%eax
25247 movl %eax,%cr4
25248
25249+#ifdef CONFIG_X86_PAE
25250 testb $X86_CR4_PAE, %al # check if PAE is enabled
25251 jz enable_paging
25252
25253@@ -382,6 +455,9 @@ default_entry:
25254 /* Make changes effective */
25255 wrmsr
25256
25257+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
25258+#endif
25259+
25260 enable_paging:
25261
25262 /*
25263@@ -449,14 +525,20 @@ is486:
25264 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
25265 movl %eax,%ss # after changing gdt.
25266
25267- movl $(__USER_DS),%eax # DS/ES contains default USER segment
25268+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
25269 movl %eax,%ds
25270 movl %eax,%es
25271
25272 movl $(__KERNEL_PERCPU), %eax
25273 movl %eax,%fs # set this cpu's percpu
25274
25275+#ifdef CONFIG_CC_STACKPROTECTOR
25276 movl $(__KERNEL_STACK_CANARY),%eax
25277+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
25278+ movl $(__USER_DS),%eax
25279+#else
25280+ xorl %eax,%eax
25281+#endif
25282 movl %eax,%gs
25283
25284 xorl %eax,%eax # Clear LDT
25285@@ -512,8 +594,11 @@ setup_once:
25286 * relocation. Manually set base address in stack canary
25287 * segment descriptor.
25288 */
25289- movl $gdt_page,%eax
25290+ movl $cpu_gdt_table,%eax
25291 movl $stack_canary,%ecx
25292+#ifdef CONFIG_SMP
25293+ addl $__per_cpu_load,%ecx
25294+#endif
25295 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
25296 shrl $16, %ecx
25297 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
25298@@ -548,7 +633,7 @@ ENTRY(early_idt_handler)
25299 cmpl $2,(%esp) # X86_TRAP_NMI
25300 je is_nmi # Ignore NMI
25301
25302- cmpl $2,%ss:early_recursion_flag
25303+ cmpl $1,%ss:early_recursion_flag
25304 je hlt_loop
25305 incl %ss:early_recursion_flag
25306
25307@@ -586,8 +671,8 @@ ENTRY(early_idt_handler)
25308 pushl (20+6*4)(%esp) /* trapno */
25309 pushl $fault_msg
25310 call printk
25311-#endif
25312 call dump_stack
25313+#endif
25314 hlt_loop:
25315 hlt
25316 jmp hlt_loop
25317@@ -607,8 +692,11 @@ ENDPROC(early_idt_handler)
25318 /* This is the default interrupt "handler" :-) */
25319 ALIGN
25320 ignore_int:
25321- cld
25322 #ifdef CONFIG_PRINTK
25323+ cmpl $2,%ss:early_recursion_flag
25324+ je hlt_loop
25325+ incl %ss:early_recursion_flag
25326+ cld
25327 pushl %eax
25328 pushl %ecx
25329 pushl %edx
25330@@ -617,9 +705,6 @@ ignore_int:
25331 movl $(__KERNEL_DS),%eax
25332 movl %eax,%ds
25333 movl %eax,%es
25334- cmpl $2,early_recursion_flag
25335- je hlt_loop
25336- incl early_recursion_flag
25337 pushl 16(%esp)
25338 pushl 24(%esp)
25339 pushl 32(%esp)
25340@@ -653,29 +738,34 @@ ENTRY(setup_once_ref)
25341 /*
25342 * BSS section
25343 */
25344-__PAGE_ALIGNED_BSS
25345- .align PAGE_SIZE
25346 #ifdef CONFIG_X86_PAE
25347+.section .initial_pg_pmd,"a",@progbits
25348 initial_pg_pmd:
25349 .fill 1024*KPMDS,4,0
25350 #else
25351+.section .initial_page_table,"a",@progbits
25352 ENTRY(initial_page_table)
25353 .fill 1024,4,0
25354 #endif
25355+.section .initial_pg_fixmap,"a",@progbits
25356 initial_pg_fixmap:
25357 .fill 1024,4,0
25358+.section .empty_zero_page,"a",@progbits
25359 ENTRY(empty_zero_page)
25360 .fill 4096,1,0
25361+.section .swapper_pg_dir,"a",@progbits
25362 ENTRY(swapper_pg_dir)
25363+#ifdef CONFIG_X86_PAE
25364+ .fill 4,8,0
25365+#else
25366 .fill 1024,4,0
25367+#endif
25368
25369 /*
25370 * This starts the data section.
25371 */
25372 #ifdef CONFIG_X86_PAE
25373-__PAGE_ALIGNED_DATA
25374- /* Page-aligned for the benefit of paravirt? */
25375- .align PAGE_SIZE
25376+.section .initial_page_table,"a",@progbits
25377 ENTRY(initial_page_table)
25378 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
25379 # if KPMDS == 3
25380@@ -694,12 +784,20 @@ ENTRY(initial_page_table)
25381 # error "Kernel PMDs should be 1, 2 or 3"
25382 # endif
25383 .align PAGE_SIZE /* needs to be page-sized too */
25384+
25385+#ifdef CONFIG_PAX_PER_CPU_PGD
25386+ENTRY(cpu_pgd)
25387+ .rept 2*NR_CPUS
25388+ .fill 4,8,0
25389+ .endr
25390+#endif
25391+
25392 #endif
25393
25394 .data
25395 .balign 4
25396 ENTRY(stack_start)
25397- .long init_thread_union+THREAD_SIZE
25398+ .long init_thread_union+THREAD_SIZE-8
25399
25400 __INITRODATA
25401 int_msg:
25402@@ -727,7 +825,7 @@ fault_msg:
25403 * segment size, and 32-bit linear address value:
25404 */
25405
25406- .data
25407+.section .rodata,"a",@progbits
25408 .globl boot_gdt_descr
25409 .globl idt_descr
25410
25411@@ -736,7 +834,7 @@ fault_msg:
25412 .word 0 # 32 bit align gdt_desc.address
25413 boot_gdt_descr:
25414 .word __BOOT_DS+7
25415- .long boot_gdt - __PAGE_OFFSET
25416+ .long pa(boot_gdt)
25417
25418 .word 0 # 32-bit align idt_desc.address
25419 idt_descr:
25420@@ -747,7 +845,7 @@ idt_descr:
25421 .word 0 # 32 bit align gdt_desc.address
25422 ENTRY(early_gdt_descr)
25423 .word GDT_ENTRIES*8-1
25424- .long gdt_page /* Overwritten for secondary CPUs */
25425+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
25426
25427 /*
25428 * The boot_gdt must mirror the equivalent in setup.S and is
25429@@ -756,5 +854,65 @@ ENTRY(early_gdt_descr)
25430 .align L1_CACHE_BYTES
25431 ENTRY(boot_gdt)
25432 .fill GDT_ENTRY_BOOT_CS,8,0
25433- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
25434- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
25435+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
25436+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
25437+
25438+ .align PAGE_SIZE_asm
25439+ENTRY(cpu_gdt_table)
25440+ .rept NR_CPUS
25441+ .quad 0x0000000000000000 /* NULL descriptor */
25442+ .quad 0x0000000000000000 /* 0x0b reserved */
25443+ .quad 0x0000000000000000 /* 0x13 reserved */
25444+ .quad 0x0000000000000000 /* 0x1b reserved */
25445+
25446+#ifdef CONFIG_PAX_KERNEXEC
25447+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
25448+#else
25449+ .quad 0x0000000000000000 /* 0x20 unused */
25450+#endif
25451+
25452+ .quad 0x0000000000000000 /* 0x28 unused */
25453+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
25454+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
25455+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
25456+ .quad 0x0000000000000000 /* 0x4b reserved */
25457+ .quad 0x0000000000000000 /* 0x53 reserved */
25458+ .quad 0x0000000000000000 /* 0x5b reserved */
25459+
25460+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
25461+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
25462+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
25463+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
25464+
25465+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
25466+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
25467+
25468+ /*
25469+ * Segments used for calling PnP BIOS have byte granularity.
25470+ * The code segments and data segments have fixed 64k limits,
25471+ * the transfer segment sizes are set at run time.
25472+ */
25473+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
25474+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
25475+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
25476+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
25477+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
25478+
25479+ /*
25480+ * The APM segments have byte granularity and their bases
25481+ * are set at run time. All have 64k limits.
25482+ */
25483+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
25484+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
25485+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
25486+
25487+ .quad 0x00c093000000ffff /* 0xd0 - ESPFIX SS */
25488+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
25489+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
25490+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
25491+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
25492+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
25493+
25494+ /* Be sure this is zeroed to avoid false validations in Xen */
25495+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
25496+ .endr
25497diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
25498index a468c0a..8b5a879 100644
25499--- a/arch/x86/kernel/head_64.S
25500+++ b/arch/x86/kernel/head_64.S
25501@@ -20,6 +20,8 @@
25502 #include <asm/processor-flags.h>
25503 #include <asm/percpu.h>
25504 #include <asm/nops.h>
25505+#include <asm/cpufeature.h>
25506+#include <asm/alternative-asm.h>
25507
25508 #ifdef CONFIG_PARAVIRT
25509 #include <asm/asm-offsets.h>
25510@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
25511 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
25512 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
25513 L3_START_KERNEL = pud_index(__START_KERNEL_map)
25514+L4_VMALLOC_START = pgd_index(VMALLOC_START)
25515+L3_VMALLOC_START = pud_index(VMALLOC_START)
25516+L4_VMALLOC_END = pgd_index(VMALLOC_END)
25517+L3_VMALLOC_END = pud_index(VMALLOC_END)
25518+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
25519+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
25520
25521 .text
25522 __HEAD
25523@@ -89,11 +97,24 @@ startup_64:
25524 * Fixup the physical addresses in the page table
25525 */
25526 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
25527+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
25528+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
25529+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
25530+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
25531+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
25532
25533- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
25534- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
25535+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
25536+#ifndef CONFIG_XEN
25537+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
25538+#endif
25539+
25540+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
25541+
25542+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
25543+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
25544
25545 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
25546+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
25547
25548 /*
25549 * Set up the identity mapping for the switchover. These
25550@@ -174,11 +195,12 @@ ENTRY(secondary_startup_64)
25551 * after the boot processor executes this code.
25552 */
25553
25554+ orq $-1, %rbp
25555 movq $(init_level4_pgt - __START_KERNEL_map), %rax
25556 1:
25557
25558- /* Enable PAE mode and PGE */
25559- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
25560+ /* Enable PAE mode and PSE/PGE */
25561+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
25562 movq %rcx, %cr4
25563
25564 /* Setup early boot stage 4 level pagetables. */
25565@@ -199,10 +221,19 @@ ENTRY(secondary_startup_64)
25566 movl $MSR_EFER, %ecx
25567 rdmsr
25568 btsl $_EFER_SCE, %eax /* Enable System Call */
25569- btl $20,%edi /* No Execute supported? */
25570+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
25571 jnc 1f
25572 btsl $_EFER_NX, %eax
25573+ cmpq $-1, %rbp
25574+ je 1f
25575 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
25576+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
25577+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
25578+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
25579+ btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
25580+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
25581+ btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
25582+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
25583 1: wrmsr /* Make changes effective */
25584
25585 /* Setup cr0 */
25586@@ -282,6 +313,7 @@ ENTRY(secondary_startup_64)
25587 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
25588 * address given in m16:64.
25589 */
25590+ pax_set_fptr_mask
25591 movq initial_code(%rip),%rax
25592 pushq $0 # fake return address to stop unwinder
25593 pushq $__KERNEL_CS # set correct cs
25594@@ -313,7 +345,7 @@ ENDPROC(start_cpu0)
25595 .quad INIT_PER_CPU_VAR(irq_stack_union)
25596
25597 GLOBAL(stack_start)
25598- .quad init_thread_union+THREAD_SIZE-8
25599+ .quad init_thread_union+THREAD_SIZE-16
25600 .word 0
25601 __FINITDATA
25602
25603@@ -391,7 +423,7 @@ ENTRY(early_idt_handler)
25604 call dump_stack
25605 #ifdef CONFIG_KALLSYMS
25606 leaq early_idt_ripmsg(%rip),%rdi
25607- movq 40(%rsp),%rsi # %rip again
25608+ movq 88(%rsp),%rsi # %rip again
25609 call __print_symbol
25610 #endif
25611 #endif /* EARLY_PRINTK */
25612@@ -420,6 +452,7 @@ ENDPROC(early_idt_handler)
25613 early_recursion_flag:
25614 .long 0
25615
25616+ .section .rodata,"a",@progbits
25617 #ifdef CONFIG_EARLY_PRINTK
25618 early_idt_msg:
25619 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
25620@@ -447,29 +480,52 @@ NEXT_PAGE(early_level4_pgt)
25621 NEXT_PAGE(early_dynamic_pgts)
25622 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
25623
25624- .data
25625+ .section .rodata,"a",@progbits
25626
25627-#ifndef CONFIG_XEN
25628 NEXT_PAGE(init_level4_pgt)
25629- .fill 512,8,0
25630-#else
25631-NEXT_PAGE(init_level4_pgt)
25632- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25633 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
25634 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25635+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
25636+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
25637+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
25638+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
25639+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
25640+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25641 .org init_level4_pgt + L4_START_KERNEL*8, 0
25642 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
25643 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
25644
25645+#ifdef CONFIG_PAX_PER_CPU_PGD
25646+NEXT_PAGE(cpu_pgd)
25647+ .rept 2*NR_CPUS
25648+ .fill 512,8,0
25649+ .endr
25650+#endif
25651+
25652 NEXT_PAGE(level3_ident_pgt)
25653 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
25654+#ifdef CONFIG_XEN
25655 .fill 511, 8, 0
25656+#else
25657+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
25658+ .fill 510,8,0
25659+#endif
25660+
25661+NEXT_PAGE(level3_vmalloc_start_pgt)
25662+ .fill 512,8,0
25663+
25664+NEXT_PAGE(level3_vmalloc_end_pgt)
25665+ .fill 512,8,0
25666+
25667+NEXT_PAGE(level3_vmemmap_pgt)
25668+ .fill L3_VMEMMAP_START,8,0
25669+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
25670+
25671 NEXT_PAGE(level2_ident_pgt)
25672- /* Since I easily can, map the first 1G.
25673+ /* Since I easily can, map the first 2G.
25674 * Don't set NX because code runs from these pages.
25675 */
25676- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
25677-#endif
25678+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
25679
25680 NEXT_PAGE(level3_kernel_pgt)
25681 .fill L3_START_KERNEL,8,0
25682@@ -477,6 +533,9 @@ NEXT_PAGE(level3_kernel_pgt)
25683 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
25684 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25685
25686+NEXT_PAGE(level2_vmemmap_pgt)
25687+ .fill 512,8,0
25688+
25689 NEXT_PAGE(level2_kernel_pgt)
25690 /*
25691 * 512 MB kernel mapping. We spend a full page on this pagetable
25692@@ -494,28 +553,64 @@ NEXT_PAGE(level2_kernel_pgt)
25693 NEXT_PAGE(level2_fixmap_pgt)
25694 .fill 506,8,0
25695 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
25696- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
25697- .fill 5,8,0
25698+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
25699+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
25700+ .fill 4,8,0
25701
25702 NEXT_PAGE(level1_fixmap_pgt)
25703 .fill 512,8,0
25704
25705+NEXT_PAGE(level1_vsyscall_pgt)
25706+ .fill 512,8,0
25707+
25708 #undef PMDS
25709
25710- .data
25711+ .align PAGE_SIZE
25712+ENTRY(cpu_gdt_table)
25713+ .rept NR_CPUS
25714+ .quad 0x0000000000000000 /* NULL descriptor */
25715+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
25716+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
25717+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
25718+ .quad 0x00cffb000000ffff /* __USER32_CS */
25719+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
25720+ .quad 0x00affb000000ffff /* __USER_CS */
25721+
25722+#ifdef CONFIG_PAX_KERNEXEC
25723+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
25724+#else
25725+ .quad 0x0 /* unused */
25726+#endif
25727+
25728+ .quad 0,0 /* TSS */
25729+ .quad 0,0 /* LDT */
25730+ .quad 0,0,0 /* three TLS descriptors */
25731+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
25732+ /* asm/segment.h:GDT_ENTRIES must match this */
25733+
25734+#ifdef CONFIG_PAX_MEMORY_UDEREF
25735+ .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
25736+#else
25737+ .quad 0x0 /* unused */
25738+#endif
25739+
25740+ /* zero the remaining page */
25741+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
25742+ .endr
25743+
25744 .align 16
25745 .globl early_gdt_descr
25746 early_gdt_descr:
25747 .word GDT_ENTRIES*8-1
25748 early_gdt_descr_base:
25749- .quad INIT_PER_CPU_VAR(gdt_page)
25750+ .quad cpu_gdt_table
25751
25752 ENTRY(phys_base)
25753 /* This must match the first entry in level2_kernel_pgt */
25754 .quad 0x0000000000000000
25755
25756 #include "../../x86/xen/xen-head.S"
25757-
25758- __PAGE_ALIGNED_BSS
25759+
25760+ .section .rodata,"a",@progbits
25761 NEXT_PAGE(empty_zero_page)
25762 .skip PAGE_SIZE
25763diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
25764index 05fd74f..c3548b1 100644
25765--- a/arch/x86/kernel/i386_ksyms_32.c
25766+++ b/arch/x86/kernel/i386_ksyms_32.c
25767@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
25768 EXPORT_SYMBOL(cmpxchg8b_emu);
25769 #endif
25770
25771+EXPORT_SYMBOL_GPL(cpu_gdt_table);
25772+
25773 /* Networking helper routines. */
25774 EXPORT_SYMBOL(csum_partial_copy_generic);
25775+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
25776+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
25777
25778 EXPORT_SYMBOL(__get_user_1);
25779 EXPORT_SYMBOL(__get_user_2);
25780@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
25781 EXPORT_SYMBOL(___preempt_schedule_context);
25782 #endif
25783 #endif
25784+
25785+#ifdef CONFIG_PAX_KERNEXEC
25786+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
25787+#endif
25788+
25789+#ifdef CONFIG_PAX_PER_CPU_PGD
25790+EXPORT_SYMBOL(cpu_pgd);
25791+#endif
25792diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
25793index a9a4229..6f4d476 100644
25794--- a/arch/x86/kernel/i387.c
25795+++ b/arch/x86/kernel/i387.c
25796@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
25797 static inline bool interrupted_user_mode(void)
25798 {
25799 struct pt_regs *regs = get_irq_regs();
25800- return regs && user_mode_vm(regs);
25801+ return regs && user_mode(regs);
25802 }
25803
25804 /*
25805diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
25806index 8af8171..f8c1169 100644
25807--- a/arch/x86/kernel/i8259.c
25808+++ b/arch/x86/kernel/i8259.c
25809@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
25810 static void make_8259A_irq(unsigned int irq)
25811 {
25812 disable_irq_nosync(irq);
25813- io_apic_irqs &= ~(1<<irq);
25814+ io_apic_irqs &= ~(1UL<<irq);
25815 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
25816 i8259A_chip.name);
25817 enable_irq(irq);
25818@@ -209,7 +209,7 @@ spurious_8259A_irq:
25819 "spurious 8259A interrupt: IRQ%d.\n", irq);
25820 spurious_irq_mask |= irqmask;
25821 }
25822- atomic_inc(&irq_err_count);
25823+ atomic_inc_unchecked(&irq_err_count);
25824 /*
25825 * Theoretically we do not have to handle this IRQ,
25826 * but in Linux this does not cause problems and is
25827@@ -350,14 +350,16 @@ static void init_8259A(int auto_eoi)
25828 /* (slave's support for AEOI in flat mode is to be investigated) */
25829 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
25830
25831+ pax_open_kernel();
25832 if (auto_eoi)
25833 /*
25834 * In AEOI mode we just have to mask the interrupt
25835 * when acking.
25836 */
25837- i8259A_chip.irq_mask_ack = disable_8259A_irq;
25838+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
25839 else
25840- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25841+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
25842+ pax_close_kernel();
25843
25844 udelay(100); /* wait for 8259A to initialize */
25845
25846diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
25847index a979b5b..1d6db75 100644
25848--- a/arch/x86/kernel/io_delay.c
25849+++ b/arch/x86/kernel/io_delay.c
25850@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
25851 * Quirk table for systems that misbehave (lock up, etc.) if port
25852 * 0x80 is used:
25853 */
25854-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
25855+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
25856 {
25857 .callback = dmi_io_delay_0xed_port,
25858 .ident = "Compaq Presario V6000",
25859diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
25860index 4ddaf66..49d5c18 100644
25861--- a/arch/x86/kernel/ioport.c
25862+++ b/arch/x86/kernel/ioport.c
25863@@ -6,6 +6,7 @@
25864 #include <linux/sched.h>
25865 #include <linux/kernel.h>
25866 #include <linux/capability.h>
25867+#include <linux/security.h>
25868 #include <linux/errno.h>
25869 #include <linux/types.h>
25870 #include <linux/ioport.h>
25871@@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25872 return -EINVAL;
25873 if (turn_on && !capable(CAP_SYS_RAWIO))
25874 return -EPERM;
25875+#ifdef CONFIG_GRKERNSEC_IO
25876+ if (turn_on && grsec_disable_privio) {
25877+ gr_handle_ioperm();
25878+ return -ENODEV;
25879+ }
25880+#endif
25881
25882 /*
25883 * If it's the first ioperm() call in this thread's lifetime, set the
25884@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25885 * because the ->io_bitmap_max value must match the bitmap
25886 * contents:
25887 */
25888- tss = &per_cpu(init_tss, get_cpu());
25889+ tss = init_tss + get_cpu();
25890
25891 if (turn_on)
25892 bitmap_clear(t->io_bitmap_ptr, from, num);
25893@@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
25894 if (level > old) {
25895 if (!capable(CAP_SYS_RAWIO))
25896 return -EPERM;
25897+#ifdef CONFIG_GRKERNSEC_IO
25898+ if (grsec_disable_privio) {
25899+ gr_handle_iopl();
25900+ return -ENODEV;
25901+ }
25902+#endif
25903 }
25904 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
25905 t->iopl = level << 12;
25906diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
25907index 922d285..6d20692 100644
25908--- a/arch/x86/kernel/irq.c
25909+++ b/arch/x86/kernel/irq.c
25910@@ -22,7 +22,7 @@
25911 #define CREATE_TRACE_POINTS
25912 #include <asm/trace/irq_vectors.h>
25913
25914-atomic_t irq_err_count;
25915+atomic_unchecked_t irq_err_count;
25916
25917 /* Function pointer for generic interrupt vector handling */
25918 void (*x86_platform_ipi_callback)(void) = NULL;
25919@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
25920 seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
25921 seq_printf(p, " Hypervisor callback interrupts\n");
25922 #endif
25923- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
25924+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
25925 #if defined(CONFIG_X86_IO_APIC)
25926- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
25927+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
25928 #endif
25929 return 0;
25930 }
25931@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
25932
25933 u64 arch_irq_stat(void)
25934 {
25935- u64 sum = atomic_read(&irq_err_count);
25936+ u64 sum = atomic_read_unchecked(&irq_err_count);
25937 return sum;
25938 }
25939
25940diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
25941index 63ce838..2ea3e06 100644
25942--- a/arch/x86/kernel/irq_32.c
25943+++ b/arch/x86/kernel/irq_32.c
25944@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
25945
25946 #ifdef CONFIG_DEBUG_STACKOVERFLOW
25947
25948+extern void gr_handle_kernel_exploit(void);
25949+
25950 int sysctl_panic_on_stackoverflow __read_mostly;
25951
25952 /* Debugging check for stack overflow: is there less than 1KB free? */
25953@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
25954 __asm__ __volatile__("andl %%esp,%0" :
25955 "=r" (sp) : "0" (THREAD_SIZE - 1));
25956
25957- return sp < (sizeof(struct thread_info) + STACK_WARN);
25958+ return sp < STACK_WARN;
25959 }
25960
25961 static void print_stack_overflow(void)
25962 {
25963 printk(KERN_WARNING "low stack detected by irq handler\n");
25964 dump_stack();
25965+ gr_handle_kernel_exploit();
25966 if (sysctl_panic_on_stackoverflow)
25967 panic("low stack detected by irq handler - check messages\n");
25968 }
25969@@ -84,10 +87,9 @@ static inline void *current_stack(void)
25970 static inline int
25971 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25972 {
25973- struct irq_stack *curstk, *irqstk;
25974+ struct irq_stack *irqstk;
25975 u32 *isp, *prev_esp, arg1, arg2;
25976
25977- curstk = (struct irq_stack *) current_stack();
25978 irqstk = __this_cpu_read(hardirq_stack);
25979
25980 /*
25981@@ -96,15 +98,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
25982 * handler) we can't do that and just have to keep using the
25983 * current stack (which is the irq stack already after all)
25984 */
25985- if (unlikely(curstk == irqstk))
25986+ if (unlikely((void *)current_stack_pointer - (void *)irqstk < THREAD_SIZE))
25987 return 0;
25988
25989- isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
25990+ isp = (u32 *) ((char *)irqstk + sizeof(*irqstk) - 8);
25991
25992 /* Save the next esp at the bottom of the stack */
25993 prev_esp = (u32 *)irqstk;
25994 *prev_esp = current_stack_pointer;
25995
25996+#ifdef CONFIG_PAX_MEMORY_UDEREF
25997+ __set_fs(MAKE_MM_SEG(0));
25998+#endif
25999+
26000 if (unlikely(overflow))
26001 call_on_stack(print_stack_overflow, isp);
26002
26003@@ -115,6 +121,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
26004 : "0" (irq), "1" (desc), "2" (isp),
26005 "D" (desc->handle_irq)
26006 : "memory", "cc", "ecx");
26007+
26008+#ifdef CONFIG_PAX_MEMORY_UDEREF
26009+ __set_fs(current_thread_info()->addr_limit);
26010+#endif
26011+
26012 return 1;
26013 }
26014
26015@@ -123,32 +134,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
26016 */
26017 void irq_ctx_init(int cpu)
26018 {
26019- struct irq_stack *irqstk;
26020-
26021 if (per_cpu(hardirq_stack, cpu))
26022 return;
26023
26024- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
26025- THREADINFO_GFP,
26026- THREAD_SIZE_ORDER));
26027- per_cpu(hardirq_stack, cpu) = irqstk;
26028-
26029- irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
26030- THREADINFO_GFP,
26031- THREAD_SIZE_ORDER));
26032- per_cpu(softirq_stack, cpu) = irqstk;
26033-
26034- printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
26035- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
26036+ per_cpu(hardirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
26037+ per_cpu(softirq_stack, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
26038 }
26039
26040 void do_softirq_own_stack(void)
26041 {
26042- struct thread_info *curstk;
26043 struct irq_stack *irqstk;
26044 u32 *isp, *prev_esp;
26045
26046- curstk = current_stack();
26047 irqstk = __this_cpu_read(softirq_stack);
26048
26049 /* build the stack frame on the softirq stack */
26050@@ -158,7 +155,16 @@ void do_softirq_own_stack(void)
26051 prev_esp = (u32 *)irqstk;
26052 *prev_esp = current_stack_pointer;
26053
26054+#ifdef CONFIG_PAX_MEMORY_UDEREF
26055+ __set_fs(MAKE_MM_SEG(0));
26056+#endif
26057+
26058 call_on_stack(__do_softirq, isp);
26059+
26060+#ifdef CONFIG_PAX_MEMORY_UDEREF
26061+ __set_fs(current_thread_info()->addr_limit);
26062+#endif
26063+
26064 }
26065
26066 bool handle_irq(unsigned irq, struct pt_regs *regs)
26067@@ -172,7 +178,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
26068 if (unlikely(!desc))
26069 return false;
26070
26071- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
26072+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
26073 if (unlikely(overflow))
26074 print_stack_overflow();
26075 desc->handle_irq(irq, desc);
26076diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
26077index 4d1c746..55a22d6 100644
26078--- a/arch/x86/kernel/irq_64.c
26079+++ b/arch/x86/kernel/irq_64.c
26080@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
26081 DEFINE_PER_CPU(struct pt_regs *, irq_regs);
26082 EXPORT_PER_CPU_SYMBOL(irq_regs);
26083
26084+extern void gr_handle_kernel_exploit(void);
26085+
26086 int sysctl_panic_on_stackoverflow;
26087
26088 /*
26089@@ -44,7 +46,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
26090 u64 estack_top, estack_bottom;
26091 u64 curbase = (u64)task_stack_page(current);
26092
26093- if (user_mode_vm(regs))
26094+ if (user_mode(regs))
26095 return;
26096
26097 if (regs->sp >= curbase + sizeof(struct thread_info) +
26098@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
26099 irq_stack_top, irq_stack_bottom,
26100 estack_top, estack_bottom);
26101
26102+ gr_handle_kernel_exploit();
26103+
26104 if (sysctl_panic_on_stackoverflow)
26105 panic("low stack detected by irq handler - check messages\n");
26106 #endif
26107diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
26108index 26d5a55..a01160a 100644
26109--- a/arch/x86/kernel/jump_label.c
26110+++ b/arch/x86/kernel/jump_label.c
26111@@ -51,7 +51,7 @@ static void __jump_label_transform(struct jump_entry *entry,
26112 * Jump label is enabled for the first time.
26113 * So we expect a default_nop...
26114 */
26115- if (unlikely(memcmp((void *)entry->code, default_nop, 5)
26116+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5)
26117 != 0))
26118 bug_at((void *)entry->code, __LINE__);
26119 } else {
26120@@ -59,7 +59,7 @@ static void __jump_label_transform(struct jump_entry *entry,
26121 * ...otherwise expect an ideal_nop. Otherwise
26122 * something went horribly wrong.
26123 */
26124- if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
26125+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5)
26126 != 0))
26127 bug_at((void *)entry->code, __LINE__);
26128 }
26129@@ -75,13 +75,13 @@ static void __jump_label_transform(struct jump_entry *entry,
26130 * are converting the default nop to the ideal nop.
26131 */
26132 if (init) {
26133- if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
26134+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
26135 bug_at((void *)entry->code, __LINE__);
26136 } else {
26137 code.jump = 0xe9;
26138 code.offset = entry->target -
26139 (entry->code + JUMP_LABEL_NOP_SIZE);
26140- if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
26141+ if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
26142 bug_at((void *)entry->code, __LINE__);
26143 }
26144 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
26145diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
26146index 7ec1d5f..5a7d130 100644
26147--- a/arch/x86/kernel/kgdb.c
26148+++ b/arch/x86/kernel/kgdb.c
26149@@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
26150 #ifdef CONFIG_X86_32
26151 switch (regno) {
26152 case GDB_SS:
26153- if (!user_mode_vm(regs))
26154+ if (!user_mode(regs))
26155 *(unsigned long *)mem = __KERNEL_DS;
26156 break;
26157 case GDB_SP:
26158- if (!user_mode_vm(regs))
26159+ if (!user_mode(regs))
26160 *(unsigned long *)mem = kernel_stack_pointer(regs);
26161 break;
26162 case GDB_GS:
26163@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
26164 bp->attr.bp_addr = breakinfo[breakno].addr;
26165 bp->attr.bp_len = breakinfo[breakno].len;
26166 bp->attr.bp_type = breakinfo[breakno].type;
26167- info->address = breakinfo[breakno].addr;
26168+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
26169+ info->address = ktla_ktva(breakinfo[breakno].addr);
26170+ else
26171+ info->address = breakinfo[breakno].addr;
26172 info->len = breakinfo[breakno].len;
26173 info->type = breakinfo[breakno].type;
26174 val = arch_install_hw_breakpoint(bp);
26175@@ -475,12 +478,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
26176 case 'k':
26177 /* clear the trace bit */
26178 linux_regs->flags &= ~X86_EFLAGS_TF;
26179- atomic_set(&kgdb_cpu_doing_single_step, -1);
26180+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
26181
26182 /* set the trace bit if we're stepping */
26183 if (remcomInBuffer[0] == 's') {
26184 linux_regs->flags |= X86_EFLAGS_TF;
26185- atomic_set(&kgdb_cpu_doing_single_step,
26186+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
26187 raw_smp_processor_id());
26188 }
26189
26190@@ -545,7 +548,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
26191
26192 switch (cmd) {
26193 case DIE_DEBUG:
26194- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
26195+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
26196 if (user_mode(regs))
26197 return single_step_cont(regs, args);
26198 break;
26199@@ -750,11 +753,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
26200 #endif /* CONFIG_DEBUG_RODATA */
26201
26202 bpt->type = BP_BREAKPOINT;
26203- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
26204+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
26205 BREAK_INSTR_SIZE);
26206 if (err)
26207 return err;
26208- err = probe_kernel_write((char *)bpt->bpt_addr,
26209+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
26210 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
26211 #ifdef CONFIG_DEBUG_RODATA
26212 if (!err)
26213@@ -767,7 +770,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
26214 return -EBUSY;
26215 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
26216 BREAK_INSTR_SIZE);
26217- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
26218+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
26219 if (err)
26220 return err;
26221 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
26222@@ -792,13 +795,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
26223 if (mutex_is_locked(&text_mutex))
26224 goto knl_write;
26225 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
26226- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
26227+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
26228 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
26229 goto knl_write;
26230 return err;
26231 knl_write:
26232 #endif /* CONFIG_DEBUG_RODATA */
26233- return probe_kernel_write((char *)bpt->bpt_addr,
26234+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
26235 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
26236 }
26237
26238diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
26239index 67e6d19..731ed28 100644
26240--- a/arch/x86/kernel/kprobes/core.c
26241+++ b/arch/x86/kernel/kprobes/core.c
26242@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
26243 s32 raddr;
26244 } __packed *insn;
26245
26246- insn = (struct __arch_relative_insn *)from;
26247+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
26248+
26249+ pax_open_kernel();
26250 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
26251 insn->op = op;
26252+ pax_close_kernel();
26253 }
26254
26255 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
26256@@ -168,7 +171,7 @@ int can_boost(kprobe_opcode_t *opcodes)
26257 kprobe_opcode_t opcode;
26258 kprobe_opcode_t *orig_opcodes = opcodes;
26259
26260- if (search_exception_tables((unsigned long)opcodes))
26261+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
26262 return 0; /* Page fault may occur on this address. */
26263
26264 retry:
26265@@ -242,9 +245,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
26266 * for the first byte, we can recover the original instruction
26267 * from it and kp->opcode.
26268 */
26269- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
26270+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
26271 buf[0] = kp->opcode;
26272- return (unsigned long)buf;
26273+ return ktva_ktla((unsigned long)buf);
26274 }
26275
26276 /*
26277@@ -336,7 +339,9 @@ int __copy_instruction(u8 *dest, u8 *src)
26278 /* Another subsystem puts a breakpoint, failed to recover */
26279 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
26280 return 0;
26281+ pax_open_kernel();
26282 memcpy(dest, insn.kaddr, insn.length);
26283+ pax_close_kernel();
26284
26285 #ifdef CONFIG_X86_64
26286 if (insn_rip_relative(&insn)) {
26287@@ -363,7 +368,9 @@ int __copy_instruction(u8 *dest, u8 *src)
26288 return 0;
26289 }
26290 disp = (u8 *) dest + insn_offset_displacement(&insn);
26291+ pax_open_kernel();
26292 *(s32 *) disp = (s32) newdisp;
26293+ pax_close_kernel();
26294 }
26295 #endif
26296 return insn.length;
26297@@ -505,7 +512,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
26298 * nor set current_kprobe, because it doesn't use single
26299 * stepping.
26300 */
26301- regs->ip = (unsigned long)p->ainsn.insn;
26302+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
26303 preempt_enable_no_resched();
26304 return;
26305 }
26306@@ -522,9 +529,9 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
26307 regs->flags &= ~X86_EFLAGS_IF;
26308 /* single step inline if the instruction is an int3 */
26309 if (p->opcode == BREAKPOINT_INSTRUCTION)
26310- regs->ip = (unsigned long)p->addr;
26311+ regs->ip = ktla_ktva((unsigned long)p->addr);
26312 else
26313- regs->ip = (unsigned long)p->ainsn.insn;
26314+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
26315 }
26316 NOKPROBE_SYMBOL(setup_singlestep);
26317
26318@@ -574,7 +581,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
26319 struct kprobe *p;
26320 struct kprobe_ctlblk *kcb;
26321
26322- if (user_mode_vm(regs))
26323+ if (user_mode(regs))
26324 return 0;
26325
26326 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
26327@@ -609,7 +616,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
26328 setup_singlestep(p, regs, kcb, 0);
26329 return 1;
26330 }
26331- } else if (*addr != BREAKPOINT_INSTRUCTION) {
26332+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
26333 /*
26334 * The breakpoint instruction was removed right
26335 * after we hit it. Another cpu has removed
26336@@ -656,6 +663,9 @@ static void __used kretprobe_trampoline_holder(void)
26337 " movq %rax, 152(%rsp)\n"
26338 RESTORE_REGS_STRING
26339 " popfq\n"
26340+#ifdef KERNEXEC_PLUGIN
26341+ " btsq $63,(%rsp)\n"
26342+#endif
26343 #else
26344 " pushf\n"
26345 SAVE_REGS_STRING
26346@@ -796,7 +806,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
26347 struct kprobe_ctlblk *kcb)
26348 {
26349 unsigned long *tos = stack_addr(regs);
26350- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
26351+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
26352 unsigned long orig_ip = (unsigned long)p->addr;
26353 kprobe_opcode_t *insn = p->ainsn.insn;
26354
26355@@ -979,7 +989,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
26356 struct die_args *args = data;
26357 int ret = NOTIFY_DONE;
26358
26359- if (args->regs && user_mode_vm(args->regs))
26360+ if (args->regs && user_mode(args->regs))
26361 return ret;
26362
26363 if (val == DIE_GPF) {
26364diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
26365index f1314d0..15f3154 100644
26366--- a/arch/x86/kernel/kprobes/opt.c
26367+++ b/arch/x86/kernel/kprobes/opt.c
26368@@ -79,6 +79,7 @@ found:
26369 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
26370 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
26371 {
26372+ pax_open_kernel();
26373 #ifdef CONFIG_X86_64
26374 *addr++ = 0x48;
26375 *addr++ = 0xbf;
26376@@ -86,6 +87,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
26377 *addr++ = 0xb8;
26378 #endif
26379 *(unsigned long *)addr = val;
26380+ pax_close_kernel();
26381 }
26382
26383 asm (
26384@@ -337,7 +339,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
26385 * Verify if the address gap is in 2GB range, because this uses
26386 * a relative jump.
26387 */
26388- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
26389+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
26390 if (abs(rel) > 0x7fffffff) {
26391 __arch_remove_optimized_kprobe(op, 0);
26392 return -ERANGE;
26393@@ -354,16 +356,18 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
26394 op->optinsn.size = ret;
26395
26396 /* Copy arch-dep-instance from template */
26397- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
26398+ pax_open_kernel();
26399+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
26400+ pax_close_kernel();
26401
26402 /* Set probe information */
26403 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
26404
26405 /* Set probe function call */
26406- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
26407+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
26408
26409 /* Set returning jmp instruction at the tail of out-of-line buffer */
26410- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
26411+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
26412 (u8 *)op->kp.addr + op->optinsn.size);
26413
26414 flush_icache_range((unsigned long) buf,
26415@@ -388,7 +392,7 @@ void arch_optimize_kprobes(struct list_head *oplist)
26416 WARN_ON(kprobe_disabled(&op->kp));
26417
26418 /* Backup instructions which will be replaced by jump address */
26419- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
26420+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
26421 RELATIVE_ADDR_SIZE);
26422
26423 insn_buf[0] = RELATIVEJUMP_OPCODE;
26424@@ -436,7 +440,7 @@ int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
26425 /* This kprobe is really able to run optimized path. */
26426 op = container_of(p, struct optimized_kprobe, kp);
26427 /* Detour through copied instructions */
26428- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
26429+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
26430 if (!reenter)
26431 reset_current_kprobe();
26432 preempt_enable_no_resched();
26433diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
26434index c2bedae..25e7ab60 100644
26435--- a/arch/x86/kernel/ksysfs.c
26436+++ b/arch/x86/kernel/ksysfs.c
26437@@ -184,7 +184,7 @@ out:
26438
26439 static struct kobj_attribute type_attr = __ATTR_RO(type);
26440
26441-static struct bin_attribute data_attr = {
26442+static bin_attribute_no_const data_attr __read_only = {
26443 .attr = {
26444 .name = "data",
26445 .mode = S_IRUGO,
26446diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
26447index c37886d..d851d32 100644
26448--- a/arch/x86/kernel/ldt.c
26449+++ b/arch/x86/kernel/ldt.c
26450@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
26451 if (reload) {
26452 #ifdef CONFIG_SMP
26453 preempt_disable();
26454- load_LDT(pc);
26455+ load_LDT_nolock(pc);
26456 if (!cpumask_equal(mm_cpumask(current->mm),
26457 cpumask_of(smp_processor_id())))
26458 smp_call_function(flush_ldt, current->mm, 1);
26459 preempt_enable();
26460 #else
26461- load_LDT(pc);
26462+ load_LDT_nolock(pc);
26463 #endif
26464 }
26465 if (oldsize) {
26466@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
26467 return err;
26468
26469 for (i = 0; i < old->size; i++)
26470- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
26471+ write_ldt_entry(new->ldt, i, old->ldt + i);
26472 return 0;
26473 }
26474
26475@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
26476 retval = copy_ldt(&mm->context, &old_mm->context);
26477 mutex_unlock(&old_mm->context.lock);
26478 }
26479+
26480+ if (tsk == current) {
26481+ mm->context.vdso = 0;
26482+
26483+#ifdef CONFIG_X86_32
26484+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26485+ mm->context.user_cs_base = 0UL;
26486+ mm->context.user_cs_limit = ~0UL;
26487+
26488+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
26489+ cpus_clear(mm->context.cpu_user_cs_mask);
26490+#endif
26491+
26492+#endif
26493+#endif
26494+
26495+ }
26496+
26497 return retval;
26498 }
26499
26500@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
26501 }
26502 }
26503
26504+#ifdef CONFIG_PAX_SEGMEXEC
26505+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
26506+ error = -EINVAL;
26507+ goto out_unlock;
26508+ }
26509+#endif
26510+
26511 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
26512 error = -EINVAL;
26513 goto out_unlock;
26514diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
26515index 1667b1d..16492c5 100644
26516--- a/arch/x86/kernel/machine_kexec_32.c
26517+++ b/arch/x86/kernel/machine_kexec_32.c
26518@@ -25,7 +25,7 @@
26519 #include <asm/cacheflush.h>
26520 #include <asm/debugreg.h>
26521
26522-static void set_idt(void *newidt, __u16 limit)
26523+static void set_idt(struct desc_struct *newidt, __u16 limit)
26524 {
26525 struct desc_ptr curidt;
26526
26527@@ -37,7 +37,7 @@ static void set_idt(void *newidt, __u16 limit)
26528 }
26529
26530
26531-static void set_gdt(void *newgdt, __u16 limit)
26532+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
26533 {
26534 struct desc_ptr curgdt;
26535
26536@@ -215,7 +215,7 @@ void machine_kexec(struct kimage *image)
26537 }
26538
26539 control_page = page_address(image->control_code_page);
26540- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
26541+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
26542
26543 relocate_kernel_ptr = control_page;
26544 page_list[PA_CONTROL_PAGE] = __pa(control_page);
26545diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
26546index c73aecf..4c63630 100644
26547--- a/arch/x86/kernel/mcount_64.S
26548+++ b/arch/x86/kernel/mcount_64.S
26549@@ -7,7 +7,7 @@
26550 #include <linux/linkage.h>
26551 #include <asm/ptrace.h>
26552 #include <asm/ftrace.h>
26553-
26554+#include <asm/alternative-asm.h>
26555
26556 .code64
26557 .section .entry.text, "ax"
26558@@ -24,8 +24,9 @@
26559 #ifdef CONFIG_DYNAMIC_FTRACE
26560
26561 ENTRY(function_hook)
26562+ pax_force_retaddr
26563 retq
26564-END(function_hook)
26565+ENDPROC(function_hook)
26566
26567 /* skip is set if stack has been adjusted */
26568 .macro ftrace_caller_setup skip=0
26569@@ -62,8 +63,9 @@ GLOBAL(ftrace_graph_call)
26570 #endif
26571
26572 GLOBAL(ftrace_stub)
26573+ pax_force_retaddr
26574 retq
26575-END(ftrace_caller)
26576+ENDPROC(ftrace_caller)
26577
26578 ENTRY(ftrace_regs_caller)
26579 /* Save the current flags before compare (in SS location)*/
26580@@ -127,7 +129,7 @@ GLOBAL(ftrace_regs_call)
26581 popfq
26582 jmp ftrace_stub
26583
26584-END(ftrace_regs_caller)
26585+ENDPROC(ftrace_regs_caller)
26586
26587
26588 #else /* ! CONFIG_DYNAMIC_FTRACE */
26589@@ -145,6 +147,7 @@ ENTRY(function_hook)
26590 #endif
26591
26592 GLOBAL(ftrace_stub)
26593+ pax_force_retaddr
26594 retq
26595
26596 trace:
26597@@ -158,12 +161,13 @@ trace:
26598 #endif
26599 subq $MCOUNT_INSN_SIZE, %rdi
26600
26601+ pax_force_fptr ftrace_trace_function
26602 call *ftrace_trace_function
26603
26604 MCOUNT_RESTORE_FRAME
26605
26606 jmp ftrace_stub
26607-END(function_hook)
26608+ENDPROC(function_hook)
26609 #endif /* CONFIG_DYNAMIC_FTRACE */
26610 #endif /* CONFIG_FUNCTION_TRACER */
26611
26612@@ -185,8 +189,9 @@ ENTRY(ftrace_graph_caller)
26613
26614 MCOUNT_RESTORE_FRAME
26615
26616+ pax_force_retaddr
26617 retq
26618-END(ftrace_graph_caller)
26619+ENDPROC(ftrace_graph_caller)
26620
26621 GLOBAL(return_to_handler)
26622 subq $24, %rsp
26623@@ -202,5 +207,7 @@ GLOBAL(return_to_handler)
26624 movq 8(%rsp), %rdx
26625 movq (%rsp), %rax
26626 addq $24, %rsp
26627+ pax_force_fptr %rdi
26628 jmp *%rdi
26629+ENDPROC(return_to_handler)
26630 #endif
26631diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
26632index e69f988..da078ea 100644
26633--- a/arch/x86/kernel/module.c
26634+++ b/arch/x86/kernel/module.c
26635@@ -81,17 +81,62 @@ static unsigned long int get_module_load_offset(void)
26636 }
26637 #endif
26638
26639-void *module_alloc(unsigned long size)
26640+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
26641 {
26642- if (PAGE_ALIGN(size) > MODULES_LEN)
26643+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
26644 return NULL;
26645 return __vmalloc_node_range(size, 1,
26646 MODULES_VADDR + get_module_load_offset(),
26647- MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
26648- PAGE_KERNEL_EXEC, NUMA_NO_NODE,
26649+ MODULES_END, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
26650+ prot, NUMA_NO_NODE,
26651 __builtin_return_address(0));
26652 }
26653
26654+void *module_alloc(unsigned long size)
26655+{
26656+
26657+#ifdef CONFIG_PAX_KERNEXEC
26658+ return __module_alloc(size, PAGE_KERNEL);
26659+#else
26660+ return __module_alloc(size, PAGE_KERNEL_EXEC);
26661+#endif
26662+
26663+}
26664+
26665+#ifdef CONFIG_PAX_KERNEXEC
26666+#ifdef CONFIG_X86_32
26667+void *module_alloc_exec(unsigned long size)
26668+{
26669+ struct vm_struct *area;
26670+
26671+ if (size == 0)
26672+ return NULL;
26673+
26674+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
26675+return area ? area->addr : NULL;
26676+}
26677+EXPORT_SYMBOL(module_alloc_exec);
26678+
26679+void module_free_exec(struct module *mod, void *module_region)
26680+{
26681+ vunmap(module_region);
26682+}
26683+EXPORT_SYMBOL(module_free_exec);
26684+#else
26685+void module_free_exec(struct module *mod, void *module_region)
26686+{
26687+ module_free(mod, module_region);
26688+}
26689+EXPORT_SYMBOL(module_free_exec);
26690+
26691+void *module_alloc_exec(unsigned long size)
26692+{
26693+ return __module_alloc(size, PAGE_KERNEL_RX);
26694+}
26695+EXPORT_SYMBOL(module_alloc_exec);
26696+#endif
26697+#endif
26698+
26699 #ifdef CONFIG_X86_32
26700 int apply_relocate(Elf32_Shdr *sechdrs,
26701 const char *strtab,
26702@@ -102,14 +147,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26703 unsigned int i;
26704 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
26705 Elf32_Sym *sym;
26706- uint32_t *location;
26707+ uint32_t *plocation, location;
26708
26709 DEBUGP("Applying relocate section %u to %u\n",
26710 relsec, sechdrs[relsec].sh_info);
26711 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
26712 /* This is where to make the change */
26713- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
26714- + rel[i].r_offset;
26715+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
26716+ location = (uint32_t)plocation;
26717+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
26718+ plocation = ktla_ktva((void *)plocation);
26719 /* This is the symbol it is referring to. Note that all
26720 undefined symbols have been resolved. */
26721 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
26722@@ -118,11 +165,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
26723 switch (ELF32_R_TYPE(rel[i].r_info)) {
26724 case R_386_32:
26725 /* We add the value into the location given */
26726- *location += sym->st_value;
26727+ pax_open_kernel();
26728+ *plocation += sym->st_value;
26729+ pax_close_kernel();
26730 break;
26731 case R_386_PC32:
26732 /* Add the value, subtract its position */
26733- *location += sym->st_value - (uint32_t)location;
26734+ pax_open_kernel();
26735+ *plocation += sym->st_value - location;
26736+ pax_close_kernel();
26737 break;
26738 default:
26739 pr_err("%s: Unknown relocation: %u\n",
26740@@ -167,21 +218,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
26741 case R_X86_64_NONE:
26742 break;
26743 case R_X86_64_64:
26744+ pax_open_kernel();
26745 *(u64 *)loc = val;
26746+ pax_close_kernel();
26747 break;
26748 case R_X86_64_32:
26749+ pax_open_kernel();
26750 *(u32 *)loc = val;
26751+ pax_close_kernel();
26752 if (val != *(u32 *)loc)
26753 goto overflow;
26754 break;
26755 case R_X86_64_32S:
26756+ pax_open_kernel();
26757 *(s32 *)loc = val;
26758+ pax_close_kernel();
26759 if ((s64)val != *(s32 *)loc)
26760 goto overflow;
26761 break;
26762 case R_X86_64_PC32:
26763 val -= (u64)loc;
26764+ pax_open_kernel();
26765 *(u32 *)loc = val;
26766+ pax_close_kernel();
26767+
26768 #if 0
26769 if ((s64)val != *(s32 *)loc)
26770 goto overflow;
26771diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
26772index c9603ac..9f88728 100644
26773--- a/arch/x86/kernel/msr.c
26774+++ b/arch/x86/kernel/msr.c
26775@@ -37,6 +37,7 @@
26776 #include <linux/notifier.h>
26777 #include <linux/uaccess.h>
26778 #include <linux/gfp.h>
26779+#include <linux/grsecurity.h>
26780
26781 #include <asm/processor.h>
26782 #include <asm/msr.h>
26783@@ -103,6 +104,11 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
26784 int err = 0;
26785 ssize_t bytes = 0;
26786
26787+#ifdef CONFIG_GRKERNSEC_KMEM
26788+ gr_handle_msr_write();
26789+ return -EPERM;
26790+#endif
26791+
26792 if (count % 8)
26793 return -EINVAL; /* Invalid chunk size */
26794
26795@@ -150,6 +156,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
26796 err = -EBADF;
26797 break;
26798 }
26799+#ifdef CONFIG_GRKERNSEC_KMEM
26800+ gr_handle_msr_write();
26801+ return -EPERM;
26802+#endif
26803 if (copy_from_user(&regs, uregs, sizeof regs)) {
26804 err = -EFAULT;
26805 break;
26806@@ -233,7 +243,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
26807 return notifier_from_errno(err);
26808 }
26809
26810-static struct notifier_block __refdata msr_class_cpu_notifier = {
26811+static struct notifier_block msr_class_cpu_notifier = {
26812 .notifier_call = msr_class_cpu_callback,
26813 };
26814
26815diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
26816index c3e985d..110a36a 100644
26817--- a/arch/x86/kernel/nmi.c
26818+++ b/arch/x86/kernel/nmi.c
26819@@ -98,16 +98,16 @@ fs_initcall(nmi_warning_debugfs);
26820
26821 static void nmi_max_handler(struct irq_work *w)
26822 {
26823- struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
26824+ struct nmiwork *n = container_of(w, struct nmiwork, irq_work);
26825 int remainder_ns, decimal_msecs;
26826- u64 whole_msecs = ACCESS_ONCE(a->max_duration);
26827+ u64 whole_msecs = ACCESS_ONCE(n->max_duration);
26828
26829 remainder_ns = do_div(whole_msecs, (1000 * 1000));
26830 decimal_msecs = remainder_ns / 1000;
26831
26832 printk_ratelimited(KERN_INFO
26833 "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
26834- a->handler, whole_msecs, decimal_msecs);
26835+ n->action->handler, whole_msecs, decimal_msecs);
26836 }
26837
26838 static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26839@@ -134,11 +134,11 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26840 delta = sched_clock() - delta;
26841 trace_nmi_handler(a->handler, (int)delta, thishandled);
26842
26843- if (delta < nmi_longest_ns || delta < a->max_duration)
26844+ if (delta < nmi_longest_ns || delta < a->work->max_duration)
26845 continue;
26846
26847- a->max_duration = delta;
26848- irq_work_queue(&a->irq_work);
26849+ a->work->max_duration = delta;
26850+ irq_work_queue(&a->work->irq_work);
26851 }
26852
26853 rcu_read_unlock();
26854@@ -148,7 +148,7 @@ static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
26855 }
26856 NOKPROBE_SYMBOL(nmi_handle);
26857
26858-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26859+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
26860 {
26861 struct nmi_desc *desc = nmi_to_desc(type);
26862 unsigned long flags;
26863@@ -156,7 +156,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26864 if (!action->handler)
26865 return -EINVAL;
26866
26867- init_irq_work(&action->irq_work, nmi_max_handler);
26868+ action->work->action = action;
26869+ init_irq_work(&action->work->irq_work, nmi_max_handler);
26870
26871 spin_lock_irqsave(&desc->lock, flags);
26872
26873@@ -174,9 +175,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
26874 * event confuses some handlers (kdump uses this flag)
26875 */
26876 if (action->flags & NMI_FLAG_FIRST)
26877- list_add_rcu(&action->list, &desc->head);
26878+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
26879 else
26880- list_add_tail_rcu(&action->list, &desc->head);
26881+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
26882
26883 spin_unlock_irqrestore(&desc->lock, flags);
26884 return 0;
26885@@ -199,7 +200,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
26886 if (!strcmp(n->name, name)) {
26887 WARN(in_nmi(),
26888 "Trying to free NMI (%s) from NMI context!\n", n->name);
26889- list_del_rcu(&n->list);
26890+ pax_list_del_rcu((struct list_head *)&n->list);
26891 break;
26892 }
26893 }
26894@@ -528,6 +529,17 @@ static inline void nmi_nesting_postprocess(void)
26895 dotraplinkage notrace void
26896 do_nmi(struct pt_regs *regs, long error_code)
26897 {
26898+
26899+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26900+ if (!user_mode(regs)) {
26901+ unsigned long cs = regs->cs & 0xFFFF;
26902+ unsigned long ip = ktva_ktla(regs->ip);
26903+
26904+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
26905+ regs->ip = ip;
26906+ }
26907+#endif
26908+
26909 nmi_nesting_preprocess(regs);
26910
26911 nmi_enter();
26912diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
26913index 6d9582e..f746287 100644
26914--- a/arch/x86/kernel/nmi_selftest.c
26915+++ b/arch/x86/kernel/nmi_selftest.c
26916@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
26917 {
26918 /* trap all the unknown NMIs we may generate */
26919 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
26920- __initdata);
26921+ __initconst);
26922 }
26923
26924 static void __init cleanup_nmi_testsuite(void)
26925@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
26926 unsigned long timeout;
26927
26928 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
26929- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
26930+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
26931 nmi_fail = FAILURE;
26932 return;
26933 }
26934diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
26935index bbb6c73..24a58ef 100644
26936--- a/arch/x86/kernel/paravirt-spinlocks.c
26937+++ b/arch/x86/kernel/paravirt-spinlocks.c
26938@@ -8,7 +8,7 @@
26939
26940 #include <asm/paravirt.h>
26941
26942-struct pv_lock_ops pv_lock_ops = {
26943+struct pv_lock_ops pv_lock_ops __read_only = {
26944 #ifdef CONFIG_SMP
26945 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
26946 .unlock_kick = paravirt_nop,
26947diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
26948index 548d25f..f8fb99c 100644
26949--- a/arch/x86/kernel/paravirt.c
26950+++ b/arch/x86/kernel/paravirt.c
26951@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
26952 {
26953 return x;
26954 }
26955+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26956+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
26957+#endif
26958
26959 void __init default_banner(void)
26960 {
26961@@ -142,16 +145,20 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
26962
26963 if (opfunc == NULL)
26964 /* If there's no function, patch it with a ud2a (BUG) */
26965- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
26966- else if (opfunc == _paravirt_nop)
26967+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
26968+ else if (opfunc == (void *)_paravirt_nop)
26969 /* If the operation is a nop, then nop the callsite */
26970 ret = paravirt_patch_nop();
26971
26972 /* identity functions just return their single argument */
26973- else if (opfunc == _paravirt_ident_32)
26974+ else if (opfunc == (void *)_paravirt_ident_32)
26975 ret = paravirt_patch_ident_32(insnbuf, len);
26976- else if (opfunc == _paravirt_ident_64)
26977+ else if (opfunc == (void *)_paravirt_ident_64)
26978 ret = paravirt_patch_ident_64(insnbuf, len);
26979+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26980+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
26981+ ret = paravirt_patch_ident_64(insnbuf, len);
26982+#endif
26983
26984 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
26985 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
26986@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
26987 if (insn_len > len || start == NULL)
26988 insn_len = len;
26989 else
26990- memcpy(insnbuf, start, insn_len);
26991+ memcpy(insnbuf, ktla_ktva(start), insn_len);
26992
26993 return insn_len;
26994 }
26995@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
26996 return this_cpu_read(paravirt_lazy_mode);
26997 }
26998
26999-struct pv_info pv_info = {
27000+struct pv_info pv_info __read_only = {
27001 .name = "bare hardware",
27002 .paravirt_enabled = 0,
27003 .kernel_rpl = 0,
27004@@ -311,16 +318,16 @@ struct pv_info pv_info = {
27005 #endif
27006 };
27007
27008-struct pv_init_ops pv_init_ops = {
27009+struct pv_init_ops pv_init_ops __read_only = {
27010 .patch = native_patch,
27011 };
27012
27013-struct pv_time_ops pv_time_ops = {
27014+struct pv_time_ops pv_time_ops __read_only = {
27015 .sched_clock = native_sched_clock,
27016 .steal_clock = native_steal_clock,
27017 };
27018
27019-__visible struct pv_irq_ops pv_irq_ops = {
27020+__visible struct pv_irq_ops pv_irq_ops __read_only = {
27021 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
27022 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
27023 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
27024@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
27025 #endif
27026 };
27027
27028-__visible struct pv_cpu_ops pv_cpu_ops = {
27029+__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
27030 .cpuid = native_cpuid,
27031 .get_debugreg = native_get_debugreg,
27032 .set_debugreg = native_set_debugreg,
27033@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
27034 NOKPROBE_SYMBOL(native_set_debugreg);
27035 NOKPROBE_SYMBOL(native_load_idt);
27036
27037-struct pv_apic_ops pv_apic_ops = {
27038+struct pv_apic_ops pv_apic_ops __read_only= {
27039 #ifdef CONFIG_X86_LOCAL_APIC
27040 .startup_ipi_hook = paravirt_nop,
27041 #endif
27042 };
27043
27044-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
27045+#ifdef CONFIG_X86_32
27046+#ifdef CONFIG_X86_PAE
27047+/* 64-bit pagetable entries */
27048+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
27049+#else
27050 /* 32-bit pagetable entries */
27051 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
27052+#endif
27053 #else
27054 /* 64-bit pagetable entries */
27055 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
27056 #endif
27057
27058-struct pv_mmu_ops pv_mmu_ops = {
27059+struct pv_mmu_ops pv_mmu_ops __read_only = {
27060
27061 .read_cr2 = native_read_cr2,
27062 .write_cr2 = native_write_cr2,
27063@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
27064 .make_pud = PTE_IDENT,
27065
27066 .set_pgd = native_set_pgd,
27067+ .set_pgd_batched = native_set_pgd_batched,
27068 #endif
27069 #endif /* PAGETABLE_LEVELS >= 3 */
27070
27071@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
27072 },
27073
27074 .set_fixmap = native_set_fixmap,
27075+
27076+#ifdef CONFIG_PAX_KERNEXEC
27077+ .pax_open_kernel = native_pax_open_kernel,
27078+ .pax_close_kernel = native_pax_close_kernel,
27079+#endif
27080+
27081 };
27082
27083 EXPORT_SYMBOL_GPL(pv_time_ops);
27084diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
27085index 0497f71..7186c0d 100644
27086--- a/arch/x86/kernel/pci-calgary_64.c
27087+++ b/arch/x86/kernel/pci-calgary_64.c
27088@@ -1347,7 +1347,7 @@ static void __init get_tce_space_from_tar(void)
27089 tce_space = be64_to_cpu(readq(target));
27090 tce_space = tce_space & TAR_SW_BITS;
27091
27092- tce_space = tce_space & (~specified_table_size);
27093+ tce_space = tce_space & (~(unsigned long)specified_table_size);
27094 info->tce_space = (u64 *)__va(tce_space);
27095 }
27096 }
27097diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
27098index 35ccf75..7a15747 100644
27099--- a/arch/x86/kernel/pci-iommu_table.c
27100+++ b/arch/x86/kernel/pci-iommu_table.c
27101@@ -2,7 +2,7 @@
27102 #include <asm/iommu_table.h>
27103 #include <linux/string.h>
27104 #include <linux/kallsyms.h>
27105-
27106+#include <linux/sched.h>
27107
27108 #define DEBUG 1
27109
27110diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
27111index 77dd0ad..9ec4723 100644
27112--- a/arch/x86/kernel/pci-swiotlb.c
27113+++ b/arch/x86/kernel/pci-swiotlb.c
27114@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
27115 struct dma_attrs *attrs)
27116 {
27117 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
27118- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
27119+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
27120 else
27121 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
27122 }
27123diff --git a/arch/x86/kernel/preempt.S b/arch/x86/kernel/preempt.S
27124index ca7f0d5..8996469 100644
27125--- a/arch/x86/kernel/preempt.S
27126+++ b/arch/x86/kernel/preempt.S
27127@@ -3,12 +3,14 @@
27128 #include <asm/dwarf2.h>
27129 #include <asm/asm.h>
27130 #include <asm/calling.h>
27131+#include <asm/alternative-asm.h>
27132
27133 ENTRY(___preempt_schedule)
27134 CFI_STARTPROC
27135 SAVE_ALL
27136 call preempt_schedule
27137 RESTORE_ALL
27138+ pax_force_retaddr
27139 ret
27140 CFI_ENDPROC
27141
27142@@ -19,6 +21,7 @@ ENTRY(___preempt_schedule_context)
27143 SAVE_ALL
27144 call preempt_schedule_context
27145 RESTORE_ALL
27146+ pax_force_retaddr
27147 ret
27148 CFI_ENDPROC
27149
27150diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
27151index f804dc9..7c62095 100644
27152--- a/arch/x86/kernel/process.c
27153+++ b/arch/x86/kernel/process.c
27154@@ -36,7 +36,8 @@
27155 * section. Since TSS's are completely CPU-local, we want them
27156 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
27157 */
27158-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
27159+struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
27160+EXPORT_SYMBOL(init_tss);
27161
27162 #ifdef CONFIG_X86_64
27163 static DEFINE_PER_CPU(unsigned char, is_idle);
27164@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
27165 task_xstate_cachep =
27166 kmem_cache_create("task_xstate", xstate_size,
27167 __alignof__(union thread_xstate),
27168- SLAB_PANIC | SLAB_NOTRACK, NULL);
27169+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
27170 setup_xstate_comp();
27171 }
27172
27173@@ -106,7 +107,7 @@ void exit_thread(void)
27174 unsigned long *bp = t->io_bitmap_ptr;
27175
27176 if (bp) {
27177- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
27178+ struct tss_struct *tss = init_tss + get_cpu();
27179
27180 t->io_bitmap_ptr = NULL;
27181 clear_thread_flag(TIF_IO_BITMAP);
27182@@ -126,6 +127,9 @@ void flush_thread(void)
27183 {
27184 struct task_struct *tsk = current;
27185
27186+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
27187+ loadsegment(gs, 0);
27188+#endif
27189 flush_ptrace_hw_breakpoint(tsk);
27190 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
27191 drop_init_fpu(tsk);
27192@@ -272,7 +276,7 @@ static void __exit_idle(void)
27193 void exit_idle(void)
27194 {
27195 /* idle loop has pid 0 */
27196- if (current->pid)
27197+ if (task_pid_nr(current))
27198 return;
27199 __exit_idle();
27200 }
27201@@ -325,7 +329,7 @@ bool xen_set_default_idle(void)
27202 return ret;
27203 }
27204 #endif
27205-void stop_this_cpu(void *dummy)
27206+__noreturn void stop_this_cpu(void *dummy)
27207 {
27208 local_irq_disable();
27209 /*
27210@@ -454,16 +458,37 @@ static int __init idle_setup(char *str)
27211 }
27212 early_param("idle", idle_setup);
27213
27214-unsigned long arch_align_stack(unsigned long sp)
27215+#ifdef CONFIG_PAX_RANDKSTACK
27216+void pax_randomize_kstack(struct pt_regs *regs)
27217 {
27218- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
27219- sp -= get_random_int() % 8192;
27220- return sp & ~0xf;
27221-}
27222+ struct thread_struct *thread = &current->thread;
27223+ unsigned long time;
27224
27225-unsigned long arch_randomize_brk(struct mm_struct *mm)
27226-{
27227- unsigned long range_end = mm->brk + 0x02000000;
27228- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
27229-}
27230+ if (!randomize_va_space)
27231+ return;
27232+
27233+ if (v8086_mode(regs))
27234+ return;
27235
27236+ rdtscl(time);
27237+
27238+ /* P4 seems to return a 0 LSB, ignore it */
27239+#ifdef CONFIG_MPENTIUM4
27240+ time &= 0x3EUL;
27241+ time <<= 2;
27242+#elif defined(CONFIG_X86_64)
27243+ time &= 0xFUL;
27244+ time <<= 4;
27245+#else
27246+ time &= 0x1FUL;
27247+ time <<= 3;
27248+#endif
27249+
27250+ thread->sp0 ^= time;
27251+ load_sp0(init_tss + smp_processor_id(), thread);
27252+
27253+#ifdef CONFIG_X86_64
27254+ this_cpu_write(kernel_stack, thread->sp0);
27255+#endif
27256+}
27257+#endif
27258diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
27259index 7bc86bb..0ea06e8 100644
27260--- a/arch/x86/kernel/process_32.c
27261+++ b/arch/x86/kernel/process_32.c
27262@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
27263 unsigned long thread_saved_pc(struct task_struct *tsk)
27264 {
27265 return ((unsigned long *)tsk->thread.sp)[3];
27266+//XXX return tsk->thread.eip;
27267 }
27268
27269 void __show_regs(struct pt_regs *regs, int all)
27270@@ -73,19 +74,18 @@ void __show_regs(struct pt_regs *regs, int all)
27271 unsigned long sp;
27272 unsigned short ss, gs;
27273
27274- if (user_mode_vm(regs)) {
27275+ if (user_mode(regs)) {
27276 sp = regs->sp;
27277 ss = regs->ss & 0xffff;
27278- gs = get_user_gs(regs);
27279 } else {
27280 sp = kernel_stack_pointer(regs);
27281 savesegment(ss, ss);
27282- savesegment(gs, gs);
27283 }
27284+ gs = get_user_gs(regs);
27285
27286 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
27287 (u16)regs->cs, regs->ip, regs->flags,
27288- smp_processor_id());
27289+ raw_smp_processor_id());
27290 print_symbol("EIP is at %s\n", regs->ip);
27291
27292 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
27293@@ -132,20 +132,21 @@ void release_thread(struct task_struct *dead_task)
27294 int copy_thread(unsigned long clone_flags, unsigned long sp,
27295 unsigned long arg, struct task_struct *p)
27296 {
27297- struct pt_regs *childregs = task_pt_regs(p);
27298+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
27299 struct task_struct *tsk;
27300 int err;
27301
27302 p->thread.sp = (unsigned long) childregs;
27303 p->thread.sp0 = (unsigned long) (childregs+1);
27304+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
27305
27306 if (unlikely(p->flags & PF_KTHREAD)) {
27307 /* kernel thread */
27308 memset(childregs, 0, sizeof(struct pt_regs));
27309 p->thread.ip = (unsigned long) ret_from_kernel_thread;
27310- task_user_gs(p) = __KERNEL_STACK_CANARY;
27311- childregs->ds = __USER_DS;
27312- childregs->es = __USER_DS;
27313+ savesegment(gs, childregs->gs);
27314+ childregs->ds = __KERNEL_DS;
27315+ childregs->es = __KERNEL_DS;
27316 childregs->fs = __KERNEL_PERCPU;
27317 childregs->bx = sp; /* function */
27318 childregs->bp = arg;
27319@@ -252,7 +253,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27320 struct thread_struct *prev = &prev_p->thread,
27321 *next = &next_p->thread;
27322 int cpu = smp_processor_id();
27323- struct tss_struct *tss = &per_cpu(init_tss, cpu);
27324+ struct tss_struct *tss = init_tss + cpu;
27325 fpu_switch_t fpu;
27326
27327 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
27328@@ -276,6 +277,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27329 */
27330 lazy_save_gs(prev->gs);
27331
27332+#ifdef CONFIG_PAX_MEMORY_UDEREF
27333+ __set_fs(task_thread_info(next_p)->addr_limit);
27334+#endif
27335+
27336 /*
27337 * Load the per-thread Thread-Local Storage descriptor.
27338 */
27339@@ -314,9 +319,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27340 */
27341 arch_end_context_switch(next_p);
27342
27343- this_cpu_write(kernel_stack,
27344- (unsigned long)task_stack_page(next_p) +
27345- THREAD_SIZE - KERNEL_STACK_OFFSET);
27346+ this_cpu_write(current_task, next_p);
27347+ this_cpu_write(current_tinfo, &next_p->tinfo);
27348+ this_cpu_write(kernel_stack, next->sp0);
27349
27350 /*
27351 * Restore %gs if needed (which is common)
27352@@ -326,8 +331,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27353
27354 switch_fpu_finish(next_p, fpu);
27355
27356- this_cpu_write(current_task, next_p);
27357-
27358 return prev_p;
27359 }
27360
27361@@ -357,4 +360,3 @@ unsigned long get_wchan(struct task_struct *p)
27362 } while (count++ < 16);
27363 return 0;
27364 }
27365-
27366diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
27367index ca5b02d..c0b2f6a 100644
27368--- a/arch/x86/kernel/process_64.c
27369+++ b/arch/x86/kernel/process_64.c
27370@@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
27371 struct pt_regs *childregs;
27372 struct task_struct *me = current;
27373
27374- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
27375+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
27376 childregs = task_pt_regs(p);
27377 p->thread.sp = (unsigned long) childregs;
27378 p->thread.usersp = me->thread.usersp;
27379+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
27380 set_tsk_thread_flag(p, TIF_FORK);
27381 p->thread.fpu_counter = 0;
27382 p->thread.io_bitmap_ptr = NULL;
27383@@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
27384 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
27385 savesegment(es, p->thread.es);
27386 savesegment(ds, p->thread.ds);
27387+ savesegment(ss, p->thread.ss);
27388+ BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
27389 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
27390
27391 if (unlikely(p->flags & PF_KTHREAD)) {
27392@@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27393 struct thread_struct *prev = &prev_p->thread;
27394 struct thread_struct *next = &next_p->thread;
27395 int cpu = smp_processor_id();
27396- struct tss_struct *tss = &per_cpu(init_tss, cpu);
27397+ struct tss_struct *tss = init_tss + cpu;
27398 unsigned fsindex, gsindex;
27399 fpu_switch_t fpu;
27400
27401@@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27402 if (unlikely(next->ds | prev->ds))
27403 loadsegment(ds, next->ds);
27404
27405+ savesegment(ss, prev->ss);
27406+ if (unlikely(next->ss != prev->ss))
27407+ loadsegment(ss, next->ss);
27408
27409 /* We must save %fs and %gs before load_TLS() because
27410 * %fs and %gs may be cleared by load_TLS().
27411@@ -362,6 +368,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27412 prev->usersp = this_cpu_read(old_rsp);
27413 this_cpu_write(old_rsp, next->usersp);
27414 this_cpu_write(current_task, next_p);
27415+ this_cpu_write(current_tinfo, &next_p->tinfo);
27416
27417 /*
27418 * If it were not for PREEMPT_ACTIVE we could guarantee that the
27419@@ -371,9 +378,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
27420 task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
27421 this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
27422
27423- this_cpu_write(kernel_stack,
27424- (unsigned long)task_stack_page(next_p) +
27425- THREAD_SIZE - KERNEL_STACK_OFFSET);
27426+ this_cpu_write(kernel_stack, next->sp0);
27427
27428 /*
27429 * Now maybe reload the debug registers and handle I/O bitmaps
27430@@ -443,12 +448,11 @@ unsigned long get_wchan(struct task_struct *p)
27431 if (!p || p == current || p->state == TASK_RUNNING)
27432 return 0;
27433 stack = (unsigned long)task_stack_page(p);
27434- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
27435+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
27436 return 0;
27437 fp = *(u64 *)(p->thread.sp);
27438 do {
27439- if (fp < (unsigned long)stack ||
27440- fp >= (unsigned long)stack+THREAD_SIZE)
27441+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
27442 return 0;
27443 ip = *(u64 *)(fp+8);
27444 if (!in_sched_functions(ip))
27445diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
27446index b1a5dfa..ed94526 100644
27447--- a/arch/x86/kernel/ptrace.c
27448+++ b/arch/x86/kernel/ptrace.c
27449@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
27450 unsigned long sp = (unsigned long)&regs->sp;
27451 u32 *prev_esp;
27452
27453- if (context == (sp & ~(THREAD_SIZE - 1)))
27454+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
27455 return sp;
27456
27457- prev_esp = (u32 *)(context);
27458+ prev_esp = *(u32 **)(context);
27459 if (prev_esp)
27460 return (unsigned long)prev_esp;
27461
27462@@ -452,6 +452,20 @@ static int putreg(struct task_struct *child,
27463 if (child->thread.gs != value)
27464 return do_arch_prctl(child, ARCH_SET_GS, value);
27465 return 0;
27466+
27467+ case offsetof(struct user_regs_struct,ip):
27468+ /*
27469+ * Protect against any attempt to set ip to an
27470+ * impossible address. There are dragons lurking if the
27471+ * address is noncanonical. (This explicitly allows
27472+ * setting ip to TASK_SIZE_MAX, because user code can do
27473+ * that all by itself by running off the end of its
27474+ * address space.
27475+ */
27476+ if (value > TASK_SIZE_MAX)
27477+ return -EIO;
27478+ break;
27479+
27480 #endif
27481 }
27482
27483@@ -588,7 +602,7 @@ static void ptrace_triggered(struct perf_event *bp,
27484 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
27485 {
27486 int i;
27487- int dr7 = 0;
27488+ unsigned long dr7 = 0;
27489 struct arch_hw_breakpoint *info;
27490
27491 for (i = 0; i < HBP_NUM; i++) {
27492@@ -822,7 +836,7 @@ long arch_ptrace(struct task_struct *child, long request,
27493 unsigned long addr, unsigned long data)
27494 {
27495 int ret;
27496- unsigned long __user *datap = (unsigned long __user *)data;
27497+ unsigned long __user *datap = (__force unsigned long __user *)data;
27498
27499 switch (request) {
27500 /* read the word at location addr in the USER area. */
27501@@ -907,14 +921,14 @@ long arch_ptrace(struct task_struct *child, long request,
27502 if ((int) addr < 0)
27503 return -EIO;
27504 ret = do_get_thread_area(child, addr,
27505- (struct user_desc __user *)data);
27506+ (__force struct user_desc __user *) data);
27507 break;
27508
27509 case PTRACE_SET_THREAD_AREA:
27510 if ((int) addr < 0)
27511 return -EIO;
27512 ret = do_set_thread_area(child, addr,
27513- (struct user_desc __user *)data, 0);
27514+ (__force struct user_desc __user *) data, 0);
27515 break;
27516 #endif
27517
27518@@ -1292,7 +1306,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
27519
27520 #ifdef CONFIG_X86_64
27521
27522-static struct user_regset x86_64_regsets[] __read_mostly = {
27523+static user_regset_no_const x86_64_regsets[] __read_only = {
27524 [REGSET_GENERAL] = {
27525 .core_note_type = NT_PRSTATUS,
27526 .n = sizeof(struct user_regs_struct) / sizeof(long),
27527@@ -1333,7 +1347,7 @@ static const struct user_regset_view user_x86_64_view = {
27528 #endif /* CONFIG_X86_64 */
27529
27530 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
27531-static struct user_regset x86_32_regsets[] __read_mostly = {
27532+static user_regset_no_const x86_32_regsets[] __read_only = {
27533 [REGSET_GENERAL] = {
27534 .core_note_type = NT_PRSTATUS,
27535 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
27536@@ -1386,7 +1400,7 @@ static const struct user_regset_view user_x86_32_view = {
27537 */
27538 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
27539
27540-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27541+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
27542 {
27543 #ifdef CONFIG_X86_64
27544 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
27545@@ -1421,7 +1435,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
27546 memset(info, 0, sizeof(*info));
27547 info->si_signo = SIGTRAP;
27548 info->si_code = si_code;
27549- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
27550+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
27551 }
27552
27553 void user_single_step_siginfo(struct task_struct *tsk,
27554@@ -1441,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
27555 force_sig_info(SIGTRAP, &info, tsk);
27556 }
27557
27558+#ifdef CONFIG_GRKERNSEC_SETXID
27559+extern void gr_delayed_cred_worker(void);
27560+#endif
27561+
27562 /*
27563 * We must return the syscall number to actually look up in the table.
27564 * This can be -1L to skip running any syscall at all.
27565@@ -1451,6 +1469,11 @@ long syscall_trace_enter(struct pt_regs *regs)
27566
27567 user_exit();
27568
27569+#ifdef CONFIG_GRKERNSEC_SETXID
27570+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27571+ gr_delayed_cred_worker();
27572+#endif
27573+
27574 /*
27575 * If we stepped into a sysenter/syscall insn, it trapped in
27576 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
27577@@ -1506,6 +1529,11 @@ void syscall_trace_leave(struct pt_regs *regs)
27578 */
27579 user_exit();
27580
27581+#ifdef CONFIG_GRKERNSEC_SETXID
27582+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
27583+ gr_delayed_cred_worker();
27584+#endif
27585+
27586 audit_syscall_exit(regs);
27587
27588 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
27589diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
27590index 2f355d2..e75ed0a 100644
27591--- a/arch/x86/kernel/pvclock.c
27592+++ b/arch/x86/kernel/pvclock.c
27593@@ -51,11 +51,11 @@ void pvclock_touch_watchdogs(void)
27594 reset_hung_task_detector();
27595 }
27596
27597-static atomic64_t last_value = ATOMIC64_INIT(0);
27598+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
27599
27600 void pvclock_resume(void)
27601 {
27602- atomic64_set(&last_value, 0);
27603+ atomic64_set_unchecked(&last_value, 0);
27604 }
27605
27606 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
27607@@ -105,11 +105,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
27608 * updating at the same time, and one of them could be slightly behind,
27609 * making the assumption that last_value always go forward fail to hold.
27610 */
27611- last = atomic64_read(&last_value);
27612+ last = atomic64_read_unchecked(&last_value);
27613 do {
27614 if (ret < last)
27615 return last;
27616- last = atomic64_cmpxchg(&last_value, last, ret);
27617+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
27618 } while (unlikely(last != ret));
27619
27620 return ret;
27621diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
27622index 17962e6..47f55db 100644
27623--- a/arch/x86/kernel/reboot.c
27624+++ b/arch/x86/kernel/reboot.c
27625@@ -69,6 +69,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
27626
27627 void __noreturn machine_real_restart(unsigned int type)
27628 {
27629+
27630+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
27631+ struct desc_struct *gdt;
27632+#endif
27633+
27634 local_irq_disable();
27635
27636 /*
27637@@ -96,7 +101,29 @@ void __noreturn machine_real_restart(unsigned int type)
27638
27639 /* Jump to the identity-mapped low memory code */
27640 #ifdef CONFIG_X86_32
27641- asm volatile("jmpl *%0" : :
27642+
27643+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
27644+ gdt = get_cpu_gdt_table(smp_processor_id());
27645+ pax_open_kernel();
27646+#ifdef CONFIG_PAX_MEMORY_UDEREF
27647+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
27648+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
27649+ loadsegment(ds, __KERNEL_DS);
27650+ loadsegment(es, __KERNEL_DS);
27651+ loadsegment(ss, __KERNEL_DS);
27652+#endif
27653+#ifdef CONFIG_PAX_KERNEXEC
27654+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
27655+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
27656+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
27657+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
27658+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
27659+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
27660+#endif
27661+ pax_close_kernel();
27662+#endif
27663+
27664+ asm volatile("ljmpl *%0" : :
27665 "rm" (real_mode_header->machine_real_restart_asm),
27666 "a" (type));
27667 #else
27668@@ -500,7 +527,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
27669 * This means that this function can never return, it can misbehave
27670 * by not rebooting properly and hanging.
27671 */
27672-static void native_machine_emergency_restart(void)
27673+static void __noreturn native_machine_emergency_restart(void)
27674 {
27675 int i;
27676 int attempt = 0;
27677@@ -620,13 +647,13 @@ void native_machine_shutdown(void)
27678 #endif
27679 }
27680
27681-static void __machine_emergency_restart(int emergency)
27682+static void __noreturn __machine_emergency_restart(int emergency)
27683 {
27684 reboot_emergency = emergency;
27685 machine_ops.emergency_restart();
27686 }
27687
27688-static void native_machine_restart(char *__unused)
27689+static void __noreturn native_machine_restart(char *__unused)
27690 {
27691 pr_notice("machine restart\n");
27692
27693@@ -635,7 +662,7 @@ static void native_machine_restart(char *__unused)
27694 __machine_emergency_restart(0);
27695 }
27696
27697-static void native_machine_halt(void)
27698+static void __noreturn native_machine_halt(void)
27699 {
27700 /* Stop other cpus and apics */
27701 machine_shutdown();
27702@@ -645,7 +672,7 @@ static void native_machine_halt(void)
27703 stop_this_cpu(NULL);
27704 }
27705
27706-static void native_machine_power_off(void)
27707+static void __noreturn native_machine_power_off(void)
27708 {
27709 if (pm_power_off) {
27710 if (!reboot_force)
27711@@ -654,9 +681,10 @@ static void native_machine_power_off(void)
27712 }
27713 /* A fallback in case there is no PM info available */
27714 tboot_shutdown(TB_SHUTDOWN_HALT);
27715+ unreachable();
27716 }
27717
27718-struct machine_ops machine_ops = {
27719+struct machine_ops machine_ops __read_only = {
27720 .power_off = native_machine_power_off,
27721 .shutdown = native_machine_shutdown,
27722 .emergency_restart = native_machine_emergency_restart,
27723diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
27724index c8e41e9..64049ef 100644
27725--- a/arch/x86/kernel/reboot_fixups_32.c
27726+++ b/arch/x86/kernel/reboot_fixups_32.c
27727@@ -57,7 +57,7 @@ struct device_fixup {
27728 unsigned int vendor;
27729 unsigned int device;
27730 void (*reboot_fixup)(struct pci_dev *);
27731-};
27732+} __do_const;
27733
27734 /*
27735 * PCI ids solely used for fixups_table go here
27736diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
27737index 3fd2c69..a444264 100644
27738--- a/arch/x86/kernel/relocate_kernel_64.S
27739+++ b/arch/x86/kernel/relocate_kernel_64.S
27740@@ -96,8 +96,7 @@ relocate_kernel:
27741
27742 /* jump to identity mapped page */
27743 addq $(identity_mapped - relocate_kernel), %r8
27744- pushq %r8
27745- ret
27746+ jmp *%r8
27747
27748 identity_mapped:
27749 /* set return address to 0 if not preserving context */
27750diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
27751index 41ead8d..7ccde23 100644
27752--- a/arch/x86/kernel/setup.c
27753+++ b/arch/x86/kernel/setup.c
27754@@ -110,6 +110,7 @@
27755 #include <asm/mce.h>
27756 #include <asm/alternative.h>
27757 #include <asm/prom.h>
27758+#include <asm/boot.h>
27759
27760 /*
27761 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
27762@@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
27763 #endif
27764
27765
27766-#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
27767-__visible unsigned long mmu_cr4_features;
27768+#ifdef CONFIG_X86_64
27769+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
27770+#elif defined(CONFIG_X86_PAE)
27771+__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
27772 #else
27773-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
27774+__visible unsigned long mmu_cr4_features __read_only;
27775 #endif
27776
27777+void set_in_cr4(unsigned long mask)
27778+{
27779+ unsigned long cr4 = read_cr4();
27780+
27781+ if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
27782+ return;
27783+
27784+ pax_open_kernel();
27785+ mmu_cr4_features |= mask;
27786+ pax_close_kernel();
27787+
27788+ if (trampoline_cr4_features)
27789+ *trampoline_cr4_features = mmu_cr4_features;
27790+ cr4 |= mask;
27791+ write_cr4(cr4);
27792+}
27793+EXPORT_SYMBOL(set_in_cr4);
27794+
27795+void clear_in_cr4(unsigned long mask)
27796+{
27797+ unsigned long cr4 = read_cr4();
27798+
27799+ if (!(cr4 & mask) && cr4 == mmu_cr4_features)
27800+ return;
27801+
27802+ pax_open_kernel();
27803+ mmu_cr4_features &= ~mask;
27804+ pax_close_kernel();
27805+
27806+ if (trampoline_cr4_features)
27807+ *trampoline_cr4_features = mmu_cr4_features;
27808+ cr4 &= ~mask;
27809+ write_cr4(cr4);
27810+}
27811+EXPORT_SYMBOL(clear_in_cr4);
27812+
27813 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
27814 int bootloader_type, bootloader_version;
27815
27816@@ -772,7 +811,7 @@ static void __init trim_bios_range(void)
27817 * area (640->1Mb) as ram even though it is not.
27818 * take them out.
27819 */
27820- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
27821+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
27822
27823 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
27824 }
27825@@ -780,7 +819,7 @@ static void __init trim_bios_range(void)
27826 /* called before trim_bios_range() to spare extra sanitize */
27827 static void __init e820_add_kernel_range(void)
27828 {
27829- u64 start = __pa_symbol(_text);
27830+ u64 start = __pa_symbol(ktla_ktva(_text));
27831 u64 size = __pa_symbol(_end) - start;
27832
27833 /*
27834@@ -856,8 +895,12 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
27835
27836 void __init setup_arch(char **cmdline_p)
27837 {
27838+#ifdef CONFIG_X86_32
27839+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
27840+#else
27841 memblock_reserve(__pa_symbol(_text),
27842 (unsigned long)__bss_stop - (unsigned long)_text);
27843+#endif
27844
27845 early_reserve_initrd();
27846
27847@@ -946,14 +989,14 @@ void __init setup_arch(char **cmdline_p)
27848
27849 if (!boot_params.hdr.root_flags)
27850 root_mountflags &= ~MS_RDONLY;
27851- init_mm.start_code = (unsigned long) _text;
27852- init_mm.end_code = (unsigned long) _etext;
27853+ init_mm.start_code = ktla_ktva((unsigned long) _text);
27854+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
27855 init_mm.end_data = (unsigned long) _edata;
27856 init_mm.brk = _brk_end;
27857
27858- code_resource.start = __pa_symbol(_text);
27859- code_resource.end = __pa_symbol(_etext)-1;
27860- data_resource.start = __pa_symbol(_etext);
27861+ code_resource.start = __pa_symbol(ktla_ktva(_text));
27862+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
27863+ data_resource.start = __pa_symbol(_sdata);
27864 data_resource.end = __pa_symbol(_edata)-1;
27865 bss_resource.start = __pa_symbol(__bss_start);
27866 bss_resource.end = __pa_symbol(__bss_stop)-1;
27867diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
27868index 5cdff03..80fa283 100644
27869--- a/arch/x86/kernel/setup_percpu.c
27870+++ b/arch/x86/kernel/setup_percpu.c
27871@@ -21,19 +21,17 @@
27872 #include <asm/cpu.h>
27873 #include <asm/stackprotector.h>
27874
27875-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
27876+#ifdef CONFIG_SMP
27877+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
27878 EXPORT_PER_CPU_SYMBOL(cpu_number);
27879+#endif
27880
27881-#ifdef CONFIG_X86_64
27882 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
27883-#else
27884-#define BOOT_PERCPU_OFFSET 0
27885-#endif
27886
27887 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
27888 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
27889
27890-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
27891+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
27892 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
27893 };
27894 EXPORT_SYMBOL(__per_cpu_offset);
27895@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
27896 {
27897 #ifdef CONFIG_NEED_MULTIPLE_NODES
27898 pg_data_t *last = NULL;
27899- unsigned int cpu;
27900+ int cpu;
27901
27902 for_each_possible_cpu(cpu) {
27903 int node = early_cpu_to_node(cpu);
27904@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
27905 {
27906 #ifdef CONFIG_X86_32
27907 struct desc_struct gdt;
27908+ unsigned long base = per_cpu_offset(cpu);
27909
27910- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
27911- 0x2 | DESCTYPE_S, 0x8);
27912- gdt.s = 1;
27913+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
27914+ 0x83 | DESCTYPE_S, 0xC);
27915 write_gdt_entry(get_cpu_gdt_table(cpu),
27916 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
27917 #endif
27918@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
27919 /* alrighty, percpu areas up and running */
27920 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
27921 for_each_possible_cpu(cpu) {
27922+#ifdef CONFIG_CC_STACKPROTECTOR
27923+#ifdef CONFIG_X86_32
27924+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
27925+#endif
27926+#endif
27927 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
27928 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
27929 per_cpu(cpu_number, cpu) = cpu;
27930@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
27931 */
27932 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
27933 #endif
27934+#ifdef CONFIG_CC_STACKPROTECTOR
27935+#ifdef CONFIG_X86_32
27936+ if (!cpu)
27937+ per_cpu(stack_canary.canary, cpu) = canary;
27938+#endif
27939+#endif
27940 /*
27941 * Up to this point, the boot CPU has been using .init.data
27942 * area. Reload any changed state for the boot CPU.
27943diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
27944index ed37a76..39f936e 100644
27945--- a/arch/x86/kernel/signal.c
27946+++ b/arch/x86/kernel/signal.c
27947@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
27948 * Align the stack pointer according to the i386 ABI,
27949 * i.e. so that on function entry ((sp + 4) & 15) == 0.
27950 */
27951- sp = ((sp + 4) & -16ul) - 4;
27952+ sp = ((sp - 12) & -16ul) - 4;
27953 #else /* !CONFIG_X86_32 */
27954 sp = round_down(sp, 16) - 8;
27955 #endif
27956@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27957 }
27958
27959 if (current->mm->context.vdso)
27960- restorer = current->mm->context.vdso +
27961- selected_vdso32->sym___kernel_sigreturn;
27962+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_sigreturn);
27963 else
27964- restorer = &frame->retcode;
27965+ restorer = (void __user *)&frame->retcode;
27966 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27967 restorer = ksig->ka.sa.sa_restorer;
27968
27969@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
27970 * reasons and because gdb uses it as a signature to notice
27971 * signal handler stack frames.
27972 */
27973- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
27974+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
27975
27976 if (err)
27977 return -EFAULT;
27978@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27979 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
27980
27981 /* Set up to return from userspace. */
27982- restorer = current->mm->context.vdso +
27983- selected_vdso32->sym___kernel_rt_sigreturn;
27984+ if (current->mm->context.vdso)
27985+ restorer = (void __force_user *)(current->mm->context.vdso + selected_vdso32->sym___kernel_rt_sigreturn);
27986+ else
27987+ restorer = (void __user *)&frame->retcode;
27988 if (ksig->ka.sa.sa_flags & SA_RESTORER)
27989 restorer = ksig->ka.sa.sa_restorer;
27990 put_user_ex(restorer, &frame->pretcode);
27991@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
27992 * reasons and because gdb uses it as a signature to notice
27993 * signal handler stack frames.
27994 */
27995- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
27996+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
27997 } put_user_catch(err);
27998
27999 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
28000@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
28001 {
28002 int usig = signr_convert(ksig->sig);
28003 sigset_t *set = sigmask_to_save();
28004- compat_sigset_t *cset = (compat_sigset_t *) set;
28005+ sigset_t sigcopy;
28006+ compat_sigset_t *cset;
28007+
28008+ sigcopy = *set;
28009+
28010+ cset = (compat_sigset_t *) &sigcopy;
28011
28012 /* Set up the stack frame */
28013 if (is_ia32_frame()) {
28014@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
28015 } else if (is_x32_frame()) {
28016 return x32_setup_rt_frame(ksig, cset, regs);
28017 } else {
28018- return __setup_rt_frame(ksig->sig, ksig, set, regs);
28019+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
28020 }
28021 }
28022
28023diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
28024index be8e1bd..a3d93fa 100644
28025--- a/arch/x86/kernel/smp.c
28026+++ b/arch/x86/kernel/smp.c
28027@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
28028
28029 __setup("nonmi_ipi", nonmi_ipi_setup);
28030
28031-struct smp_ops smp_ops = {
28032+struct smp_ops smp_ops __read_only = {
28033 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
28034 .smp_prepare_cpus = native_smp_prepare_cpus,
28035 .smp_cpus_done = native_smp_cpus_done,
28036diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
28037index 42a2dca..35a07aa 100644
28038--- a/arch/x86/kernel/smpboot.c
28039+++ b/arch/x86/kernel/smpboot.c
28040@@ -226,14 +226,17 @@ static void notrace start_secondary(void *unused)
28041
28042 enable_start_cpu0 = 0;
28043
28044-#ifdef CONFIG_X86_32
28045+ /* otherwise gcc will move up smp_processor_id before the cpu_init */
28046+ barrier();
28047+
28048 /* switch away from the initial page table */
28049+#ifdef CONFIG_PAX_PER_CPU_PGD
28050+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
28051+#else
28052 load_cr3(swapper_pg_dir);
28053+#endif
28054 __flush_tlb_all();
28055-#endif
28056
28057- /* otherwise gcc will move up smp_processor_id before the cpu_init */
28058- barrier();
28059 /*
28060 * Check TSC synchronization with the BP:
28061 */
28062@@ -760,8 +763,9 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
28063 alternatives_enable_smp();
28064
28065 idle->thread.sp = (unsigned long) (((struct pt_regs *)
28066- (THREAD_SIZE + task_stack_page(idle))) - 1);
28067+ (THREAD_SIZE - 16 + task_stack_page(idle))) - 1);
28068 per_cpu(current_task, cpu) = idle;
28069+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
28070
28071 #ifdef CONFIG_X86_32
28072 /* Stack for startup_32 can be just as for start_secondary onwards */
28073@@ -770,10 +774,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
28074 clear_tsk_thread_flag(idle, TIF_FORK);
28075 initial_gs = per_cpu_offset(cpu);
28076 #endif
28077- per_cpu(kernel_stack, cpu) =
28078- (unsigned long)task_stack_page(idle) -
28079- KERNEL_STACK_OFFSET + THREAD_SIZE;
28080+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
28081+ pax_open_kernel();
28082 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
28083+ pax_close_kernel();
28084 initial_code = (unsigned long)start_secondary;
28085 stack_start = idle->thread.sp;
28086
28087@@ -919,6 +923,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
28088 /* the FPU context is blank, nobody can own it */
28089 __cpu_disable_lazy_restore(cpu);
28090
28091+#ifdef CONFIG_PAX_PER_CPU_PGD
28092+ clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
28093+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28094+ KERNEL_PGD_PTRS);
28095+ clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
28096+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28097+ KERNEL_PGD_PTRS);
28098+#endif
28099+
28100 err = do_boot_cpu(apicid, cpu, tidle);
28101 if (err) {
28102 pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
28103diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
28104index 9b4d51d..5d28b58 100644
28105--- a/arch/x86/kernel/step.c
28106+++ b/arch/x86/kernel/step.c
28107@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
28108 struct desc_struct *desc;
28109 unsigned long base;
28110
28111- seg &= ~7UL;
28112+ seg >>= 3;
28113
28114 mutex_lock(&child->mm->context.lock);
28115- if (unlikely((seg >> 3) >= child->mm->context.size))
28116+ if (unlikely(seg >= child->mm->context.size))
28117 addr = -1L; /* bogus selector, access would fault */
28118 else {
28119 desc = child->mm->context.ldt + seg;
28120@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
28121 addr += base;
28122 }
28123 mutex_unlock(&child->mm->context.lock);
28124- }
28125+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
28126+ addr = ktla_ktva(addr);
28127
28128 return addr;
28129 }
28130@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
28131 unsigned char opcode[15];
28132 unsigned long addr = convert_ip_to_linear(child, regs);
28133
28134+ if (addr == -EINVAL)
28135+ return 0;
28136+
28137 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
28138 for (i = 0; i < copied; i++) {
28139 switch (opcode[i]) {
28140diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
28141new file mode 100644
28142index 0000000..5877189
28143--- /dev/null
28144+++ b/arch/x86/kernel/sys_i386_32.c
28145@@ -0,0 +1,189 @@
28146+/*
28147+ * This file contains various random system calls that
28148+ * have a non-standard calling sequence on the Linux/i386
28149+ * platform.
28150+ */
28151+
28152+#include <linux/errno.h>
28153+#include <linux/sched.h>
28154+#include <linux/mm.h>
28155+#include <linux/fs.h>
28156+#include <linux/smp.h>
28157+#include <linux/sem.h>
28158+#include <linux/msg.h>
28159+#include <linux/shm.h>
28160+#include <linux/stat.h>
28161+#include <linux/syscalls.h>
28162+#include <linux/mman.h>
28163+#include <linux/file.h>
28164+#include <linux/utsname.h>
28165+#include <linux/ipc.h>
28166+#include <linux/elf.h>
28167+
28168+#include <linux/uaccess.h>
28169+#include <linux/unistd.h>
28170+
28171+#include <asm/syscalls.h>
28172+
28173+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
28174+{
28175+ unsigned long pax_task_size = TASK_SIZE;
28176+
28177+#ifdef CONFIG_PAX_SEGMEXEC
28178+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
28179+ pax_task_size = SEGMEXEC_TASK_SIZE;
28180+#endif
28181+
28182+ if (flags & MAP_FIXED)
28183+ if (len > pax_task_size || addr > pax_task_size - len)
28184+ return -EINVAL;
28185+
28186+ return 0;
28187+}
28188+
28189+/*
28190+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
28191+ */
28192+static unsigned long get_align_mask(void)
28193+{
28194+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
28195+ return 0;
28196+
28197+ if (!(current->flags & PF_RANDOMIZE))
28198+ return 0;
28199+
28200+ return va_align.mask;
28201+}
28202+
28203+unsigned long
28204+arch_get_unmapped_area(struct file *filp, unsigned long addr,
28205+ unsigned long len, unsigned long pgoff, unsigned long flags)
28206+{
28207+ struct mm_struct *mm = current->mm;
28208+ struct vm_area_struct *vma;
28209+ unsigned long pax_task_size = TASK_SIZE;
28210+ struct vm_unmapped_area_info info;
28211+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28212+
28213+#ifdef CONFIG_PAX_SEGMEXEC
28214+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28215+ pax_task_size = SEGMEXEC_TASK_SIZE;
28216+#endif
28217+
28218+ pax_task_size -= PAGE_SIZE;
28219+
28220+ if (len > pax_task_size)
28221+ return -ENOMEM;
28222+
28223+ if (flags & MAP_FIXED)
28224+ return addr;
28225+
28226+#ifdef CONFIG_PAX_RANDMMAP
28227+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28228+#endif
28229+
28230+ if (addr) {
28231+ addr = PAGE_ALIGN(addr);
28232+ if (pax_task_size - len >= addr) {
28233+ vma = find_vma(mm, addr);
28234+ if (check_heap_stack_gap(vma, addr, len, offset))
28235+ return addr;
28236+ }
28237+ }
28238+
28239+ info.flags = 0;
28240+ info.length = len;
28241+ info.align_mask = filp ? get_align_mask() : 0;
28242+ info.align_offset = pgoff << PAGE_SHIFT;
28243+ info.threadstack_offset = offset;
28244+
28245+#ifdef CONFIG_PAX_PAGEEXEC
28246+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
28247+ info.low_limit = 0x00110000UL;
28248+ info.high_limit = mm->start_code;
28249+
28250+#ifdef CONFIG_PAX_RANDMMAP
28251+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28252+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
28253+#endif
28254+
28255+ if (info.low_limit < info.high_limit) {
28256+ addr = vm_unmapped_area(&info);
28257+ if (!IS_ERR_VALUE(addr))
28258+ return addr;
28259+ }
28260+ } else
28261+#endif
28262+
28263+ info.low_limit = mm->mmap_base;
28264+ info.high_limit = pax_task_size;
28265+
28266+ return vm_unmapped_area(&info);
28267+}
28268+
28269+unsigned long
28270+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28271+ const unsigned long len, const unsigned long pgoff,
28272+ const unsigned long flags)
28273+{
28274+ struct vm_area_struct *vma;
28275+ struct mm_struct *mm = current->mm;
28276+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
28277+ struct vm_unmapped_area_info info;
28278+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28279+
28280+#ifdef CONFIG_PAX_SEGMEXEC
28281+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28282+ pax_task_size = SEGMEXEC_TASK_SIZE;
28283+#endif
28284+
28285+ pax_task_size -= PAGE_SIZE;
28286+
28287+ /* requested length too big for entire address space */
28288+ if (len > pax_task_size)
28289+ return -ENOMEM;
28290+
28291+ if (flags & MAP_FIXED)
28292+ return addr;
28293+
28294+#ifdef CONFIG_PAX_PAGEEXEC
28295+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
28296+ goto bottomup;
28297+#endif
28298+
28299+#ifdef CONFIG_PAX_RANDMMAP
28300+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28301+#endif
28302+
28303+ /* requesting a specific address */
28304+ if (addr) {
28305+ addr = PAGE_ALIGN(addr);
28306+ if (pax_task_size - len >= addr) {
28307+ vma = find_vma(mm, addr);
28308+ if (check_heap_stack_gap(vma, addr, len, offset))
28309+ return addr;
28310+ }
28311+ }
28312+
28313+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
28314+ info.length = len;
28315+ info.low_limit = PAGE_SIZE;
28316+ info.high_limit = mm->mmap_base;
28317+ info.align_mask = filp ? get_align_mask() : 0;
28318+ info.align_offset = pgoff << PAGE_SHIFT;
28319+ info.threadstack_offset = offset;
28320+
28321+ addr = vm_unmapped_area(&info);
28322+ if (!(addr & ~PAGE_MASK))
28323+ return addr;
28324+ VM_BUG_ON(addr != -ENOMEM);
28325+
28326+bottomup:
28327+ /*
28328+ * A failed mmap() very likely causes application failure,
28329+ * so fall back to the bottom-up function here. This scenario
28330+ * can happen with large stack limits and large mmap()
28331+ * allocations.
28332+ */
28333+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
28334+}
28335diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
28336index 30277e2..5664a29 100644
28337--- a/arch/x86/kernel/sys_x86_64.c
28338+++ b/arch/x86/kernel/sys_x86_64.c
28339@@ -81,8 +81,8 @@ out:
28340 return error;
28341 }
28342
28343-static void find_start_end(unsigned long flags, unsigned long *begin,
28344- unsigned long *end)
28345+static void find_start_end(struct mm_struct *mm, unsigned long flags,
28346+ unsigned long *begin, unsigned long *end)
28347 {
28348 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
28349 unsigned long new_begin;
28350@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
28351 *begin = new_begin;
28352 }
28353 } else {
28354- *begin = current->mm->mmap_legacy_base;
28355+ *begin = mm->mmap_legacy_base;
28356 *end = TASK_SIZE;
28357 }
28358 }
28359@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
28360 struct vm_area_struct *vma;
28361 struct vm_unmapped_area_info info;
28362 unsigned long begin, end;
28363+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28364
28365 if (flags & MAP_FIXED)
28366 return addr;
28367
28368- find_start_end(flags, &begin, &end);
28369+ find_start_end(mm, flags, &begin, &end);
28370
28371 if (len > end)
28372 return -ENOMEM;
28373
28374+#ifdef CONFIG_PAX_RANDMMAP
28375+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28376+#endif
28377+
28378 if (addr) {
28379 addr = PAGE_ALIGN(addr);
28380 vma = find_vma(mm, addr);
28381- if (end - len >= addr &&
28382- (!vma || addr + len <= vma->vm_start))
28383+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28384 return addr;
28385 }
28386
28387@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
28388 info.high_limit = end;
28389 info.align_mask = filp ? get_align_mask() : 0;
28390 info.align_offset = pgoff << PAGE_SHIFT;
28391+ info.threadstack_offset = offset;
28392 return vm_unmapped_area(&info);
28393 }
28394
28395@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28396 struct mm_struct *mm = current->mm;
28397 unsigned long addr = addr0;
28398 struct vm_unmapped_area_info info;
28399+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
28400
28401 /* requested length too big for entire address space */
28402 if (len > TASK_SIZE)
28403@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28404 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
28405 goto bottomup;
28406
28407+#ifdef CONFIG_PAX_RANDMMAP
28408+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28409+#endif
28410+
28411 /* requesting a specific address */
28412 if (addr) {
28413 addr = PAGE_ALIGN(addr);
28414 vma = find_vma(mm, addr);
28415- if (TASK_SIZE - len >= addr &&
28416- (!vma || addr + len <= vma->vm_start))
28417+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28418 return addr;
28419 }
28420
28421@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
28422 info.high_limit = mm->mmap_base;
28423 info.align_mask = filp ? get_align_mask() : 0;
28424 info.align_offset = pgoff << PAGE_SHIFT;
28425+ info.threadstack_offset = offset;
28426 addr = vm_unmapped_area(&info);
28427 if (!(addr & ~PAGE_MASK))
28428 return addr;
28429diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
28430index 91a4496..bb87552 100644
28431--- a/arch/x86/kernel/tboot.c
28432+++ b/arch/x86/kernel/tboot.c
28433@@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
28434
28435 void tboot_shutdown(u32 shutdown_type)
28436 {
28437- void (*shutdown)(void);
28438+ void (* __noreturn shutdown)(void);
28439
28440 if (!tboot_enabled())
28441 return;
28442@@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
28443
28444 switch_to_tboot_pt();
28445
28446- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
28447+ shutdown = (void *)(unsigned long)tboot->shutdown_entry;
28448 shutdown();
28449
28450 /* should not reach here */
28451@@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
28452 return -ENODEV;
28453 }
28454
28455-static atomic_t ap_wfs_count;
28456+static atomic_unchecked_t ap_wfs_count;
28457
28458 static int tboot_wait_for_aps(int num_aps)
28459 {
28460@@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
28461 {
28462 switch (action) {
28463 case CPU_DYING:
28464- atomic_inc(&ap_wfs_count);
28465+ atomic_inc_unchecked(&ap_wfs_count);
28466 if (num_online_cpus() == 1)
28467- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
28468+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
28469 return NOTIFY_BAD;
28470 break;
28471 }
28472@@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
28473
28474 tboot_create_trampoline();
28475
28476- atomic_set(&ap_wfs_count, 0);
28477+ atomic_set_unchecked(&ap_wfs_count, 0);
28478 register_hotcpu_notifier(&tboot_cpu_notifier);
28479
28480 #ifdef CONFIG_DEBUG_FS
28481diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
28482index 0fa2960..91eabbe 100644
28483--- a/arch/x86/kernel/time.c
28484+++ b/arch/x86/kernel/time.c
28485@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
28486 {
28487 unsigned long pc = instruction_pointer(regs);
28488
28489- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
28490+ if (!user_mode(regs) && in_lock_functions(pc)) {
28491 #ifdef CONFIG_FRAME_POINTER
28492- return *(unsigned long *)(regs->bp + sizeof(long));
28493+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
28494 #else
28495 unsigned long *sp =
28496 (unsigned long *)kernel_stack_pointer(regs);
28497@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
28498 * or above a saved flags. Eflags has bits 22-31 zero,
28499 * kernel addresses don't.
28500 */
28501+
28502+#ifdef CONFIG_PAX_KERNEXEC
28503+ return ktla_ktva(sp[0]);
28504+#else
28505 if (sp[0] >> 22)
28506 return sp[0];
28507 if (sp[1] >> 22)
28508 return sp[1];
28509 #endif
28510+
28511+#endif
28512 }
28513 return pc;
28514 }
28515diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
28516index f7fec09..9991981 100644
28517--- a/arch/x86/kernel/tls.c
28518+++ b/arch/x86/kernel/tls.c
28519@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
28520 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
28521 return -EINVAL;
28522
28523+#ifdef CONFIG_PAX_SEGMEXEC
28524+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
28525+ return -EINVAL;
28526+#endif
28527+
28528 set_tls_desc(p, idx, &info, 1);
28529
28530 return 0;
28531@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
28532
28533 if (kbuf)
28534 info = kbuf;
28535- else if (__copy_from_user(infobuf, ubuf, count))
28536+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
28537 return -EFAULT;
28538 else
28539 info = infobuf;
28540diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
28541index 1c113db..287b42e 100644
28542--- a/arch/x86/kernel/tracepoint.c
28543+++ b/arch/x86/kernel/tracepoint.c
28544@@ -9,11 +9,11 @@
28545 #include <linux/atomic.h>
28546
28547 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
28548-struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28549+const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
28550 (unsigned long) trace_idt_table };
28551
28552 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28553-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
28554+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
28555
28556 static int trace_irq_vector_refcount;
28557 static DEFINE_MUTEX(irq_vector_mutex);
28558diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
28559index de801f2..f189dcf 100644
28560--- a/arch/x86/kernel/traps.c
28561+++ b/arch/x86/kernel/traps.c
28562@@ -67,7 +67,7 @@
28563 #include <asm/proto.h>
28564
28565 /* No need to be aligned, but done to keep all IDTs defined the same way. */
28566-gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
28567+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
28568 #else
28569 #include <asm/processor-flags.h>
28570 #include <asm/setup.h>
28571@@ -76,7 +76,7 @@ asmlinkage int system_call(void);
28572 #endif
28573
28574 /* Must be page-aligned because the real IDT is used in a fixmap. */
28575-gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
28576+gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
28577
28578 DECLARE_BITMAP(used_vectors, NR_VECTORS);
28579 EXPORT_SYMBOL_GPL(used_vectors);
28580@@ -108,11 +108,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
28581 }
28582
28583 static nokprobe_inline int
28584-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28585+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
28586 struct pt_regs *regs, long error_code)
28587 {
28588 #ifdef CONFIG_X86_32
28589- if (regs->flags & X86_VM_MASK) {
28590+ if (v8086_mode(regs)) {
28591 /*
28592 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
28593 * On nmi (interrupt 2), do_trap should not be called.
28594@@ -125,12 +125,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
28595 return -1;
28596 }
28597 #endif
28598- if (!user_mode(regs)) {
28599+ if (!user_mode_novm(regs)) {
28600 if (!fixup_exception(regs)) {
28601 tsk->thread.error_code = error_code;
28602 tsk->thread.trap_nr = trapnr;
28603+
28604+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28605+ if (trapnr == X86_TRAP_SS && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
28606+ str = "PAX: suspicious stack segment fault";
28607+#endif
28608+
28609 die(str, regs, error_code);
28610 }
28611+
28612+#ifdef CONFIG_PAX_REFCOUNT
28613+ if (trapnr == X86_TRAP_OF)
28614+ pax_report_refcount_overflow(regs);
28615+#endif
28616+
28617 return 0;
28618 }
28619
28620@@ -169,7 +181,7 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
28621 }
28622
28623 static void
28624-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28625+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
28626 long error_code, siginfo_t *info)
28627 {
28628 struct task_struct *tsk = current;
28629@@ -193,7 +205,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
28630 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
28631 printk_ratelimit()) {
28632 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
28633- tsk->comm, tsk->pid, str,
28634+ tsk->comm, task_pid_nr(tsk), str,
28635 regs->ip, regs->sp, error_code);
28636 print_vma_addr(" in ", regs->ip);
28637 pr_cont("\n");
28638@@ -274,6 +286,11 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
28639 tsk->thread.error_code = error_code;
28640 tsk->thread.trap_nr = X86_TRAP_DF;
28641
28642+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
28643+ if ((unsigned long)tsk->stack - regs->sp <= PAGE_SIZE)
28644+ die("grsec: kernel stack overflow detected", regs, error_code);
28645+#endif
28646+
28647 #ifdef CONFIG_DOUBLEFAULT
28648 df_debug(regs, error_code);
28649 #endif
28650@@ -296,7 +313,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
28651 conditional_sti(regs);
28652
28653 #ifdef CONFIG_X86_32
28654- if (regs->flags & X86_VM_MASK) {
28655+ if (v8086_mode(regs)) {
28656 local_irq_enable();
28657 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
28658 goto exit;
28659@@ -304,18 +321,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
28660 #endif
28661
28662 tsk = current;
28663- if (!user_mode(regs)) {
28664+ if (!user_mode_novm(regs)) {
28665 if (fixup_exception(regs))
28666 goto exit;
28667
28668 tsk->thread.error_code = error_code;
28669 tsk->thread.trap_nr = X86_TRAP_GP;
28670 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
28671- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
28672+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
28673+
28674+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28675+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
28676+ die("PAX: suspicious general protection fault", regs, error_code);
28677+ else
28678+#endif
28679+
28680 die("general protection fault", regs, error_code);
28681+ }
28682 goto exit;
28683 }
28684
28685+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
28686+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
28687+ struct mm_struct *mm = tsk->mm;
28688+ unsigned long limit;
28689+
28690+ down_write(&mm->mmap_sem);
28691+ limit = mm->context.user_cs_limit;
28692+ if (limit < TASK_SIZE) {
28693+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
28694+ up_write(&mm->mmap_sem);
28695+ return;
28696+ }
28697+ up_write(&mm->mmap_sem);
28698+ }
28699+#endif
28700+
28701 tsk->thread.error_code = error_code;
28702 tsk->thread.trap_nr = X86_TRAP_GP;
28703
28704@@ -433,7 +474,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
28705 /* Copy the remainder of the stack from the current stack. */
28706 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
28707
28708- BUG_ON(!user_mode_vm(&new_stack->regs));
28709+ BUG_ON(!user_mode(&new_stack->regs));
28710 return new_stack;
28711 }
28712 #endif
28713@@ -518,7 +559,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28714 /* It's safe to allow irq's after DR6 has been saved */
28715 preempt_conditional_sti(regs);
28716
28717- if (regs->flags & X86_VM_MASK) {
28718+ if (v8086_mode(regs)) {
28719 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
28720 X86_TRAP_DB);
28721 preempt_conditional_cli(regs);
28722@@ -533,7 +574,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
28723 * We already checked v86 mode above, so we can check for kernel mode
28724 * by just checking the CPL of CS.
28725 */
28726- if ((dr6 & DR_STEP) && !user_mode(regs)) {
28727+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
28728 tsk->thread.debugreg6 &= ~DR_STEP;
28729 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
28730 regs->flags &= ~X86_EFLAGS_TF;
28731@@ -566,7 +607,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
28732 return;
28733 conditional_sti(regs);
28734
28735- if (!user_mode_vm(regs))
28736+ if (!user_mode(regs))
28737 {
28738 if (!fixup_exception(regs)) {
28739 task->thread.error_code = error_code;
28740diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
28741index b7e50bb..f4a93ae 100644
28742--- a/arch/x86/kernel/tsc.c
28743+++ b/arch/x86/kernel/tsc.c
28744@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
28745 */
28746 smp_wmb();
28747
28748- ACCESS_ONCE(c2n->head) = data;
28749+ ACCESS_ONCE_RW(c2n->head) = data;
28750 }
28751
28752 /*
28753diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
28754index 5d1cbfe..2a21feb 100644
28755--- a/arch/x86/kernel/uprobes.c
28756+++ b/arch/x86/kernel/uprobes.c
28757@@ -845,7 +845,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
28758 int ret = NOTIFY_DONE;
28759
28760 /* We are only interested in userspace traps */
28761- if (regs && !user_mode_vm(regs))
28762+ if (regs && !user_mode(regs))
28763 return NOTIFY_DONE;
28764
28765 switch (val) {
28766@@ -919,7 +919,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
28767
28768 if (nleft != rasize) {
28769 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
28770- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
28771+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
28772
28773 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
28774 }
28775diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
28776index b9242ba..50c5edd 100644
28777--- a/arch/x86/kernel/verify_cpu.S
28778+++ b/arch/x86/kernel/verify_cpu.S
28779@@ -20,6 +20,7 @@
28780 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
28781 * arch/x86/kernel/trampoline_64.S: secondary processor verification
28782 * arch/x86/kernel/head_32.S: processor startup
28783+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
28784 *
28785 * verify_cpu, returns the status of longmode and SSE in register %eax.
28786 * 0: Success 1: Failure
28787diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
28788index e8edcf5..27f9344 100644
28789--- a/arch/x86/kernel/vm86_32.c
28790+++ b/arch/x86/kernel/vm86_32.c
28791@@ -44,6 +44,7 @@
28792 #include <linux/ptrace.h>
28793 #include <linux/audit.h>
28794 #include <linux/stddef.h>
28795+#include <linux/grsecurity.h>
28796
28797 #include <asm/uaccess.h>
28798 #include <asm/io.h>
28799@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
28800 do_exit(SIGSEGV);
28801 }
28802
28803- tss = &per_cpu(init_tss, get_cpu());
28804+ tss = init_tss + get_cpu();
28805 current->thread.sp0 = current->thread.saved_sp0;
28806 current->thread.sysenter_cs = __KERNEL_CS;
28807 load_sp0(tss, &current->thread);
28808@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
28809
28810 if (tsk->thread.saved_sp0)
28811 return -EPERM;
28812+
28813+#ifdef CONFIG_GRKERNSEC_VM86
28814+ if (!capable(CAP_SYS_RAWIO)) {
28815+ gr_handle_vm86();
28816+ return -EPERM;
28817+ }
28818+#endif
28819+
28820 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
28821 offsetof(struct kernel_vm86_struct, vm86plus) -
28822 sizeof(info.regs));
28823@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
28824 int tmp;
28825 struct vm86plus_struct __user *v86;
28826
28827+#ifdef CONFIG_GRKERNSEC_VM86
28828+ if (!capable(CAP_SYS_RAWIO)) {
28829+ gr_handle_vm86();
28830+ return -EPERM;
28831+ }
28832+#endif
28833+
28834 tsk = current;
28835 switch (cmd) {
28836 case VM86_REQUEST_IRQ:
28837@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
28838 tsk->thread.saved_fs = info->regs32->fs;
28839 tsk->thread.saved_gs = get_user_gs(info->regs32);
28840
28841- tss = &per_cpu(init_tss, get_cpu());
28842+ tss = init_tss + get_cpu();
28843 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
28844 if (cpu_has_sep)
28845 tsk->thread.sysenter_cs = 0;
28846@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
28847 goto cannot_handle;
28848 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
28849 goto cannot_handle;
28850- intr_ptr = (unsigned long __user *) (i << 2);
28851+ intr_ptr = (__force unsigned long __user *) (i << 2);
28852 if (get_user(segoffs, intr_ptr))
28853 goto cannot_handle;
28854 if ((segoffs >> 16) == BIOSSEG)
28855diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
28856index 49edf2d..c0d1362 100644
28857--- a/arch/x86/kernel/vmlinux.lds.S
28858+++ b/arch/x86/kernel/vmlinux.lds.S
28859@@ -26,6 +26,13 @@
28860 #include <asm/page_types.h>
28861 #include <asm/cache.h>
28862 #include <asm/boot.h>
28863+#include <asm/segment.h>
28864+
28865+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
28866+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
28867+#else
28868+#define __KERNEL_TEXT_OFFSET 0
28869+#endif
28870
28871 #undef i386 /* in case the preprocessor is a 32bit one */
28872
28873@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
28874
28875 PHDRS {
28876 text PT_LOAD FLAGS(5); /* R_E */
28877+#ifdef CONFIG_X86_32
28878+ module PT_LOAD FLAGS(5); /* R_E */
28879+#endif
28880+#ifdef CONFIG_XEN
28881+ rodata PT_LOAD FLAGS(5); /* R_E */
28882+#else
28883+ rodata PT_LOAD FLAGS(4); /* R__ */
28884+#endif
28885 data PT_LOAD FLAGS(6); /* RW_ */
28886-#ifdef CONFIG_X86_64
28887+ init.begin PT_LOAD FLAGS(6); /* RW_ */
28888 #ifdef CONFIG_SMP
28889 percpu PT_LOAD FLAGS(6); /* RW_ */
28890 #endif
28891+ text.init PT_LOAD FLAGS(5); /* R_E */
28892+ text.exit PT_LOAD FLAGS(5); /* R_E */
28893 init PT_LOAD FLAGS(7); /* RWE */
28894-#endif
28895 note PT_NOTE FLAGS(0); /* ___ */
28896 }
28897
28898 SECTIONS
28899 {
28900 #ifdef CONFIG_X86_32
28901- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
28902- phys_startup_32 = startup_32 - LOAD_OFFSET;
28903+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
28904 #else
28905- . = __START_KERNEL;
28906- phys_startup_64 = startup_64 - LOAD_OFFSET;
28907+ . = __START_KERNEL;
28908 #endif
28909
28910 /* Text and read-only data */
28911- .text : AT(ADDR(.text) - LOAD_OFFSET) {
28912- _text = .;
28913+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
28914 /* bootstrapping code */
28915+#ifdef CONFIG_X86_32
28916+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28917+#else
28918+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28919+#endif
28920+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
28921+ _text = .;
28922 HEAD_TEXT
28923 . = ALIGN(8);
28924 _stext = .;
28925@@ -104,13 +124,47 @@ SECTIONS
28926 IRQENTRY_TEXT
28927 *(.fixup)
28928 *(.gnu.warning)
28929- /* End of text section */
28930- _etext = .;
28931 } :text = 0x9090
28932
28933- NOTES :text :note
28934+ . += __KERNEL_TEXT_OFFSET;
28935
28936- EXCEPTION_TABLE(16) :text = 0x9090
28937+#ifdef CONFIG_X86_32
28938+ . = ALIGN(PAGE_SIZE);
28939+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
28940+
28941+#ifdef CONFIG_PAX_KERNEXEC
28942+ MODULES_EXEC_VADDR = .;
28943+ BYTE(0)
28944+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
28945+ . = ALIGN(HPAGE_SIZE) - 1;
28946+ MODULES_EXEC_END = .;
28947+#endif
28948+
28949+ } :module
28950+#endif
28951+
28952+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
28953+ /* End of text section */
28954+ BYTE(0)
28955+ _etext = . - __KERNEL_TEXT_OFFSET;
28956+ }
28957+
28958+#ifdef CONFIG_X86_32
28959+ . = ALIGN(PAGE_SIZE);
28960+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
28961+ . = ALIGN(PAGE_SIZE);
28962+ *(.empty_zero_page)
28963+ *(.initial_pg_fixmap)
28964+ *(.initial_pg_pmd)
28965+ *(.initial_page_table)
28966+ *(.swapper_pg_dir)
28967+ } :rodata
28968+#endif
28969+
28970+ . = ALIGN(PAGE_SIZE);
28971+ NOTES :rodata :note
28972+
28973+ EXCEPTION_TABLE(16) :rodata
28974
28975 #if defined(CONFIG_DEBUG_RODATA)
28976 /* .text should occupy whole number of pages */
28977@@ -122,16 +176,20 @@ SECTIONS
28978
28979 /* Data */
28980 .data : AT(ADDR(.data) - LOAD_OFFSET) {
28981+
28982+#ifdef CONFIG_PAX_KERNEXEC
28983+ . = ALIGN(HPAGE_SIZE);
28984+#else
28985+ . = ALIGN(PAGE_SIZE);
28986+#endif
28987+
28988 /* Start of data section */
28989 _sdata = .;
28990
28991 /* init_task */
28992 INIT_TASK_DATA(THREAD_SIZE)
28993
28994-#ifdef CONFIG_X86_32
28995- /* 32 bit has nosave before _edata */
28996 NOSAVE_DATA
28997-#endif
28998
28999 PAGE_ALIGNED_DATA(PAGE_SIZE)
29000
29001@@ -174,12 +232,19 @@ SECTIONS
29002 . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
29003
29004 /* Init code and data - will be freed after init */
29005- . = ALIGN(PAGE_SIZE);
29006 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
29007+ BYTE(0)
29008+
29009+#ifdef CONFIG_PAX_KERNEXEC
29010+ . = ALIGN(HPAGE_SIZE);
29011+#else
29012+ . = ALIGN(PAGE_SIZE);
29013+#endif
29014+
29015 __init_begin = .; /* paired with __init_end */
29016- }
29017+ } :init.begin
29018
29019-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
29020+#ifdef CONFIG_SMP
29021 /*
29022 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
29023 * output PHDR, so the next output section - .init.text - should
29024@@ -188,12 +253,27 @@ SECTIONS
29025 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
29026 #endif
29027
29028- INIT_TEXT_SECTION(PAGE_SIZE)
29029-#ifdef CONFIG_X86_64
29030- :init
29031-#endif
29032+ . = ALIGN(PAGE_SIZE);
29033+ init_begin = .;
29034+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
29035+ VMLINUX_SYMBOL(_sinittext) = .;
29036+ INIT_TEXT
29037+ VMLINUX_SYMBOL(_einittext) = .;
29038+ . = ALIGN(PAGE_SIZE);
29039+ } :text.init
29040
29041- INIT_DATA_SECTION(16)
29042+ /*
29043+ * .exit.text is discard at runtime, not link time, to deal with
29044+ * references from .altinstructions and .eh_frame
29045+ */
29046+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
29047+ EXIT_TEXT
29048+ . = ALIGN(16);
29049+ } :text.exit
29050+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
29051+
29052+ . = ALIGN(PAGE_SIZE);
29053+ INIT_DATA_SECTION(16) :init
29054
29055 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
29056 __x86_cpu_dev_start = .;
29057@@ -264,19 +344,12 @@ SECTIONS
29058 }
29059
29060 . = ALIGN(8);
29061- /*
29062- * .exit.text is discard at runtime, not link time, to deal with
29063- * references from .altinstructions and .eh_frame
29064- */
29065- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
29066- EXIT_TEXT
29067- }
29068
29069 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
29070 EXIT_DATA
29071 }
29072
29073-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
29074+#ifndef CONFIG_SMP
29075 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
29076 #endif
29077
29078@@ -295,16 +368,10 @@ SECTIONS
29079 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
29080 __smp_locks = .;
29081 *(.smp_locks)
29082- . = ALIGN(PAGE_SIZE);
29083 __smp_locks_end = .;
29084+ . = ALIGN(PAGE_SIZE);
29085 }
29086
29087-#ifdef CONFIG_X86_64
29088- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
29089- NOSAVE_DATA
29090- }
29091-#endif
29092-
29093 /* BSS */
29094 . = ALIGN(PAGE_SIZE);
29095 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
29096@@ -320,6 +387,7 @@ SECTIONS
29097 __brk_base = .;
29098 . += 64 * 1024; /* 64k alignment slop space */
29099 *(.brk_reservation) /* areas brk users have reserved */
29100+ . = ALIGN(HPAGE_SIZE);
29101 __brk_limit = .;
29102 }
29103
29104@@ -346,13 +414,12 @@ SECTIONS
29105 * for the boot processor.
29106 */
29107 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
29108-INIT_PER_CPU(gdt_page);
29109 INIT_PER_CPU(irq_stack_union);
29110
29111 /*
29112 * Build-time check on the image size:
29113 */
29114-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
29115+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
29116 "kernel image bigger than KERNEL_IMAGE_SIZE");
29117
29118 #ifdef CONFIG_SMP
29119diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
29120index e1e1e80..1400089 100644
29121--- a/arch/x86/kernel/vsyscall_64.c
29122+++ b/arch/x86/kernel/vsyscall_64.c
29123@@ -54,15 +54,13 @@
29124
29125 DEFINE_VVAR(int, vgetcpu_mode);
29126
29127-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
29128+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
29129
29130 static int __init vsyscall_setup(char *str)
29131 {
29132 if (str) {
29133 if (!strcmp("emulate", str))
29134 vsyscall_mode = EMULATE;
29135- else if (!strcmp("native", str))
29136- vsyscall_mode = NATIVE;
29137 else if (!strcmp("none", str))
29138 vsyscall_mode = NONE;
29139 else
29140@@ -279,8 +277,7 @@ do_ret:
29141 return true;
29142
29143 sigsegv:
29144- force_sig(SIGSEGV, current);
29145- return true;
29146+ do_group_exit(SIGKILL);
29147 }
29148
29149 /*
29150@@ -331,10 +328,7 @@ void __init map_vsyscall(void)
29151 extern char __vsyscall_page;
29152 unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
29153
29154- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
29155- vsyscall_mode == NATIVE
29156- ? PAGE_KERNEL_VSYSCALL
29157- : PAGE_KERNEL_VVAR);
29158+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
29159 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
29160 (unsigned long)VSYSCALL_ADDR);
29161 }
29162diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
29163index 04068192..4d75aa6 100644
29164--- a/arch/x86/kernel/x8664_ksyms_64.c
29165+++ b/arch/x86/kernel/x8664_ksyms_64.c
29166@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
29167 EXPORT_SYMBOL(copy_user_generic_unrolled);
29168 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
29169 EXPORT_SYMBOL(__copy_user_nocache);
29170-EXPORT_SYMBOL(_copy_from_user);
29171-EXPORT_SYMBOL(_copy_to_user);
29172
29173 EXPORT_SYMBOL(copy_page);
29174 EXPORT_SYMBOL(clear_page);
29175@@ -73,3 +71,7 @@ EXPORT_SYMBOL(___preempt_schedule);
29176 EXPORT_SYMBOL(___preempt_schedule_context);
29177 #endif
29178 #endif
29179+
29180+#ifdef CONFIG_PAX_PER_CPU_PGD
29181+EXPORT_SYMBOL(cpu_pgd);
29182+#endif
29183diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
29184index e48b674..a451dd9 100644
29185--- a/arch/x86/kernel/x86_init.c
29186+++ b/arch/x86/kernel/x86_init.c
29187@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
29188 static void default_nmi_init(void) { };
29189 static int default_i8042_detect(void) { return 1; };
29190
29191-struct x86_platform_ops x86_platform = {
29192+struct x86_platform_ops x86_platform __read_only = {
29193 .calibrate_tsc = native_calibrate_tsc,
29194 .get_wallclock = mach_get_cmos_time,
29195 .set_wallclock = mach_set_rtc_mmss,
29196@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
29197 EXPORT_SYMBOL_GPL(x86_platform);
29198
29199 #if defined(CONFIG_PCI_MSI)
29200-struct x86_msi_ops x86_msi = {
29201+struct x86_msi_ops x86_msi __read_only = {
29202 .setup_msi_irqs = native_setup_msi_irqs,
29203 .compose_msi_msg = native_compose_msi_msg,
29204 .teardown_msi_irq = native_teardown_msi_irq,
29205@@ -150,7 +150,7 @@ u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag)
29206 }
29207 #endif
29208
29209-struct x86_io_apic_ops x86_io_apic_ops = {
29210+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
29211 .init = native_io_apic_init_mappings,
29212 .read = native_io_apic_read,
29213 .write = native_io_apic_write,
29214diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
29215index 4c540c4..0b985b0 100644
29216--- a/arch/x86/kernel/xsave.c
29217+++ b/arch/x86/kernel/xsave.c
29218@@ -167,18 +167,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
29219
29220 /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
29221 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
29222- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
29223+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
29224
29225 if (!use_xsave())
29226 return err;
29227
29228- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
29229+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
29230
29231 /*
29232 * Read the xstate_bv which we copied (directly from the cpu or
29233 * from the state in task struct) to the user buffers.
29234 */
29235- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
29236+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
29237
29238 /*
29239 * For legacy compatible, we always set FP/SSE bits in the bit
29240@@ -193,7 +193,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
29241 */
29242 xstate_bv |= XSTATE_FPSSE;
29243
29244- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
29245+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
29246
29247 return err;
29248 }
29249@@ -202,6 +202,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
29250 {
29251 int err;
29252
29253+ buf = (struct xsave_struct __user *)____m(buf);
29254 if (use_xsave())
29255 err = xsave_user(buf);
29256 else if (use_fxsr())
29257@@ -312,6 +313,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
29258 */
29259 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
29260 {
29261+ buf = (void __user *)____m(buf);
29262 if (use_xsave()) {
29263 if ((unsigned long)buf % 64 || fx_only) {
29264 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
29265diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
29266index 38a0afe..94421a9 100644
29267--- a/arch/x86/kvm/cpuid.c
29268+++ b/arch/x86/kvm/cpuid.c
29269@@ -166,15 +166,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
29270 struct kvm_cpuid2 *cpuid,
29271 struct kvm_cpuid_entry2 __user *entries)
29272 {
29273- int r;
29274+ int r, i;
29275
29276 r = -E2BIG;
29277 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
29278 goto out;
29279 r = -EFAULT;
29280- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
29281- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
29282+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
29283 goto out;
29284+ for (i = 0; i < cpuid->nent; ++i) {
29285+ struct kvm_cpuid_entry2 cpuid_entry;
29286+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
29287+ goto out;
29288+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
29289+ }
29290 vcpu->arch.cpuid_nent = cpuid->nent;
29291 kvm_apic_set_version(vcpu);
29292 kvm_x86_ops->cpuid_update(vcpu);
29293@@ -189,15 +194,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
29294 struct kvm_cpuid2 *cpuid,
29295 struct kvm_cpuid_entry2 __user *entries)
29296 {
29297- int r;
29298+ int r, i;
29299
29300 r = -E2BIG;
29301 if (cpuid->nent < vcpu->arch.cpuid_nent)
29302 goto out;
29303 r = -EFAULT;
29304- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
29305- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
29306+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
29307 goto out;
29308+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
29309+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
29310+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
29311+ goto out;
29312+ }
29313 return 0;
29314
29315 out:
29316diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
29317index 08e8a89..0e9183e 100644
29318--- a/arch/x86/kvm/lapic.c
29319+++ b/arch/x86/kvm/lapic.c
29320@@ -55,7 +55,7 @@
29321 #define APIC_BUS_CYCLE_NS 1
29322
29323 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
29324-#define apic_debug(fmt, arg...)
29325+#define apic_debug(fmt, arg...) do {} while (0)
29326
29327 #define APIC_LVT_NUM 6
29328 /* 14 is the version for Xeon and Pentium 8.4.8*/
29329diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
29330index 4107765..d9eb358 100644
29331--- a/arch/x86/kvm/paging_tmpl.h
29332+++ b/arch/x86/kvm/paging_tmpl.h
29333@@ -331,7 +331,7 @@ retry_walk:
29334 if (unlikely(kvm_is_error_hva(host_addr)))
29335 goto error;
29336
29337- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
29338+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
29339 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
29340 goto error;
29341 walker->ptep_user[walker->level - 1] = ptep_user;
29342diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
29343index 78dadc3..fd84599 100644
29344--- a/arch/x86/kvm/svm.c
29345+++ b/arch/x86/kvm/svm.c
29346@@ -3547,7 +3547,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
29347 int cpu = raw_smp_processor_id();
29348
29349 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
29350+
29351+ pax_open_kernel();
29352 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
29353+ pax_close_kernel();
29354+
29355 load_TR_desc();
29356 }
29357
29358@@ -3948,6 +3952,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
29359 #endif
29360 #endif
29361
29362+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29363+ __set_fs(current_thread_info()->addr_limit);
29364+#endif
29365+
29366 reload_tss(vcpu);
29367
29368 local_irq_disable();
29369diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
29370index 41a5426..c0b3c00 100644
29371--- a/arch/x86/kvm/vmx.c
29372+++ b/arch/x86/kvm/vmx.c
29373@@ -1341,12 +1341,12 @@ static void vmcs_write64(unsigned long field, u64 value)
29374 #endif
29375 }
29376
29377-static void vmcs_clear_bits(unsigned long field, u32 mask)
29378+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
29379 {
29380 vmcs_writel(field, vmcs_readl(field) & ~mask);
29381 }
29382
29383-static void vmcs_set_bits(unsigned long field, u32 mask)
29384+static void vmcs_set_bits(unsigned long field, unsigned long mask)
29385 {
29386 vmcs_writel(field, vmcs_readl(field) | mask);
29387 }
29388@@ -1606,7 +1606,11 @@ static void reload_tss(void)
29389 struct desc_struct *descs;
29390
29391 descs = (void *)gdt->address;
29392+
29393+ pax_open_kernel();
29394 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
29395+ pax_close_kernel();
29396+
29397 load_TR_desc();
29398 }
29399
29400@@ -1834,6 +1838,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
29401 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
29402 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
29403
29404+#ifdef CONFIG_PAX_PER_CPU_PGD
29405+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
29406+#endif
29407+
29408 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
29409 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
29410 vmx->loaded_vmcs->cpu = cpu;
29411@@ -2123,7 +2131,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
29412 * reads and returns guest's timestamp counter "register"
29413 * guest_tsc = host_tsc + tsc_offset -- 21.3
29414 */
29415-static u64 guest_read_tsc(void)
29416+static u64 __intentional_overflow(-1) guest_read_tsc(void)
29417 {
29418 u64 host_tsc, tsc_offset;
29419
29420@@ -3114,8 +3122,11 @@ static __init int hardware_setup(void)
29421 if (!cpu_has_vmx_flexpriority())
29422 flexpriority_enabled = 0;
29423
29424- if (!cpu_has_vmx_tpr_shadow())
29425- kvm_x86_ops->update_cr8_intercept = NULL;
29426+ if (!cpu_has_vmx_tpr_shadow()) {
29427+ pax_open_kernel();
29428+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
29429+ pax_close_kernel();
29430+ }
29431
29432 if (enable_ept && !cpu_has_vmx_ept_2m_page())
29433 kvm_disable_largepages();
29434@@ -3126,13 +3137,15 @@ static __init int hardware_setup(void)
29435 if (!cpu_has_vmx_apicv())
29436 enable_apicv = 0;
29437
29438+ pax_open_kernel();
29439 if (enable_apicv)
29440- kvm_x86_ops->update_cr8_intercept = NULL;
29441+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
29442 else {
29443- kvm_x86_ops->hwapic_irr_update = NULL;
29444- kvm_x86_ops->deliver_posted_interrupt = NULL;
29445- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
29446+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
29447+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
29448+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
29449 }
29450+ pax_close_kernel();
29451
29452 if (nested)
29453 nested_vmx_setup_ctls_msrs();
29454@@ -4242,7 +4255,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
29455 unsigned long cr4;
29456
29457 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
29458+
29459+#ifndef CONFIG_PAX_PER_CPU_PGD
29460 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
29461+#endif
29462
29463 /* Save the most likely value for this task's CR4 in the VMCS. */
29464 cr4 = read_cr4();
29465@@ -4269,7 +4285,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
29466 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
29467 vmx->host_idt_base = dt.address;
29468
29469- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
29470+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
29471
29472 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
29473 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
29474@@ -7475,6 +7491,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29475 "jmp 2f \n\t"
29476 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
29477 "2: "
29478+
29479+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29480+ "ljmp %[cs],$3f\n\t"
29481+ "3: "
29482+#endif
29483+
29484 /* Save guest registers, load host registers, keep flags */
29485 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
29486 "pop %0 \n\t"
29487@@ -7527,6 +7549,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29488 #endif
29489 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
29490 [wordsize]"i"(sizeof(ulong))
29491+
29492+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
29493+ ,[cs]"i"(__KERNEL_CS)
29494+#endif
29495+
29496 : "cc", "memory"
29497 #ifdef CONFIG_X86_64
29498 , "rax", "rbx", "rdi", "rsi"
29499@@ -7540,7 +7567,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29500 if (debugctlmsr)
29501 update_debugctlmsr(debugctlmsr);
29502
29503-#ifndef CONFIG_X86_64
29504+#ifdef CONFIG_X86_32
29505 /*
29506 * The sysexit path does not restore ds/es, so we must set them to
29507 * a reasonable value ourselves.
29508@@ -7549,8 +7576,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
29509 * may be executed in interrupt context, which saves and restore segments
29510 * around it, nullifying its effect.
29511 */
29512- loadsegment(ds, __USER_DS);
29513- loadsegment(es, __USER_DS);
29514+ loadsegment(ds, __KERNEL_DS);
29515+ loadsegment(es, __KERNEL_DS);
29516+ loadsegment(ss, __KERNEL_DS);
29517+
29518+#ifdef CONFIG_PAX_KERNEXEC
29519+ loadsegment(fs, __KERNEL_PERCPU);
29520+#endif
29521+
29522+#ifdef CONFIG_PAX_MEMORY_UDEREF
29523+ __set_fs(current_thread_info()->addr_limit);
29524+#endif
29525+
29526 #endif
29527
29528 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
29529diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
29530index d6aeccf..cea125a 100644
29531--- a/arch/x86/kvm/x86.c
29532+++ b/arch/x86/kvm/x86.c
29533@@ -1857,8 +1857,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
29534 {
29535 struct kvm *kvm = vcpu->kvm;
29536 int lm = is_long_mode(vcpu);
29537- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29538- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29539+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
29540+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
29541 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
29542 : kvm->arch.xen_hvm_config.blob_size_32;
29543 u32 page_num = data & ~PAGE_MASK;
29544@@ -2779,6 +2779,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
29545 if (n < msr_list.nmsrs)
29546 goto out;
29547 r = -EFAULT;
29548+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
29549+ goto out;
29550 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
29551 num_msrs_to_save * sizeof(u32)))
29552 goto out;
29553@@ -5639,7 +5641,7 @@ static struct notifier_block pvclock_gtod_notifier = {
29554 };
29555 #endif
29556
29557-int kvm_arch_init(void *opaque)
29558+int kvm_arch_init(const void *opaque)
29559 {
29560 int r;
29561 struct kvm_x86_ops *ops = opaque;
29562diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
29563index aae9413..d11e829 100644
29564--- a/arch/x86/lguest/boot.c
29565+++ b/arch/x86/lguest/boot.c
29566@@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
29567 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
29568 * Launcher to reboot us.
29569 */
29570-static void lguest_restart(char *reason)
29571+static __noreturn void lguest_restart(char *reason)
29572 {
29573 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
29574+ BUG();
29575 }
29576
29577 /*G:050
29578diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
29579index 00933d5..3a64af9 100644
29580--- a/arch/x86/lib/atomic64_386_32.S
29581+++ b/arch/x86/lib/atomic64_386_32.S
29582@@ -48,6 +48,10 @@ BEGIN(read)
29583 movl (v), %eax
29584 movl 4(v), %edx
29585 RET_ENDP
29586+BEGIN(read_unchecked)
29587+ movl (v), %eax
29588+ movl 4(v), %edx
29589+RET_ENDP
29590 #undef v
29591
29592 #define v %esi
29593@@ -55,6 +59,10 @@ BEGIN(set)
29594 movl %ebx, (v)
29595 movl %ecx, 4(v)
29596 RET_ENDP
29597+BEGIN(set_unchecked)
29598+ movl %ebx, (v)
29599+ movl %ecx, 4(v)
29600+RET_ENDP
29601 #undef v
29602
29603 #define v %esi
29604@@ -70,6 +78,20 @@ RET_ENDP
29605 BEGIN(add)
29606 addl %eax, (v)
29607 adcl %edx, 4(v)
29608+
29609+#ifdef CONFIG_PAX_REFCOUNT
29610+ jno 0f
29611+ subl %eax, (v)
29612+ sbbl %edx, 4(v)
29613+ int $4
29614+0:
29615+ _ASM_EXTABLE(0b, 0b)
29616+#endif
29617+
29618+RET_ENDP
29619+BEGIN(add_unchecked)
29620+ addl %eax, (v)
29621+ adcl %edx, 4(v)
29622 RET_ENDP
29623 #undef v
29624
29625@@ -77,6 +99,24 @@ RET_ENDP
29626 BEGIN(add_return)
29627 addl (v), %eax
29628 adcl 4(v), %edx
29629+
29630+#ifdef CONFIG_PAX_REFCOUNT
29631+ into
29632+1234:
29633+ _ASM_EXTABLE(1234b, 2f)
29634+#endif
29635+
29636+ movl %eax, (v)
29637+ movl %edx, 4(v)
29638+
29639+#ifdef CONFIG_PAX_REFCOUNT
29640+2:
29641+#endif
29642+
29643+RET_ENDP
29644+BEGIN(add_return_unchecked)
29645+ addl (v), %eax
29646+ adcl 4(v), %edx
29647 movl %eax, (v)
29648 movl %edx, 4(v)
29649 RET_ENDP
29650@@ -86,6 +126,20 @@ RET_ENDP
29651 BEGIN(sub)
29652 subl %eax, (v)
29653 sbbl %edx, 4(v)
29654+
29655+#ifdef CONFIG_PAX_REFCOUNT
29656+ jno 0f
29657+ addl %eax, (v)
29658+ adcl %edx, 4(v)
29659+ int $4
29660+0:
29661+ _ASM_EXTABLE(0b, 0b)
29662+#endif
29663+
29664+RET_ENDP
29665+BEGIN(sub_unchecked)
29666+ subl %eax, (v)
29667+ sbbl %edx, 4(v)
29668 RET_ENDP
29669 #undef v
29670
29671@@ -96,6 +150,27 @@ BEGIN(sub_return)
29672 sbbl $0, %edx
29673 addl (v), %eax
29674 adcl 4(v), %edx
29675+
29676+#ifdef CONFIG_PAX_REFCOUNT
29677+ into
29678+1234:
29679+ _ASM_EXTABLE(1234b, 2f)
29680+#endif
29681+
29682+ movl %eax, (v)
29683+ movl %edx, 4(v)
29684+
29685+#ifdef CONFIG_PAX_REFCOUNT
29686+2:
29687+#endif
29688+
29689+RET_ENDP
29690+BEGIN(sub_return_unchecked)
29691+ negl %edx
29692+ negl %eax
29693+ sbbl $0, %edx
29694+ addl (v), %eax
29695+ adcl 4(v), %edx
29696 movl %eax, (v)
29697 movl %edx, 4(v)
29698 RET_ENDP
29699@@ -105,6 +180,20 @@ RET_ENDP
29700 BEGIN(inc)
29701 addl $1, (v)
29702 adcl $0, 4(v)
29703+
29704+#ifdef CONFIG_PAX_REFCOUNT
29705+ jno 0f
29706+ subl $1, (v)
29707+ sbbl $0, 4(v)
29708+ int $4
29709+0:
29710+ _ASM_EXTABLE(0b, 0b)
29711+#endif
29712+
29713+RET_ENDP
29714+BEGIN(inc_unchecked)
29715+ addl $1, (v)
29716+ adcl $0, 4(v)
29717 RET_ENDP
29718 #undef v
29719
29720@@ -114,6 +203,26 @@ BEGIN(inc_return)
29721 movl 4(v), %edx
29722 addl $1, %eax
29723 adcl $0, %edx
29724+
29725+#ifdef CONFIG_PAX_REFCOUNT
29726+ into
29727+1234:
29728+ _ASM_EXTABLE(1234b, 2f)
29729+#endif
29730+
29731+ movl %eax, (v)
29732+ movl %edx, 4(v)
29733+
29734+#ifdef CONFIG_PAX_REFCOUNT
29735+2:
29736+#endif
29737+
29738+RET_ENDP
29739+BEGIN(inc_return_unchecked)
29740+ movl (v), %eax
29741+ movl 4(v), %edx
29742+ addl $1, %eax
29743+ adcl $0, %edx
29744 movl %eax, (v)
29745 movl %edx, 4(v)
29746 RET_ENDP
29747@@ -123,6 +232,20 @@ RET_ENDP
29748 BEGIN(dec)
29749 subl $1, (v)
29750 sbbl $0, 4(v)
29751+
29752+#ifdef CONFIG_PAX_REFCOUNT
29753+ jno 0f
29754+ addl $1, (v)
29755+ adcl $0, 4(v)
29756+ int $4
29757+0:
29758+ _ASM_EXTABLE(0b, 0b)
29759+#endif
29760+
29761+RET_ENDP
29762+BEGIN(dec_unchecked)
29763+ subl $1, (v)
29764+ sbbl $0, 4(v)
29765 RET_ENDP
29766 #undef v
29767
29768@@ -132,6 +255,26 @@ BEGIN(dec_return)
29769 movl 4(v), %edx
29770 subl $1, %eax
29771 sbbl $0, %edx
29772+
29773+#ifdef CONFIG_PAX_REFCOUNT
29774+ into
29775+1234:
29776+ _ASM_EXTABLE(1234b, 2f)
29777+#endif
29778+
29779+ movl %eax, (v)
29780+ movl %edx, 4(v)
29781+
29782+#ifdef CONFIG_PAX_REFCOUNT
29783+2:
29784+#endif
29785+
29786+RET_ENDP
29787+BEGIN(dec_return_unchecked)
29788+ movl (v), %eax
29789+ movl 4(v), %edx
29790+ subl $1, %eax
29791+ sbbl $0, %edx
29792 movl %eax, (v)
29793 movl %edx, 4(v)
29794 RET_ENDP
29795@@ -143,6 +286,13 @@ BEGIN(add_unless)
29796 adcl %edx, %edi
29797 addl (v), %eax
29798 adcl 4(v), %edx
29799+
29800+#ifdef CONFIG_PAX_REFCOUNT
29801+ into
29802+1234:
29803+ _ASM_EXTABLE(1234b, 2f)
29804+#endif
29805+
29806 cmpl %eax, %ecx
29807 je 3f
29808 1:
29809@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
29810 1:
29811 addl $1, %eax
29812 adcl $0, %edx
29813+
29814+#ifdef CONFIG_PAX_REFCOUNT
29815+ into
29816+1234:
29817+ _ASM_EXTABLE(1234b, 2f)
29818+#endif
29819+
29820 movl %eax, (v)
29821 movl %edx, 4(v)
29822 movl $1, %eax
29823@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
29824 movl 4(v), %edx
29825 subl $1, %eax
29826 sbbl $0, %edx
29827+
29828+#ifdef CONFIG_PAX_REFCOUNT
29829+ into
29830+1234:
29831+ _ASM_EXTABLE(1234b, 1f)
29832+#endif
29833+
29834 js 1f
29835 movl %eax, (v)
29836 movl %edx, 4(v)
29837diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
29838index f5cc9eb..51fa319 100644
29839--- a/arch/x86/lib/atomic64_cx8_32.S
29840+++ b/arch/x86/lib/atomic64_cx8_32.S
29841@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
29842 CFI_STARTPROC
29843
29844 read64 %ecx
29845+ pax_force_retaddr
29846 ret
29847 CFI_ENDPROC
29848 ENDPROC(atomic64_read_cx8)
29849
29850+ENTRY(atomic64_read_unchecked_cx8)
29851+ CFI_STARTPROC
29852+
29853+ read64 %ecx
29854+ pax_force_retaddr
29855+ ret
29856+ CFI_ENDPROC
29857+ENDPROC(atomic64_read_unchecked_cx8)
29858+
29859 ENTRY(atomic64_set_cx8)
29860 CFI_STARTPROC
29861
29862@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
29863 cmpxchg8b (%esi)
29864 jne 1b
29865
29866+ pax_force_retaddr
29867 ret
29868 CFI_ENDPROC
29869 ENDPROC(atomic64_set_cx8)
29870
29871+ENTRY(atomic64_set_unchecked_cx8)
29872+ CFI_STARTPROC
29873+
29874+1:
29875+/* we don't need LOCK_PREFIX since aligned 64-bit writes
29876+ * are atomic on 586 and newer */
29877+ cmpxchg8b (%esi)
29878+ jne 1b
29879+
29880+ pax_force_retaddr
29881+ ret
29882+ CFI_ENDPROC
29883+ENDPROC(atomic64_set_unchecked_cx8)
29884+
29885 ENTRY(atomic64_xchg_cx8)
29886 CFI_STARTPROC
29887
29888@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
29889 cmpxchg8b (%esi)
29890 jne 1b
29891
29892+ pax_force_retaddr
29893 ret
29894 CFI_ENDPROC
29895 ENDPROC(atomic64_xchg_cx8)
29896
29897-.macro addsub_return func ins insc
29898-ENTRY(atomic64_\func\()_return_cx8)
29899+.macro addsub_return func ins insc unchecked=""
29900+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29901 CFI_STARTPROC
29902 SAVE ebp
29903 SAVE ebx
29904@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
29905 movl %edx, %ecx
29906 \ins\()l %esi, %ebx
29907 \insc\()l %edi, %ecx
29908+
29909+.ifb \unchecked
29910+#ifdef CONFIG_PAX_REFCOUNT
29911+ into
29912+2:
29913+ _ASM_EXTABLE(2b, 3f)
29914+#endif
29915+.endif
29916+
29917 LOCK_PREFIX
29918 cmpxchg8b (%ebp)
29919 jne 1b
29920-
29921-10:
29922 movl %ebx, %eax
29923 movl %ecx, %edx
29924+
29925+.ifb \unchecked
29926+#ifdef CONFIG_PAX_REFCOUNT
29927+3:
29928+#endif
29929+.endif
29930+
29931 RESTORE edi
29932 RESTORE esi
29933 RESTORE ebx
29934 RESTORE ebp
29935+ pax_force_retaddr
29936 ret
29937 CFI_ENDPROC
29938-ENDPROC(atomic64_\func\()_return_cx8)
29939+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29940 .endm
29941
29942 addsub_return add add adc
29943 addsub_return sub sub sbb
29944+addsub_return add add adc _unchecked
29945+addsub_return sub sub sbb _unchecked
29946
29947-.macro incdec_return func ins insc
29948-ENTRY(atomic64_\func\()_return_cx8)
29949+.macro incdec_return func ins insc unchecked=""
29950+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
29951 CFI_STARTPROC
29952 SAVE ebx
29953
29954@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
29955 movl %edx, %ecx
29956 \ins\()l $1, %ebx
29957 \insc\()l $0, %ecx
29958+
29959+.ifb \unchecked
29960+#ifdef CONFIG_PAX_REFCOUNT
29961+ into
29962+2:
29963+ _ASM_EXTABLE(2b, 3f)
29964+#endif
29965+.endif
29966+
29967 LOCK_PREFIX
29968 cmpxchg8b (%esi)
29969 jne 1b
29970
29971-10:
29972 movl %ebx, %eax
29973 movl %ecx, %edx
29974+
29975+.ifb \unchecked
29976+#ifdef CONFIG_PAX_REFCOUNT
29977+3:
29978+#endif
29979+.endif
29980+
29981 RESTORE ebx
29982+ pax_force_retaddr
29983 ret
29984 CFI_ENDPROC
29985-ENDPROC(atomic64_\func\()_return_cx8)
29986+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
29987 .endm
29988
29989 incdec_return inc add adc
29990 incdec_return dec sub sbb
29991+incdec_return inc add adc _unchecked
29992+incdec_return dec sub sbb _unchecked
29993
29994 ENTRY(atomic64_dec_if_positive_cx8)
29995 CFI_STARTPROC
29996@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
29997 movl %edx, %ecx
29998 subl $1, %ebx
29999 sbb $0, %ecx
30000+
30001+#ifdef CONFIG_PAX_REFCOUNT
30002+ into
30003+1234:
30004+ _ASM_EXTABLE(1234b, 2f)
30005+#endif
30006+
30007 js 2f
30008 LOCK_PREFIX
30009 cmpxchg8b (%esi)
30010@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
30011 movl %ebx, %eax
30012 movl %ecx, %edx
30013 RESTORE ebx
30014+ pax_force_retaddr
30015 ret
30016 CFI_ENDPROC
30017 ENDPROC(atomic64_dec_if_positive_cx8)
30018@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
30019 movl %edx, %ecx
30020 addl %ebp, %ebx
30021 adcl %edi, %ecx
30022+
30023+#ifdef CONFIG_PAX_REFCOUNT
30024+ into
30025+1234:
30026+ _ASM_EXTABLE(1234b, 3f)
30027+#endif
30028+
30029 LOCK_PREFIX
30030 cmpxchg8b (%esi)
30031 jne 1b
30032@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
30033 CFI_ADJUST_CFA_OFFSET -8
30034 RESTORE ebx
30035 RESTORE ebp
30036+ pax_force_retaddr
30037 ret
30038 4:
30039 cmpl %edx, 4(%esp)
30040@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
30041 xorl %ecx, %ecx
30042 addl $1, %ebx
30043 adcl %edx, %ecx
30044+
30045+#ifdef CONFIG_PAX_REFCOUNT
30046+ into
30047+1234:
30048+ _ASM_EXTABLE(1234b, 3f)
30049+#endif
30050+
30051 LOCK_PREFIX
30052 cmpxchg8b (%esi)
30053 jne 1b
30054@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
30055 movl $1, %eax
30056 3:
30057 RESTORE ebx
30058+ pax_force_retaddr
30059 ret
30060 CFI_ENDPROC
30061 ENDPROC(atomic64_inc_not_zero_cx8)
30062diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
30063index e78b8eee..7e173a8 100644
30064--- a/arch/x86/lib/checksum_32.S
30065+++ b/arch/x86/lib/checksum_32.S
30066@@ -29,7 +29,8 @@
30067 #include <asm/dwarf2.h>
30068 #include <asm/errno.h>
30069 #include <asm/asm.h>
30070-
30071+#include <asm/segment.h>
30072+
30073 /*
30074 * computes a partial checksum, e.g. for TCP/UDP fragments
30075 */
30076@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
30077
30078 #define ARGBASE 16
30079 #define FP 12
30080-
30081-ENTRY(csum_partial_copy_generic)
30082+
30083+ENTRY(csum_partial_copy_generic_to_user)
30084 CFI_STARTPROC
30085+
30086+#ifdef CONFIG_PAX_MEMORY_UDEREF
30087+ pushl_cfi %gs
30088+ popl_cfi %es
30089+ jmp csum_partial_copy_generic
30090+#endif
30091+
30092+ENTRY(csum_partial_copy_generic_from_user)
30093+
30094+#ifdef CONFIG_PAX_MEMORY_UDEREF
30095+ pushl_cfi %gs
30096+ popl_cfi %ds
30097+#endif
30098+
30099+ENTRY(csum_partial_copy_generic)
30100 subl $4,%esp
30101 CFI_ADJUST_CFA_OFFSET 4
30102 pushl_cfi %edi
30103@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
30104 jmp 4f
30105 SRC(1: movw (%esi), %bx )
30106 addl $2, %esi
30107-DST( movw %bx, (%edi) )
30108+DST( movw %bx, %es:(%edi) )
30109 addl $2, %edi
30110 addw %bx, %ax
30111 adcl $0, %eax
30112@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
30113 SRC(1: movl (%esi), %ebx )
30114 SRC( movl 4(%esi), %edx )
30115 adcl %ebx, %eax
30116-DST( movl %ebx, (%edi) )
30117+DST( movl %ebx, %es:(%edi) )
30118 adcl %edx, %eax
30119-DST( movl %edx, 4(%edi) )
30120+DST( movl %edx, %es:4(%edi) )
30121
30122 SRC( movl 8(%esi), %ebx )
30123 SRC( movl 12(%esi), %edx )
30124 adcl %ebx, %eax
30125-DST( movl %ebx, 8(%edi) )
30126+DST( movl %ebx, %es:8(%edi) )
30127 adcl %edx, %eax
30128-DST( movl %edx, 12(%edi) )
30129+DST( movl %edx, %es:12(%edi) )
30130
30131 SRC( movl 16(%esi), %ebx )
30132 SRC( movl 20(%esi), %edx )
30133 adcl %ebx, %eax
30134-DST( movl %ebx, 16(%edi) )
30135+DST( movl %ebx, %es:16(%edi) )
30136 adcl %edx, %eax
30137-DST( movl %edx, 20(%edi) )
30138+DST( movl %edx, %es:20(%edi) )
30139
30140 SRC( movl 24(%esi), %ebx )
30141 SRC( movl 28(%esi), %edx )
30142 adcl %ebx, %eax
30143-DST( movl %ebx, 24(%edi) )
30144+DST( movl %ebx, %es:24(%edi) )
30145 adcl %edx, %eax
30146-DST( movl %edx, 28(%edi) )
30147+DST( movl %edx, %es:28(%edi) )
30148
30149 lea 32(%esi), %esi
30150 lea 32(%edi), %edi
30151@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
30152 shrl $2, %edx # This clears CF
30153 SRC(3: movl (%esi), %ebx )
30154 adcl %ebx, %eax
30155-DST( movl %ebx, (%edi) )
30156+DST( movl %ebx, %es:(%edi) )
30157 lea 4(%esi), %esi
30158 lea 4(%edi), %edi
30159 dec %edx
30160@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
30161 jb 5f
30162 SRC( movw (%esi), %cx )
30163 leal 2(%esi), %esi
30164-DST( movw %cx, (%edi) )
30165+DST( movw %cx, %es:(%edi) )
30166 leal 2(%edi), %edi
30167 je 6f
30168 shll $16,%ecx
30169 SRC(5: movb (%esi), %cl )
30170-DST( movb %cl, (%edi) )
30171+DST( movb %cl, %es:(%edi) )
30172 6: addl %ecx, %eax
30173 adcl $0, %eax
30174 7:
30175@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
30176
30177 6001:
30178 movl ARGBASE+20(%esp), %ebx # src_err_ptr
30179- movl $-EFAULT, (%ebx)
30180+ movl $-EFAULT, %ss:(%ebx)
30181
30182 # zero the complete destination - computing the rest
30183 # is too much work
30184@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
30185
30186 6002:
30187 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
30188- movl $-EFAULT,(%ebx)
30189+ movl $-EFAULT,%ss:(%ebx)
30190 jmp 5000b
30191
30192 .previous
30193
30194+ pushl_cfi %ss
30195+ popl_cfi %ds
30196+ pushl_cfi %ss
30197+ popl_cfi %es
30198 popl_cfi %ebx
30199 CFI_RESTORE ebx
30200 popl_cfi %esi
30201@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
30202 popl_cfi %ecx # equivalent to addl $4,%esp
30203 ret
30204 CFI_ENDPROC
30205-ENDPROC(csum_partial_copy_generic)
30206+ENDPROC(csum_partial_copy_generic_to_user)
30207
30208 #else
30209
30210 /* Version for PentiumII/PPro */
30211
30212 #define ROUND1(x) \
30213+ nop; nop; nop; \
30214 SRC(movl x(%esi), %ebx ) ; \
30215 addl %ebx, %eax ; \
30216- DST(movl %ebx, x(%edi) ) ;
30217+ DST(movl %ebx, %es:x(%edi)) ;
30218
30219 #define ROUND(x) \
30220+ nop; nop; nop; \
30221 SRC(movl x(%esi), %ebx ) ; \
30222 adcl %ebx, %eax ; \
30223- DST(movl %ebx, x(%edi) ) ;
30224+ DST(movl %ebx, %es:x(%edi)) ;
30225
30226 #define ARGBASE 12
30227-
30228-ENTRY(csum_partial_copy_generic)
30229+
30230+ENTRY(csum_partial_copy_generic_to_user)
30231 CFI_STARTPROC
30232+
30233+#ifdef CONFIG_PAX_MEMORY_UDEREF
30234+ pushl_cfi %gs
30235+ popl_cfi %es
30236+ jmp csum_partial_copy_generic
30237+#endif
30238+
30239+ENTRY(csum_partial_copy_generic_from_user)
30240+
30241+#ifdef CONFIG_PAX_MEMORY_UDEREF
30242+ pushl_cfi %gs
30243+ popl_cfi %ds
30244+#endif
30245+
30246+ENTRY(csum_partial_copy_generic)
30247 pushl_cfi %ebx
30248 CFI_REL_OFFSET ebx, 0
30249 pushl_cfi %edi
30250@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
30251 subl %ebx, %edi
30252 lea -1(%esi),%edx
30253 andl $-32,%edx
30254- lea 3f(%ebx,%ebx), %ebx
30255+ lea 3f(%ebx,%ebx,2), %ebx
30256 testl %esi, %esi
30257 jmp *%ebx
30258 1: addl $64,%esi
30259@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
30260 jb 5f
30261 SRC( movw (%esi), %dx )
30262 leal 2(%esi), %esi
30263-DST( movw %dx, (%edi) )
30264+DST( movw %dx, %es:(%edi) )
30265 leal 2(%edi), %edi
30266 je 6f
30267 shll $16,%edx
30268 5:
30269 SRC( movb (%esi), %dl )
30270-DST( movb %dl, (%edi) )
30271+DST( movb %dl, %es:(%edi) )
30272 6: addl %edx, %eax
30273 adcl $0, %eax
30274 7:
30275 .section .fixup, "ax"
30276 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
30277- movl $-EFAULT, (%ebx)
30278+ movl $-EFAULT, %ss:(%ebx)
30279 # zero the complete destination (computing the rest is too much work)
30280 movl ARGBASE+8(%esp),%edi # dst
30281 movl ARGBASE+12(%esp),%ecx # len
30282@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
30283 rep; stosb
30284 jmp 7b
30285 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
30286- movl $-EFAULT, (%ebx)
30287+ movl $-EFAULT, %ss:(%ebx)
30288 jmp 7b
30289 .previous
30290
30291+#ifdef CONFIG_PAX_MEMORY_UDEREF
30292+ pushl_cfi %ss
30293+ popl_cfi %ds
30294+ pushl_cfi %ss
30295+ popl_cfi %es
30296+#endif
30297+
30298 popl_cfi %esi
30299 CFI_RESTORE esi
30300 popl_cfi %edi
30301@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
30302 CFI_RESTORE ebx
30303 ret
30304 CFI_ENDPROC
30305-ENDPROC(csum_partial_copy_generic)
30306+ENDPROC(csum_partial_copy_generic_to_user)
30307
30308 #undef ROUND
30309 #undef ROUND1
30310diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
30311index f2145cf..cea889d 100644
30312--- a/arch/x86/lib/clear_page_64.S
30313+++ b/arch/x86/lib/clear_page_64.S
30314@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
30315 movl $4096/8,%ecx
30316 xorl %eax,%eax
30317 rep stosq
30318+ pax_force_retaddr
30319 ret
30320 CFI_ENDPROC
30321 ENDPROC(clear_page_c)
30322@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
30323 movl $4096,%ecx
30324 xorl %eax,%eax
30325 rep stosb
30326+ pax_force_retaddr
30327 ret
30328 CFI_ENDPROC
30329 ENDPROC(clear_page_c_e)
30330@@ -43,6 +45,7 @@ ENTRY(clear_page)
30331 leaq 64(%rdi),%rdi
30332 jnz .Lloop
30333 nop
30334+ pax_force_retaddr
30335 ret
30336 CFI_ENDPROC
30337 .Lclear_page_end:
30338@@ -58,7 +61,7 @@ ENDPROC(clear_page)
30339
30340 #include <asm/cpufeature.h>
30341
30342- .section .altinstr_replacement,"ax"
30343+ .section .altinstr_replacement,"a"
30344 1: .byte 0xeb /* jmp <disp8> */
30345 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
30346 2: .byte 0xeb /* jmp <disp8> */
30347diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
30348index 1e572c5..2a162cd 100644
30349--- a/arch/x86/lib/cmpxchg16b_emu.S
30350+++ b/arch/x86/lib/cmpxchg16b_emu.S
30351@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
30352
30353 popf
30354 mov $1, %al
30355+ pax_force_retaddr
30356 ret
30357
30358 not_same:
30359 popf
30360 xor %al,%al
30361+ pax_force_retaddr
30362 ret
30363
30364 CFI_ENDPROC
30365diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
30366index 176cca6..e0d658e 100644
30367--- a/arch/x86/lib/copy_page_64.S
30368+++ b/arch/x86/lib/copy_page_64.S
30369@@ -9,6 +9,7 @@ copy_page_rep:
30370 CFI_STARTPROC
30371 movl $4096/8, %ecx
30372 rep movsq
30373+ pax_force_retaddr
30374 ret
30375 CFI_ENDPROC
30376 ENDPROC(copy_page_rep)
30377@@ -24,8 +25,8 @@ ENTRY(copy_page)
30378 CFI_ADJUST_CFA_OFFSET 2*8
30379 movq %rbx, (%rsp)
30380 CFI_REL_OFFSET rbx, 0
30381- movq %r12, 1*8(%rsp)
30382- CFI_REL_OFFSET r12, 1*8
30383+ movq %r13, 1*8(%rsp)
30384+ CFI_REL_OFFSET r13, 1*8
30385
30386 movl $(4096/64)-5, %ecx
30387 .p2align 4
30388@@ -38,7 +39,7 @@ ENTRY(copy_page)
30389 movq 0x8*4(%rsi), %r9
30390 movq 0x8*5(%rsi), %r10
30391 movq 0x8*6(%rsi), %r11
30392- movq 0x8*7(%rsi), %r12
30393+ movq 0x8*7(%rsi), %r13
30394
30395 prefetcht0 5*64(%rsi)
30396
30397@@ -49,7 +50,7 @@ ENTRY(copy_page)
30398 movq %r9, 0x8*4(%rdi)
30399 movq %r10, 0x8*5(%rdi)
30400 movq %r11, 0x8*6(%rdi)
30401- movq %r12, 0x8*7(%rdi)
30402+ movq %r13, 0x8*7(%rdi)
30403
30404 leaq 64 (%rsi), %rsi
30405 leaq 64 (%rdi), %rdi
30406@@ -68,7 +69,7 @@ ENTRY(copy_page)
30407 movq 0x8*4(%rsi), %r9
30408 movq 0x8*5(%rsi), %r10
30409 movq 0x8*6(%rsi), %r11
30410- movq 0x8*7(%rsi), %r12
30411+ movq 0x8*7(%rsi), %r13
30412
30413 movq %rax, 0x8*0(%rdi)
30414 movq %rbx, 0x8*1(%rdi)
30415@@ -77,7 +78,7 @@ ENTRY(copy_page)
30416 movq %r9, 0x8*4(%rdi)
30417 movq %r10, 0x8*5(%rdi)
30418 movq %r11, 0x8*6(%rdi)
30419- movq %r12, 0x8*7(%rdi)
30420+ movq %r13, 0x8*7(%rdi)
30421
30422 leaq 64(%rdi), %rdi
30423 leaq 64(%rsi), %rsi
30424@@ -85,10 +86,11 @@ ENTRY(copy_page)
30425
30426 movq (%rsp), %rbx
30427 CFI_RESTORE rbx
30428- movq 1*8(%rsp), %r12
30429- CFI_RESTORE r12
30430+ movq 1*8(%rsp), %r13
30431+ CFI_RESTORE r13
30432 addq $2*8, %rsp
30433 CFI_ADJUST_CFA_OFFSET -2*8
30434+ pax_force_retaddr
30435 ret
30436 .Lcopy_page_end:
30437 CFI_ENDPROC
30438@@ -99,7 +101,7 @@ ENDPROC(copy_page)
30439
30440 #include <asm/cpufeature.h>
30441
30442- .section .altinstr_replacement,"ax"
30443+ .section .altinstr_replacement,"a"
30444 1: .byte 0xeb /* jmp <disp8> */
30445 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
30446 2:
30447diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
30448index dee945d..a84067b 100644
30449--- a/arch/x86/lib/copy_user_64.S
30450+++ b/arch/x86/lib/copy_user_64.S
30451@@ -18,31 +18,7 @@
30452 #include <asm/alternative-asm.h>
30453 #include <asm/asm.h>
30454 #include <asm/smap.h>
30455-
30456-/*
30457- * By placing feature2 after feature1 in altinstructions section, we logically
30458- * implement:
30459- * If CPU has feature2, jmp to alt2 is used
30460- * else if CPU has feature1, jmp to alt1 is used
30461- * else jmp to orig is used.
30462- */
30463- .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
30464-0:
30465- .byte 0xe9 /* 32bit jump */
30466- .long \orig-1f /* by default jump to orig */
30467-1:
30468- .section .altinstr_replacement,"ax"
30469-2: .byte 0xe9 /* near jump with 32bit immediate */
30470- .long \alt1-1b /* offset */ /* or alternatively to alt1 */
30471-3: .byte 0xe9 /* near jump with 32bit immediate */
30472- .long \alt2-1b /* offset */ /* or alternatively to alt2 */
30473- .previous
30474-
30475- .section .altinstructions,"a"
30476- altinstruction_entry 0b,2b,\feature1,5,5
30477- altinstruction_entry 0b,3b,\feature2,5,5
30478- .previous
30479- .endm
30480+#include <asm/pgtable.h>
30481
30482 .macro ALIGN_DESTINATION
30483 #ifdef FIX_ALIGNMENT
30484@@ -70,52 +46,6 @@
30485 #endif
30486 .endm
30487
30488-/* Standard copy_to_user with segment limit checking */
30489-ENTRY(_copy_to_user)
30490- CFI_STARTPROC
30491- GET_THREAD_INFO(%rax)
30492- movq %rdi,%rcx
30493- addq %rdx,%rcx
30494- jc bad_to_user
30495- cmpq TI_addr_limit(%rax),%rcx
30496- ja bad_to_user
30497- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30498- copy_user_generic_unrolled,copy_user_generic_string, \
30499- copy_user_enhanced_fast_string
30500- CFI_ENDPROC
30501-ENDPROC(_copy_to_user)
30502-
30503-/* Standard copy_from_user with segment limit checking */
30504-ENTRY(_copy_from_user)
30505- CFI_STARTPROC
30506- GET_THREAD_INFO(%rax)
30507- movq %rsi,%rcx
30508- addq %rdx,%rcx
30509- jc bad_from_user
30510- cmpq TI_addr_limit(%rax),%rcx
30511- ja bad_from_user
30512- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
30513- copy_user_generic_unrolled,copy_user_generic_string, \
30514- copy_user_enhanced_fast_string
30515- CFI_ENDPROC
30516-ENDPROC(_copy_from_user)
30517-
30518- .section .fixup,"ax"
30519- /* must zero dest */
30520-ENTRY(bad_from_user)
30521-bad_from_user:
30522- CFI_STARTPROC
30523- movl %edx,%ecx
30524- xorl %eax,%eax
30525- rep
30526- stosb
30527-bad_to_user:
30528- movl %edx,%eax
30529- ret
30530- CFI_ENDPROC
30531-ENDPROC(bad_from_user)
30532- .previous
30533-
30534 /*
30535 * copy_user_generic_unrolled - memory copy with exception handling.
30536 * This version is for CPUs like P4 that don't have efficient micro
30537@@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
30538 */
30539 ENTRY(copy_user_generic_unrolled)
30540 CFI_STARTPROC
30541+ ASM_PAX_OPEN_USERLAND
30542 ASM_STAC
30543 cmpl $8,%edx
30544 jb 20f /* less then 8 bytes, go to byte copy loop */
30545@@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
30546 jnz 21b
30547 23: xor %eax,%eax
30548 ASM_CLAC
30549+ ASM_PAX_CLOSE_USERLAND
30550+ pax_force_retaddr
30551 ret
30552
30553 .section .fixup,"ax"
30554@@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
30555 */
30556 ENTRY(copy_user_generic_string)
30557 CFI_STARTPROC
30558+ ASM_PAX_OPEN_USERLAND
30559 ASM_STAC
30560 cmpl $8,%edx
30561 jb 2f /* less than 8 bytes, go to byte copy loop */
30562@@ -249,6 +183,8 @@ ENTRY(copy_user_generic_string)
30563 movsb
30564 xorl %eax,%eax
30565 ASM_CLAC
30566+ ASM_PAX_CLOSE_USERLAND
30567+ pax_force_retaddr
30568 ret
30569
30570 .section .fixup,"ax"
30571@@ -276,12 +212,15 @@ ENDPROC(copy_user_generic_string)
30572 */
30573 ENTRY(copy_user_enhanced_fast_string)
30574 CFI_STARTPROC
30575+ ASM_PAX_OPEN_USERLAND
30576 ASM_STAC
30577 movl %edx,%ecx
30578 1: rep
30579 movsb
30580 xorl %eax,%eax
30581 ASM_CLAC
30582+ ASM_PAX_CLOSE_USERLAND
30583+ pax_force_retaddr
30584 ret
30585
30586 .section .fixup,"ax"
30587diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
30588index 6a4f43c..c70fb52 100644
30589--- a/arch/x86/lib/copy_user_nocache_64.S
30590+++ b/arch/x86/lib/copy_user_nocache_64.S
30591@@ -8,6 +8,7 @@
30592
30593 #include <linux/linkage.h>
30594 #include <asm/dwarf2.h>
30595+#include <asm/alternative-asm.h>
30596
30597 #define FIX_ALIGNMENT 1
30598
30599@@ -16,6 +17,7 @@
30600 #include <asm/thread_info.h>
30601 #include <asm/asm.h>
30602 #include <asm/smap.h>
30603+#include <asm/pgtable.h>
30604
30605 .macro ALIGN_DESTINATION
30606 #ifdef FIX_ALIGNMENT
30607@@ -49,6 +51,16 @@
30608 */
30609 ENTRY(__copy_user_nocache)
30610 CFI_STARTPROC
30611+
30612+#ifdef CONFIG_PAX_MEMORY_UDEREF
30613+ mov pax_user_shadow_base,%rcx
30614+ cmp %rcx,%rsi
30615+ jae 1f
30616+ add %rcx,%rsi
30617+1:
30618+#endif
30619+
30620+ ASM_PAX_OPEN_USERLAND
30621 ASM_STAC
30622 cmpl $8,%edx
30623 jb 20f /* less then 8 bytes, go to byte copy loop */
30624@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
30625 jnz 21b
30626 23: xorl %eax,%eax
30627 ASM_CLAC
30628+ ASM_PAX_CLOSE_USERLAND
30629 sfence
30630+ pax_force_retaddr
30631 ret
30632
30633 .section .fixup,"ax"
30634diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
30635index 2419d5f..fe52d0e 100644
30636--- a/arch/x86/lib/csum-copy_64.S
30637+++ b/arch/x86/lib/csum-copy_64.S
30638@@ -9,6 +9,7 @@
30639 #include <asm/dwarf2.h>
30640 #include <asm/errno.h>
30641 #include <asm/asm.h>
30642+#include <asm/alternative-asm.h>
30643
30644 /*
30645 * Checksum copy with exception handling.
30646@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
30647 CFI_ADJUST_CFA_OFFSET 7*8
30648 movq %rbx, 2*8(%rsp)
30649 CFI_REL_OFFSET rbx, 2*8
30650- movq %r12, 3*8(%rsp)
30651- CFI_REL_OFFSET r12, 3*8
30652+ movq %r15, 3*8(%rsp)
30653+ CFI_REL_OFFSET r15, 3*8
30654 movq %r14, 4*8(%rsp)
30655 CFI_REL_OFFSET r14, 4*8
30656 movq %r13, 5*8(%rsp)
30657@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
30658 movl %edx, %ecx
30659
30660 xorl %r9d, %r9d
30661- movq %rcx, %r12
30662+ movq %rcx, %r15
30663
30664- shrq $6, %r12
30665+ shrq $6, %r15
30666 jz .Lhandle_tail /* < 64 */
30667
30668 clc
30669
30670 /* main loop. clear in 64 byte blocks */
30671 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
30672- /* r11: temp3, rdx: temp4, r12 loopcnt */
30673+ /* r11: temp3, rdx: temp4, r15 loopcnt */
30674 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
30675 .p2align 4
30676 .Lloop:
30677@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
30678 adcq %r14, %rax
30679 adcq %r13, %rax
30680
30681- decl %r12d
30682+ decl %r15d
30683
30684 dest
30685 movq %rbx, (%rsi)
30686@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
30687 .Lende:
30688 movq 2*8(%rsp), %rbx
30689 CFI_RESTORE rbx
30690- movq 3*8(%rsp), %r12
30691- CFI_RESTORE r12
30692+ movq 3*8(%rsp), %r15
30693+ CFI_RESTORE r15
30694 movq 4*8(%rsp), %r14
30695 CFI_RESTORE r14
30696 movq 5*8(%rsp), %r13
30697@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
30698 CFI_RESTORE rbp
30699 addq $7*8, %rsp
30700 CFI_ADJUST_CFA_OFFSET -7*8
30701+ pax_force_retaddr
30702 ret
30703 CFI_RESTORE_STATE
30704
30705diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
30706index 7609e0e..b449b98 100644
30707--- a/arch/x86/lib/csum-wrappers_64.c
30708+++ b/arch/x86/lib/csum-wrappers_64.c
30709@@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
30710 len -= 2;
30711 }
30712 }
30713+ pax_open_userland();
30714 stac();
30715- isum = csum_partial_copy_generic((__force const void *)src,
30716+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
30717 dst, len, isum, errp, NULL);
30718 clac();
30719+ pax_close_userland();
30720 if (unlikely(*errp))
30721 goto out_err;
30722
30723@@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
30724 }
30725
30726 *errp = 0;
30727+ pax_open_userland();
30728 stac();
30729- ret = csum_partial_copy_generic(src, (void __force *)dst,
30730+ ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
30731 len, isum, NULL, errp);
30732 clac();
30733+ pax_close_userland();
30734 return ret;
30735 }
30736 EXPORT_SYMBOL(csum_partial_copy_to_user);
30737diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
30738index a451235..1daa956 100644
30739--- a/arch/x86/lib/getuser.S
30740+++ b/arch/x86/lib/getuser.S
30741@@ -33,17 +33,40 @@
30742 #include <asm/thread_info.h>
30743 #include <asm/asm.h>
30744 #include <asm/smap.h>
30745+#include <asm/segment.h>
30746+#include <asm/pgtable.h>
30747+#include <asm/alternative-asm.h>
30748+
30749+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
30750+#define __copyuser_seg gs;
30751+#else
30752+#define __copyuser_seg
30753+#endif
30754
30755 .text
30756 ENTRY(__get_user_1)
30757 CFI_STARTPROC
30758+
30759+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30760 GET_THREAD_INFO(%_ASM_DX)
30761 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30762 jae bad_get_user
30763 ASM_STAC
30764-1: movzbl (%_ASM_AX),%edx
30765+
30766+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30767+ mov pax_user_shadow_base,%_ASM_DX
30768+ cmp %_ASM_DX,%_ASM_AX
30769+ jae 1234f
30770+ add %_ASM_DX,%_ASM_AX
30771+1234:
30772+#endif
30773+
30774+#endif
30775+
30776+1: __copyuser_seg movzbl (%_ASM_AX),%edx
30777 xor %eax,%eax
30778 ASM_CLAC
30779+ pax_force_retaddr
30780 ret
30781 CFI_ENDPROC
30782 ENDPROC(__get_user_1)
30783@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
30784 ENTRY(__get_user_2)
30785 CFI_STARTPROC
30786 add $1,%_ASM_AX
30787+
30788+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30789 jc bad_get_user
30790 GET_THREAD_INFO(%_ASM_DX)
30791 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30792 jae bad_get_user
30793 ASM_STAC
30794-2: movzwl -1(%_ASM_AX),%edx
30795+
30796+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30797+ mov pax_user_shadow_base,%_ASM_DX
30798+ cmp %_ASM_DX,%_ASM_AX
30799+ jae 1234f
30800+ add %_ASM_DX,%_ASM_AX
30801+1234:
30802+#endif
30803+
30804+#endif
30805+
30806+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
30807 xor %eax,%eax
30808 ASM_CLAC
30809+ pax_force_retaddr
30810 ret
30811 CFI_ENDPROC
30812 ENDPROC(__get_user_2)
30813@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
30814 ENTRY(__get_user_4)
30815 CFI_STARTPROC
30816 add $3,%_ASM_AX
30817+
30818+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
30819 jc bad_get_user
30820 GET_THREAD_INFO(%_ASM_DX)
30821 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30822 jae bad_get_user
30823 ASM_STAC
30824-3: movl -3(%_ASM_AX),%edx
30825+
30826+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30827+ mov pax_user_shadow_base,%_ASM_DX
30828+ cmp %_ASM_DX,%_ASM_AX
30829+ jae 1234f
30830+ add %_ASM_DX,%_ASM_AX
30831+1234:
30832+#endif
30833+
30834+#endif
30835+
30836+3: __copyuser_seg movl -3(%_ASM_AX),%edx
30837 xor %eax,%eax
30838 ASM_CLAC
30839+ pax_force_retaddr
30840 ret
30841 CFI_ENDPROC
30842 ENDPROC(__get_user_4)
30843@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
30844 GET_THREAD_INFO(%_ASM_DX)
30845 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30846 jae bad_get_user
30847+
30848+#ifdef CONFIG_PAX_MEMORY_UDEREF
30849+ mov pax_user_shadow_base,%_ASM_DX
30850+ cmp %_ASM_DX,%_ASM_AX
30851+ jae 1234f
30852+ add %_ASM_DX,%_ASM_AX
30853+1234:
30854+#endif
30855+
30856 ASM_STAC
30857 4: movq -7(%_ASM_AX),%rdx
30858 xor %eax,%eax
30859 ASM_CLAC
30860+ pax_force_retaddr
30861 ret
30862 #else
30863 add $7,%_ASM_AX
30864@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
30865 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
30866 jae bad_get_user_8
30867 ASM_STAC
30868-4: movl -7(%_ASM_AX),%edx
30869-5: movl -3(%_ASM_AX),%ecx
30870+4: __copyuser_seg movl -7(%_ASM_AX),%edx
30871+5: __copyuser_seg movl -3(%_ASM_AX),%ecx
30872 xor %eax,%eax
30873 ASM_CLAC
30874+ pax_force_retaddr
30875 ret
30876 #endif
30877 CFI_ENDPROC
30878@@ -113,6 +175,7 @@ bad_get_user:
30879 xor %edx,%edx
30880 mov $(-EFAULT),%_ASM_AX
30881 ASM_CLAC
30882+ pax_force_retaddr
30883 ret
30884 CFI_ENDPROC
30885 END(bad_get_user)
30886@@ -124,6 +187,7 @@ bad_get_user_8:
30887 xor %ecx,%ecx
30888 mov $(-EFAULT),%_ASM_AX
30889 ASM_CLAC
30890+ pax_force_retaddr
30891 ret
30892 CFI_ENDPROC
30893 END(bad_get_user_8)
30894diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
30895index 54fcffe..7be149e 100644
30896--- a/arch/x86/lib/insn.c
30897+++ b/arch/x86/lib/insn.c
30898@@ -20,8 +20,10 @@
30899
30900 #ifdef __KERNEL__
30901 #include <linux/string.h>
30902+#include <asm/pgtable_types.h>
30903 #else
30904 #include <string.h>
30905+#define ktla_ktva(addr) addr
30906 #endif
30907 #include <asm/inat.h>
30908 #include <asm/insn.h>
30909@@ -53,8 +55,8 @@
30910 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
30911 {
30912 memset(insn, 0, sizeof(*insn));
30913- insn->kaddr = kaddr;
30914- insn->next_byte = kaddr;
30915+ insn->kaddr = ktla_ktva(kaddr);
30916+ insn->next_byte = ktla_ktva(kaddr);
30917 insn->x86_64 = x86_64 ? 1 : 0;
30918 insn->opnd_bytes = 4;
30919 if (x86_64)
30920diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
30921index 05a95e7..326f2fa 100644
30922--- a/arch/x86/lib/iomap_copy_64.S
30923+++ b/arch/x86/lib/iomap_copy_64.S
30924@@ -17,6 +17,7 @@
30925
30926 #include <linux/linkage.h>
30927 #include <asm/dwarf2.h>
30928+#include <asm/alternative-asm.h>
30929
30930 /*
30931 * override generic version in lib/iomap_copy.c
30932@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
30933 CFI_STARTPROC
30934 movl %edx,%ecx
30935 rep movsd
30936+ pax_force_retaddr
30937 ret
30938 CFI_ENDPROC
30939 ENDPROC(__iowrite32_copy)
30940diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
30941index 56313a3..0db417e 100644
30942--- a/arch/x86/lib/memcpy_64.S
30943+++ b/arch/x86/lib/memcpy_64.S
30944@@ -24,7 +24,7 @@
30945 * This gets patched over the unrolled variant (below) via the
30946 * alternative instructions framework:
30947 */
30948- .section .altinstr_replacement, "ax", @progbits
30949+ .section .altinstr_replacement, "a", @progbits
30950 .Lmemcpy_c:
30951 movq %rdi, %rax
30952 movq %rdx, %rcx
30953@@ -33,6 +33,7 @@
30954 rep movsq
30955 movl %edx, %ecx
30956 rep movsb
30957+ pax_force_retaddr
30958 ret
30959 .Lmemcpy_e:
30960 .previous
30961@@ -44,11 +45,12 @@
30962 * This gets patched over the unrolled variant (below) via the
30963 * alternative instructions framework:
30964 */
30965- .section .altinstr_replacement, "ax", @progbits
30966+ .section .altinstr_replacement, "a", @progbits
30967 .Lmemcpy_c_e:
30968 movq %rdi, %rax
30969 movq %rdx, %rcx
30970 rep movsb
30971+ pax_force_retaddr
30972 ret
30973 .Lmemcpy_e_e:
30974 .previous
30975@@ -136,6 +138,7 @@ ENTRY(memcpy)
30976 movq %r9, 1*8(%rdi)
30977 movq %r10, -2*8(%rdi, %rdx)
30978 movq %r11, -1*8(%rdi, %rdx)
30979+ pax_force_retaddr
30980 retq
30981 .p2align 4
30982 .Lless_16bytes:
30983@@ -148,6 +151,7 @@ ENTRY(memcpy)
30984 movq -1*8(%rsi, %rdx), %r9
30985 movq %r8, 0*8(%rdi)
30986 movq %r9, -1*8(%rdi, %rdx)
30987+ pax_force_retaddr
30988 retq
30989 .p2align 4
30990 .Lless_8bytes:
30991@@ -161,6 +165,7 @@ ENTRY(memcpy)
30992 movl -4(%rsi, %rdx), %r8d
30993 movl %ecx, (%rdi)
30994 movl %r8d, -4(%rdi, %rdx)
30995+ pax_force_retaddr
30996 retq
30997 .p2align 4
30998 .Lless_3bytes:
30999@@ -179,6 +184,7 @@ ENTRY(memcpy)
31000 movb %cl, (%rdi)
31001
31002 .Lend:
31003+ pax_force_retaddr
31004 retq
31005 CFI_ENDPROC
31006 ENDPROC(memcpy)
31007diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
31008index 65268a6..dd1de11 100644
31009--- a/arch/x86/lib/memmove_64.S
31010+++ b/arch/x86/lib/memmove_64.S
31011@@ -202,14 +202,16 @@ ENTRY(memmove)
31012 movb (%rsi), %r11b
31013 movb %r11b, (%rdi)
31014 13:
31015+ pax_force_retaddr
31016 retq
31017 CFI_ENDPROC
31018
31019- .section .altinstr_replacement,"ax"
31020+ .section .altinstr_replacement,"a"
31021 .Lmemmove_begin_forward_efs:
31022 /* Forward moving data. */
31023 movq %rdx, %rcx
31024 rep movsb
31025+ pax_force_retaddr
31026 retq
31027 .Lmemmove_end_forward_efs:
31028 .previous
31029diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
31030index 2dcb380..2eb79fe 100644
31031--- a/arch/x86/lib/memset_64.S
31032+++ b/arch/x86/lib/memset_64.S
31033@@ -16,7 +16,7 @@
31034 *
31035 * rax original destination
31036 */
31037- .section .altinstr_replacement, "ax", @progbits
31038+ .section .altinstr_replacement, "a", @progbits
31039 .Lmemset_c:
31040 movq %rdi,%r9
31041 movq %rdx,%rcx
31042@@ -30,6 +30,7 @@
31043 movl %edx,%ecx
31044 rep stosb
31045 movq %r9,%rax
31046+ pax_force_retaddr
31047 ret
31048 .Lmemset_e:
31049 .previous
31050@@ -45,13 +46,14 @@
31051 *
31052 * rax original destination
31053 */
31054- .section .altinstr_replacement, "ax", @progbits
31055+ .section .altinstr_replacement, "a", @progbits
31056 .Lmemset_c_e:
31057 movq %rdi,%r9
31058 movb %sil,%al
31059 movq %rdx,%rcx
31060 rep stosb
31061 movq %r9,%rax
31062+ pax_force_retaddr
31063 ret
31064 .Lmemset_e_e:
31065 .previous
31066@@ -118,6 +120,7 @@ ENTRY(__memset)
31067
31068 .Lende:
31069 movq %r10,%rax
31070+ pax_force_retaddr
31071 ret
31072
31073 CFI_RESTORE_STATE
31074diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
31075index c9f2d9b..e7fd2c0 100644
31076--- a/arch/x86/lib/mmx_32.c
31077+++ b/arch/x86/lib/mmx_32.c
31078@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
31079 {
31080 void *p;
31081 int i;
31082+ unsigned long cr0;
31083
31084 if (unlikely(in_interrupt()))
31085 return __memcpy(to, from, len);
31086@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
31087 kernel_fpu_begin();
31088
31089 __asm__ __volatile__ (
31090- "1: prefetch (%0)\n" /* This set is 28 bytes */
31091- " prefetch 64(%0)\n"
31092- " prefetch 128(%0)\n"
31093- " prefetch 192(%0)\n"
31094- " prefetch 256(%0)\n"
31095+ "1: prefetch (%1)\n" /* This set is 28 bytes */
31096+ " prefetch 64(%1)\n"
31097+ " prefetch 128(%1)\n"
31098+ " prefetch 192(%1)\n"
31099+ " prefetch 256(%1)\n"
31100 "2: \n"
31101 ".section .fixup, \"ax\"\n"
31102- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31103+ "3: \n"
31104+
31105+#ifdef CONFIG_PAX_KERNEXEC
31106+ " movl %%cr0, %0\n"
31107+ " movl %0, %%eax\n"
31108+ " andl $0xFFFEFFFF, %%eax\n"
31109+ " movl %%eax, %%cr0\n"
31110+#endif
31111+
31112+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31113+
31114+#ifdef CONFIG_PAX_KERNEXEC
31115+ " movl %0, %%cr0\n"
31116+#endif
31117+
31118 " jmp 2b\n"
31119 ".previous\n"
31120 _ASM_EXTABLE(1b, 3b)
31121- : : "r" (from));
31122+ : "=&r" (cr0) : "r" (from) : "ax");
31123
31124 for ( ; i > 5; i--) {
31125 __asm__ __volatile__ (
31126- "1: prefetch 320(%0)\n"
31127- "2: movq (%0), %%mm0\n"
31128- " movq 8(%0), %%mm1\n"
31129- " movq 16(%0), %%mm2\n"
31130- " movq 24(%0), %%mm3\n"
31131- " movq %%mm0, (%1)\n"
31132- " movq %%mm1, 8(%1)\n"
31133- " movq %%mm2, 16(%1)\n"
31134- " movq %%mm3, 24(%1)\n"
31135- " movq 32(%0), %%mm0\n"
31136- " movq 40(%0), %%mm1\n"
31137- " movq 48(%0), %%mm2\n"
31138- " movq 56(%0), %%mm3\n"
31139- " movq %%mm0, 32(%1)\n"
31140- " movq %%mm1, 40(%1)\n"
31141- " movq %%mm2, 48(%1)\n"
31142- " movq %%mm3, 56(%1)\n"
31143+ "1: prefetch 320(%1)\n"
31144+ "2: movq (%1), %%mm0\n"
31145+ " movq 8(%1), %%mm1\n"
31146+ " movq 16(%1), %%mm2\n"
31147+ " movq 24(%1), %%mm3\n"
31148+ " movq %%mm0, (%2)\n"
31149+ " movq %%mm1, 8(%2)\n"
31150+ " movq %%mm2, 16(%2)\n"
31151+ " movq %%mm3, 24(%2)\n"
31152+ " movq 32(%1), %%mm0\n"
31153+ " movq 40(%1), %%mm1\n"
31154+ " movq 48(%1), %%mm2\n"
31155+ " movq 56(%1), %%mm3\n"
31156+ " movq %%mm0, 32(%2)\n"
31157+ " movq %%mm1, 40(%2)\n"
31158+ " movq %%mm2, 48(%2)\n"
31159+ " movq %%mm3, 56(%2)\n"
31160 ".section .fixup, \"ax\"\n"
31161- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31162+ "3:\n"
31163+
31164+#ifdef CONFIG_PAX_KERNEXEC
31165+ " movl %%cr0, %0\n"
31166+ " movl %0, %%eax\n"
31167+ " andl $0xFFFEFFFF, %%eax\n"
31168+ " movl %%eax, %%cr0\n"
31169+#endif
31170+
31171+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31172+
31173+#ifdef CONFIG_PAX_KERNEXEC
31174+ " movl %0, %%cr0\n"
31175+#endif
31176+
31177 " jmp 2b\n"
31178 ".previous\n"
31179 _ASM_EXTABLE(1b, 3b)
31180- : : "r" (from), "r" (to) : "memory");
31181+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
31182
31183 from += 64;
31184 to += 64;
31185@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
31186 static void fast_copy_page(void *to, void *from)
31187 {
31188 int i;
31189+ unsigned long cr0;
31190
31191 kernel_fpu_begin();
31192
31193@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
31194 * but that is for later. -AV
31195 */
31196 __asm__ __volatile__(
31197- "1: prefetch (%0)\n"
31198- " prefetch 64(%0)\n"
31199- " prefetch 128(%0)\n"
31200- " prefetch 192(%0)\n"
31201- " prefetch 256(%0)\n"
31202+ "1: prefetch (%1)\n"
31203+ " prefetch 64(%1)\n"
31204+ " prefetch 128(%1)\n"
31205+ " prefetch 192(%1)\n"
31206+ " prefetch 256(%1)\n"
31207 "2: \n"
31208 ".section .fixup, \"ax\"\n"
31209- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31210+ "3: \n"
31211+
31212+#ifdef CONFIG_PAX_KERNEXEC
31213+ " movl %%cr0, %0\n"
31214+ " movl %0, %%eax\n"
31215+ " andl $0xFFFEFFFF, %%eax\n"
31216+ " movl %%eax, %%cr0\n"
31217+#endif
31218+
31219+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31220+
31221+#ifdef CONFIG_PAX_KERNEXEC
31222+ " movl %0, %%cr0\n"
31223+#endif
31224+
31225 " jmp 2b\n"
31226 ".previous\n"
31227- _ASM_EXTABLE(1b, 3b) : : "r" (from));
31228+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
31229
31230 for (i = 0; i < (4096-320)/64; i++) {
31231 __asm__ __volatile__ (
31232- "1: prefetch 320(%0)\n"
31233- "2: movq (%0), %%mm0\n"
31234- " movntq %%mm0, (%1)\n"
31235- " movq 8(%0), %%mm1\n"
31236- " movntq %%mm1, 8(%1)\n"
31237- " movq 16(%0), %%mm2\n"
31238- " movntq %%mm2, 16(%1)\n"
31239- " movq 24(%0), %%mm3\n"
31240- " movntq %%mm3, 24(%1)\n"
31241- " movq 32(%0), %%mm4\n"
31242- " movntq %%mm4, 32(%1)\n"
31243- " movq 40(%0), %%mm5\n"
31244- " movntq %%mm5, 40(%1)\n"
31245- " movq 48(%0), %%mm6\n"
31246- " movntq %%mm6, 48(%1)\n"
31247- " movq 56(%0), %%mm7\n"
31248- " movntq %%mm7, 56(%1)\n"
31249+ "1: prefetch 320(%1)\n"
31250+ "2: movq (%1), %%mm0\n"
31251+ " movntq %%mm0, (%2)\n"
31252+ " movq 8(%1), %%mm1\n"
31253+ " movntq %%mm1, 8(%2)\n"
31254+ " movq 16(%1), %%mm2\n"
31255+ " movntq %%mm2, 16(%2)\n"
31256+ " movq 24(%1), %%mm3\n"
31257+ " movntq %%mm3, 24(%2)\n"
31258+ " movq 32(%1), %%mm4\n"
31259+ " movntq %%mm4, 32(%2)\n"
31260+ " movq 40(%1), %%mm5\n"
31261+ " movntq %%mm5, 40(%2)\n"
31262+ " movq 48(%1), %%mm6\n"
31263+ " movntq %%mm6, 48(%2)\n"
31264+ " movq 56(%1), %%mm7\n"
31265+ " movntq %%mm7, 56(%2)\n"
31266 ".section .fixup, \"ax\"\n"
31267- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31268+ "3:\n"
31269+
31270+#ifdef CONFIG_PAX_KERNEXEC
31271+ " movl %%cr0, %0\n"
31272+ " movl %0, %%eax\n"
31273+ " andl $0xFFFEFFFF, %%eax\n"
31274+ " movl %%eax, %%cr0\n"
31275+#endif
31276+
31277+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31278+
31279+#ifdef CONFIG_PAX_KERNEXEC
31280+ " movl %0, %%cr0\n"
31281+#endif
31282+
31283 " jmp 2b\n"
31284 ".previous\n"
31285- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
31286+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
31287
31288 from += 64;
31289 to += 64;
31290@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
31291 static void fast_copy_page(void *to, void *from)
31292 {
31293 int i;
31294+ unsigned long cr0;
31295
31296 kernel_fpu_begin();
31297
31298 __asm__ __volatile__ (
31299- "1: prefetch (%0)\n"
31300- " prefetch 64(%0)\n"
31301- " prefetch 128(%0)\n"
31302- " prefetch 192(%0)\n"
31303- " prefetch 256(%0)\n"
31304+ "1: prefetch (%1)\n"
31305+ " prefetch 64(%1)\n"
31306+ " prefetch 128(%1)\n"
31307+ " prefetch 192(%1)\n"
31308+ " prefetch 256(%1)\n"
31309 "2: \n"
31310 ".section .fixup, \"ax\"\n"
31311- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31312+ "3: \n"
31313+
31314+#ifdef CONFIG_PAX_KERNEXEC
31315+ " movl %%cr0, %0\n"
31316+ " movl %0, %%eax\n"
31317+ " andl $0xFFFEFFFF, %%eax\n"
31318+ " movl %%eax, %%cr0\n"
31319+#endif
31320+
31321+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
31322+
31323+#ifdef CONFIG_PAX_KERNEXEC
31324+ " movl %0, %%cr0\n"
31325+#endif
31326+
31327 " jmp 2b\n"
31328 ".previous\n"
31329- _ASM_EXTABLE(1b, 3b) : : "r" (from));
31330+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
31331
31332 for (i = 0; i < 4096/64; i++) {
31333 __asm__ __volatile__ (
31334- "1: prefetch 320(%0)\n"
31335- "2: movq (%0), %%mm0\n"
31336- " movq 8(%0), %%mm1\n"
31337- " movq 16(%0), %%mm2\n"
31338- " movq 24(%0), %%mm3\n"
31339- " movq %%mm0, (%1)\n"
31340- " movq %%mm1, 8(%1)\n"
31341- " movq %%mm2, 16(%1)\n"
31342- " movq %%mm3, 24(%1)\n"
31343- " movq 32(%0), %%mm0\n"
31344- " movq 40(%0), %%mm1\n"
31345- " movq 48(%0), %%mm2\n"
31346- " movq 56(%0), %%mm3\n"
31347- " movq %%mm0, 32(%1)\n"
31348- " movq %%mm1, 40(%1)\n"
31349- " movq %%mm2, 48(%1)\n"
31350- " movq %%mm3, 56(%1)\n"
31351+ "1: prefetch 320(%1)\n"
31352+ "2: movq (%1), %%mm0\n"
31353+ " movq 8(%1), %%mm1\n"
31354+ " movq 16(%1), %%mm2\n"
31355+ " movq 24(%1), %%mm3\n"
31356+ " movq %%mm0, (%2)\n"
31357+ " movq %%mm1, 8(%2)\n"
31358+ " movq %%mm2, 16(%2)\n"
31359+ " movq %%mm3, 24(%2)\n"
31360+ " movq 32(%1), %%mm0\n"
31361+ " movq 40(%1), %%mm1\n"
31362+ " movq 48(%1), %%mm2\n"
31363+ " movq 56(%1), %%mm3\n"
31364+ " movq %%mm0, 32(%2)\n"
31365+ " movq %%mm1, 40(%2)\n"
31366+ " movq %%mm2, 48(%2)\n"
31367+ " movq %%mm3, 56(%2)\n"
31368 ".section .fixup, \"ax\"\n"
31369- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31370+ "3:\n"
31371+
31372+#ifdef CONFIG_PAX_KERNEXEC
31373+ " movl %%cr0, %0\n"
31374+ " movl %0, %%eax\n"
31375+ " andl $0xFFFEFFFF, %%eax\n"
31376+ " movl %%eax, %%cr0\n"
31377+#endif
31378+
31379+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
31380+
31381+#ifdef CONFIG_PAX_KERNEXEC
31382+ " movl %0, %%cr0\n"
31383+#endif
31384+
31385 " jmp 2b\n"
31386 ".previous\n"
31387 _ASM_EXTABLE(1b, 3b)
31388- : : "r" (from), "r" (to) : "memory");
31389+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
31390
31391 from += 64;
31392 to += 64;
31393diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
31394index f6d13ee..d789440 100644
31395--- a/arch/x86/lib/msr-reg.S
31396+++ b/arch/x86/lib/msr-reg.S
31397@@ -3,6 +3,7 @@
31398 #include <asm/dwarf2.h>
31399 #include <asm/asm.h>
31400 #include <asm/msr.h>
31401+#include <asm/alternative-asm.h>
31402
31403 #ifdef CONFIG_X86_64
31404 /*
31405@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
31406 movl %edi, 28(%r10)
31407 popq_cfi %rbp
31408 popq_cfi %rbx
31409+ pax_force_retaddr
31410 ret
31411 3:
31412 CFI_RESTORE_STATE
31413diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
31414index fc6ba17..d4d989d 100644
31415--- a/arch/x86/lib/putuser.S
31416+++ b/arch/x86/lib/putuser.S
31417@@ -16,7 +16,9 @@
31418 #include <asm/errno.h>
31419 #include <asm/asm.h>
31420 #include <asm/smap.h>
31421-
31422+#include <asm/segment.h>
31423+#include <asm/pgtable.h>
31424+#include <asm/alternative-asm.h>
31425
31426 /*
31427 * __put_user_X
31428@@ -30,57 +32,125 @@
31429 * as they get called from within inline assembly.
31430 */
31431
31432-#define ENTER CFI_STARTPROC ; \
31433- GET_THREAD_INFO(%_ASM_BX)
31434-#define EXIT ASM_CLAC ; \
31435- ret ; \
31436+#define ENTER CFI_STARTPROC
31437+#define EXIT ASM_CLAC ; \
31438+ pax_force_retaddr ; \
31439+ ret ; \
31440 CFI_ENDPROC
31441
31442+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31443+#define _DEST %_ASM_CX,%_ASM_BX
31444+#else
31445+#define _DEST %_ASM_CX
31446+#endif
31447+
31448+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
31449+#define __copyuser_seg gs;
31450+#else
31451+#define __copyuser_seg
31452+#endif
31453+
31454 .text
31455 ENTRY(__put_user_1)
31456 ENTER
31457+
31458+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31459+ GET_THREAD_INFO(%_ASM_BX)
31460 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
31461 jae bad_put_user
31462 ASM_STAC
31463-1: movb %al,(%_ASM_CX)
31464+
31465+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31466+ mov pax_user_shadow_base,%_ASM_BX
31467+ cmp %_ASM_BX,%_ASM_CX
31468+ jb 1234f
31469+ xor %ebx,%ebx
31470+1234:
31471+#endif
31472+
31473+#endif
31474+
31475+1: __copyuser_seg movb %al,(_DEST)
31476 xor %eax,%eax
31477 EXIT
31478 ENDPROC(__put_user_1)
31479
31480 ENTRY(__put_user_2)
31481 ENTER
31482+
31483+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31484+ GET_THREAD_INFO(%_ASM_BX)
31485 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31486 sub $1,%_ASM_BX
31487 cmp %_ASM_BX,%_ASM_CX
31488 jae bad_put_user
31489 ASM_STAC
31490-2: movw %ax,(%_ASM_CX)
31491+
31492+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31493+ mov pax_user_shadow_base,%_ASM_BX
31494+ cmp %_ASM_BX,%_ASM_CX
31495+ jb 1234f
31496+ xor %ebx,%ebx
31497+1234:
31498+#endif
31499+
31500+#endif
31501+
31502+2: __copyuser_seg movw %ax,(_DEST)
31503 xor %eax,%eax
31504 EXIT
31505 ENDPROC(__put_user_2)
31506
31507 ENTRY(__put_user_4)
31508 ENTER
31509+
31510+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31511+ GET_THREAD_INFO(%_ASM_BX)
31512 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31513 sub $3,%_ASM_BX
31514 cmp %_ASM_BX,%_ASM_CX
31515 jae bad_put_user
31516 ASM_STAC
31517-3: movl %eax,(%_ASM_CX)
31518+
31519+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31520+ mov pax_user_shadow_base,%_ASM_BX
31521+ cmp %_ASM_BX,%_ASM_CX
31522+ jb 1234f
31523+ xor %ebx,%ebx
31524+1234:
31525+#endif
31526+
31527+#endif
31528+
31529+3: __copyuser_seg movl %eax,(_DEST)
31530 xor %eax,%eax
31531 EXIT
31532 ENDPROC(__put_user_4)
31533
31534 ENTRY(__put_user_8)
31535 ENTER
31536+
31537+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
31538+ GET_THREAD_INFO(%_ASM_BX)
31539 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
31540 sub $7,%_ASM_BX
31541 cmp %_ASM_BX,%_ASM_CX
31542 jae bad_put_user
31543 ASM_STAC
31544-4: mov %_ASM_AX,(%_ASM_CX)
31545+
31546+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
31547+ mov pax_user_shadow_base,%_ASM_BX
31548+ cmp %_ASM_BX,%_ASM_CX
31549+ jb 1234f
31550+ xor %ebx,%ebx
31551+1234:
31552+#endif
31553+
31554+#endif
31555+
31556+4: __copyuser_seg mov %_ASM_AX,(_DEST)
31557 #ifdef CONFIG_X86_32
31558-5: movl %edx,4(%_ASM_CX)
31559+5: __copyuser_seg movl %edx,4(_DEST)
31560 #endif
31561 xor %eax,%eax
31562 EXIT
31563diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
31564index 1cad221..de671ee 100644
31565--- a/arch/x86/lib/rwlock.S
31566+++ b/arch/x86/lib/rwlock.S
31567@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
31568 FRAME
31569 0: LOCK_PREFIX
31570 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
31571+
31572+#ifdef CONFIG_PAX_REFCOUNT
31573+ jno 1234f
31574+ LOCK_PREFIX
31575+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31576+ int $4
31577+1234:
31578+ _ASM_EXTABLE(1234b, 1234b)
31579+#endif
31580+
31581 1: rep; nop
31582 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
31583 jne 1b
31584 LOCK_PREFIX
31585 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
31586+
31587+#ifdef CONFIG_PAX_REFCOUNT
31588+ jno 1234f
31589+ LOCK_PREFIX
31590+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
31591+ int $4
31592+1234:
31593+ _ASM_EXTABLE(1234b, 1234b)
31594+#endif
31595+
31596 jnz 0b
31597 ENDFRAME
31598+ pax_force_retaddr
31599 ret
31600 CFI_ENDPROC
31601 END(__write_lock_failed)
31602@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
31603 FRAME
31604 0: LOCK_PREFIX
31605 READ_LOCK_SIZE(inc) (%__lock_ptr)
31606+
31607+#ifdef CONFIG_PAX_REFCOUNT
31608+ jno 1234f
31609+ LOCK_PREFIX
31610+ READ_LOCK_SIZE(dec) (%__lock_ptr)
31611+ int $4
31612+1234:
31613+ _ASM_EXTABLE(1234b, 1234b)
31614+#endif
31615+
31616 1: rep; nop
31617 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
31618 js 1b
31619 LOCK_PREFIX
31620 READ_LOCK_SIZE(dec) (%__lock_ptr)
31621+
31622+#ifdef CONFIG_PAX_REFCOUNT
31623+ jno 1234f
31624+ LOCK_PREFIX
31625+ READ_LOCK_SIZE(inc) (%__lock_ptr)
31626+ int $4
31627+1234:
31628+ _ASM_EXTABLE(1234b, 1234b)
31629+#endif
31630+
31631 js 0b
31632 ENDFRAME
31633+ pax_force_retaddr
31634 ret
31635 CFI_ENDPROC
31636 END(__read_lock_failed)
31637diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
31638index 5dff5f0..cadebf4 100644
31639--- a/arch/x86/lib/rwsem.S
31640+++ b/arch/x86/lib/rwsem.S
31641@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
31642 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31643 CFI_RESTORE __ASM_REG(dx)
31644 restore_common_regs
31645+ pax_force_retaddr
31646 ret
31647 CFI_ENDPROC
31648 ENDPROC(call_rwsem_down_read_failed)
31649@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
31650 movq %rax,%rdi
31651 call rwsem_down_write_failed
31652 restore_common_regs
31653+ pax_force_retaddr
31654 ret
31655 CFI_ENDPROC
31656 ENDPROC(call_rwsem_down_write_failed)
31657@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
31658 movq %rax,%rdi
31659 call rwsem_wake
31660 restore_common_regs
31661-1: ret
31662+1: pax_force_retaddr
31663+ ret
31664 CFI_ENDPROC
31665 ENDPROC(call_rwsem_wake)
31666
31667@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
31668 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
31669 CFI_RESTORE __ASM_REG(dx)
31670 restore_common_regs
31671+ pax_force_retaddr
31672 ret
31673 CFI_ENDPROC
31674 ENDPROC(call_rwsem_downgrade_wake)
31675diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
31676index 92d9fea..b2762c8 100644
31677--- a/arch/x86/lib/thunk_64.S
31678+++ b/arch/x86/lib/thunk_64.S
31679@@ -9,6 +9,7 @@
31680 #include <asm/dwarf2.h>
31681 #include <asm/calling.h>
31682 #include <asm/asm.h>
31683+#include <asm/alternative-asm.h>
31684
31685 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
31686 .macro THUNK name, func, put_ret_addr_in_rdi=0
31687@@ -16,11 +17,11 @@
31688 \name:
31689 CFI_STARTPROC
31690
31691- /* this one pushes 9 elems, the next one would be %rIP */
31692- SAVE_ARGS
31693+ /* this one pushes 15+1 elems, the next one would be %rIP */
31694+ SAVE_ARGS 8
31695
31696 .if \put_ret_addr_in_rdi
31697- movq_cfi_restore 9*8, rdi
31698+ movq_cfi_restore RIP, rdi
31699 .endif
31700
31701 call \func
31702@@ -40,9 +41,10 @@
31703
31704 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
31705 CFI_STARTPROC
31706- SAVE_ARGS
31707+ SAVE_ARGS 8
31708 restore:
31709- RESTORE_ARGS
31710+ RESTORE_ARGS 1,8
31711+ pax_force_retaddr
31712 ret
31713 CFI_ENDPROC
31714 _ASM_NOKPROBE(restore)
31715diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
31716index e2f5e21..4b22130 100644
31717--- a/arch/x86/lib/usercopy_32.c
31718+++ b/arch/x86/lib/usercopy_32.c
31719@@ -42,11 +42,13 @@ do { \
31720 int __d0; \
31721 might_fault(); \
31722 __asm__ __volatile__( \
31723+ __COPYUSER_SET_ES \
31724 ASM_STAC "\n" \
31725 "0: rep; stosl\n" \
31726 " movl %2,%0\n" \
31727 "1: rep; stosb\n" \
31728 "2: " ASM_CLAC "\n" \
31729+ __COPYUSER_RESTORE_ES \
31730 ".section .fixup,\"ax\"\n" \
31731 "3: lea 0(%2,%0,4),%0\n" \
31732 " jmp 2b\n" \
31733@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
31734
31735 #ifdef CONFIG_X86_INTEL_USERCOPY
31736 static unsigned long
31737-__copy_user_intel(void __user *to, const void *from, unsigned long size)
31738+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
31739 {
31740 int d0, d1;
31741 __asm__ __volatile__(
31742@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31743 " .align 2,0x90\n"
31744 "3: movl 0(%4), %%eax\n"
31745 "4: movl 4(%4), %%edx\n"
31746- "5: movl %%eax, 0(%3)\n"
31747- "6: movl %%edx, 4(%3)\n"
31748+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
31749+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
31750 "7: movl 8(%4), %%eax\n"
31751 "8: movl 12(%4),%%edx\n"
31752- "9: movl %%eax, 8(%3)\n"
31753- "10: movl %%edx, 12(%3)\n"
31754+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
31755+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
31756 "11: movl 16(%4), %%eax\n"
31757 "12: movl 20(%4), %%edx\n"
31758- "13: movl %%eax, 16(%3)\n"
31759- "14: movl %%edx, 20(%3)\n"
31760+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
31761+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
31762 "15: movl 24(%4), %%eax\n"
31763 "16: movl 28(%4), %%edx\n"
31764- "17: movl %%eax, 24(%3)\n"
31765- "18: movl %%edx, 28(%3)\n"
31766+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
31767+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
31768 "19: movl 32(%4), %%eax\n"
31769 "20: movl 36(%4), %%edx\n"
31770- "21: movl %%eax, 32(%3)\n"
31771- "22: movl %%edx, 36(%3)\n"
31772+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
31773+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
31774 "23: movl 40(%4), %%eax\n"
31775 "24: movl 44(%4), %%edx\n"
31776- "25: movl %%eax, 40(%3)\n"
31777- "26: movl %%edx, 44(%3)\n"
31778+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
31779+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
31780 "27: movl 48(%4), %%eax\n"
31781 "28: movl 52(%4), %%edx\n"
31782- "29: movl %%eax, 48(%3)\n"
31783- "30: movl %%edx, 52(%3)\n"
31784+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
31785+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
31786 "31: movl 56(%4), %%eax\n"
31787 "32: movl 60(%4), %%edx\n"
31788- "33: movl %%eax, 56(%3)\n"
31789- "34: movl %%edx, 60(%3)\n"
31790+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
31791+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
31792 " addl $-64, %0\n"
31793 " addl $64, %4\n"
31794 " addl $64, %3\n"
31795@@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
31796 " shrl $2, %0\n"
31797 " andl $3, %%eax\n"
31798 " cld\n"
31799+ __COPYUSER_SET_ES
31800 "99: rep; movsl\n"
31801 "36: movl %%eax, %0\n"
31802 "37: rep; movsb\n"
31803 "100:\n"
31804+ __COPYUSER_RESTORE_ES
31805+ ".section .fixup,\"ax\"\n"
31806+ "101: lea 0(%%eax,%0,4),%0\n"
31807+ " jmp 100b\n"
31808+ ".previous\n"
31809+ _ASM_EXTABLE(1b,100b)
31810+ _ASM_EXTABLE(2b,100b)
31811+ _ASM_EXTABLE(3b,100b)
31812+ _ASM_EXTABLE(4b,100b)
31813+ _ASM_EXTABLE(5b,100b)
31814+ _ASM_EXTABLE(6b,100b)
31815+ _ASM_EXTABLE(7b,100b)
31816+ _ASM_EXTABLE(8b,100b)
31817+ _ASM_EXTABLE(9b,100b)
31818+ _ASM_EXTABLE(10b,100b)
31819+ _ASM_EXTABLE(11b,100b)
31820+ _ASM_EXTABLE(12b,100b)
31821+ _ASM_EXTABLE(13b,100b)
31822+ _ASM_EXTABLE(14b,100b)
31823+ _ASM_EXTABLE(15b,100b)
31824+ _ASM_EXTABLE(16b,100b)
31825+ _ASM_EXTABLE(17b,100b)
31826+ _ASM_EXTABLE(18b,100b)
31827+ _ASM_EXTABLE(19b,100b)
31828+ _ASM_EXTABLE(20b,100b)
31829+ _ASM_EXTABLE(21b,100b)
31830+ _ASM_EXTABLE(22b,100b)
31831+ _ASM_EXTABLE(23b,100b)
31832+ _ASM_EXTABLE(24b,100b)
31833+ _ASM_EXTABLE(25b,100b)
31834+ _ASM_EXTABLE(26b,100b)
31835+ _ASM_EXTABLE(27b,100b)
31836+ _ASM_EXTABLE(28b,100b)
31837+ _ASM_EXTABLE(29b,100b)
31838+ _ASM_EXTABLE(30b,100b)
31839+ _ASM_EXTABLE(31b,100b)
31840+ _ASM_EXTABLE(32b,100b)
31841+ _ASM_EXTABLE(33b,100b)
31842+ _ASM_EXTABLE(34b,100b)
31843+ _ASM_EXTABLE(35b,100b)
31844+ _ASM_EXTABLE(36b,100b)
31845+ _ASM_EXTABLE(37b,100b)
31846+ _ASM_EXTABLE(99b,101b)
31847+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
31848+ : "1"(to), "2"(from), "0"(size)
31849+ : "eax", "edx", "memory");
31850+ return size;
31851+}
31852+
31853+static unsigned long
31854+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
31855+{
31856+ int d0, d1;
31857+ __asm__ __volatile__(
31858+ " .align 2,0x90\n"
31859+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
31860+ " cmpl $67, %0\n"
31861+ " jbe 3f\n"
31862+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
31863+ " .align 2,0x90\n"
31864+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
31865+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
31866+ "5: movl %%eax, 0(%3)\n"
31867+ "6: movl %%edx, 4(%3)\n"
31868+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
31869+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
31870+ "9: movl %%eax, 8(%3)\n"
31871+ "10: movl %%edx, 12(%3)\n"
31872+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
31873+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
31874+ "13: movl %%eax, 16(%3)\n"
31875+ "14: movl %%edx, 20(%3)\n"
31876+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
31877+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
31878+ "17: movl %%eax, 24(%3)\n"
31879+ "18: movl %%edx, 28(%3)\n"
31880+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
31881+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
31882+ "21: movl %%eax, 32(%3)\n"
31883+ "22: movl %%edx, 36(%3)\n"
31884+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
31885+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
31886+ "25: movl %%eax, 40(%3)\n"
31887+ "26: movl %%edx, 44(%3)\n"
31888+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
31889+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
31890+ "29: movl %%eax, 48(%3)\n"
31891+ "30: movl %%edx, 52(%3)\n"
31892+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
31893+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
31894+ "33: movl %%eax, 56(%3)\n"
31895+ "34: movl %%edx, 60(%3)\n"
31896+ " addl $-64, %0\n"
31897+ " addl $64, %4\n"
31898+ " addl $64, %3\n"
31899+ " cmpl $63, %0\n"
31900+ " ja 1b\n"
31901+ "35: movl %0, %%eax\n"
31902+ " shrl $2, %0\n"
31903+ " andl $3, %%eax\n"
31904+ " cld\n"
31905+ "99: rep; "__copyuser_seg" movsl\n"
31906+ "36: movl %%eax, %0\n"
31907+ "37: rep; "__copyuser_seg" movsb\n"
31908+ "100:\n"
31909 ".section .fixup,\"ax\"\n"
31910 "101: lea 0(%%eax,%0,4),%0\n"
31911 " jmp 100b\n"
31912@@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31913 int d0, d1;
31914 __asm__ __volatile__(
31915 " .align 2,0x90\n"
31916- "0: movl 32(%4), %%eax\n"
31917+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31918 " cmpl $67, %0\n"
31919 " jbe 2f\n"
31920- "1: movl 64(%4), %%eax\n"
31921+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31922 " .align 2,0x90\n"
31923- "2: movl 0(%4), %%eax\n"
31924- "21: movl 4(%4), %%edx\n"
31925+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31926+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31927 " movl %%eax, 0(%3)\n"
31928 " movl %%edx, 4(%3)\n"
31929- "3: movl 8(%4), %%eax\n"
31930- "31: movl 12(%4),%%edx\n"
31931+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
31932+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
31933 " movl %%eax, 8(%3)\n"
31934 " movl %%edx, 12(%3)\n"
31935- "4: movl 16(%4), %%eax\n"
31936- "41: movl 20(%4), %%edx\n"
31937+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
31938+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
31939 " movl %%eax, 16(%3)\n"
31940 " movl %%edx, 20(%3)\n"
31941- "10: movl 24(%4), %%eax\n"
31942- "51: movl 28(%4), %%edx\n"
31943+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
31944+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
31945 " movl %%eax, 24(%3)\n"
31946 " movl %%edx, 28(%3)\n"
31947- "11: movl 32(%4), %%eax\n"
31948- "61: movl 36(%4), %%edx\n"
31949+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
31950+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
31951 " movl %%eax, 32(%3)\n"
31952 " movl %%edx, 36(%3)\n"
31953- "12: movl 40(%4), %%eax\n"
31954- "71: movl 44(%4), %%edx\n"
31955+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
31956+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
31957 " movl %%eax, 40(%3)\n"
31958 " movl %%edx, 44(%3)\n"
31959- "13: movl 48(%4), %%eax\n"
31960- "81: movl 52(%4), %%edx\n"
31961+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
31962+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
31963 " movl %%eax, 48(%3)\n"
31964 " movl %%edx, 52(%3)\n"
31965- "14: movl 56(%4), %%eax\n"
31966- "91: movl 60(%4), %%edx\n"
31967+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
31968+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
31969 " movl %%eax, 56(%3)\n"
31970 " movl %%edx, 60(%3)\n"
31971 " addl $-64, %0\n"
31972@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
31973 " shrl $2, %0\n"
31974 " andl $3, %%eax\n"
31975 " cld\n"
31976- "6: rep; movsl\n"
31977+ "6: rep; "__copyuser_seg" movsl\n"
31978 " movl %%eax,%0\n"
31979- "7: rep; movsb\n"
31980+ "7: rep; "__copyuser_seg" movsb\n"
31981 "8:\n"
31982 ".section .fixup,\"ax\"\n"
31983 "9: lea 0(%%eax,%0,4),%0\n"
31984@@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
31985
31986 __asm__ __volatile__(
31987 " .align 2,0x90\n"
31988- "0: movl 32(%4), %%eax\n"
31989+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
31990 " cmpl $67, %0\n"
31991 " jbe 2f\n"
31992- "1: movl 64(%4), %%eax\n"
31993+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
31994 " .align 2,0x90\n"
31995- "2: movl 0(%4), %%eax\n"
31996- "21: movl 4(%4), %%edx\n"
31997+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
31998+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
31999 " movnti %%eax, 0(%3)\n"
32000 " movnti %%edx, 4(%3)\n"
32001- "3: movl 8(%4), %%eax\n"
32002- "31: movl 12(%4),%%edx\n"
32003+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
32004+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
32005 " movnti %%eax, 8(%3)\n"
32006 " movnti %%edx, 12(%3)\n"
32007- "4: movl 16(%4), %%eax\n"
32008- "41: movl 20(%4), %%edx\n"
32009+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
32010+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
32011 " movnti %%eax, 16(%3)\n"
32012 " movnti %%edx, 20(%3)\n"
32013- "10: movl 24(%4), %%eax\n"
32014- "51: movl 28(%4), %%edx\n"
32015+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
32016+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
32017 " movnti %%eax, 24(%3)\n"
32018 " movnti %%edx, 28(%3)\n"
32019- "11: movl 32(%4), %%eax\n"
32020- "61: movl 36(%4), %%edx\n"
32021+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
32022+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
32023 " movnti %%eax, 32(%3)\n"
32024 " movnti %%edx, 36(%3)\n"
32025- "12: movl 40(%4), %%eax\n"
32026- "71: movl 44(%4), %%edx\n"
32027+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
32028+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
32029 " movnti %%eax, 40(%3)\n"
32030 " movnti %%edx, 44(%3)\n"
32031- "13: movl 48(%4), %%eax\n"
32032- "81: movl 52(%4), %%edx\n"
32033+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
32034+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
32035 " movnti %%eax, 48(%3)\n"
32036 " movnti %%edx, 52(%3)\n"
32037- "14: movl 56(%4), %%eax\n"
32038- "91: movl 60(%4), %%edx\n"
32039+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
32040+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
32041 " movnti %%eax, 56(%3)\n"
32042 " movnti %%edx, 60(%3)\n"
32043 " addl $-64, %0\n"
32044@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
32045 " shrl $2, %0\n"
32046 " andl $3, %%eax\n"
32047 " cld\n"
32048- "6: rep; movsl\n"
32049+ "6: rep; "__copyuser_seg" movsl\n"
32050 " movl %%eax,%0\n"
32051- "7: rep; movsb\n"
32052+ "7: rep; "__copyuser_seg" movsb\n"
32053 "8:\n"
32054 ".section .fixup,\"ax\"\n"
32055 "9: lea 0(%%eax,%0,4),%0\n"
32056@@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
32057
32058 __asm__ __volatile__(
32059 " .align 2,0x90\n"
32060- "0: movl 32(%4), %%eax\n"
32061+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
32062 " cmpl $67, %0\n"
32063 " jbe 2f\n"
32064- "1: movl 64(%4), %%eax\n"
32065+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
32066 " .align 2,0x90\n"
32067- "2: movl 0(%4), %%eax\n"
32068- "21: movl 4(%4), %%edx\n"
32069+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
32070+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
32071 " movnti %%eax, 0(%3)\n"
32072 " movnti %%edx, 4(%3)\n"
32073- "3: movl 8(%4), %%eax\n"
32074- "31: movl 12(%4),%%edx\n"
32075+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
32076+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
32077 " movnti %%eax, 8(%3)\n"
32078 " movnti %%edx, 12(%3)\n"
32079- "4: movl 16(%4), %%eax\n"
32080- "41: movl 20(%4), %%edx\n"
32081+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
32082+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
32083 " movnti %%eax, 16(%3)\n"
32084 " movnti %%edx, 20(%3)\n"
32085- "10: movl 24(%4), %%eax\n"
32086- "51: movl 28(%4), %%edx\n"
32087+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
32088+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
32089 " movnti %%eax, 24(%3)\n"
32090 " movnti %%edx, 28(%3)\n"
32091- "11: movl 32(%4), %%eax\n"
32092- "61: movl 36(%4), %%edx\n"
32093+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
32094+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
32095 " movnti %%eax, 32(%3)\n"
32096 " movnti %%edx, 36(%3)\n"
32097- "12: movl 40(%4), %%eax\n"
32098- "71: movl 44(%4), %%edx\n"
32099+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
32100+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
32101 " movnti %%eax, 40(%3)\n"
32102 " movnti %%edx, 44(%3)\n"
32103- "13: movl 48(%4), %%eax\n"
32104- "81: movl 52(%4), %%edx\n"
32105+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
32106+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
32107 " movnti %%eax, 48(%3)\n"
32108 " movnti %%edx, 52(%3)\n"
32109- "14: movl 56(%4), %%eax\n"
32110- "91: movl 60(%4), %%edx\n"
32111+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
32112+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
32113 " movnti %%eax, 56(%3)\n"
32114 " movnti %%edx, 60(%3)\n"
32115 " addl $-64, %0\n"
32116@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
32117 " shrl $2, %0\n"
32118 " andl $3, %%eax\n"
32119 " cld\n"
32120- "6: rep; movsl\n"
32121+ "6: rep; "__copyuser_seg" movsl\n"
32122 " movl %%eax,%0\n"
32123- "7: rep; movsb\n"
32124+ "7: rep; "__copyuser_seg" movsb\n"
32125 "8:\n"
32126 ".section .fixup,\"ax\"\n"
32127 "9: lea 0(%%eax,%0,4),%0\n"
32128@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
32129 */
32130 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
32131 unsigned long size);
32132-unsigned long __copy_user_intel(void __user *to, const void *from,
32133+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
32134+ unsigned long size);
32135+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
32136 unsigned long size);
32137 unsigned long __copy_user_zeroing_intel_nocache(void *to,
32138 const void __user *from, unsigned long size);
32139 #endif /* CONFIG_X86_INTEL_USERCOPY */
32140
32141 /* Generic arbitrary sized copy. */
32142-#define __copy_user(to, from, size) \
32143+#define __copy_user(to, from, size, prefix, set, restore) \
32144 do { \
32145 int __d0, __d1, __d2; \
32146 __asm__ __volatile__( \
32147+ set \
32148 " cmp $7,%0\n" \
32149 " jbe 1f\n" \
32150 " movl %1,%0\n" \
32151 " negl %0\n" \
32152 " andl $7,%0\n" \
32153 " subl %0,%3\n" \
32154- "4: rep; movsb\n" \
32155+ "4: rep; "prefix"movsb\n" \
32156 " movl %3,%0\n" \
32157 " shrl $2,%0\n" \
32158 " andl $3,%3\n" \
32159 " .align 2,0x90\n" \
32160- "0: rep; movsl\n" \
32161+ "0: rep; "prefix"movsl\n" \
32162 " movl %3,%0\n" \
32163- "1: rep; movsb\n" \
32164+ "1: rep; "prefix"movsb\n" \
32165 "2:\n" \
32166+ restore \
32167 ".section .fixup,\"ax\"\n" \
32168 "5: addl %3,%0\n" \
32169 " jmp 2b\n" \
32170@@ -538,14 +650,14 @@ do { \
32171 " negl %0\n" \
32172 " andl $7,%0\n" \
32173 " subl %0,%3\n" \
32174- "4: rep; movsb\n" \
32175+ "4: rep; "__copyuser_seg"movsb\n" \
32176 " movl %3,%0\n" \
32177 " shrl $2,%0\n" \
32178 " andl $3,%3\n" \
32179 " .align 2,0x90\n" \
32180- "0: rep; movsl\n" \
32181+ "0: rep; "__copyuser_seg"movsl\n" \
32182 " movl %3,%0\n" \
32183- "1: rep; movsb\n" \
32184+ "1: rep; "__copyuser_seg"movsb\n" \
32185 "2:\n" \
32186 ".section .fixup,\"ax\"\n" \
32187 "5: addl %3,%0\n" \
32188@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
32189 {
32190 stac();
32191 if (movsl_is_ok(to, from, n))
32192- __copy_user(to, from, n);
32193+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
32194 else
32195- n = __copy_user_intel(to, from, n);
32196+ n = __generic_copy_to_user_intel(to, from, n);
32197 clac();
32198 return n;
32199 }
32200@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
32201 {
32202 stac();
32203 if (movsl_is_ok(to, from, n))
32204- __copy_user(to, from, n);
32205+ __copy_user(to, from, n, __copyuser_seg, "", "");
32206 else
32207- n = __copy_user_intel((void __user *)to,
32208- (const void *)from, n);
32209+ n = __generic_copy_from_user_intel(to, from, n);
32210 clac();
32211 return n;
32212 }
32213@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
32214 if (n > 64 && cpu_has_xmm2)
32215 n = __copy_user_intel_nocache(to, from, n);
32216 else
32217- __copy_user(to, from, n);
32218+ __copy_user(to, from, n, __copyuser_seg, "", "");
32219 #else
32220- __copy_user(to, from, n);
32221+ __copy_user(to, from, n, __copyuser_seg, "", "");
32222 #endif
32223 clac();
32224 return n;
32225 }
32226 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
32227
32228-/**
32229- * copy_to_user: - Copy a block of data into user space.
32230- * @to: Destination address, in user space.
32231- * @from: Source address, in kernel space.
32232- * @n: Number of bytes to copy.
32233- *
32234- * Context: User context only. This function may sleep.
32235- *
32236- * Copy data from kernel space to user space.
32237- *
32238- * Returns number of bytes that could not be copied.
32239- * On success, this will be zero.
32240- */
32241-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
32242+#ifdef CONFIG_PAX_MEMORY_UDEREF
32243+void __set_fs(mm_segment_t x)
32244 {
32245- if (access_ok(VERIFY_WRITE, to, n))
32246- n = __copy_to_user(to, from, n);
32247- return n;
32248+ switch (x.seg) {
32249+ case 0:
32250+ loadsegment(gs, 0);
32251+ break;
32252+ case TASK_SIZE_MAX:
32253+ loadsegment(gs, __USER_DS);
32254+ break;
32255+ case -1UL:
32256+ loadsegment(gs, __KERNEL_DS);
32257+ break;
32258+ default:
32259+ BUG();
32260+ }
32261 }
32262-EXPORT_SYMBOL(_copy_to_user);
32263+EXPORT_SYMBOL(__set_fs);
32264
32265-/**
32266- * copy_from_user: - Copy a block of data from user space.
32267- * @to: Destination address, in kernel space.
32268- * @from: Source address, in user space.
32269- * @n: Number of bytes to copy.
32270- *
32271- * Context: User context only. This function may sleep.
32272- *
32273- * Copy data from user space to kernel space.
32274- *
32275- * Returns number of bytes that could not be copied.
32276- * On success, this will be zero.
32277- *
32278- * If some data could not be copied, this function will pad the copied
32279- * data to the requested size using zero bytes.
32280- */
32281-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
32282+void set_fs(mm_segment_t x)
32283 {
32284- if (access_ok(VERIFY_READ, from, n))
32285- n = __copy_from_user(to, from, n);
32286- else
32287- memset(to, 0, n);
32288- return n;
32289+ current_thread_info()->addr_limit = x;
32290+ __set_fs(x);
32291 }
32292-EXPORT_SYMBOL(_copy_from_user);
32293+EXPORT_SYMBOL(set_fs);
32294+#endif
32295diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
32296index c905e89..01ab928 100644
32297--- a/arch/x86/lib/usercopy_64.c
32298+++ b/arch/x86/lib/usercopy_64.c
32299@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
32300 might_fault();
32301 /* no memory constraint because it doesn't change any memory gcc knows
32302 about */
32303+ pax_open_userland();
32304 stac();
32305 asm volatile(
32306 " testq %[size8],%[size8]\n"
32307@@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
32308 _ASM_EXTABLE(0b,3b)
32309 _ASM_EXTABLE(1b,2b)
32310 : [size8] "=&c"(size), [dst] "=&D" (__d0)
32311- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
32312+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
32313 [zero] "r" (0UL), [eight] "r" (8UL));
32314 clac();
32315+ pax_close_userland();
32316 return size;
32317 }
32318 EXPORT_SYMBOL(__clear_user);
32319@@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
32320 }
32321 EXPORT_SYMBOL(clear_user);
32322
32323-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
32324+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
32325 {
32326- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
32327- return copy_user_generic((__force void *)to, (__force void *)from, len);
32328- }
32329- return len;
32330+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
32331+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
32332+ return len;
32333 }
32334 EXPORT_SYMBOL(copy_in_user);
32335
32336@@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
32337 * it is not necessary to optimize tail handling.
32338 */
32339 __visible unsigned long
32340-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
32341+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
32342 {
32343 char c;
32344 unsigned zero_len;
32345
32346+ clac();
32347+ pax_close_userland();
32348 for (; len; --len, to++) {
32349 if (__get_user_nocheck(c, from++, sizeof(char)))
32350 break;
32351@@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
32352 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
32353 if (__put_user_nocheck(c, to++, sizeof(char)))
32354 break;
32355- clac();
32356 return len;
32357 }
32358diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
32359index 6a19ad9..1c48f9a 100644
32360--- a/arch/x86/mm/Makefile
32361+++ b/arch/x86/mm/Makefile
32362@@ -30,3 +30,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
32363 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
32364
32365 obj-$(CONFIG_MEMTEST) += memtest.o
32366+
32367+quote:="
32368+obj-$(CONFIG_X86_64) += uderef_64.o
32369+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
32370diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
32371index 903ec1e..c4166b2 100644
32372--- a/arch/x86/mm/extable.c
32373+++ b/arch/x86/mm/extable.c
32374@@ -6,12 +6,24 @@
32375 static inline unsigned long
32376 ex_insn_addr(const struct exception_table_entry *x)
32377 {
32378- return (unsigned long)&x->insn + x->insn;
32379+ unsigned long reloc = 0;
32380+
32381+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32382+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
32383+#endif
32384+
32385+ return (unsigned long)&x->insn + x->insn + reloc;
32386 }
32387 static inline unsigned long
32388 ex_fixup_addr(const struct exception_table_entry *x)
32389 {
32390- return (unsigned long)&x->fixup + x->fixup;
32391+ unsigned long reloc = 0;
32392+
32393+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32394+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
32395+#endif
32396+
32397+ return (unsigned long)&x->fixup + x->fixup + reloc;
32398 }
32399
32400 int fixup_exception(struct pt_regs *regs)
32401@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
32402 unsigned long new_ip;
32403
32404 #ifdef CONFIG_PNPBIOS
32405- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
32406+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
32407 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
32408 extern u32 pnp_bios_is_utter_crap;
32409 pnp_bios_is_utter_crap = 1;
32410@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
32411 i += 4;
32412 p->fixup -= i;
32413 i += 4;
32414+
32415+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32416+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
32417+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
32418+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
32419+#endif
32420+
32421 }
32422 }
32423
32424diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
32425index a241946..d7a04cf 100644
32426--- a/arch/x86/mm/fault.c
32427+++ b/arch/x86/mm/fault.c
32428@@ -14,12 +14,19 @@
32429 #include <linux/hugetlb.h> /* hstate_index_to_shift */
32430 #include <linux/prefetch.h> /* prefetchw */
32431 #include <linux/context_tracking.h> /* exception_enter(), ... */
32432+#include <linux/unistd.h>
32433+#include <linux/compiler.h>
32434
32435 #include <asm/traps.h> /* dotraplinkage, ... */
32436 #include <asm/pgalloc.h> /* pgd_*(), ... */
32437 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
32438 #include <asm/fixmap.h> /* VSYSCALL_ADDR */
32439 #include <asm/vsyscall.h> /* emulate_vsyscall */
32440+#include <asm/tlbflush.h>
32441+
32442+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32443+#include <asm/stacktrace.h>
32444+#endif
32445
32446 #define CREATE_TRACE_POINTS
32447 #include <asm/trace/exceptions.h>
32448@@ -60,7 +67,7 @@ static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
32449 int ret = 0;
32450
32451 /* kprobe_running() needs smp_processor_id() */
32452- if (kprobes_built_in() && !user_mode_vm(regs)) {
32453+ if (kprobes_built_in() && !user_mode(regs)) {
32454 preempt_disable();
32455 if (kprobe_running() && kprobe_fault_handler(regs, 14))
32456 ret = 1;
32457@@ -121,7 +128,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
32458 return !instr_lo || (instr_lo>>1) == 1;
32459 case 0x00:
32460 /* Prefetch instruction is 0x0F0D or 0x0F18 */
32461- if (probe_kernel_address(instr, opcode))
32462+ if (user_mode(regs)) {
32463+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
32464+ return 0;
32465+ } else if (probe_kernel_address(instr, opcode))
32466 return 0;
32467
32468 *prefetch = (instr_lo == 0xF) &&
32469@@ -155,7 +165,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
32470 while (instr < max_instr) {
32471 unsigned char opcode;
32472
32473- if (probe_kernel_address(instr, opcode))
32474+ if (user_mode(regs)) {
32475+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
32476+ break;
32477+ } else if (probe_kernel_address(instr, opcode))
32478 break;
32479
32480 instr++;
32481@@ -186,6 +199,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
32482 force_sig_info(si_signo, &info, tsk);
32483 }
32484
32485+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32486+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
32487+#endif
32488+
32489+#ifdef CONFIG_PAX_EMUTRAMP
32490+static int pax_handle_fetch_fault(struct pt_regs *regs);
32491+#endif
32492+
32493+#ifdef CONFIG_PAX_PAGEEXEC
32494+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
32495+{
32496+ pgd_t *pgd;
32497+ pud_t *pud;
32498+ pmd_t *pmd;
32499+
32500+ pgd = pgd_offset(mm, address);
32501+ if (!pgd_present(*pgd))
32502+ return NULL;
32503+ pud = pud_offset(pgd, address);
32504+ if (!pud_present(*pud))
32505+ return NULL;
32506+ pmd = pmd_offset(pud, address);
32507+ if (!pmd_present(*pmd))
32508+ return NULL;
32509+ return pmd;
32510+}
32511+#endif
32512+
32513 DEFINE_SPINLOCK(pgd_lock);
32514 LIST_HEAD(pgd_list);
32515
32516@@ -236,10 +277,27 @@ void vmalloc_sync_all(void)
32517 for (address = VMALLOC_START & PMD_MASK;
32518 address >= TASK_SIZE && address < FIXADDR_TOP;
32519 address += PMD_SIZE) {
32520+
32521+#ifdef CONFIG_PAX_PER_CPU_PGD
32522+ unsigned long cpu;
32523+#else
32524 struct page *page;
32525+#endif
32526
32527 spin_lock(&pgd_lock);
32528+
32529+#ifdef CONFIG_PAX_PER_CPU_PGD
32530+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32531+ pgd_t *pgd = get_cpu_pgd(cpu, user);
32532+ pmd_t *ret;
32533+
32534+ ret = vmalloc_sync_one(pgd, address);
32535+ if (!ret)
32536+ break;
32537+ pgd = get_cpu_pgd(cpu, kernel);
32538+#else
32539 list_for_each_entry(page, &pgd_list, lru) {
32540+ pgd_t *pgd;
32541 spinlock_t *pgt_lock;
32542 pmd_t *ret;
32543
32544@@ -247,8 +305,14 @@ void vmalloc_sync_all(void)
32545 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
32546
32547 spin_lock(pgt_lock);
32548- ret = vmalloc_sync_one(page_address(page), address);
32549+ pgd = page_address(page);
32550+#endif
32551+
32552+ ret = vmalloc_sync_one(pgd, address);
32553+
32554+#ifndef CONFIG_PAX_PER_CPU_PGD
32555 spin_unlock(pgt_lock);
32556+#endif
32557
32558 if (!ret)
32559 break;
32560@@ -282,6 +346,12 @@ static noinline int vmalloc_fault(unsigned long address)
32561 * an interrupt in the middle of a task switch..
32562 */
32563 pgd_paddr = read_cr3();
32564+
32565+#ifdef CONFIG_PAX_PER_CPU_PGD
32566+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
32567+ vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
32568+#endif
32569+
32570 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
32571 if (!pmd_k)
32572 return -1;
32573@@ -378,11 +448,25 @@ static noinline int vmalloc_fault(unsigned long address)
32574 * happen within a race in page table update. In the later
32575 * case just flush:
32576 */
32577- pgd = pgd_offset(current->active_mm, address);
32578+
32579 pgd_ref = pgd_offset_k(address);
32580 if (pgd_none(*pgd_ref))
32581 return -1;
32582
32583+#ifdef CONFIG_PAX_PER_CPU_PGD
32584+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
32585+ pgd = pgd_offset_cpu(smp_processor_id(), user, address);
32586+ if (pgd_none(*pgd)) {
32587+ set_pgd(pgd, *pgd_ref);
32588+ arch_flush_lazy_mmu_mode();
32589+ } else {
32590+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
32591+ }
32592+ pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
32593+#else
32594+ pgd = pgd_offset(current->active_mm, address);
32595+#endif
32596+
32597 if (pgd_none(*pgd)) {
32598 set_pgd(pgd, *pgd_ref);
32599 arch_flush_lazy_mmu_mode();
32600@@ -549,7 +633,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
32601 static int is_errata100(struct pt_regs *regs, unsigned long address)
32602 {
32603 #ifdef CONFIG_X86_64
32604- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
32605+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
32606 return 1;
32607 #endif
32608 return 0;
32609@@ -576,9 +660,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
32610 }
32611
32612 static const char nx_warning[] = KERN_CRIT
32613-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
32614+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
32615 static const char smep_warning[] = KERN_CRIT
32616-"unable to execute userspace code (SMEP?) (uid: %d)\n";
32617+"unable to execute userspace code (SMEP?) (uid: %d, task: %s, pid: %d)\n";
32618
32619 static void
32620 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32621@@ -587,7 +671,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32622 if (!oops_may_print())
32623 return;
32624
32625- if (error_code & PF_INSTR) {
32626+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
32627 unsigned int level;
32628 pgd_t *pgd;
32629 pte_t *pte;
32630@@ -598,13 +682,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
32631 pte = lookup_address_in_pgd(pgd, address, &level);
32632
32633 if (pte && pte_present(*pte) && !pte_exec(*pte))
32634- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
32635+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32636 if (pte && pte_present(*pte) && pte_exec(*pte) &&
32637 (pgd_flags(*pgd) & _PAGE_USER) &&
32638 (read_cr4() & X86_CR4_SMEP))
32639- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
32640+ printk(smep_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
32641 }
32642
32643+#ifdef CONFIG_PAX_KERNEXEC
32644+ if (init_mm.start_code <= address && address < init_mm.end_code) {
32645+ if (current->signal->curr_ip)
32646+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
32647+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
32648+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32649+ else
32650+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
32651+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
32652+ }
32653+#endif
32654+
32655 printk(KERN_ALERT "BUG: unable to handle kernel ");
32656 if (address < PAGE_SIZE)
32657 printk(KERN_CONT "NULL pointer dereference");
32658@@ -785,6 +881,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
32659 return;
32660 }
32661 #endif
32662+
32663+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32664+ if (pax_is_fetch_fault(regs, error_code, address)) {
32665+
32666+#ifdef CONFIG_PAX_EMUTRAMP
32667+ switch (pax_handle_fetch_fault(regs)) {
32668+ case 2:
32669+ return;
32670+ }
32671+#endif
32672+
32673+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32674+ do_group_exit(SIGKILL);
32675+ }
32676+#endif
32677+
32678 /* Kernel addresses are always protection faults: */
32679 if (address >= TASK_SIZE)
32680 error_code |= PF_PROT;
32681@@ -870,7 +982,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
32682 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
32683 printk(KERN_ERR
32684 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
32685- tsk->comm, tsk->pid, address);
32686+ tsk->comm, task_pid_nr(tsk), address);
32687 code = BUS_MCEERR_AR;
32688 }
32689 #endif
32690@@ -924,6 +1036,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
32691 return 1;
32692 }
32693
32694+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32695+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
32696+{
32697+ pte_t *pte;
32698+ pmd_t *pmd;
32699+ spinlock_t *ptl;
32700+ unsigned char pte_mask;
32701+
32702+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
32703+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
32704+ return 0;
32705+
32706+ /* PaX: it's our fault, let's handle it if we can */
32707+
32708+ /* PaX: take a look at read faults before acquiring any locks */
32709+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
32710+ /* instruction fetch attempt from a protected page in user mode */
32711+ up_read(&mm->mmap_sem);
32712+
32713+#ifdef CONFIG_PAX_EMUTRAMP
32714+ switch (pax_handle_fetch_fault(regs)) {
32715+ case 2:
32716+ return 1;
32717+ }
32718+#endif
32719+
32720+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
32721+ do_group_exit(SIGKILL);
32722+ }
32723+
32724+ pmd = pax_get_pmd(mm, address);
32725+ if (unlikely(!pmd))
32726+ return 0;
32727+
32728+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
32729+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
32730+ pte_unmap_unlock(pte, ptl);
32731+ return 0;
32732+ }
32733+
32734+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
32735+ /* write attempt to a protected page in user mode */
32736+ pte_unmap_unlock(pte, ptl);
32737+ return 0;
32738+ }
32739+
32740+#ifdef CONFIG_SMP
32741+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
32742+#else
32743+ if (likely(address > get_limit(regs->cs)))
32744+#endif
32745+ {
32746+ set_pte(pte, pte_mkread(*pte));
32747+ __flush_tlb_one(address);
32748+ pte_unmap_unlock(pte, ptl);
32749+ up_read(&mm->mmap_sem);
32750+ return 1;
32751+ }
32752+
32753+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
32754+
32755+ /*
32756+ * PaX: fill DTLB with user rights and retry
32757+ */
32758+ __asm__ __volatile__ (
32759+ "orb %2,(%1)\n"
32760+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
32761+/*
32762+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
32763+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
32764+ * page fault when examined during a TLB load attempt. this is true not only
32765+ * for PTEs holding a non-present entry but also present entries that will
32766+ * raise a page fault (such as those set up by PaX, or the copy-on-write
32767+ * mechanism). in effect it means that we do *not* need to flush the TLBs
32768+ * for our target pages since their PTEs are simply not in the TLBs at all.
32769+
32770+ * the best thing in omitting it is that we gain around 15-20% speed in the
32771+ * fast path of the page fault handler and can get rid of tracing since we
32772+ * can no longer flush unintended entries.
32773+ */
32774+ "invlpg (%0)\n"
32775+#endif
32776+ __copyuser_seg"testb $0,(%0)\n"
32777+ "xorb %3,(%1)\n"
32778+ :
32779+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
32780+ : "memory", "cc");
32781+ pte_unmap_unlock(pte, ptl);
32782+ up_read(&mm->mmap_sem);
32783+ return 1;
32784+}
32785+#endif
32786+
32787 /*
32788 * Handle a spurious fault caused by a stale TLB entry.
32789 *
32790@@ -991,6 +1196,9 @@ int show_unhandled_signals = 1;
32791 static inline int
32792 access_error(unsigned long error_code, struct vm_area_struct *vma)
32793 {
32794+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
32795+ return 1;
32796+
32797 if (error_code & PF_WRITE) {
32798 /* write, present and write, not present: */
32799 if (unlikely(!(vma->vm_flags & VM_WRITE)))
32800@@ -1025,7 +1233,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
32801 if (error_code & PF_USER)
32802 return false;
32803
32804- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
32805+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
32806 return false;
32807
32808 return true;
32809@@ -1053,6 +1261,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32810 tsk = current;
32811 mm = tsk->mm;
32812
32813+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32814+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
32815+ if (!search_exception_tables(regs->ip)) {
32816+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32817+ bad_area_nosemaphore(regs, error_code, address);
32818+ return;
32819+ }
32820+ if (address < pax_user_shadow_base) {
32821+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
32822+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
32823+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
32824+ } else
32825+ address -= pax_user_shadow_base;
32826+ }
32827+#endif
32828+
32829 /*
32830 * Detect and handle instructions that would cause a page fault for
32831 * both a tracked kernel page and a userspace page.
32832@@ -1130,7 +1354,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
32833 * User-mode registers count as a user access even for any
32834 * potential system fault or CPU buglet:
32835 */
32836- if (user_mode_vm(regs)) {
32837+ if (user_mode(regs)) {
32838 local_irq_enable();
32839 error_code |= PF_USER;
32840 flags |= FAULT_FLAG_USER;
32841@@ -1177,6 +1401,11 @@ retry:
32842 might_sleep();
32843 }
32844
32845+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
32846+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
32847+ return;
32848+#endif
32849+
32850 vma = find_vma(mm, address);
32851 if (unlikely(!vma)) {
32852 bad_area(regs, error_code, address);
32853@@ -1188,18 +1417,24 @@ retry:
32854 bad_area(regs, error_code, address);
32855 return;
32856 }
32857- if (error_code & PF_USER) {
32858- /*
32859- * Accessing the stack below %sp is always a bug.
32860- * The large cushion allows instructions like enter
32861- * and pusha to work. ("enter $65535, $31" pushes
32862- * 32 pointers and then decrements %sp by 65535.)
32863- */
32864- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
32865- bad_area(regs, error_code, address);
32866- return;
32867- }
32868+ /*
32869+ * Accessing the stack below %sp is always a bug.
32870+ * The large cushion allows instructions like enter
32871+ * and pusha to work. ("enter $65535, $31" pushes
32872+ * 32 pointers and then decrements %sp by 65535.)
32873+ */
32874+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
32875+ bad_area(regs, error_code, address);
32876+ return;
32877 }
32878+
32879+#ifdef CONFIG_PAX_SEGMEXEC
32880+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
32881+ bad_area(regs, error_code, address);
32882+ return;
32883+ }
32884+#endif
32885+
32886 if (unlikely(expand_stack(vma, address))) {
32887 bad_area(regs, error_code, address);
32888 return;
32889@@ -1316,3 +1551,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
32890 }
32891 NOKPROBE_SYMBOL(trace_do_page_fault);
32892 #endif /* CONFIG_TRACING */
32893+
32894+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
32895+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
32896+{
32897+ struct mm_struct *mm = current->mm;
32898+ unsigned long ip = regs->ip;
32899+
32900+ if (v8086_mode(regs))
32901+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
32902+
32903+#ifdef CONFIG_PAX_PAGEEXEC
32904+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
32905+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
32906+ return true;
32907+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
32908+ return true;
32909+ return false;
32910+ }
32911+#endif
32912+
32913+#ifdef CONFIG_PAX_SEGMEXEC
32914+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
32915+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
32916+ return true;
32917+ return false;
32918+ }
32919+#endif
32920+
32921+ return false;
32922+}
32923+#endif
32924+
32925+#ifdef CONFIG_PAX_EMUTRAMP
32926+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
32927+{
32928+ int err;
32929+
32930+ do { /* PaX: libffi trampoline emulation */
32931+ unsigned char mov, jmp;
32932+ unsigned int addr1, addr2;
32933+
32934+#ifdef CONFIG_X86_64
32935+ if ((regs->ip + 9) >> 32)
32936+ break;
32937+#endif
32938+
32939+ err = get_user(mov, (unsigned char __user *)regs->ip);
32940+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32941+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32942+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32943+
32944+ if (err)
32945+ break;
32946+
32947+ if (mov == 0xB8 && jmp == 0xE9) {
32948+ regs->ax = addr1;
32949+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
32950+ return 2;
32951+ }
32952+ } while (0);
32953+
32954+ do { /* PaX: gcc trampoline emulation #1 */
32955+ unsigned char mov1, mov2;
32956+ unsigned short jmp;
32957+ unsigned int addr1, addr2;
32958+
32959+#ifdef CONFIG_X86_64
32960+ if ((regs->ip + 11) >> 32)
32961+ break;
32962+#endif
32963+
32964+ err = get_user(mov1, (unsigned char __user *)regs->ip);
32965+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32966+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
32967+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32968+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
32969+
32970+ if (err)
32971+ break;
32972+
32973+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
32974+ regs->cx = addr1;
32975+ regs->ax = addr2;
32976+ regs->ip = addr2;
32977+ return 2;
32978+ }
32979+ } while (0);
32980+
32981+ do { /* PaX: gcc trampoline emulation #2 */
32982+ unsigned char mov, jmp;
32983+ unsigned int addr1, addr2;
32984+
32985+#ifdef CONFIG_X86_64
32986+ if ((regs->ip + 9) >> 32)
32987+ break;
32988+#endif
32989+
32990+ err = get_user(mov, (unsigned char __user *)regs->ip);
32991+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
32992+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
32993+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
32994+
32995+ if (err)
32996+ break;
32997+
32998+ if (mov == 0xB9 && jmp == 0xE9) {
32999+ regs->cx = addr1;
33000+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
33001+ return 2;
33002+ }
33003+ } while (0);
33004+
33005+ return 1; /* PaX in action */
33006+}
33007+
33008+#ifdef CONFIG_X86_64
33009+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
33010+{
33011+ int err;
33012+
33013+ do { /* PaX: libffi trampoline emulation */
33014+ unsigned short mov1, mov2, jmp1;
33015+ unsigned char stcclc, jmp2;
33016+ unsigned long addr1, addr2;
33017+
33018+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33019+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
33020+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
33021+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
33022+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
33023+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
33024+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
33025+
33026+ if (err)
33027+ break;
33028+
33029+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33030+ regs->r11 = addr1;
33031+ regs->r10 = addr2;
33032+ if (stcclc == 0xF8)
33033+ regs->flags &= ~X86_EFLAGS_CF;
33034+ else
33035+ regs->flags |= X86_EFLAGS_CF;
33036+ regs->ip = addr1;
33037+ return 2;
33038+ }
33039+ } while (0);
33040+
33041+ do { /* PaX: gcc trampoline emulation #1 */
33042+ unsigned short mov1, mov2, jmp1;
33043+ unsigned char jmp2;
33044+ unsigned int addr1;
33045+ unsigned long addr2;
33046+
33047+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33048+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
33049+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
33050+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
33051+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
33052+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
33053+
33054+ if (err)
33055+ break;
33056+
33057+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33058+ regs->r11 = addr1;
33059+ regs->r10 = addr2;
33060+ regs->ip = addr1;
33061+ return 2;
33062+ }
33063+ } while (0);
33064+
33065+ do { /* PaX: gcc trampoline emulation #2 */
33066+ unsigned short mov1, mov2, jmp1;
33067+ unsigned char jmp2;
33068+ unsigned long addr1, addr2;
33069+
33070+ err = get_user(mov1, (unsigned short __user *)regs->ip);
33071+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
33072+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
33073+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
33074+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
33075+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
33076+
33077+ if (err)
33078+ break;
33079+
33080+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
33081+ regs->r11 = addr1;
33082+ regs->r10 = addr2;
33083+ regs->ip = addr1;
33084+ return 2;
33085+ }
33086+ } while (0);
33087+
33088+ return 1; /* PaX in action */
33089+}
33090+#endif
33091+
33092+/*
33093+ * PaX: decide what to do with offenders (regs->ip = fault address)
33094+ *
33095+ * returns 1 when task should be killed
33096+ * 2 when gcc trampoline was detected
33097+ */
33098+static int pax_handle_fetch_fault(struct pt_regs *regs)
33099+{
33100+ if (v8086_mode(regs))
33101+ return 1;
33102+
33103+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
33104+ return 1;
33105+
33106+#ifdef CONFIG_X86_32
33107+ return pax_handle_fetch_fault_32(regs);
33108+#else
33109+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
33110+ return pax_handle_fetch_fault_32(regs);
33111+ else
33112+ return pax_handle_fetch_fault_64(regs);
33113+#endif
33114+}
33115+#endif
33116+
33117+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33118+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
33119+{
33120+ long i;
33121+
33122+ printk(KERN_ERR "PAX: bytes at PC: ");
33123+ for (i = 0; i < 20; i++) {
33124+ unsigned char c;
33125+ if (get_user(c, (unsigned char __force_user *)pc+i))
33126+ printk(KERN_CONT "?? ");
33127+ else
33128+ printk(KERN_CONT "%02x ", c);
33129+ }
33130+ printk("\n");
33131+
33132+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
33133+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
33134+ unsigned long c;
33135+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
33136+#ifdef CONFIG_X86_32
33137+ printk(KERN_CONT "???????? ");
33138+#else
33139+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
33140+ printk(KERN_CONT "???????? ???????? ");
33141+ else
33142+ printk(KERN_CONT "???????????????? ");
33143+#endif
33144+ } else {
33145+#ifdef CONFIG_X86_64
33146+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
33147+ printk(KERN_CONT "%08x ", (unsigned int)c);
33148+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
33149+ } else
33150+#endif
33151+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
33152+ }
33153+ }
33154+ printk("\n");
33155+}
33156+#endif
33157+
33158+/**
33159+ * probe_kernel_write(): safely attempt to write to a location
33160+ * @dst: address to write to
33161+ * @src: pointer to the data that shall be written
33162+ * @size: size of the data chunk
33163+ *
33164+ * Safely write to address @dst from the buffer at @src. If a kernel fault
33165+ * happens, handle that and return -EFAULT.
33166+ */
33167+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
33168+{
33169+ long ret;
33170+ mm_segment_t old_fs = get_fs();
33171+
33172+ set_fs(KERNEL_DS);
33173+ pagefault_disable();
33174+ pax_open_kernel();
33175+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
33176+ pax_close_kernel();
33177+ pagefault_enable();
33178+ set_fs(old_fs);
33179+
33180+ return ret ? -EFAULT : 0;
33181+}
33182diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
33183index 207d9aef..69030980 100644
33184--- a/arch/x86/mm/gup.c
33185+++ b/arch/x86/mm/gup.c
33186@@ -268,7 +268,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
33187 addr = start;
33188 len = (unsigned long) nr_pages << PAGE_SHIFT;
33189 end = start + len;
33190- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
33191+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
33192 (void __user *)start, len)))
33193 return 0;
33194
33195@@ -344,6 +344,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
33196 goto slow_irqon;
33197 #endif
33198
33199+ if (unlikely(!access_ok_noprefault(write ? VERIFY_WRITE : VERIFY_READ,
33200+ (void __user *)start, len)))
33201+ return 0;
33202+
33203 /*
33204 * XXX: batch / limit 'nr', to avoid large irq off latency
33205 * needs some instrumenting to determine the common sizes used by
33206diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
33207index 4500142..53a363c 100644
33208--- a/arch/x86/mm/highmem_32.c
33209+++ b/arch/x86/mm/highmem_32.c
33210@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
33211 idx = type + KM_TYPE_NR*smp_processor_id();
33212 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33213 BUG_ON(!pte_none(*(kmap_pte-idx)));
33214+
33215+ pax_open_kernel();
33216 set_pte(kmap_pte-idx, mk_pte(page, prot));
33217+ pax_close_kernel();
33218+
33219 arch_flush_lazy_mmu_mode();
33220
33221 return (void *)vaddr;
33222diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
33223index 8b977eb..4732c33 100644
33224--- a/arch/x86/mm/hugetlbpage.c
33225+++ b/arch/x86/mm/hugetlbpage.c
33226@@ -80,23 +80,24 @@ int pud_huge(pud_t pud)
33227 #ifdef CONFIG_HUGETLB_PAGE
33228 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
33229 unsigned long addr, unsigned long len,
33230- unsigned long pgoff, unsigned long flags)
33231+ unsigned long pgoff, unsigned long flags, unsigned long offset)
33232 {
33233 struct hstate *h = hstate_file(file);
33234 struct vm_unmapped_area_info info;
33235-
33236+
33237 info.flags = 0;
33238 info.length = len;
33239 info.low_limit = current->mm->mmap_legacy_base;
33240 info.high_limit = TASK_SIZE;
33241 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
33242 info.align_offset = 0;
33243+ info.threadstack_offset = offset;
33244 return vm_unmapped_area(&info);
33245 }
33246
33247 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33248 unsigned long addr0, unsigned long len,
33249- unsigned long pgoff, unsigned long flags)
33250+ unsigned long pgoff, unsigned long flags, unsigned long offset)
33251 {
33252 struct hstate *h = hstate_file(file);
33253 struct vm_unmapped_area_info info;
33254@@ -108,6 +109,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33255 info.high_limit = current->mm->mmap_base;
33256 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
33257 info.align_offset = 0;
33258+ info.threadstack_offset = offset;
33259 addr = vm_unmapped_area(&info);
33260
33261 /*
33262@@ -120,6 +122,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
33263 VM_BUG_ON(addr != -ENOMEM);
33264 info.flags = 0;
33265 info.low_limit = TASK_UNMAPPED_BASE;
33266+
33267+#ifdef CONFIG_PAX_RANDMMAP
33268+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
33269+ info.low_limit += current->mm->delta_mmap;
33270+#endif
33271+
33272 info.high_limit = TASK_SIZE;
33273 addr = vm_unmapped_area(&info);
33274 }
33275@@ -134,10 +142,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
33276 struct hstate *h = hstate_file(file);
33277 struct mm_struct *mm = current->mm;
33278 struct vm_area_struct *vma;
33279+ unsigned long pax_task_size = TASK_SIZE;
33280+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
33281
33282 if (len & ~huge_page_mask(h))
33283 return -EINVAL;
33284- if (len > TASK_SIZE)
33285+
33286+#ifdef CONFIG_PAX_SEGMEXEC
33287+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
33288+ pax_task_size = SEGMEXEC_TASK_SIZE;
33289+#endif
33290+
33291+ pax_task_size -= PAGE_SIZE;
33292+
33293+ if (len > pax_task_size)
33294 return -ENOMEM;
33295
33296 if (flags & MAP_FIXED) {
33297@@ -146,19 +164,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
33298 return addr;
33299 }
33300
33301+#ifdef CONFIG_PAX_RANDMMAP
33302+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
33303+#endif
33304+
33305 if (addr) {
33306 addr = ALIGN(addr, huge_page_size(h));
33307 vma = find_vma(mm, addr);
33308- if (TASK_SIZE - len >= addr &&
33309- (!vma || addr + len <= vma->vm_start))
33310+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
33311 return addr;
33312 }
33313 if (mm->get_unmapped_area == arch_get_unmapped_area)
33314 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
33315- pgoff, flags);
33316+ pgoff, flags, offset);
33317 else
33318 return hugetlb_get_unmapped_area_topdown(file, addr, len,
33319- pgoff, flags);
33320+ pgoff, flags, offset);
33321 }
33322 #endif /* CONFIG_HUGETLB_PAGE */
33323
33324diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
33325index 66dba36..f8082ec 100644
33326--- a/arch/x86/mm/init.c
33327+++ b/arch/x86/mm/init.c
33328@@ -4,6 +4,7 @@
33329 #include <linux/swap.h>
33330 #include <linux/memblock.h>
33331 #include <linux/bootmem.h> /* for max_low_pfn */
33332+#include <linux/tboot.h>
33333
33334 #include <asm/cacheflush.h>
33335 #include <asm/e820.h>
33336@@ -17,6 +18,8 @@
33337 #include <asm/proto.h>
33338 #include <asm/dma.h> /* for MAX_DMA_PFN */
33339 #include <asm/microcode.h>
33340+#include <asm/desc.h>
33341+#include <asm/bios_ebda.h>
33342
33343 /*
33344 * We need to define the tracepoints somewhere, and tlb.c
33345@@ -570,7 +573,18 @@ void __init init_mem_mapping(void)
33346 early_ioremap_page_table_range_init();
33347 #endif
33348
33349+#ifdef CONFIG_PAX_PER_CPU_PGD
33350+ clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
33351+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
33352+ KERNEL_PGD_PTRS);
33353+ clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
33354+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
33355+ KERNEL_PGD_PTRS);
33356+ load_cr3(get_cpu_pgd(0, kernel));
33357+#else
33358 load_cr3(swapper_pg_dir);
33359+#endif
33360+
33361 __flush_tlb_all();
33362
33363 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
33364@@ -586,10 +600,40 @@ void __init init_mem_mapping(void)
33365 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
33366 * mmio resources as well as potential bios/acpi data regions.
33367 */
33368+
33369+#ifdef CONFIG_GRKERNSEC_KMEM
33370+static unsigned int ebda_start __read_only;
33371+static unsigned int ebda_end __read_only;
33372+#endif
33373+
33374 int devmem_is_allowed(unsigned long pagenr)
33375 {
33376- if (pagenr < 256)
33377+#ifdef CONFIG_GRKERNSEC_KMEM
33378+ /* allow BDA */
33379+ if (!pagenr)
33380 return 1;
33381+ /* allow EBDA */
33382+ if (pagenr >= ebda_start && pagenr < ebda_end)
33383+ return 1;
33384+ /* if tboot is in use, allow access to its hardcoded serial log range */
33385+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
33386+ return 1;
33387+#else
33388+ if (!pagenr)
33389+ return 1;
33390+#ifdef CONFIG_VM86
33391+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
33392+ return 1;
33393+#endif
33394+#endif
33395+
33396+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
33397+ return 1;
33398+#ifdef CONFIG_GRKERNSEC_KMEM
33399+ /* throw out everything else below 1MB */
33400+ if (pagenr <= 256)
33401+ return 0;
33402+#endif
33403 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
33404 return 0;
33405 if (!page_is_ram(pagenr))
33406@@ -635,8 +679,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
33407 #endif
33408 }
33409
33410+#ifdef CONFIG_GRKERNSEC_KMEM
33411+static inline void gr_init_ebda(void)
33412+{
33413+ unsigned int ebda_addr;
33414+ unsigned int ebda_size = 0;
33415+
33416+ ebda_addr = get_bios_ebda();
33417+ if (ebda_addr) {
33418+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
33419+ ebda_size <<= 10;
33420+ }
33421+ if (ebda_addr && ebda_size) {
33422+ ebda_start = ebda_addr >> PAGE_SHIFT;
33423+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
33424+ } else {
33425+ ebda_start = 0x9f000 >> PAGE_SHIFT;
33426+ ebda_end = 0xa0000 >> PAGE_SHIFT;
33427+ }
33428+}
33429+#else
33430+static inline void gr_init_ebda(void) { }
33431+#endif
33432+
33433 void free_initmem(void)
33434 {
33435+#ifdef CONFIG_PAX_KERNEXEC
33436+#ifdef CONFIG_X86_32
33437+ /* PaX: limit KERNEL_CS to actual size */
33438+ unsigned long addr, limit;
33439+ struct desc_struct d;
33440+ int cpu;
33441+#else
33442+ pgd_t *pgd;
33443+ pud_t *pud;
33444+ pmd_t *pmd;
33445+ unsigned long addr, end;
33446+#endif
33447+#endif
33448+
33449+ gr_init_ebda();
33450+
33451+#ifdef CONFIG_PAX_KERNEXEC
33452+#ifdef CONFIG_X86_32
33453+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
33454+ limit = (limit - 1UL) >> PAGE_SHIFT;
33455+
33456+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
33457+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
33458+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
33459+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
33460+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
33461+ }
33462+
33463+ /* PaX: make KERNEL_CS read-only */
33464+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
33465+ if (!paravirt_enabled())
33466+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
33467+/*
33468+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
33469+ pgd = pgd_offset_k(addr);
33470+ pud = pud_offset(pgd, addr);
33471+ pmd = pmd_offset(pud, addr);
33472+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
33473+ }
33474+*/
33475+#ifdef CONFIG_X86_PAE
33476+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
33477+/*
33478+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
33479+ pgd = pgd_offset_k(addr);
33480+ pud = pud_offset(pgd, addr);
33481+ pmd = pmd_offset(pud, addr);
33482+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
33483+ }
33484+*/
33485+#endif
33486+
33487+#ifdef CONFIG_MODULES
33488+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
33489+#endif
33490+
33491+#else
33492+ /* PaX: make kernel code/rodata read-only, rest non-executable */
33493+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
33494+ pgd = pgd_offset_k(addr);
33495+ pud = pud_offset(pgd, addr);
33496+ pmd = pmd_offset(pud, addr);
33497+ if (!pmd_present(*pmd))
33498+ continue;
33499+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
33500+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
33501+ else
33502+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
33503+ }
33504+
33505+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
33506+ end = addr + KERNEL_IMAGE_SIZE;
33507+ for (; addr < end; addr += PMD_SIZE) {
33508+ pgd = pgd_offset_k(addr);
33509+ pud = pud_offset(pgd, addr);
33510+ pmd = pmd_offset(pud, addr);
33511+ if (!pmd_present(*pmd))
33512+ continue;
33513+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
33514+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
33515+ }
33516+#endif
33517+
33518+ flush_tlb_all();
33519+#endif
33520+
33521 free_init_pages("unused kernel",
33522 (unsigned long)(&__init_begin),
33523 (unsigned long)(&__init_end));
33524diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
33525index 7d05565..bfc5338 100644
33526--- a/arch/x86/mm/init_32.c
33527+++ b/arch/x86/mm/init_32.c
33528@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
33529 bool __read_mostly __vmalloc_start_set = false;
33530
33531 /*
33532- * Creates a middle page table and puts a pointer to it in the
33533- * given global directory entry. This only returns the gd entry
33534- * in non-PAE compilation mode, since the middle layer is folded.
33535- */
33536-static pmd_t * __init one_md_table_init(pgd_t *pgd)
33537-{
33538- pud_t *pud;
33539- pmd_t *pmd_table;
33540-
33541-#ifdef CONFIG_X86_PAE
33542- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
33543- pmd_table = (pmd_t *)alloc_low_page();
33544- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
33545- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
33546- pud = pud_offset(pgd, 0);
33547- BUG_ON(pmd_table != pmd_offset(pud, 0));
33548-
33549- return pmd_table;
33550- }
33551-#endif
33552- pud = pud_offset(pgd, 0);
33553- pmd_table = pmd_offset(pud, 0);
33554-
33555- return pmd_table;
33556-}
33557-
33558-/*
33559 * Create a page table and place a pointer to it in a middle page
33560 * directory entry:
33561 */
33562@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
33563 pte_t *page_table = (pte_t *)alloc_low_page();
33564
33565 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
33566+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
33567+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
33568+#else
33569 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
33570+#endif
33571 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
33572 }
33573
33574 return pte_offset_kernel(pmd, 0);
33575 }
33576
33577+static pmd_t * __init one_md_table_init(pgd_t *pgd)
33578+{
33579+ pud_t *pud;
33580+ pmd_t *pmd_table;
33581+
33582+ pud = pud_offset(pgd, 0);
33583+ pmd_table = pmd_offset(pud, 0);
33584+
33585+ return pmd_table;
33586+}
33587+
33588 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
33589 {
33590 int pgd_idx = pgd_index(vaddr);
33591@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33592 int pgd_idx, pmd_idx;
33593 unsigned long vaddr;
33594 pgd_t *pgd;
33595+ pud_t *pud;
33596 pmd_t *pmd;
33597 pte_t *pte = NULL;
33598 unsigned long count = page_table_range_init_count(start, end);
33599@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33600 pgd = pgd_base + pgd_idx;
33601
33602 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
33603- pmd = one_md_table_init(pgd);
33604- pmd = pmd + pmd_index(vaddr);
33605+ pud = pud_offset(pgd, vaddr);
33606+ pmd = pmd_offset(pud, vaddr);
33607+
33608+#ifdef CONFIG_X86_PAE
33609+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33610+#endif
33611+
33612 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
33613 pmd++, pmd_idx++) {
33614 pte = page_table_kmap_check(one_page_table_init(pmd),
33615@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
33616 }
33617 }
33618
33619-static inline int is_kernel_text(unsigned long addr)
33620+static inline int is_kernel_text(unsigned long start, unsigned long end)
33621 {
33622- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
33623- return 1;
33624- return 0;
33625+ if ((start >= ktla_ktva((unsigned long)_etext) ||
33626+ end <= ktla_ktva((unsigned long)_stext)) &&
33627+ (start >= ktla_ktva((unsigned long)_einittext) ||
33628+ end <= ktla_ktva((unsigned long)_sinittext)) &&
33629+
33630+#ifdef CONFIG_ACPI_SLEEP
33631+ (start >= (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
33632+#endif
33633+
33634+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
33635+ return 0;
33636+ return 1;
33637 }
33638
33639 /*
33640@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
33641 unsigned long last_map_addr = end;
33642 unsigned long start_pfn, end_pfn;
33643 pgd_t *pgd_base = swapper_pg_dir;
33644- int pgd_idx, pmd_idx, pte_ofs;
33645+ unsigned int pgd_idx, pmd_idx, pte_ofs;
33646 unsigned long pfn;
33647 pgd_t *pgd;
33648+ pud_t *pud;
33649 pmd_t *pmd;
33650 pte_t *pte;
33651 unsigned pages_2m, pages_4k;
33652@@ -291,8 +295,13 @@ repeat:
33653 pfn = start_pfn;
33654 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33655 pgd = pgd_base + pgd_idx;
33656- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
33657- pmd = one_md_table_init(pgd);
33658+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
33659+ pud = pud_offset(pgd, 0);
33660+ pmd = pmd_offset(pud, 0);
33661+
33662+#ifdef CONFIG_X86_PAE
33663+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
33664+#endif
33665
33666 if (pfn >= end_pfn)
33667 continue;
33668@@ -304,14 +313,13 @@ repeat:
33669 #endif
33670 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
33671 pmd++, pmd_idx++) {
33672- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
33673+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
33674
33675 /*
33676 * Map with big pages if possible, otherwise
33677 * create normal page tables:
33678 */
33679 if (use_pse) {
33680- unsigned int addr2;
33681 pgprot_t prot = PAGE_KERNEL_LARGE;
33682 /*
33683 * first pass will use the same initial
33684@@ -322,11 +330,7 @@ repeat:
33685 _PAGE_PSE);
33686
33687 pfn &= PMD_MASK >> PAGE_SHIFT;
33688- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
33689- PAGE_OFFSET + PAGE_SIZE-1;
33690-
33691- if (is_kernel_text(addr) ||
33692- is_kernel_text(addr2))
33693+ if (is_kernel_text(address, address + PMD_SIZE))
33694 prot = PAGE_KERNEL_LARGE_EXEC;
33695
33696 pages_2m++;
33697@@ -343,7 +347,7 @@ repeat:
33698 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
33699 pte += pte_ofs;
33700 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
33701- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
33702+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
33703 pgprot_t prot = PAGE_KERNEL;
33704 /*
33705 * first pass will use the same initial
33706@@ -351,7 +355,7 @@ repeat:
33707 */
33708 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
33709
33710- if (is_kernel_text(addr))
33711+ if (is_kernel_text(address, address + PAGE_SIZE))
33712 prot = PAGE_KERNEL_EXEC;
33713
33714 pages_4k++;
33715@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
33716
33717 pud = pud_offset(pgd, va);
33718 pmd = pmd_offset(pud, va);
33719- if (!pmd_present(*pmd))
33720+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
33721 break;
33722
33723 /* should not be large page here */
33724@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
33725
33726 static void __init pagetable_init(void)
33727 {
33728- pgd_t *pgd_base = swapper_pg_dir;
33729-
33730- permanent_kmaps_init(pgd_base);
33731+ permanent_kmaps_init(swapper_pg_dir);
33732 }
33733
33734-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33735+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
33736 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33737
33738 /* user-defined highmem size */
33739@@ -787,10 +789,10 @@ void __init mem_init(void)
33740 ((unsigned long)&__init_end -
33741 (unsigned long)&__init_begin) >> 10,
33742
33743- (unsigned long)&_etext, (unsigned long)&_edata,
33744- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
33745+ (unsigned long)&_sdata, (unsigned long)&_edata,
33746+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
33747
33748- (unsigned long)&_text, (unsigned long)&_etext,
33749+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
33750 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
33751
33752 /*
33753@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
33754 if (!kernel_set_to_readonly)
33755 return;
33756
33757+ start = ktla_ktva(start);
33758 pr_debug("Set kernel text: %lx - %lx for read write\n",
33759 start, start+size);
33760
33761@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
33762 if (!kernel_set_to_readonly)
33763 return;
33764
33765+ start = ktla_ktva(start);
33766 pr_debug("Set kernel text: %lx - %lx for read only\n",
33767 start, start+size);
33768
33769@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
33770 unsigned long start = PFN_ALIGN(_text);
33771 unsigned long size = PFN_ALIGN(_etext) - start;
33772
33773+ start = ktla_ktva(start);
33774 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
33775 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
33776 size >> 10);
33777diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
33778index ac7de5f..ceb56df 100644
33779--- a/arch/x86/mm/init_64.c
33780+++ b/arch/x86/mm/init_64.c
33781@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
33782 * around without checking the pgd every time.
33783 */
33784
33785-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
33786+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
33787 EXPORT_SYMBOL_GPL(__supported_pte_mask);
33788
33789 int force_personality32;
33790@@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33791
33792 for (address = start; address <= end; address += PGDIR_SIZE) {
33793 const pgd_t *pgd_ref = pgd_offset_k(address);
33794+
33795+#ifdef CONFIG_PAX_PER_CPU_PGD
33796+ unsigned long cpu;
33797+#else
33798 struct page *page;
33799+#endif
33800
33801 if (pgd_none(*pgd_ref))
33802 continue;
33803
33804 spin_lock(&pgd_lock);
33805+
33806+#ifdef CONFIG_PAX_PER_CPU_PGD
33807+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
33808+ pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
33809+
33810+ if (pgd_none(*pgd))
33811+ set_pgd(pgd, *pgd_ref);
33812+ else
33813+ BUG_ON(pgd_page_vaddr(*pgd)
33814+ != pgd_page_vaddr(*pgd_ref));
33815+ pgd = pgd_offset_cpu(cpu, kernel, address);
33816+#else
33817 list_for_each_entry(page, &pgd_list, lru) {
33818 pgd_t *pgd;
33819 spinlock_t *pgt_lock;
33820@@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33821 /* the pgt_lock only for Xen */
33822 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
33823 spin_lock(pgt_lock);
33824+#endif
33825
33826 if (pgd_none(*pgd))
33827 set_pgd(pgd, *pgd_ref);
33828@@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
33829 BUG_ON(pgd_page_vaddr(*pgd)
33830 != pgd_page_vaddr(*pgd_ref));
33831
33832+#ifndef CONFIG_PAX_PER_CPU_PGD
33833 spin_unlock(pgt_lock);
33834+#endif
33835+
33836 }
33837 spin_unlock(&pgd_lock);
33838 }
33839@@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
33840 {
33841 if (pgd_none(*pgd)) {
33842 pud_t *pud = (pud_t *)spp_getpage();
33843- pgd_populate(&init_mm, pgd, pud);
33844+ pgd_populate_kernel(&init_mm, pgd, pud);
33845 if (pud != pud_offset(pgd, 0))
33846 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
33847 pud, pud_offset(pgd, 0));
33848@@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
33849 {
33850 if (pud_none(*pud)) {
33851 pmd_t *pmd = (pmd_t *) spp_getpage();
33852- pud_populate(&init_mm, pud, pmd);
33853+ pud_populate_kernel(&init_mm, pud, pmd);
33854 if (pmd != pmd_offset(pud, 0))
33855 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
33856 pmd, pmd_offset(pud, 0));
33857@@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
33858 pmd = fill_pmd(pud, vaddr);
33859 pte = fill_pte(pmd, vaddr);
33860
33861+ pax_open_kernel();
33862 set_pte(pte, new_pte);
33863+ pax_close_kernel();
33864
33865 /*
33866 * It's enough to flush this one mapping.
33867@@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
33868 pgd = pgd_offset_k((unsigned long)__va(phys));
33869 if (pgd_none(*pgd)) {
33870 pud = (pud_t *) spp_getpage();
33871- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
33872- _PAGE_USER));
33873+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
33874 }
33875 pud = pud_offset(pgd, (unsigned long)__va(phys));
33876 if (pud_none(*pud)) {
33877 pmd = (pmd_t *) spp_getpage();
33878- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
33879- _PAGE_USER));
33880+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
33881 }
33882 pmd = pmd_offset(pud, phys);
33883 BUG_ON(!pmd_none(*pmd));
33884@@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
33885 prot);
33886
33887 spin_lock(&init_mm.page_table_lock);
33888- pud_populate(&init_mm, pud, pmd);
33889+ pud_populate_kernel(&init_mm, pud, pmd);
33890 spin_unlock(&init_mm.page_table_lock);
33891 }
33892 __flush_tlb_all();
33893@@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
33894 page_size_mask);
33895
33896 spin_lock(&init_mm.page_table_lock);
33897- pgd_populate(&init_mm, pgd, pud);
33898+ pgd_populate_kernel(&init_mm, pgd, pud);
33899 spin_unlock(&init_mm.page_table_lock);
33900 pgd_changed = true;
33901 }
33902@@ -1205,8 +1226,8 @@ static struct vm_operations_struct gate_vma_ops = {
33903 static struct vm_area_struct gate_vma = {
33904 .vm_start = VSYSCALL_ADDR,
33905 .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
33906- .vm_page_prot = PAGE_READONLY_EXEC,
33907- .vm_flags = VM_READ | VM_EXEC,
33908+ .vm_page_prot = PAGE_READONLY,
33909+ .vm_flags = VM_READ,
33910 .vm_ops = &gate_vma_ops,
33911 };
33912
33913diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
33914index 7b179b49..6bd17777 100644
33915--- a/arch/x86/mm/iomap_32.c
33916+++ b/arch/x86/mm/iomap_32.c
33917@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
33918 type = kmap_atomic_idx_push();
33919 idx = type + KM_TYPE_NR * smp_processor_id();
33920 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
33921+
33922+ pax_open_kernel();
33923 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
33924+ pax_close_kernel();
33925+
33926 arch_flush_lazy_mmu_mode();
33927
33928 return (void *)vaddr;
33929diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
33930index baff1da..2816ef4 100644
33931--- a/arch/x86/mm/ioremap.c
33932+++ b/arch/x86/mm/ioremap.c
33933@@ -56,8 +56,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
33934 unsigned long i;
33935
33936 for (i = 0; i < nr_pages; ++i)
33937- if (pfn_valid(start_pfn + i) &&
33938- !PageReserved(pfn_to_page(start_pfn + i)))
33939+ if (pfn_valid(start_pfn + i) && (start_pfn + i >= 0x100 ||
33940+ !PageReserved(pfn_to_page(start_pfn + i))))
33941 return 1;
33942
33943 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
33944@@ -268,7 +268,7 @@ EXPORT_SYMBOL(ioremap_prot);
33945 *
33946 * Caller must ensure there is only one unmapping for the same pointer.
33947 */
33948-void iounmap(volatile void __iomem *addr)
33949+void iounmap(const volatile void __iomem *addr)
33950 {
33951 struct vm_struct *p, *o;
33952
33953@@ -322,6 +322,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33954
33955 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
33956 if (page_is_ram(start >> PAGE_SHIFT))
33957+#ifdef CONFIG_HIGHMEM
33958+ if ((start >> PAGE_SHIFT) < max_low_pfn)
33959+#endif
33960 return __va(phys);
33961
33962 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
33963@@ -334,13 +337,16 @@ void *xlate_dev_mem_ptr(unsigned long phys)
33964 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
33965 {
33966 if (page_is_ram(phys >> PAGE_SHIFT))
33967+#ifdef CONFIG_HIGHMEM
33968+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
33969+#endif
33970 return;
33971
33972 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
33973 return;
33974 }
33975
33976-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
33977+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
33978
33979 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
33980 {
33981@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
33982 early_ioremap_setup();
33983
33984 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
33985- memset(bm_pte, 0, sizeof(bm_pte));
33986- pmd_populate_kernel(&init_mm, pmd, bm_pte);
33987+ pmd_populate_user(&init_mm, pmd, bm_pte);
33988
33989 /*
33990 * The boot-ioremap range spans multiple pmds, for which
33991diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
33992index dd89a13..d77bdcc 100644
33993--- a/arch/x86/mm/kmemcheck/kmemcheck.c
33994+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
33995@@ -628,9 +628,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
33996 * memory (e.g. tracked pages)? For now, we need this to avoid
33997 * invoking kmemcheck for PnP BIOS calls.
33998 */
33999- if (regs->flags & X86_VM_MASK)
34000+ if (v8086_mode(regs))
34001 return false;
34002- if (regs->cs != __KERNEL_CS)
34003+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
34004 return false;
34005
34006 pte = kmemcheck_pte_lookup(address);
34007diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
34008index 919b912..9267313 100644
34009--- a/arch/x86/mm/mmap.c
34010+++ b/arch/x86/mm/mmap.c
34011@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
34012 * Leave an at least ~128 MB hole with possible stack randomization.
34013 */
34014 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
34015-#define MAX_GAP (TASK_SIZE/6*5)
34016+#define MAX_GAP (pax_task_size/6*5)
34017
34018 static int mmap_is_legacy(void)
34019 {
34020@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
34021 return rnd << PAGE_SHIFT;
34022 }
34023
34024-static unsigned long mmap_base(void)
34025+static unsigned long mmap_base(struct mm_struct *mm)
34026 {
34027 unsigned long gap = rlimit(RLIMIT_STACK);
34028+ unsigned long pax_task_size = TASK_SIZE;
34029+
34030+#ifdef CONFIG_PAX_SEGMEXEC
34031+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
34032+ pax_task_size = SEGMEXEC_TASK_SIZE;
34033+#endif
34034
34035 if (gap < MIN_GAP)
34036 gap = MIN_GAP;
34037 else if (gap > MAX_GAP)
34038 gap = MAX_GAP;
34039
34040- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
34041+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
34042 }
34043
34044 /*
34045 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
34046 * does, but not when emulating X86_32
34047 */
34048-static unsigned long mmap_legacy_base(void)
34049+static unsigned long mmap_legacy_base(struct mm_struct *mm)
34050 {
34051- if (mmap_is_ia32())
34052+ if (mmap_is_ia32()) {
34053+
34054+#ifdef CONFIG_PAX_SEGMEXEC
34055+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
34056+ return SEGMEXEC_TASK_UNMAPPED_BASE;
34057+ else
34058+#endif
34059+
34060 return TASK_UNMAPPED_BASE;
34061- else
34062+ } else
34063 return TASK_UNMAPPED_BASE + mmap_rnd();
34064 }
34065
34066@@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
34067 */
34068 void arch_pick_mmap_layout(struct mm_struct *mm)
34069 {
34070- mm->mmap_legacy_base = mmap_legacy_base();
34071- mm->mmap_base = mmap_base();
34072+ mm->mmap_legacy_base = mmap_legacy_base(mm);
34073+ mm->mmap_base = mmap_base(mm);
34074+
34075+#ifdef CONFIG_PAX_RANDMMAP
34076+ if (mm->pax_flags & MF_PAX_RANDMMAP) {
34077+ mm->mmap_legacy_base += mm->delta_mmap;
34078+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
34079+ }
34080+#endif
34081
34082 if (mmap_is_legacy()) {
34083 mm->mmap_base = mm->mmap_legacy_base;
34084diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
34085index 0057a7a..95c7edd 100644
34086--- a/arch/x86/mm/mmio-mod.c
34087+++ b/arch/x86/mm/mmio-mod.c
34088@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
34089 break;
34090 default:
34091 {
34092- unsigned char *ip = (unsigned char *)instptr;
34093+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
34094 my_trace->opcode = MMIO_UNKNOWN_OP;
34095 my_trace->width = 0;
34096 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
34097@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
34098 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
34099 void __iomem *addr)
34100 {
34101- static atomic_t next_id;
34102+ static atomic_unchecked_t next_id;
34103 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
34104 /* These are page-unaligned. */
34105 struct mmiotrace_map map = {
34106@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
34107 .private = trace
34108 },
34109 .phys = offset,
34110- .id = atomic_inc_return(&next_id)
34111+ .id = atomic_inc_return_unchecked(&next_id)
34112 };
34113 map.map_id = trace->id;
34114
34115@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
34116 ioremap_trace_core(offset, size, addr);
34117 }
34118
34119-static void iounmap_trace_core(volatile void __iomem *addr)
34120+static void iounmap_trace_core(const volatile void __iomem *addr)
34121 {
34122 struct mmiotrace_map map = {
34123 .phys = 0,
34124@@ -328,7 +328,7 @@ not_enabled:
34125 }
34126 }
34127
34128-void mmiotrace_iounmap(volatile void __iomem *addr)
34129+void mmiotrace_iounmap(const volatile void __iomem *addr)
34130 {
34131 might_sleep();
34132 if (is_enabled()) /* recheck and proper locking in *_core() */
34133diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
34134index a32b706..efb308b 100644
34135--- a/arch/x86/mm/numa.c
34136+++ b/arch/x86/mm/numa.c
34137@@ -478,7 +478,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
34138 return true;
34139 }
34140
34141-static int __init numa_register_memblks(struct numa_meminfo *mi)
34142+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
34143 {
34144 unsigned long uninitialized_var(pfn_align);
34145 int i, nid;
34146diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
34147index 36de293..b820ddc 100644
34148--- a/arch/x86/mm/pageattr.c
34149+++ b/arch/x86/mm/pageattr.c
34150@@ -262,7 +262,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34151 */
34152 #ifdef CONFIG_PCI_BIOS
34153 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
34154- pgprot_val(forbidden) |= _PAGE_NX;
34155+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34156 #endif
34157
34158 /*
34159@@ -270,9 +270,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34160 * Does not cover __inittext since that is gone later on. On
34161 * 64bit we do not enforce !NX on the low mapping
34162 */
34163- if (within(address, (unsigned long)_text, (unsigned long)_etext))
34164- pgprot_val(forbidden) |= _PAGE_NX;
34165+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
34166+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34167
34168+#ifdef CONFIG_DEBUG_RODATA
34169 /*
34170 * The .rodata section needs to be read-only. Using the pfn
34171 * catches all aliases.
34172@@ -280,6 +281,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34173 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
34174 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
34175 pgprot_val(forbidden) |= _PAGE_RW;
34176+#endif
34177
34178 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
34179 /*
34180@@ -318,6 +320,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
34181 }
34182 #endif
34183
34184+#ifdef CONFIG_PAX_KERNEXEC
34185+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
34186+ pgprot_val(forbidden) |= _PAGE_RW;
34187+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
34188+ }
34189+#endif
34190+
34191 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
34192
34193 return prot;
34194@@ -420,23 +429,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
34195 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
34196 {
34197 /* change init_mm */
34198+ pax_open_kernel();
34199 set_pte_atomic(kpte, pte);
34200+
34201 #ifdef CONFIG_X86_32
34202 if (!SHARED_KERNEL_PMD) {
34203+
34204+#ifdef CONFIG_PAX_PER_CPU_PGD
34205+ unsigned long cpu;
34206+#else
34207 struct page *page;
34208+#endif
34209
34210+#ifdef CONFIG_PAX_PER_CPU_PGD
34211+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
34212+ pgd_t *pgd = get_cpu_pgd(cpu, kernel);
34213+#else
34214 list_for_each_entry(page, &pgd_list, lru) {
34215- pgd_t *pgd;
34216+ pgd_t *pgd = (pgd_t *)page_address(page);
34217+#endif
34218+
34219 pud_t *pud;
34220 pmd_t *pmd;
34221
34222- pgd = (pgd_t *)page_address(page) + pgd_index(address);
34223+ pgd += pgd_index(address);
34224 pud = pud_offset(pgd, address);
34225 pmd = pmd_offset(pud, address);
34226 set_pte_atomic((pte_t *)pmd, pte);
34227 }
34228 }
34229 #endif
34230+ pax_close_kernel();
34231 }
34232
34233 static int
34234diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
34235index 6574388..87e9bef 100644
34236--- a/arch/x86/mm/pat.c
34237+++ b/arch/x86/mm/pat.c
34238@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
34239
34240 if (!entry) {
34241 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
34242- current->comm, current->pid, start, end - 1);
34243+ current->comm, task_pid_nr(current), start, end - 1);
34244 return -EINVAL;
34245 }
34246
34247@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34248
34249 while (cursor < to) {
34250 if (!devmem_is_allowed(pfn)) {
34251- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
34252- current->comm, from, to - 1);
34253+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
34254+ current->comm, from, to - 1, cursor);
34255 return 0;
34256 }
34257 cursor += PAGE_SIZE;
34258@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
34259 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
34260 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
34261 "for [mem %#010Lx-%#010Lx]\n",
34262- current->comm, current->pid,
34263+ current->comm, task_pid_nr(current),
34264 cattr_name(flags),
34265 base, (unsigned long long)(base + size-1));
34266 return -EINVAL;
34267@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
34268 flags = lookup_memtype(paddr);
34269 if (want_flags != flags) {
34270 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
34271- current->comm, current->pid,
34272+ current->comm, task_pid_nr(current),
34273 cattr_name(want_flags),
34274 (unsigned long long)paddr,
34275 (unsigned long long)(paddr + size - 1),
34276@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
34277 free_memtype(paddr, paddr + size);
34278 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
34279 " for [mem %#010Lx-%#010Lx], got %s\n",
34280- current->comm, current->pid,
34281+ current->comm, task_pid_nr(current),
34282 cattr_name(want_flags),
34283 (unsigned long long)paddr,
34284 (unsigned long long)(paddr + size - 1),
34285diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
34286index 415f6c4..d319983 100644
34287--- a/arch/x86/mm/pat_rbtree.c
34288+++ b/arch/x86/mm/pat_rbtree.c
34289@@ -160,7 +160,7 @@ success:
34290
34291 failure:
34292 printk(KERN_INFO "%s:%d conflicting memory types "
34293- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
34294+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
34295 end, cattr_name(found_type), cattr_name(match->type));
34296 return -EBUSY;
34297 }
34298diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
34299index 9f0614d..92ae64a 100644
34300--- a/arch/x86/mm/pf_in.c
34301+++ b/arch/x86/mm/pf_in.c
34302@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
34303 int i;
34304 enum reason_type rv = OTHERS;
34305
34306- p = (unsigned char *)ins_addr;
34307+ p = (unsigned char *)ktla_ktva(ins_addr);
34308 p += skip_prefix(p, &prf);
34309 p += get_opcode(p, &opcode);
34310
34311@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
34312 struct prefix_bits prf;
34313 int i;
34314
34315- p = (unsigned char *)ins_addr;
34316+ p = (unsigned char *)ktla_ktva(ins_addr);
34317 p += skip_prefix(p, &prf);
34318 p += get_opcode(p, &opcode);
34319
34320@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
34321 struct prefix_bits prf;
34322 int i;
34323
34324- p = (unsigned char *)ins_addr;
34325+ p = (unsigned char *)ktla_ktva(ins_addr);
34326 p += skip_prefix(p, &prf);
34327 p += get_opcode(p, &opcode);
34328
34329@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
34330 struct prefix_bits prf;
34331 int i;
34332
34333- p = (unsigned char *)ins_addr;
34334+ p = (unsigned char *)ktla_ktva(ins_addr);
34335 p += skip_prefix(p, &prf);
34336 p += get_opcode(p, &opcode);
34337 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
34338@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
34339 struct prefix_bits prf;
34340 int i;
34341
34342- p = (unsigned char *)ins_addr;
34343+ p = (unsigned char *)ktla_ktva(ins_addr);
34344 p += skip_prefix(p, &prf);
34345 p += get_opcode(p, &opcode);
34346 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
34347diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
34348index 6fb6927..4fc13c0 100644
34349--- a/arch/x86/mm/pgtable.c
34350+++ b/arch/x86/mm/pgtable.c
34351@@ -97,10 +97,71 @@ static inline void pgd_list_del(pgd_t *pgd)
34352 list_del(&page->lru);
34353 }
34354
34355-#define UNSHARED_PTRS_PER_PGD \
34356- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
34357+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
34358+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
34359
34360+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
34361+{
34362+ unsigned int count = USER_PGD_PTRS;
34363
34364+ if (!pax_user_shadow_base)
34365+ return;
34366+
34367+ while (count--)
34368+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
34369+}
34370+#endif
34371+
34372+#ifdef CONFIG_PAX_PER_CPU_PGD
34373+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
34374+{
34375+ unsigned int count = USER_PGD_PTRS;
34376+
34377+ while (count--) {
34378+ pgd_t pgd;
34379+
34380+#ifdef CONFIG_X86_64
34381+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
34382+#else
34383+ pgd = *src++;
34384+#endif
34385+
34386+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
34387+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
34388+#endif
34389+
34390+ *dst++ = pgd;
34391+ }
34392+
34393+}
34394+#endif
34395+
34396+#ifdef CONFIG_X86_64
34397+#define pxd_t pud_t
34398+#define pyd_t pgd_t
34399+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
34400+#define pgtable_pxd_page_ctor(page) true
34401+#define pgtable_pxd_page_dtor(page)
34402+#define pxd_free(mm, pud) pud_free((mm), (pud))
34403+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
34404+#define pyd_offset(mm, address) pgd_offset((mm), (address))
34405+#define PYD_SIZE PGDIR_SIZE
34406+#else
34407+#define pxd_t pmd_t
34408+#define pyd_t pud_t
34409+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
34410+#define pgtable_pxd_page_ctor(page) pgtable_pmd_page_ctor(page)
34411+#define pgtable_pxd_page_dtor(page) pgtable_pmd_page_dtor(page)
34412+#define pxd_free(mm, pud) pmd_free((mm), (pud))
34413+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
34414+#define pyd_offset(mm, address) pud_offset((mm), (address))
34415+#define PYD_SIZE PUD_SIZE
34416+#endif
34417+
34418+#ifdef CONFIG_PAX_PER_CPU_PGD
34419+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
34420+static inline void pgd_dtor(pgd_t *pgd) {}
34421+#else
34422 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
34423 {
34424 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
34425@@ -141,6 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
34426 pgd_list_del(pgd);
34427 spin_unlock(&pgd_lock);
34428 }
34429+#endif
34430
34431 /*
34432 * List of all pgd's needed for non-PAE so it can invalidate entries
34433@@ -153,7 +215,7 @@ static void pgd_dtor(pgd_t *pgd)
34434 * -- nyc
34435 */
34436
34437-#ifdef CONFIG_X86_PAE
34438+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
34439 /*
34440 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
34441 * updating the top-level pagetable entries to guarantee the
34442@@ -165,7 +227,7 @@ static void pgd_dtor(pgd_t *pgd)
34443 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
34444 * and initialize the kernel pmds here.
34445 */
34446-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
34447+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
34448
34449 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
34450 {
34451@@ -183,43 +245,45 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
34452 */
34453 flush_tlb_mm(mm);
34454 }
34455+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
34456+#define PREALLOCATED_PXDS USER_PGD_PTRS
34457 #else /* !CONFIG_X86_PAE */
34458
34459 /* No need to prepopulate any pagetable entries in non-PAE modes. */
34460-#define PREALLOCATED_PMDS 0
34461+#define PREALLOCATED_PXDS 0
34462
34463 #endif /* CONFIG_X86_PAE */
34464
34465-static void free_pmds(pmd_t *pmds[])
34466+static void free_pxds(pxd_t *pxds[])
34467 {
34468 int i;
34469
34470- for(i = 0; i < PREALLOCATED_PMDS; i++)
34471- if (pmds[i]) {
34472- pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
34473- free_page((unsigned long)pmds[i]);
34474+ for(i = 0; i < PREALLOCATED_PXDS; i++)
34475+ if (pxds[i]) {
34476+ pgtable_pxd_page_dtor(virt_to_page(pxds[i]));
34477+ free_page((unsigned long)pxds[i]);
34478 }
34479 }
34480
34481-static int preallocate_pmds(pmd_t *pmds[])
34482+static int preallocate_pxds(pxd_t *pxds[])
34483 {
34484 int i;
34485 bool failed = false;
34486
34487- for(i = 0; i < PREALLOCATED_PMDS; i++) {
34488- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
34489- if (!pmd)
34490+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
34491+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
34492+ if (!pxd)
34493 failed = true;
34494- if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
34495- free_page((unsigned long)pmd);
34496- pmd = NULL;
34497+ if (pxd && !pgtable_pxd_page_ctor(virt_to_page(pxd))) {
34498+ free_page((unsigned long)pxd);
34499+ pxd = NULL;
34500 failed = true;
34501 }
34502- pmds[i] = pmd;
34503+ pxds[i] = pxd;
34504 }
34505
34506 if (failed) {
34507- free_pmds(pmds);
34508+ free_pxds(pxds);
34509 return -ENOMEM;
34510 }
34511
34512@@ -232,49 +296,52 @@ static int preallocate_pmds(pmd_t *pmds[])
34513 * preallocate which never got a corresponding vma will need to be
34514 * freed manually.
34515 */
34516-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
34517+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
34518 {
34519 int i;
34520
34521- for(i = 0; i < PREALLOCATED_PMDS; i++) {
34522+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
34523 pgd_t pgd = pgdp[i];
34524
34525 if (pgd_val(pgd) != 0) {
34526- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
34527+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
34528
34529- pgdp[i] = native_make_pgd(0);
34530+ set_pgd(pgdp + i, native_make_pgd(0));
34531
34532- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
34533- pmd_free(mm, pmd);
34534+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
34535+ pxd_free(mm, pxd);
34536 }
34537 }
34538 }
34539
34540-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
34541+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
34542 {
34543- pud_t *pud;
34544+ pyd_t *pyd;
34545 int i;
34546
34547- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
34548+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
34549 return;
34550
34551- pud = pud_offset(pgd, 0);
34552-
34553- for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
34554- pmd_t *pmd = pmds[i];
34555+#ifdef CONFIG_X86_64
34556+ pyd = pyd_offset(mm, 0L);
34557+#else
34558+ pyd = pyd_offset(pgd, 0L);
34559+#endif
34560
34561+ for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
34562+ pxd_t *pxd = pxds[i];
34563 if (i >= KERNEL_PGD_BOUNDARY)
34564- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34565- sizeof(pmd_t) * PTRS_PER_PMD);
34566+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
34567+ sizeof(pxd_t) * PTRS_PER_PMD);
34568
34569- pud_populate(mm, pud, pmd);
34570+ pyd_populate(mm, pyd, pxd);
34571 }
34572 }
34573
34574 pgd_t *pgd_alloc(struct mm_struct *mm)
34575 {
34576 pgd_t *pgd;
34577- pmd_t *pmds[PREALLOCATED_PMDS];
34578+ pxd_t *pxds[PREALLOCATED_PXDS];
34579
34580 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
34581
34582@@ -283,11 +350,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34583
34584 mm->pgd = pgd;
34585
34586- if (preallocate_pmds(pmds) != 0)
34587+ if (preallocate_pxds(pxds) != 0)
34588 goto out_free_pgd;
34589
34590 if (paravirt_pgd_alloc(mm) != 0)
34591- goto out_free_pmds;
34592+ goto out_free_pxds;
34593
34594 /*
34595 * Make sure that pre-populating the pmds is atomic with
34596@@ -297,14 +364,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
34597 spin_lock(&pgd_lock);
34598
34599 pgd_ctor(mm, pgd);
34600- pgd_prepopulate_pmd(mm, pgd, pmds);
34601+ pgd_prepopulate_pxd(mm, pgd, pxds);
34602
34603 spin_unlock(&pgd_lock);
34604
34605 return pgd;
34606
34607-out_free_pmds:
34608- free_pmds(pmds);
34609+out_free_pxds:
34610+ free_pxds(pxds);
34611 out_free_pgd:
34612 free_page((unsigned long)pgd);
34613 out:
34614@@ -313,7 +380,7 @@ out:
34615
34616 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
34617 {
34618- pgd_mop_up_pmds(mm, pgd);
34619+ pgd_mop_up_pxds(mm, pgd);
34620 pgd_dtor(pgd);
34621 paravirt_pgd_free(mm, pgd);
34622 free_page((unsigned long)pgd);
34623diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
34624index 4dd8cf6..f9d143e 100644
34625--- a/arch/x86/mm/pgtable_32.c
34626+++ b/arch/x86/mm/pgtable_32.c
34627@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
34628 return;
34629 }
34630 pte = pte_offset_kernel(pmd, vaddr);
34631+
34632+ pax_open_kernel();
34633 if (pte_val(pteval))
34634 set_pte_at(&init_mm, vaddr, pte, pteval);
34635 else
34636 pte_clear(&init_mm, vaddr, pte);
34637+ pax_close_kernel();
34638
34639 /*
34640 * It's enough to flush this one mapping.
34641diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
34642index e666cbb..61788c45 100644
34643--- a/arch/x86/mm/physaddr.c
34644+++ b/arch/x86/mm/physaddr.c
34645@@ -10,7 +10,7 @@
34646 #ifdef CONFIG_X86_64
34647
34648 #ifdef CONFIG_DEBUG_VIRTUAL
34649-unsigned long __phys_addr(unsigned long x)
34650+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34651 {
34652 unsigned long y = x - __START_KERNEL_map;
34653
34654@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
34655 #else
34656
34657 #ifdef CONFIG_DEBUG_VIRTUAL
34658-unsigned long __phys_addr(unsigned long x)
34659+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
34660 {
34661 unsigned long phys_addr = x - PAGE_OFFSET;
34662 /* VMALLOC_* aren't constants */
34663diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
34664index 90555bf..f5f1828 100644
34665--- a/arch/x86/mm/setup_nx.c
34666+++ b/arch/x86/mm/setup_nx.c
34667@@ -5,8 +5,10 @@
34668 #include <asm/pgtable.h>
34669 #include <asm/proto.h>
34670
34671+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34672 static int disable_nx;
34673
34674+#ifndef CONFIG_PAX_PAGEEXEC
34675 /*
34676 * noexec = on|off
34677 *
34678@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
34679 return 0;
34680 }
34681 early_param("noexec", noexec_setup);
34682+#endif
34683+
34684+#endif
34685
34686 void x86_configure_nx(void)
34687 {
34688+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34689 if (cpu_has_nx && !disable_nx)
34690 __supported_pte_mask |= _PAGE_NX;
34691 else
34692+#endif
34693 __supported_pte_mask &= ~_PAGE_NX;
34694 }
34695
34696diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
34697index ee61c36..e6fedeb 100644
34698--- a/arch/x86/mm/tlb.c
34699+++ b/arch/x86/mm/tlb.c
34700@@ -48,7 +48,11 @@ void leave_mm(int cpu)
34701 BUG();
34702 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
34703 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
34704+
34705+#ifndef CONFIG_PAX_PER_CPU_PGD
34706 load_cr3(swapper_pg_dir);
34707+#endif
34708+
34709 /*
34710 * This gets called in the idle path where RCU
34711 * functions differently. Tracing normally
34712diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
34713new file mode 100644
34714index 0000000..dace51c
34715--- /dev/null
34716+++ b/arch/x86/mm/uderef_64.c
34717@@ -0,0 +1,37 @@
34718+#include <linux/mm.h>
34719+#include <asm/pgtable.h>
34720+#include <asm/uaccess.h>
34721+
34722+#ifdef CONFIG_PAX_MEMORY_UDEREF
34723+/* PaX: due to the special call convention these functions must
34724+ * - remain leaf functions under all configurations,
34725+ * - never be called directly, only dereferenced from the wrappers.
34726+ */
34727+void __pax_open_userland(void)
34728+{
34729+ unsigned int cpu;
34730+
34731+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34732+ return;
34733+
34734+ cpu = raw_get_cpu();
34735+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
34736+ write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
34737+ raw_put_cpu_no_resched();
34738+}
34739+EXPORT_SYMBOL(__pax_open_userland);
34740+
34741+void __pax_close_userland(void)
34742+{
34743+ unsigned int cpu;
34744+
34745+ if (unlikely(!segment_eq(get_fs(), USER_DS)))
34746+ return;
34747+
34748+ cpu = raw_get_cpu();
34749+ BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
34750+ write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
34751+ raw_put_cpu_no_resched();
34752+}
34753+EXPORT_SYMBOL(__pax_close_userland);
34754+#endif
34755diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
34756index 6440221..f84b5c7 100644
34757--- a/arch/x86/net/bpf_jit.S
34758+++ b/arch/x86/net/bpf_jit.S
34759@@ -9,6 +9,7 @@
34760 */
34761 #include <linux/linkage.h>
34762 #include <asm/dwarf2.h>
34763+#include <asm/alternative-asm.h>
34764
34765 /*
34766 * Calling convention :
34767@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
34768 jle bpf_slow_path_word
34769 mov (SKBDATA,%rsi),%eax
34770 bswap %eax /* ntohl() */
34771+ pax_force_retaddr
34772 ret
34773
34774 sk_load_half:
34775@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
34776 jle bpf_slow_path_half
34777 movzwl (SKBDATA,%rsi),%eax
34778 rol $8,%ax # ntohs()
34779+ pax_force_retaddr
34780 ret
34781
34782 sk_load_byte:
34783@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
34784 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
34785 jle bpf_slow_path_byte
34786 movzbl (SKBDATA,%rsi),%eax
34787+ pax_force_retaddr
34788 ret
34789
34790 /* rsi contains offset and can be scratched */
34791@@ -90,6 +94,7 @@ bpf_slow_path_word:
34792 js bpf_error
34793 mov - MAX_BPF_STACK + 32(%rbp),%eax
34794 bswap %eax
34795+ pax_force_retaddr
34796 ret
34797
34798 bpf_slow_path_half:
34799@@ -98,12 +103,14 @@ bpf_slow_path_half:
34800 mov - MAX_BPF_STACK + 32(%rbp),%ax
34801 rol $8,%ax
34802 movzwl %ax,%eax
34803+ pax_force_retaddr
34804 ret
34805
34806 bpf_slow_path_byte:
34807 bpf_slow_path_common(1)
34808 js bpf_error
34809 movzbl - MAX_BPF_STACK + 32(%rbp),%eax
34810+ pax_force_retaddr
34811 ret
34812
34813 #define sk_negative_common(SIZE) \
34814@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
34815 sk_negative_common(4)
34816 mov (%rax), %eax
34817 bswap %eax
34818+ pax_force_retaddr
34819 ret
34820
34821 bpf_slow_path_half_neg:
34822@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
34823 mov (%rax),%ax
34824 rol $8,%ax
34825 movzwl %ax,%eax
34826+ pax_force_retaddr
34827 ret
34828
34829 bpf_slow_path_byte_neg:
34830@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
34831 .globl sk_load_byte_negative_offset
34832 sk_negative_common(1)
34833 movzbl (%rax), %eax
34834+ pax_force_retaddr
34835 ret
34836
34837 bpf_error:
34838@@ -156,4 +166,5 @@ bpf_error:
34839 mov - MAX_BPF_STACK + 16(%rbp),%r14
34840 mov - MAX_BPF_STACK + 24(%rbp),%r15
34841 leaveq
34842+ pax_force_retaddr
34843 ret
34844diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
34845index c881ba8..71aca2e 100644
34846--- a/arch/x86/net/bpf_jit_comp.c
34847+++ b/arch/x86/net/bpf_jit_comp.c
34848@@ -15,7 +15,11 @@
34849 #include <linux/if_vlan.h>
34850 #include <linux/random.h>
34851
34852+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
34853+int bpf_jit_enable __read_only;
34854+#else
34855 int bpf_jit_enable __read_mostly;
34856+#endif
34857
34858 /*
34859 * assembly code in arch/x86/net/bpf_jit.S
34860@@ -109,36 +113,32 @@ static inline void bpf_flush_icache(void *start, void *end)
34861 #define CHOOSE_LOAD_FUNC(K, func) \
34862 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
34863
34864-struct bpf_binary_header {
34865- unsigned int pages;
34866- /* Note : for security reasons, bpf code will follow a randomly
34867- * sized amount of int3 instructions
34868- */
34869- u8 image[];
34870-};
34871-
34872-static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
34873+/* Note : for security reasons, bpf code will follow a randomly
34874+ * sized amount of int3 instructions
34875+ */
34876+static u8 *bpf_alloc_binary(unsigned int proglen,
34877 u8 **image_ptr)
34878 {
34879 unsigned int sz, hole;
34880- struct bpf_binary_header *header;
34881+ u8 *header;
34882
34883 /* Most of BPF filters are really small,
34884 * but if some of them fill a page, allow at least
34885 * 128 extra bytes to insert a random section of int3
34886 */
34887- sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
34888- header = module_alloc(sz);
34889+ sz = round_up(proglen + 128, PAGE_SIZE);
34890+ header = module_alloc_exec(sz);
34891 if (!header)
34892 return NULL;
34893
34894+ pax_open_kernel();
34895 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
34896+ pax_close_kernel();
34897
34898- header->pages = sz / PAGE_SIZE;
34899- hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
34900+ hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
34901
34902 /* insert a random number of int3 instructions before BPF code */
34903- *image_ptr = &header->image[prandom_u32() % hole];
34904+ *image_ptr = &header[prandom_u32() % hole];
34905 return header;
34906 }
34907
34908@@ -864,7 +864,9 @@ common_load:
34909 pr_err("bpf_jit_compile fatal error\n");
34910 return -EFAULT;
34911 }
34912+ pax_open_kernel();
34913 memcpy(image + proglen, temp, ilen);
34914+ pax_close_kernel();
34915 }
34916 proglen += ilen;
34917 addrs[i] = proglen;
34918@@ -879,7 +881,7 @@ void bpf_jit_compile(struct bpf_prog *prog)
34919
34920 void bpf_int_jit_compile(struct bpf_prog *prog)
34921 {
34922- struct bpf_binary_header *header = NULL;
34923+ u8 *header = NULL;
34924 int proglen, oldproglen = 0;
34925 struct jit_context ctx = {};
34926 u8 *image = NULL;
34927@@ -911,7 +913,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34928 if (proglen <= 0) {
34929 image = NULL;
34930 if (header)
34931- module_free(NULL, header);
34932+ module_free_exec(NULL, image);
34933 goto out;
34934 }
34935 if (image) {
34936@@ -935,7 +937,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
34937
34938 if (image) {
34939 bpf_flush_icache(header, image + proglen);
34940- set_memory_ro((unsigned long)header, header->pages);
34941 prog->bpf_func = (void *)image;
34942 prog->jited = 1;
34943 }
34944@@ -943,23 +944,15 @@ out:
34945 kfree(addrs);
34946 }
34947
34948-static void bpf_jit_free_deferred(struct work_struct *work)
34949-{
34950- struct bpf_prog *fp = container_of(work, struct bpf_prog, work);
34951- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34952- struct bpf_binary_header *header = (void *)addr;
34953-
34954- set_memory_rw(addr, header->pages);
34955- module_free(NULL, header);
34956- kfree(fp);
34957-}
34958-
34959 void bpf_jit_free(struct bpf_prog *fp)
34960 {
34961- if (fp->jited) {
34962- INIT_WORK(&fp->work, bpf_jit_free_deferred);
34963- schedule_work(&fp->work);
34964- } else {
34965- kfree(fp);
34966- }
34967+ unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
34968+
34969+ if (!fp->jited)
34970+ goto free_filter;
34971+
34972+ module_free_exec(NULL, (void *)addr);
34973+
34974+free_filter:
34975+ bpf_prog_unlock_free(fp);
34976 }
34977diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
34978index 5d04be5..2beeaa2 100644
34979--- a/arch/x86/oprofile/backtrace.c
34980+++ b/arch/x86/oprofile/backtrace.c
34981@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
34982 struct stack_frame_ia32 *fp;
34983 unsigned long bytes;
34984
34985- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
34986+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
34987 if (bytes != 0)
34988 return NULL;
34989
34990- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
34991+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
34992
34993 oprofile_add_trace(bufhead[0].return_address);
34994
34995@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
34996 struct stack_frame bufhead[2];
34997 unsigned long bytes;
34998
34999- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
35000+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
35001 if (bytes != 0)
35002 return NULL;
35003
35004@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
35005 {
35006 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
35007
35008- if (!user_mode_vm(regs)) {
35009+ if (!user_mode(regs)) {
35010 unsigned long stack = kernel_stack_pointer(regs);
35011 if (depth)
35012 dump_trace(NULL, regs, (unsigned long *)stack, 0,
35013diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
35014index 379e8bd..6386e09 100644
35015--- a/arch/x86/oprofile/nmi_int.c
35016+++ b/arch/x86/oprofile/nmi_int.c
35017@@ -23,6 +23,7 @@
35018 #include <asm/nmi.h>
35019 #include <asm/msr.h>
35020 #include <asm/apic.h>
35021+#include <asm/pgtable.h>
35022
35023 #include "op_counter.h"
35024 #include "op_x86_model.h"
35025@@ -785,8 +786,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
35026 if (ret)
35027 return ret;
35028
35029- if (!model->num_virt_counters)
35030- model->num_virt_counters = model->num_counters;
35031+ if (!model->num_virt_counters) {
35032+ pax_open_kernel();
35033+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
35034+ pax_close_kernel();
35035+ }
35036
35037 mux_init(ops);
35038
35039diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
35040index 50d86c0..7985318 100644
35041--- a/arch/x86/oprofile/op_model_amd.c
35042+++ b/arch/x86/oprofile/op_model_amd.c
35043@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
35044 num_counters = AMD64_NUM_COUNTERS;
35045 }
35046
35047- op_amd_spec.num_counters = num_counters;
35048- op_amd_spec.num_controls = num_counters;
35049- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
35050+ pax_open_kernel();
35051+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
35052+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
35053+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
35054+ pax_close_kernel();
35055
35056 return 0;
35057 }
35058diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
35059index d90528e..0127e2b 100644
35060--- a/arch/x86/oprofile/op_model_ppro.c
35061+++ b/arch/x86/oprofile/op_model_ppro.c
35062@@ -19,6 +19,7 @@
35063 #include <asm/msr.h>
35064 #include <asm/apic.h>
35065 #include <asm/nmi.h>
35066+#include <asm/pgtable.h>
35067
35068 #include "op_x86_model.h"
35069 #include "op_counter.h"
35070@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
35071
35072 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
35073
35074- op_arch_perfmon_spec.num_counters = num_counters;
35075- op_arch_perfmon_spec.num_controls = num_counters;
35076+ pax_open_kernel();
35077+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
35078+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
35079+ pax_close_kernel();
35080 }
35081
35082 static int arch_perfmon_init(struct oprofile_operations *ignore)
35083diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
35084index 71e8a67..6a313bb 100644
35085--- a/arch/x86/oprofile/op_x86_model.h
35086+++ b/arch/x86/oprofile/op_x86_model.h
35087@@ -52,7 +52,7 @@ struct op_x86_model_spec {
35088 void (*switch_ctrl)(struct op_x86_model_spec const *model,
35089 struct op_msrs const * const msrs);
35090 #endif
35091-};
35092+} __do_const;
35093
35094 struct op_counter_config;
35095
35096diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
35097index b9958c3..24229ab 100644
35098--- a/arch/x86/pci/intel_mid_pci.c
35099+++ b/arch/x86/pci/intel_mid_pci.c
35100@@ -250,7 +250,7 @@ int __init intel_mid_pci_init(void)
35101 pci_mmcfg_late_init();
35102 pcibios_enable_irq = intel_mid_pci_irq_enable;
35103 pcibios_disable_irq = intel_mid_pci_irq_disable;
35104- pci_root_ops = intel_mid_pci_ops;
35105+ memcpy((void *)&pci_root_ops, &intel_mid_pci_ops, sizeof pci_root_ops);
35106 pci_soc_mode = 1;
35107 /* Continue with standard init */
35108 return 1;
35109diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
35110index eb500c2..eab9e70 100644
35111--- a/arch/x86/pci/irq.c
35112+++ b/arch/x86/pci/irq.c
35113@@ -51,7 +51,7 @@ struct irq_router {
35114 struct irq_router_handler {
35115 u16 vendor;
35116 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
35117-};
35118+} __do_const;
35119
35120 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
35121 void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
35122@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
35123 return 0;
35124 }
35125
35126-static __initdata struct irq_router_handler pirq_routers[] = {
35127+static __initconst const struct irq_router_handler pirq_routers[] = {
35128 { PCI_VENDOR_ID_INTEL, intel_router_probe },
35129 { PCI_VENDOR_ID_AL, ali_router_probe },
35130 { PCI_VENDOR_ID_ITE, ite_router_probe },
35131@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
35132 static void __init pirq_find_router(struct irq_router *r)
35133 {
35134 struct irq_routing_table *rt = pirq_table;
35135- struct irq_router_handler *h;
35136+ const struct irq_router_handler *h;
35137
35138 #ifdef CONFIG_PCI_BIOS
35139 if (!rt->signature) {
35140@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
35141 return 0;
35142 }
35143
35144-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
35145+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
35146 {
35147 .callback = fix_broken_hp_bios_irq9,
35148 .ident = "HP Pavilion N5400 Series Laptop",
35149diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
35150index c77b24a..c979855 100644
35151--- a/arch/x86/pci/pcbios.c
35152+++ b/arch/x86/pci/pcbios.c
35153@@ -79,7 +79,7 @@ union bios32 {
35154 static struct {
35155 unsigned long address;
35156 unsigned short segment;
35157-} bios32_indirect = { 0, __KERNEL_CS };
35158+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
35159
35160 /*
35161 * Returns the entry point for the given service, NULL on error
35162@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
35163 unsigned long length; /* %ecx */
35164 unsigned long entry; /* %edx */
35165 unsigned long flags;
35166+ struct desc_struct d, *gdt;
35167
35168 local_irq_save(flags);
35169- __asm__("lcall *(%%edi); cld"
35170+
35171+ gdt = get_cpu_gdt_table(smp_processor_id());
35172+
35173+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
35174+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
35175+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
35176+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
35177+
35178+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
35179 : "=a" (return_code),
35180 "=b" (address),
35181 "=c" (length),
35182 "=d" (entry)
35183 : "0" (service),
35184 "1" (0),
35185- "D" (&bios32_indirect));
35186+ "D" (&bios32_indirect),
35187+ "r"(__PCIBIOS_DS)
35188+ : "memory");
35189+
35190+ pax_open_kernel();
35191+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
35192+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
35193+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
35194+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
35195+ pax_close_kernel();
35196+
35197 local_irq_restore(flags);
35198
35199 switch (return_code) {
35200- case 0:
35201- return address + entry;
35202- case 0x80: /* Not present */
35203- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
35204- return 0;
35205- default: /* Shouldn't happen */
35206- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
35207- service, return_code);
35208+ case 0: {
35209+ int cpu;
35210+ unsigned char flags;
35211+
35212+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
35213+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
35214+ printk(KERN_WARNING "bios32_service: not valid\n");
35215 return 0;
35216+ }
35217+ address = address + PAGE_OFFSET;
35218+ length += 16UL; /* some BIOSs underreport this... */
35219+ flags = 4;
35220+ if (length >= 64*1024*1024) {
35221+ length >>= PAGE_SHIFT;
35222+ flags |= 8;
35223+ }
35224+
35225+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
35226+ gdt = get_cpu_gdt_table(cpu);
35227+ pack_descriptor(&d, address, length, 0x9b, flags);
35228+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
35229+ pack_descriptor(&d, address, length, 0x93, flags);
35230+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
35231+ }
35232+ return entry;
35233+ }
35234+ case 0x80: /* Not present */
35235+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
35236+ return 0;
35237+ default: /* Shouldn't happen */
35238+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
35239+ service, return_code);
35240+ return 0;
35241 }
35242 }
35243
35244 static struct {
35245 unsigned long address;
35246 unsigned short segment;
35247-} pci_indirect = { 0, __KERNEL_CS };
35248+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
35249
35250-static int pci_bios_present;
35251+static int pci_bios_present __read_only;
35252
35253 static int check_pcibios(void)
35254 {
35255@@ -131,11 +174,13 @@ static int check_pcibios(void)
35256 unsigned long flags, pcibios_entry;
35257
35258 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
35259- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
35260+ pci_indirect.address = pcibios_entry;
35261
35262 local_irq_save(flags);
35263- __asm__(
35264- "lcall *(%%edi); cld\n\t"
35265+ __asm__("movw %w6, %%ds\n\t"
35266+ "lcall *%%ss:(%%edi); cld\n\t"
35267+ "push %%ss\n\t"
35268+ "pop %%ds\n\t"
35269 "jc 1f\n\t"
35270 "xor %%ah, %%ah\n"
35271 "1:"
35272@@ -144,7 +189,8 @@ static int check_pcibios(void)
35273 "=b" (ebx),
35274 "=c" (ecx)
35275 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
35276- "D" (&pci_indirect)
35277+ "D" (&pci_indirect),
35278+ "r" (__PCIBIOS_DS)
35279 : "memory");
35280 local_irq_restore(flags);
35281
35282@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35283
35284 switch (len) {
35285 case 1:
35286- __asm__("lcall *(%%esi); cld\n\t"
35287+ __asm__("movw %w6, %%ds\n\t"
35288+ "lcall *%%ss:(%%esi); cld\n\t"
35289+ "push %%ss\n\t"
35290+ "pop %%ds\n\t"
35291 "jc 1f\n\t"
35292 "xor %%ah, %%ah\n"
35293 "1:"
35294@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35295 : "1" (PCIBIOS_READ_CONFIG_BYTE),
35296 "b" (bx),
35297 "D" ((long)reg),
35298- "S" (&pci_indirect));
35299+ "S" (&pci_indirect),
35300+ "r" (__PCIBIOS_DS));
35301 /*
35302 * Zero-extend the result beyond 8 bits, do not trust the
35303 * BIOS having done it:
35304@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35305 *value &= 0xff;
35306 break;
35307 case 2:
35308- __asm__("lcall *(%%esi); cld\n\t"
35309+ __asm__("movw %w6, %%ds\n\t"
35310+ "lcall *%%ss:(%%esi); cld\n\t"
35311+ "push %%ss\n\t"
35312+ "pop %%ds\n\t"
35313 "jc 1f\n\t"
35314 "xor %%ah, %%ah\n"
35315 "1:"
35316@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35317 : "1" (PCIBIOS_READ_CONFIG_WORD),
35318 "b" (bx),
35319 "D" ((long)reg),
35320- "S" (&pci_indirect));
35321+ "S" (&pci_indirect),
35322+ "r" (__PCIBIOS_DS));
35323 /*
35324 * Zero-extend the result beyond 16 bits, do not trust the
35325 * BIOS having done it:
35326@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35327 *value &= 0xffff;
35328 break;
35329 case 4:
35330- __asm__("lcall *(%%esi); cld\n\t"
35331+ __asm__("movw %w6, %%ds\n\t"
35332+ "lcall *%%ss:(%%esi); cld\n\t"
35333+ "push %%ss\n\t"
35334+ "pop %%ds\n\t"
35335 "jc 1f\n\t"
35336 "xor %%ah, %%ah\n"
35337 "1:"
35338@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
35339 : "1" (PCIBIOS_READ_CONFIG_DWORD),
35340 "b" (bx),
35341 "D" ((long)reg),
35342- "S" (&pci_indirect));
35343+ "S" (&pci_indirect),
35344+ "r" (__PCIBIOS_DS));
35345 break;
35346 }
35347
35348@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
35349
35350 switch (len) {
35351 case 1:
35352- __asm__("lcall *(%%esi); cld\n\t"
35353+ __asm__("movw %w6, %%ds\n\t"
35354+ "lcall *%%ss:(%%esi); cld\n\t"
35355+ "push %%ss\n\t"
35356+ "pop %%ds\n\t"
35357 "jc 1f\n\t"
35358 "xor %%ah, %%ah\n"
35359 "1:"
35360@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
35361 "c" (value),
35362 "b" (bx),
35363 "D" ((long)reg),
35364- "S" (&pci_indirect));
35365+ "S" (&pci_indirect),
35366+ "r" (__PCIBIOS_DS));
35367 break;
35368 case 2:
35369- __asm__("lcall *(%%esi); cld\n\t"
35370+ __asm__("movw %w6, %%ds\n\t"
35371+ "lcall *%%ss:(%%esi); cld\n\t"
35372+ "push %%ss\n\t"
35373+ "pop %%ds\n\t"
35374 "jc 1f\n\t"
35375 "xor %%ah, %%ah\n"
35376 "1:"
35377@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
35378 "c" (value),
35379 "b" (bx),
35380 "D" ((long)reg),
35381- "S" (&pci_indirect));
35382+ "S" (&pci_indirect),
35383+ "r" (__PCIBIOS_DS));
35384 break;
35385 case 4:
35386- __asm__("lcall *(%%esi); cld\n\t"
35387+ __asm__("movw %w6, %%ds\n\t"
35388+ "lcall *%%ss:(%%esi); cld\n\t"
35389+ "push %%ss\n\t"
35390+ "pop %%ds\n\t"
35391 "jc 1f\n\t"
35392 "xor %%ah, %%ah\n"
35393 "1:"
35394@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
35395 "c" (value),
35396 "b" (bx),
35397 "D" ((long)reg),
35398- "S" (&pci_indirect));
35399+ "S" (&pci_indirect),
35400+ "r" (__PCIBIOS_DS));
35401 break;
35402 }
35403
35404@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
35405
35406 DBG("PCI: Fetching IRQ routing table... ");
35407 __asm__("push %%es\n\t"
35408+ "movw %w8, %%ds\n\t"
35409 "push %%ds\n\t"
35410 "pop %%es\n\t"
35411- "lcall *(%%esi); cld\n\t"
35412+ "lcall *%%ss:(%%esi); cld\n\t"
35413 "pop %%es\n\t"
35414+ "push %%ss\n\t"
35415+ "pop %%ds\n"
35416 "jc 1f\n\t"
35417 "xor %%ah, %%ah\n"
35418 "1:"
35419@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
35420 "1" (0),
35421 "D" ((long) &opt),
35422 "S" (&pci_indirect),
35423- "m" (opt)
35424+ "m" (opt),
35425+ "r" (__PCIBIOS_DS)
35426 : "memory");
35427 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
35428 if (ret & 0xff00)
35429@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
35430 {
35431 int ret;
35432
35433- __asm__("lcall *(%%esi); cld\n\t"
35434+ __asm__("movw %w5, %%ds\n\t"
35435+ "lcall *%%ss:(%%esi); cld\n\t"
35436+ "push %%ss\n\t"
35437+ "pop %%ds\n"
35438 "jc 1f\n\t"
35439 "xor %%ah, %%ah\n"
35440 "1:"
35441@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
35442 : "0" (PCIBIOS_SET_PCI_HW_INT),
35443 "b" ((dev->bus->number << 8) | dev->devfn),
35444 "c" ((irq << 8) | (pin + 10)),
35445- "S" (&pci_indirect));
35446+ "S" (&pci_indirect),
35447+ "r" (__PCIBIOS_DS));
35448 return !(ret & 0xff00);
35449 }
35450 EXPORT_SYMBOL(pcibios_set_irq_routing);
35451diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
35452index 9ee3491..872192f 100644
35453--- a/arch/x86/platform/efi/efi_32.c
35454+++ b/arch/x86/platform/efi/efi_32.c
35455@@ -59,11 +59,22 @@ void efi_call_phys_prelog(void)
35456 {
35457 struct desc_ptr gdt_descr;
35458
35459+#ifdef CONFIG_PAX_KERNEXEC
35460+ struct desc_struct d;
35461+#endif
35462+
35463 local_irq_save(efi_rt_eflags);
35464
35465 load_cr3(initial_page_table);
35466 __flush_tlb_all();
35467
35468+#ifdef CONFIG_PAX_KERNEXEC
35469+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
35470+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
35471+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
35472+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
35473+#endif
35474+
35475 gdt_descr.address = __pa(get_cpu_gdt_table(0));
35476 gdt_descr.size = GDT_SIZE - 1;
35477 load_gdt(&gdt_descr);
35478@@ -73,11 +84,24 @@ void efi_call_phys_epilog(void)
35479 {
35480 struct desc_ptr gdt_descr;
35481
35482+#ifdef CONFIG_PAX_KERNEXEC
35483+ struct desc_struct d;
35484+
35485+ memset(&d, 0, sizeof d);
35486+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
35487+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
35488+#endif
35489+
35490 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
35491 gdt_descr.size = GDT_SIZE - 1;
35492 load_gdt(&gdt_descr);
35493
35494+#ifdef CONFIG_PAX_PER_CPU_PGD
35495+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
35496+#else
35497 load_cr3(swapper_pg_dir);
35498+#endif
35499+
35500 __flush_tlb_all();
35501
35502 local_irq_restore(efi_rt_eflags);
35503diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
35504index 290d397..e09d270 100644
35505--- a/arch/x86/platform/efi/efi_64.c
35506+++ b/arch/x86/platform/efi/efi_64.c
35507@@ -99,6 +99,11 @@ void __init efi_call_phys_prelog(void)
35508 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
35509 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
35510 }
35511+
35512+#ifdef CONFIG_PAX_PER_CPU_PGD
35513+ load_cr3(swapper_pg_dir);
35514+#endif
35515+
35516 __flush_tlb_all();
35517 }
35518
35519@@ -116,6 +121,11 @@ void __init efi_call_phys_epilog(void)
35520 for (pgd = 0; pgd < n_pgds; pgd++)
35521 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
35522 kfree(save_pgd);
35523+
35524+#ifdef CONFIG_PAX_PER_CPU_PGD
35525+ load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
35526+#endif
35527+
35528 __flush_tlb_all();
35529 local_irq_restore(efi_flags);
35530 early_code_mapping_set_exec(0);
35531@@ -146,8 +156,23 @@ int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
35532 unsigned npages;
35533 pgd_t *pgd;
35534
35535- if (efi_enabled(EFI_OLD_MEMMAP))
35536+ if (efi_enabled(EFI_OLD_MEMMAP)) {
35537+ /* PaX: We need to disable the NX bit in the PGD, otherwise we won't be
35538+ * able to execute the EFI services.
35539+ */
35540+ if (__supported_pte_mask & _PAGE_NX) {
35541+ unsigned long addr = (unsigned long) __va(0);
35542+ pgd_t pe = __pgd(pgd_val(*pgd_offset_k(addr)) & ~_PAGE_NX);
35543+
35544+ pr_alert("PAX: Disabling NX protection for low memory map. Try booting without \"efi=old_map\"\n");
35545+#ifdef CONFIG_PAX_PER_CPU_PGD
35546+ set_pgd(pgd_offset_cpu(0, kernel, addr), pe);
35547+#endif
35548+ set_pgd(pgd_offset_k(addr), pe);
35549+ }
35550+
35551 return 0;
35552+ }
35553
35554 efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
35555 pgd = __va(efi_scratch.efi_pgt);
35556diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
35557index fbe66e6..eae5e38 100644
35558--- a/arch/x86/platform/efi/efi_stub_32.S
35559+++ b/arch/x86/platform/efi/efi_stub_32.S
35560@@ -6,7 +6,9 @@
35561 */
35562
35563 #include <linux/linkage.h>
35564+#include <linux/init.h>
35565 #include <asm/page_types.h>
35566+#include <asm/segment.h>
35567
35568 /*
35569 * efi_call_phys(void *, ...) is a function with variable parameters.
35570@@ -20,7 +22,7 @@
35571 * service functions will comply with gcc calling convention, too.
35572 */
35573
35574-.text
35575+__INIT
35576 ENTRY(efi_call_phys)
35577 /*
35578 * 0. The function can only be called in Linux kernel. So CS has been
35579@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
35580 * The mapping of lower virtual memory has been created in prelog and
35581 * epilog.
35582 */
35583- movl $1f, %edx
35584- subl $__PAGE_OFFSET, %edx
35585- jmp *%edx
35586+#ifdef CONFIG_PAX_KERNEXEC
35587+ movl $(__KERNEXEC_EFI_DS), %edx
35588+ mov %edx, %ds
35589+ mov %edx, %es
35590+ mov %edx, %ss
35591+ addl $2f,(1f)
35592+ ljmp *(1f)
35593+
35594+__INITDATA
35595+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
35596+.previous
35597+
35598+2:
35599+ subl $2b,(1b)
35600+#else
35601+ jmp 1f-__PAGE_OFFSET
35602 1:
35603+#endif
35604
35605 /*
35606 * 2. Now on the top of stack is the return
35607@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
35608 * parameter 2, ..., param n. To make things easy, we save the return
35609 * address of efi_call_phys in a global variable.
35610 */
35611- popl %edx
35612- movl %edx, saved_return_addr
35613- /* get the function pointer into ECX*/
35614- popl %ecx
35615- movl %ecx, efi_rt_function_ptr
35616- movl $2f, %edx
35617- subl $__PAGE_OFFSET, %edx
35618- pushl %edx
35619+ popl (saved_return_addr)
35620+ popl (efi_rt_function_ptr)
35621
35622 /*
35623 * 3. Clear PG bit in %CR0.
35624@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
35625 /*
35626 * 5. Call the physical function.
35627 */
35628- jmp *%ecx
35629+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
35630
35631-2:
35632 /*
35633 * 6. After EFI runtime service returns, control will return to
35634 * following instruction. We'd better readjust stack pointer first.
35635@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
35636 movl %cr0, %edx
35637 orl $0x80000000, %edx
35638 movl %edx, %cr0
35639- jmp 1f
35640-1:
35641+
35642 /*
35643 * 8. Now restore the virtual mode from flat mode by
35644 * adding EIP with PAGE_OFFSET.
35645 */
35646- movl $1f, %edx
35647- jmp *%edx
35648+#ifdef CONFIG_PAX_KERNEXEC
35649+ movl $(__KERNEL_DS), %edx
35650+ mov %edx, %ds
35651+ mov %edx, %es
35652+ mov %edx, %ss
35653+ ljmp $(__KERNEL_CS),$1f
35654+#else
35655+ jmp 1f+__PAGE_OFFSET
35656+#endif
35657 1:
35658
35659 /*
35660 * 9. Balance the stack. And because EAX contain the return value,
35661 * we'd better not clobber it.
35662 */
35663- leal efi_rt_function_ptr, %edx
35664- movl (%edx), %ecx
35665- pushl %ecx
35666+ pushl (efi_rt_function_ptr)
35667
35668 /*
35669- * 10. Push the saved return address onto the stack and return.
35670+ * 10. Return to the saved return address.
35671 */
35672- leal saved_return_addr, %edx
35673- movl (%edx), %ecx
35674- pushl %ecx
35675- ret
35676+ jmpl *(saved_return_addr)
35677 ENDPROC(efi_call_phys)
35678 .previous
35679
35680-.data
35681+__INITDATA
35682 saved_return_addr:
35683 .long 0
35684 efi_rt_function_ptr:
35685diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
35686index 5fcda72..cd4dc41 100644
35687--- a/arch/x86/platform/efi/efi_stub_64.S
35688+++ b/arch/x86/platform/efi/efi_stub_64.S
35689@@ -11,6 +11,7 @@
35690 #include <asm/msr.h>
35691 #include <asm/processor-flags.h>
35692 #include <asm/page_types.h>
35693+#include <asm/alternative-asm.h>
35694
35695 #define SAVE_XMM \
35696 mov %rsp, %rax; \
35697@@ -88,6 +89,7 @@ ENTRY(efi_call)
35698 RESTORE_PGT
35699 addq $48, %rsp
35700 RESTORE_XMM
35701+ pax_force_retaddr 0, 1
35702 ret
35703 ENDPROC(efi_call)
35704
35705@@ -245,8 +247,8 @@ efi_gdt64:
35706 .long 0 /* Filled out by user */
35707 .word 0
35708 .quad 0x0000000000000000 /* NULL descriptor */
35709- .quad 0x00af9a000000ffff /* __KERNEL_CS */
35710- .quad 0x00cf92000000ffff /* __KERNEL_DS */
35711+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
35712+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
35713 .quad 0x0080890000000000 /* TS descriptor */
35714 .quad 0x0000000000000000 /* TS continued */
35715 efi_gdt64_end:
35716diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
35717index 1bbedc4..eb795b5 100644
35718--- a/arch/x86/platform/intel-mid/intel-mid.c
35719+++ b/arch/x86/platform/intel-mid/intel-mid.c
35720@@ -71,9 +71,10 @@ static void intel_mid_power_off(void)
35721 {
35722 };
35723
35724-static void intel_mid_reboot(void)
35725+static void __noreturn intel_mid_reboot(void)
35726 {
35727 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
35728+ BUG();
35729 }
35730
35731 static unsigned long __init intel_mid_calibrate_tsc(void)
35732diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35733index 46aa25c..59a68ed 100644
35734--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35735+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
35736@@ -10,10 +10,9 @@
35737 */
35738
35739
35740-/* __attribute__((weak)) makes these declarations overridable */
35741 /* For every CPU addition a new get_<cpuname>_ops interface needs
35742 * to be added.
35743 */
35744-extern void *get_penwell_ops(void) __attribute__((weak));
35745-extern void *get_cloverview_ops(void) __attribute__((weak));
35746-extern void *get_tangier_ops(void) __attribute__((weak));
35747+extern const void *get_penwell_ops(void);
35748+extern const void *get_cloverview_ops(void);
35749+extern const void *get_tangier_ops(void);
35750diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
35751index 23381d2..8ddc10e 100644
35752--- a/arch/x86/platform/intel-mid/mfld.c
35753+++ b/arch/x86/platform/intel-mid/mfld.c
35754@@ -64,12 +64,12 @@ static void __init penwell_arch_setup(void)
35755 pm_power_off = mfld_power_off;
35756 }
35757
35758-void *get_penwell_ops(void)
35759+const void *get_penwell_ops(void)
35760 {
35761 return &penwell_ops;
35762 }
35763
35764-void *get_cloverview_ops(void)
35765+const void *get_cloverview_ops(void)
35766 {
35767 return &penwell_ops;
35768 }
35769diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
35770index aaca917..66eadbc 100644
35771--- a/arch/x86/platform/intel-mid/mrfl.c
35772+++ b/arch/x86/platform/intel-mid/mrfl.c
35773@@ -97,7 +97,7 @@ static struct intel_mid_ops tangier_ops = {
35774 .arch_setup = tangier_arch_setup,
35775 };
35776
35777-void *get_tangier_ops(void)
35778+const void *get_tangier_ops(void)
35779 {
35780 return &tangier_ops;
35781 }
35782diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
35783index d6ee929..3637cb5 100644
35784--- a/arch/x86/platform/olpc/olpc_dt.c
35785+++ b/arch/x86/platform/olpc/olpc_dt.c
35786@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
35787 return res;
35788 }
35789
35790-static struct of_pdt_ops prom_olpc_ops __initdata = {
35791+static struct of_pdt_ops prom_olpc_ops __initconst = {
35792 .nextprop = olpc_dt_nextprop,
35793 .getproplen = olpc_dt_getproplen,
35794 .getproperty = olpc_dt_getproperty,
35795diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
35796index 6ec7910..ecdbb11 100644
35797--- a/arch/x86/power/cpu.c
35798+++ b/arch/x86/power/cpu.c
35799@@ -137,11 +137,8 @@ static void do_fpu_end(void)
35800 static void fix_processor_context(void)
35801 {
35802 int cpu = smp_processor_id();
35803- struct tss_struct *t = &per_cpu(init_tss, cpu);
35804-#ifdef CONFIG_X86_64
35805- struct desc_struct *desc = get_cpu_gdt_table(cpu);
35806- tss_desc tss;
35807-#endif
35808+ struct tss_struct *t = init_tss + cpu;
35809+
35810 set_tss_desc(cpu, t); /*
35811 * This just modifies memory; should not be
35812 * necessary. But... This is necessary, because
35813@@ -150,10 +147,6 @@ static void fix_processor_context(void)
35814 */
35815
35816 #ifdef CONFIG_X86_64
35817- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
35818- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
35819- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
35820-
35821 syscall_init(); /* This sets MSR_*STAR and related */
35822 #endif
35823 load_TR_desc(); /* This does ltr */
35824diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
35825index bad628a..a102610 100644
35826--- a/arch/x86/realmode/init.c
35827+++ b/arch/x86/realmode/init.c
35828@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
35829 __va(real_mode_header->trampoline_header);
35830
35831 #ifdef CONFIG_X86_32
35832- trampoline_header->start = __pa_symbol(startup_32_smp);
35833+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
35834+
35835+#ifdef CONFIG_PAX_KERNEXEC
35836+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
35837+#endif
35838+
35839+ trampoline_header->boot_cs = __BOOT_CS;
35840 trampoline_header->gdt_limit = __BOOT_DS + 7;
35841 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
35842 #else
35843@@ -84,7 +90,7 @@ void __init setup_real_mode(void)
35844 *trampoline_cr4_features = read_cr4();
35845
35846 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
35847- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
35848+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
35849 trampoline_pgd[511] = init_level4_pgt[511].pgd;
35850 #endif
35851 }
35852diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
35853index 7c0d7be..d24dc88 100644
35854--- a/arch/x86/realmode/rm/Makefile
35855+++ b/arch/x86/realmode/rm/Makefile
35856@@ -67,5 +67,8 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
35857
35858 KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
35859 -I$(srctree)/arch/x86/boot
35860+ifdef CONSTIFY_PLUGIN
35861+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
35862+endif
35863 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
35864 GCOV_PROFILE := n
35865diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
35866index a28221d..93c40f1 100644
35867--- a/arch/x86/realmode/rm/header.S
35868+++ b/arch/x86/realmode/rm/header.S
35869@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
35870 #endif
35871 /* APM/BIOS reboot */
35872 .long pa_machine_real_restart_asm
35873-#ifdef CONFIG_X86_64
35874+#ifdef CONFIG_X86_32
35875+ .long __KERNEL_CS
35876+#else
35877 .long __KERNEL32_CS
35878 #endif
35879 END(real_mode_header)
35880diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
35881index 48ddd76..c26749f 100644
35882--- a/arch/x86/realmode/rm/trampoline_32.S
35883+++ b/arch/x86/realmode/rm/trampoline_32.S
35884@@ -24,6 +24,12 @@
35885 #include <asm/page_types.h>
35886 #include "realmode.h"
35887
35888+#ifdef CONFIG_PAX_KERNEXEC
35889+#define ta(X) (X)
35890+#else
35891+#define ta(X) (pa_ ## X)
35892+#endif
35893+
35894 .text
35895 .code16
35896
35897@@ -38,8 +44,6 @@ ENTRY(trampoline_start)
35898
35899 cli # We should be safe anyway
35900
35901- movl tr_start, %eax # where we need to go
35902-
35903 movl $0xA5A5A5A5, trampoline_status
35904 # write marker for master knows we're running
35905
35906@@ -55,7 +59,7 @@ ENTRY(trampoline_start)
35907 movw $1, %dx # protected mode (PE) bit
35908 lmsw %dx # into protected mode
35909
35910- ljmpl $__BOOT_CS, $pa_startup_32
35911+ ljmpl *(trampoline_header)
35912
35913 .section ".text32","ax"
35914 .code32
35915@@ -66,7 +70,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
35916 .balign 8
35917 GLOBAL(trampoline_header)
35918 tr_start: .space 4
35919- tr_gdt_pad: .space 2
35920+ tr_boot_cs: .space 2
35921 tr_gdt: .space 6
35922 END(trampoline_header)
35923
35924diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
35925index dac7b20..72dbaca 100644
35926--- a/arch/x86/realmode/rm/trampoline_64.S
35927+++ b/arch/x86/realmode/rm/trampoline_64.S
35928@@ -93,6 +93,7 @@ ENTRY(startup_32)
35929 movl %edx, %gs
35930
35931 movl pa_tr_cr4, %eax
35932+ andl $~X86_CR4_PCIDE, %eax
35933 movl %eax, %cr4 # Enable PAE mode
35934
35935 # Setup trampoline 4 level pagetables
35936@@ -106,7 +107,7 @@ ENTRY(startup_32)
35937 wrmsr
35938
35939 # Enable paging and in turn activate Long Mode
35940- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
35941+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
35942 movl %eax, %cr0
35943
35944 /*
35945diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
35946index 9e7e147..25a4158 100644
35947--- a/arch/x86/realmode/rm/wakeup_asm.S
35948+++ b/arch/x86/realmode/rm/wakeup_asm.S
35949@@ -126,11 +126,10 @@ ENTRY(wakeup_start)
35950 lgdtl pmode_gdt
35951
35952 /* This really couldn't... */
35953- movl pmode_entry, %eax
35954 movl pmode_cr0, %ecx
35955 movl %ecx, %cr0
35956- ljmpl $__KERNEL_CS, $pa_startup_32
35957- /* -> jmp *%eax in trampoline_32.S */
35958+
35959+ ljmpl *pmode_entry
35960 #else
35961 jmp trampoline_start
35962 #endif
35963diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
35964index 604a37e..e49702a 100644
35965--- a/arch/x86/tools/Makefile
35966+++ b/arch/x86/tools/Makefile
35967@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
35968
35969 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
35970
35971-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
35972+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
35973 hostprogs-y += relocs
35974 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
35975 PHONY += relocs
35976diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
35977index bbb1d22..e505211 100644
35978--- a/arch/x86/tools/relocs.c
35979+++ b/arch/x86/tools/relocs.c
35980@@ -1,5 +1,7 @@
35981 /* This is included from relocs_32/64.c */
35982
35983+#include "../../../include/generated/autoconf.h"
35984+
35985 #define ElfW(type) _ElfW(ELF_BITS, type)
35986 #define _ElfW(bits, type) __ElfW(bits, type)
35987 #define __ElfW(bits, type) Elf##bits##_##type
35988@@ -11,6 +13,7 @@
35989 #define Elf_Sym ElfW(Sym)
35990
35991 static Elf_Ehdr ehdr;
35992+static Elf_Phdr *phdr;
35993
35994 struct relocs {
35995 uint32_t *offset;
35996@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
35997 }
35998 }
35999
36000+static void read_phdrs(FILE *fp)
36001+{
36002+ unsigned int i;
36003+
36004+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
36005+ if (!phdr) {
36006+ die("Unable to allocate %d program headers\n",
36007+ ehdr.e_phnum);
36008+ }
36009+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
36010+ die("Seek to %d failed: %s\n",
36011+ ehdr.e_phoff, strerror(errno));
36012+ }
36013+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
36014+ die("Cannot read ELF program headers: %s\n",
36015+ strerror(errno));
36016+ }
36017+ for(i = 0; i < ehdr.e_phnum; i++) {
36018+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
36019+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
36020+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
36021+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
36022+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
36023+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
36024+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
36025+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
36026+ }
36027+
36028+}
36029+
36030 static void read_shdrs(FILE *fp)
36031 {
36032- int i;
36033+ unsigned int i;
36034 Elf_Shdr shdr;
36035
36036 secs = calloc(ehdr.e_shnum, sizeof(struct section));
36037@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
36038
36039 static void read_strtabs(FILE *fp)
36040 {
36041- int i;
36042+ unsigned int i;
36043 for (i = 0; i < ehdr.e_shnum; i++) {
36044 struct section *sec = &secs[i];
36045 if (sec->shdr.sh_type != SHT_STRTAB) {
36046@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
36047
36048 static void read_symtabs(FILE *fp)
36049 {
36050- int i,j;
36051+ unsigned int i,j;
36052 for (i = 0; i < ehdr.e_shnum; i++) {
36053 struct section *sec = &secs[i];
36054 if (sec->shdr.sh_type != SHT_SYMTAB) {
36055@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
36056 }
36057
36058
36059-static void read_relocs(FILE *fp)
36060+static void read_relocs(FILE *fp, int use_real_mode)
36061 {
36062- int i,j;
36063+ unsigned int i,j;
36064+ uint32_t base;
36065+
36066 for (i = 0; i < ehdr.e_shnum; i++) {
36067 struct section *sec = &secs[i];
36068 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36069@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
36070 die("Cannot read symbol table: %s\n",
36071 strerror(errno));
36072 }
36073+ base = 0;
36074+
36075+#ifdef CONFIG_X86_32
36076+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
36077+ if (phdr[j].p_type != PT_LOAD )
36078+ continue;
36079+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
36080+ continue;
36081+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
36082+ break;
36083+ }
36084+#endif
36085+
36086 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
36087 Elf_Rel *rel = &sec->reltab[j];
36088- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
36089+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
36090 rel->r_info = elf_xword_to_cpu(rel->r_info);
36091 #if (SHT_REL_TYPE == SHT_RELA)
36092 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
36093@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
36094
36095 static void print_absolute_symbols(void)
36096 {
36097- int i;
36098+ unsigned int i;
36099 const char *format;
36100
36101 if (ELF_BITS == 64)
36102@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
36103 for (i = 0; i < ehdr.e_shnum; i++) {
36104 struct section *sec = &secs[i];
36105 char *sym_strtab;
36106- int j;
36107+ unsigned int j;
36108
36109 if (sec->shdr.sh_type != SHT_SYMTAB) {
36110 continue;
36111@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
36112
36113 static void print_absolute_relocs(void)
36114 {
36115- int i, printed = 0;
36116+ unsigned int i, printed = 0;
36117 const char *format;
36118
36119 if (ELF_BITS == 64)
36120@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
36121 struct section *sec_applies, *sec_symtab;
36122 char *sym_strtab;
36123 Elf_Sym *sh_symtab;
36124- int j;
36125+ unsigned int j;
36126 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36127 continue;
36128 }
36129@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
36130 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
36131 Elf_Sym *sym, const char *symname))
36132 {
36133- int i;
36134+ unsigned int i;
36135 /* Walk through the relocations */
36136 for (i = 0; i < ehdr.e_shnum; i++) {
36137 char *sym_strtab;
36138 Elf_Sym *sh_symtab;
36139 struct section *sec_applies, *sec_symtab;
36140- int j;
36141+ unsigned int j;
36142 struct section *sec = &secs[i];
36143
36144 if (sec->shdr.sh_type != SHT_REL_TYPE) {
36145@@ -822,6 +870,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
36146 {
36147 unsigned r_type = ELF32_R_TYPE(rel->r_info);
36148 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
36149+ char *sym_strtab = sec->link->link->strtab;
36150+
36151+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
36152+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
36153+ return 0;
36154+
36155+#ifdef CONFIG_PAX_KERNEXEC
36156+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
36157+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
36158+ return 0;
36159+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
36160+ return 0;
36161+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
36162+ return 0;
36163+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
36164+ return 0;
36165+#endif
36166
36167 switch (r_type) {
36168 case R_386_NONE:
36169@@ -960,7 +1025,7 @@ static int write32_as_text(uint32_t v, FILE *f)
36170
36171 static void emit_relocs(int as_text, int use_real_mode)
36172 {
36173- int i;
36174+ unsigned int i;
36175 int (*write_reloc)(uint32_t, FILE *) = write32;
36176 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
36177 const char *symname);
36178@@ -1060,10 +1125,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
36179 {
36180 regex_init(use_real_mode);
36181 read_ehdr(fp);
36182+ read_phdrs(fp);
36183 read_shdrs(fp);
36184 read_strtabs(fp);
36185 read_symtabs(fp);
36186- read_relocs(fp);
36187+ read_relocs(fp, use_real_mode);
36188 if (ELF_BITS == 64)
36189 percpu_init();
36190 if (show_absolute_syms) {
36191diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
36192index f40281e..92728c9 100644
36193--- a/arch/x86/um/mem_32.c
36194+++ b/arch/x86/um/mem_32.c
36195@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
36196 gate_vma.vm_start = FIXADDR_USER_START;
36197 gate_vma.vm_end = FIXADDR_USER_END;
36198 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
36199- gate_vma.vm_page_prot = __P101;
36200+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
36201
36202 return 0;
36203 }
36204diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
36205index 80ffa5b..a33bd15 100644
36206--- a/arch/x86/um/tls_32.c
36207+++ b/arch/x86/um/tls_32.c
36208@@ -260,7 +260,7 @@ out:
36209 if (unlikely(task == current &&
36210 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
36211 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
36212- "without flushed TLS.", current->pid);
36213+ "without flushed TLS.", task_pid_nr(current));
36214 }
36215
36216 return 0;
36217diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
36218index 5a4affe..9e2d522 100644
36219--- a/arch/x86/vdso/Makefile
36220+++ b/arch/x86/vdso/Makefile
36221@@ -174,7 +174,7 @@ quiet_cmd_vdso = VDSO $@
36222 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
36223 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
36224
36225-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
36226+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
36227 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
36228 GCOV_PROFILE := n
36229
36230diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
36231index e904c27..b9eaa03 100644
36232--- a/arch/x86/vdso/vdso32-setup.c
36233+++ b/arch/x86/vdso/vdso32-setup.c
36234@@ -14,6 +14,7 @@
36235 #include <asm/cpufeature.h>
36236 #include <asm/processor.h>
36237 #include <asm/vdso.h>
36238+#include <asm/mman.h>
36239
36240 #ifdef CONFIG_COMPAT_VDSO
36241 #define VDSO_DEFAULT 0
36242diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
36243index 970463b..da82d3e 100644
36244--- a/arch/x86/vdso/vma.c
36245+++ b/arch/x86/vdso/vma.c
36246@@ -16,10 +16,9 @@
36247 #include <asm/vdso.h>
36248 #include <asm/page.h>
36249 #include <asm/hpet.h>
36250+#include <asm/mman.h>
36251
36252 #if defined(CONFIG_X86_64)
36253-unsigned int __read_mostly vdso64_enabled = 1;
36254-
36255 extern unsigned short vdso_sync_cpuid;
36256 #endif
36257
36258@@ -101,6 +100,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
36259 .pages = no_pages,
36260 };
36261
36262+#ifdef CONFIG_PAX_RANDMMAP
36263+ if (mm->pax_flags & MF_PAX_RANDMMAP)
36264+ calculate_addr = false;
36265+#endif
36266+
36267 if (calculate_addr) {
36268 addr = vdso_addr(current->mm->start_stack,
36269 image->size - image->sym_vvar_start);
36270@@ -111,14 +115,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
36271 down_write(&mm->mmap_sem);
36272
36273 addr = get_unmapped_area(NULL, addr,
36274- image->size - image->sym_vvar_start, 0, 0);
36275+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
36276 if (IS_ERR_VALUE(addr)) {
36277 ret = addr;
36278 goto up_fail;
36279 }
36280
36281 text_start = addr - image->sym_vvar_start;
36282- current->mm->context.vdso = (void __user *)text_start;
36283+ mm->context.vdso = text_start;
36284
36285 /*
36286 * MAYWRITE to allow gdb to COW and set breakpoints
36287@@ -163,15 +167,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
36288 hpet_address >> PAGE_SHIFT,
36289 PAGE_SIZE,
36290 pgprot_noncached(PAGE_READONLY));
36291-
36292- if (ret)
36293- goto up_fail;
36294 }
36295 #endif
36296
36297 up_fail:
36298 if (ret)
36299- current->mm->context.vdso = NULL;
36300+ current->mm->context.vdso = 0;
36301
36302 up_write(&mm->mmap_sem);
36303 return ret;
36304@@ -191,8 +192,8 @@ static int load_vdso32(void)
36305
36306 if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
36307 current_thread_info()->sysenter_return =
36308- current->mm->context.vdso +
36309- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
36310+ (void __force_user *)(current->mm->context.vdso +
36311+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
36312
36313 return 0;
36314 }
36315@@ -201,9 +202,6 @@ static int load_vdso32(void)
36316 #ifdef CONFIG_X86_64
36317 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
36318 {
36319- if (!vdso64_enabled)
36320- return 0;
36321-
36322 return map_vdso(&vdso_image_64, true);
36323 }
36324
36325@@ -212,12 +210,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
36326 int uses_interp)
36327 {
36328 #ifdef CONFIG_X86_X32_ABI
36329- if (test_thread_flag(TIF_X32)) {
36330- if (!vdso64_enabled)
36331- return 0;
36332-
36333+ if (test_thread_flag(TIF_X32))
36334 return map_vdso(&vdso_image_x32, true);
36335- }
36336 #endif
36337
36338 return load_vdso32();
36339@@ -229,12 +223,3 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
36340 return load_vdso32();
36341 }
36342 #endif
36343-
36344-#ifdef CONFIG_X86_64
36345-static __init int vdso_setup(char *s)
36346-{
36347- vdso64_enabled = simple_strtoul(s, NULL, 0);
36348- return 0;
36349-}
36350-__setup("vdso=", vdso_setup);
36351-#endif
36352diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
36353index e88fda8..76ce7ce 100644
36354--- a/arch/x86/xen/Kconfig
36355+++ b/arch/x86/xen/Kconfig
36356@@ -9,6 +9,7 @@ config XEN
36357 select XEN_HAVE_PVMMU
36358 depends on X86_64 || (X86_32 && X86_PAE)
36359 depends on X86_TSC
36360+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_XEN
36361 help
36362 This is the Linux Xen port. Enabling this will allow the
36363 kernel to boot in a paravirtualized environment under the
36364diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
36365index c0cb11f..bed56ff 100644
36366--- a/arch/x86/xen/enlighten.c
36367+++ b/arch/x86/xen/enlighten.c
36368@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
36369
36370 struct shared_info xen_dummy_shared_info;
36371
36372-void *xen_initial_gdt;
36373-
36374 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
36375 __read_mostly int xen_have_vector_callback;
36376 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
36377@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
36378 {
36379 unsigned long va = dtr->address;
36380 unsigned int size = dtr->size + 1;
36381- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
36382- unsigned long frames[pages];
36383+ unsigned long frames[65536 / PAGE_SIZE];
36384 int f;
36385
36386 /*
36387@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
36388 {
36389 unsigned long va = dtr->address;
36390 unsigned int size = dtr->size + 1;
36391- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
36392- unsigned long frames[pages];
36393+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
36394 int f;
36395
36396 /*
36397@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
36398 * 8-byte entries, or 16 4k pages..
36399 */
36400
36401- BUG_ON(size > 65536);
36402+ BUG_ON(size > GDT_SIZE);
36403 BUG_ON(va & ~PAGE_MASK);
36404
36405 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
36406@@ -989,7 +985,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
36407 return 0;
36408 }
36409
36410-static void set_xen_basic_apic_ops(void)
36411+static void __init set_xen_basic_apic_ops(void)
36412 {
36413 apic->read = xen_apic_read;
36414 apic->write = xen_apic_write;
36415@@ -1295,30 +1291,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
36416 #endif
36417 };
36418
36419-static void xen_reboot(int reason)
36420+static __noreturn void xen_reboot(int reason)
36421 {
36422 struct sched_shutdown r = { .reason = reason };
36423
36424- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
36425- BUG();
36426+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
36427+ BUG();
36428 }
36429
36430-static void xen_restart(char *msg)
36431+static __noreturn void xen_restart(char *msg)
36432 {
36433 xen_reboot(SHUTDOWN_reboot);
36434 }
36435
36436-static void xen_emergency_restart(void)
36437+static __noreturn void xen_emergency_restart(void)
36438 {
36439 xen_reboot(SHUTDOWN_reboot);
36440 }
36441
36442-static void xen_machine_halt(void)
36443+static __noreturn void xen_machine_halt(void)
36444 {
36445 xen_reboot(SHUTDOWN_poweroff);
36446 }
36447
36448-static void xen_machine_power_off(void)
36449+static __noreturn void xen_machine_power_off(void)
36450 {
36451 if (pm_power_off)
36452 pm_power_off();
36453@@ -1568,7 +1564,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
36454 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
36455
36456 /* Work out if we support NX */
36457- x86_configure_nx();
36458+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
36459+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
36460+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
36461+ unsigned l, h;
36462+
36463+ __supported_pte_mask |= _PAGE_NX;
36464+ rdmsr(MSR_EFER, l, h);
36465+ l |= EFER_NX;
36466+ wrmsr(MSR_EFER, l, h);
36467+ }
36468+#endif
36469
36470 /* Get mfn list */
36471 xen_build_dynamic_phys_to_machine();
36472@@ -1596,13 +1602,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
36473
36474 machine_ops = xen_machine_ops;
36475
36476- /*
36477- * The only reliable way to retain the initial address of the
36478- * percpu gdt_page is to remember it here, so we can go and
36479- * mark it RW later, when the initial percpu area is freed.
36480- */
36481- xen_initial_gdt = &per_cpu(gdt_page, 0);
36482-
36483 xen_smp_init();
36484
36485 #ifdef CONFIG_ACPI_NUMA
36486diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
36487index 16fb009..02b7801 100644
36488--- a/arch/x86/xen/mmu.c
36489+++ b/arch/x86/xen/mmu.c
36490@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
36491 return val;
36492 }
36493
36494-static pteval_t pte_pfn_to_mfn(pteval_t val)
36495+static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
36496 {
36497 if (val & _PAGE_PRESENT) {
36498 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
36499@@ -1904,7 +1904,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
36500 * L3_k[511] -> level2_fixmap_pgt */
36501 convert_pfn_mfn(level3_kernel_pgt);
36502
36503+ convert_pfn_mfn(level3_vmalloc_start_pgt);
36504+ convert_pfn_mfn(level3_vmalloc_end_pgt);
36505+ convert_pfn_mfn(level3_vmemmap_pgt);
36506 /* L3_k[511][506] -> level1_fixmap_pgt */
36507+ /* L3_k[511][507] -> level1_vsyscall_pgt */
36508 convert_pfn_mfn(level2_fixmap_pgt);
36509 }
36510 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
36511@@ -1929,11 +1933,16 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
36512 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
36513 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
36514 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
36515+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
36516+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
36517+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
36518 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
36519 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
36520+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
36521 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
36522 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
36523 set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
36524+ set_page_prot(level1_vsyscall_pgt, PAGE_KERNEL_RO);
36525
36526 /* Pin down new L4 */
36527 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
36528@@ -2117,6 +2126,7 @@ static void __init xen_post_allocator_init(void)
36529 pv_mmu_ops.set_pud = xen_set_pud;
36530 #if PAGETABLE_LEVELS == 4
36531 pv_mmu_ops.set_pgd = xen_set_pgd;
36532+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
36533 #endif
36534
36535 /* This will work as long as patching hasn't happened yet
36536@@ -2195,6 +2205,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
36537 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
36538 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
36539 .set_pgd = xen_set_pgd_hyper,
36540+ .set_pgd_batched = xen_set_pgd_hyper,
36541
36542 .alloc_pud = xen_alloc_pmd_init,
36543 .release_pud = xen_release_pmd_init,
36544diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
36545index 7005974..54fb05f 100644
36546--- a/arch/x86/xen/smp.c
36547+++ b/arch/x86/xen/smp.c
36548@@ -283,17 +283,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
36549
36550 if (xen_pv_domain()) {
36551 if (!xen_feature(XENFEAT_writable_page_tables))
36552- /* We've switched to the "real" per-cpu gdt, so make
36553- * sure the old memory can be recycled. */
36554- make_lowmem_page_readwrite(xen_initial_gdt);
36555-
36556 #ifdef CONFIG_X86_32
36557 /*
36558 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
36559 * expects __USER_DS
36560 */
36561- loadsegment(ds, __USER_DS);
36562- loadsegment(es, __USER_DS);
36563+ loadsegment(ds, __KERNEL_DS);
36564+ loadsegment(es, __KERNEL_DS);
36565 #endif
36566
36567 xen_filter_cpu_maps();
36568@@ -372,7 +368,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36569 #ifdef CONFIG_X86_32
36570 /* Note: PVH is not yet supported on x86_32. */
36571 ctxt->user_regs.fs = __KERNEL_PERCPU;
36572- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
36573+ savesegment(gs, ctxt->user_regs.gs);
36574 #endif
36575 ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
36576
36577@@ -381,8 +377,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
36578 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
36579 ctxt->flags = VGCF_IN_KERNEL;
36580 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
36581- ctxt->user_regs.ds = __USER_DS;
36582- ctxt->user_regs.es = __USER_DS;
36583+ ctxt->user_regs.ds = __KERNEL_DS;
36584+ ctxt->user_regs.es = __KERNEL_DS;
36585 ctxt->user_regs.ss = __KERNEL_DS;
36586
36587 xen_copy_trap_info(ctxt->trap_ctxt);
36588@@ -437,14 +433,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
36589 int rc;
36590
36591 per_cpu(current_task, cpu) = idle;
36592+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
36593 #ifdef CONFIG_X86_32
36594 irq_ctx_init(cpu);
36595 #else
36596 clear_tsk_thread_flag(idle, TIF_FORK);
36597 #endif
36598- per_cpu(kernel_stack, cpu) =
36599- (unsigned long)task_stack_page(idle) -
36600- KERNEL_STACK_OFFSET + THREAD_SIZE;
36601+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
36602
36603 xen_setup_runstate_info(cpu);
36604 xen_setup_timer(cpu);
36605@@ -720,7 +715,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
36606
36607 void __init xen_smp_init(void)
36608 {
36609- smp_ops = xen_smp_ops;
36610+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
36611 xen_fill_possible_map();
36612 }
36613
36614diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
36615index fd92a64..1f72641 100644
36616--- a/arch/x86/xen/xen-asm_32.S
36617+++ b/arch/x86/xen/xen-asm_32.S
36618@@ -99,7 +99,7 @@ ENTRY(xen_iret)
36619 pushw %fs
36620 movl $(__KERNEL_PERCPU), %eax
36621 movl %eax, %fs
36622- movl %fs:xen_vcpu, %eax
36623+ mov PER_CPU_VAR(xen_vcpu), %eax
36624 POP_FS
36625 #else
36626 movl %ss:xen_vcpu, %eax
36627diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
36628index 485b695..fda3e7c 100644
36629--- a/arch/x86/xen/xen-head.S
36630+++ b/arch/x86/xen/xen-head.S
36631@@ -39,6 +39,17 @@ ENTRY(startup_xen)
36632 #ifdef CONFIG_X86_32
36633 mov %esi,xen_start_info
36634 mov $init_thread_union+THREAD_SIZE,%esp
36635+#ifdef CONFIG_SMP
36636+ movl $cpu_gdt_table,%edi
36637+ movl $__per_cpu_load,%eax
36638+ movw %ax,__KERNEL_PERCPU + 2(%edi)
36639+ rorl $16,%eax
36640+ movb %al,__KERNEL_PERCPU + 4(%edi)
36641+ movb %ah,__KERNEL_PERCPU + 7(%edi)
36642+ movl $__per_cpu_end - 1,%eax
36643+ subl $__per_cpu_start,%eax
36644+ movw %ax,__KERNEL_PERCPU + 0(%edi)
36645+#endif
36646 #else
36647 mov %rsi,xen_start_info
36648 mov $init_thread_union+THREAD_SIZE,%rsp
36649diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
36650index 28c7e0b..2acfec7 100644
36651--- a/arch/x86/xen/xen-ops.h
36652+++ b/arch/x86/xen/xen-ops.h
36653@@ -10,8 +10,6 @@
36654 extern const char xen_hypervisor_callback[];
36655 extern const char xen_failsafe_callback[];
36656
36657-extern void *xen_initial_gdt;
36658-
36659 struct trap_info;
36660 void xen_copy_trap_info(struct trap_info *traps);
36661
36662diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
36663index 525bd3d..ef888b1 100644
36664--- a/arch/xtensa/variants/dc232b/include/variant/core.h
36665+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
36666@@ -119,9 +119,9 @@
36667 ----------------------------------------------------------------------*/
36668
36669 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
36670-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
36671 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
36672 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
36673+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36674
36675 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
36676 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
36677diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
36678index 2f33760..835e50a 100644
36679--- a/arch/xtensa/variants/fsf/include/variant/core.h
36680+++ b/arch/xtensa/variants/fsf/include/variant/core.h
36681@@ -11,6 +11,7 @@
36682 #ifndef _XTENSA_CORE_H
36683 #define _XTENSA_CORE_H
36684
36685+#include <linux/const.h>
36686
36687 /****************************************************************************
36688 Parameters Useful for Any Code, USER or PRIVILEGED
36689@@ -112,9 +113,9 @@
36690 ----------------------------------------------------------------------*/
36691
36692 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36693-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36694 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36695 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36696+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36697
36698 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
36699 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
36700diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
36701index af00795..2bb8105 100644
36702--- a/arch/xtensa/variants/s6000/include/variant/core.h
36703+++ b/arch/xtensa/variants/s6000/include/variant/core.h
36704@@ -11,6 +11,7 @@
36705 #ifndef _XTENSA_CORE_CONFIGURATION_H
36706 #define _XTENSA_CORE_CONFIGURATION_H
36707
36708+#include <linux/const.h>
36709
36710 /****************************************************************************
36711 Parameters Useful for Any Code, USER or PRIVILEGED
36712@@ -118,9 +119,9 @@
36713 ----------------------------------------------------------------------*/
36714
36715 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
36716-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
36717 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
36718 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
36719+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
36720
36721 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
36722 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
36723diff --git a/block/bio.c b/block/bio.c
36724index 3e6331d..f970433 100644
36725--- a/block/bio.c
36726+++ b/block/bio.c
36727@@ -1160,7 +1160,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
36728 /*
36729 * Overflow, abort
36730 */
36731- if (end < start)
36732+ if (end < start || end - start > INT_MAX - nr_pages)
36733 return ERR_PTR(-EINVAL);
36734
36735 nr_pages += end - start;
36736@@ -1294,7 +1294,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
36737 /*
36738 * Overflow, abort
36739 */
36740- if (end < start)
36741+ if (end < start || end - start > INT_MAX - nr_pages)
36742 return ERR_PTR(-EINVAL);
36743
36744 nr_pages += end - start;
36745@@ -1556,7 +1556,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
36746 const int read = bio_data_dir(bio) == READ;
36747 struct bio_map_data *bmd = bio->bi_private;
36748 int i;
36749- char *p = bmd->sgvecs[0].iov_base;
36750+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
36751
36752 bio_for_each_segment_all(bvec, bio, i) {
36753 char *addr = page_address(bvec->bv_page);
36754diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
36755index e17da94..e01cce1 100644
36756--- a/block/blk-cgroup.c
36757+++ b/block/blk-cgroup.c
36758@@ -822,7 +822,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
36759 static struct cgroup_subsys_state *
36760 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
36761 {
36762- static atomic64_t id_seq = ATOMIC64_INIT(0);
36763+ static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
36764 struct blkcg *blkcg;
36765
36766 if (!parent_css) {
36767@@ -836,7 +836,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
36768
36769 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
36770 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
36771- blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
36772+ blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
36773 done:
36774 spin_lock_init(&blkcg->lock);
36775 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
36776diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
36777index 0736729..2ec3b48 100644
36778--- a/block/blk-iopoll.c
36779+++ b/block/blk-iopoll.c
36780@@ -74,7 +74,7 @@ void blk_iopoll_complete(struct blk_iopoll *iop)
36781 }
36782 EXPORT_SYMBOL(blk_iopoll_complete);
36783
36784-static void blk_iopoll_softirq(struct softirq_action *h)
36785+static __latent_entropy void blk_iopoll_softirq(void)
36786 {
36787 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
36788 int rearm = 0, budget = blk_iopoll_budget;
36789diff --git a/block/blk-map.c b/block/blk-map.c
36790index f890d43..97b0482 100644
36791--- a/block/blk-map.c
36792+++ b/block/blk-map.c
36793@@ -300,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
36794 if (!len || !kbuf)
36795 return -EINVAL;
36796
36797- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
36798+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
36799 if (do_copy)
36800 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
36801 else
36802diff --git a/block/blk-softirq.c b/block/blk-softirq.c
36803index 53b1737..08177d2e 100644
36804--- a/block/blk-softirq.c
36805+++ b/block/blk-softirq.c
36806@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
36807 * Softirq action handler - move entries to local list and loop over them
36808 * while passing them to the queue registered handler.
36809 */
36810-static void blk_done_softirq(struct softirq_action *h)
36811+static __latent_entropy void blk_done_softirq(void)
36812 {
36813 struct list_head *cpu_list, local_list;
36814
36815diff --git a/block/bsg.c b/block/bsg.c
36816index ff46add..c4ba8ee 100644
36817--- a/block/bsg.c
36818+++ b/block/bsg.c
36819@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
36820 struct sg_io_v4 *hdr, struct bsg_device *bd,
36821 fmode_t has_write_perm)
36822 {
36823+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36824+ unsigned char *cmdptr;
36825+
36826 if (hdr->request_len > BLK_MAX_CDB) {
36827 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
36828 if (!rq->cmd)
36829 return -ENOMEM;
36830- }
36831+ cmdptr = rq->cmd;
36832+ } else
36833+ cmdptr = tmpcmd;
36834
36835- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
36836+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
36837 hdr->request_len))
36838 return -EFAULT;
36839
36840+ if (cmdptr != rq->cmd)
36841+ memcpy(rq->cmd, cmdptr, hdr->request_len);
36842+
36843 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
36844 if (blk_verify_command(rq->cmd, has_write_perm))
36845 return -EPERM;
36846diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
36847index 18b282c..050dbe5 100644
36848--- a/block/compat_ioctl.c
36849+++ b/block/compat_ioctl.c
36850@@ -156,7 +156,7 @@ static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode,
36851 cgc = compat_alloc_user_space(sizeof(*cgc));
36852 cgc32 = compat_ptr(arg);
36853
36854- if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) ||
36855+ if (copy_in_user(cgc->cmd, cgc32->cmd, sizeof(cgc->cmd)) ||
36856 get_user(data, &cgc32->buffer) ||
36857 put_user(compat_ptr(data), &cgc->buffer) ||
36858 copy_in_user(&cgc->buflen, &cgc32->buflen,
36859@@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
36860 err |= __get_user(f->spec1, &uf->spec1);
36861 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
36862 err |= __get_user(name, &uf->name);
36863- f->name = compat_ptr(name);
36864+ f->name = (void __force_kernel *)compat_ptr(name);
36865 if (err) {
36866 err = -EFAULT;
36867 goto out;
36868diff --git a/block/genhd.c b/block/genhd.c
36869index e6723bd..703e4ac 100644
36870--- a/block/genhd.c
36871+++ b/block/genhd.c
36872@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
36873
36874 /*
36875 * Register device numbers dev..(dev+range-1)
36876- * range must be nonzero
36877+ * Noop if @range is zero.
36878 * The hash chain is sorted on range, so that subranges can override.
36879 */
36880 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
36881 struct kobject *(*probe)(dev_t, int *, void *),
36882 int (*lock)(dev_t, void *), void *data)
36883 {
36884- kobj_map(bdev_map, devt, range, module, probe, lock, data);
36885+ if (range)
36886+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
36887 }
36888
36889 EXPORT_SYMBOL(blk_register_region);
36890
36891+/* undo blk_register_region(), noop if @range is zero */
36892 void blk_unregister_region(dev_t devt, unsigned long range)
36893 {
36894- kobj_unmap(bdev_map, devt, range);
36895+ if (range)
36896+ kobj_unmap(bdev_map, devt, range);
36897 }
36898
36899 EXPORT_SYMBOL(blk_unregister_region);
36900diff --git a/block/partitions/efi.c b/block/partitions/efi.c
36901index 56d08fd..2e07090 100644
36902--- a/block/partitions/efi.c
36903+++ b/block/partitions/efi.c
36904@@ -293,14 +293,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
36905 if (!gpt)
36906 return NULL;
36907
36908+ if (!le32_to_cpu(gpt->num_partition_entries))
36909+ return NULL;
36910+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
36911+ if (!pte)
36912+ return NULL;
36913+
36914 count = le32_to_cpu(gpt->num_partition_entries) *
36915 le32_to_cpu(gpt->sizeof_partition_entry);
36916- if (!count)
36917- return NULL;
36918- pte = kmalloc(count, GFP_KERNEL);
36919- if (!pte)
36920- return NULL;
36921-
36922 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
36923 (u8 *) pte, count) < count) {
36924 kfree(pte);
36925diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
36926index a6d6270..c4bb72f 100644
36927--- a/block/scsi_ioctl.c
36928+++ b/block/scsi_ioctl.c
36929@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
36930 return put_user(0, p);
36931 }
36932
36933-static int sg_get_timeout(struct request_queue *q)
36934+static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
36935 {
36936 return jiffies_to_clock_t(q->sg_timeout);
36937 }
36938@@ -227,8 +227,20 @@ EXPORT_SYMBOL(blk_verify_command);
36939 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
36940 struct sg_io_hdr *hdr, fmode_t mode)
36941 {
36942- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
36943+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36944+ unsigned char *cmdptr;
36945+
36946+ if (rq->cmd != rq->__cmd)
36947+ cmdptr = rq->cmd;
36948+ else
36949+ cmdptr = tmpcmd;
36950+
36951+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
36952 return -EFAULT;
36953+
36954+ if (cmdptr != rq->cmd)
36955+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
36956+
36957 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
36958 return -EPERM;
36959
36960@@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36961 int err;
36962 unsigned int in_len, out_len, bytes, opcode, cmdlen;
36963 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
36964+ unsigned char tmpcmd[sizeof(rq->__cmd)];
36965+ unsigned char *cmdptr;
36966
36967 if (!sic)
36968 return -EINVAL;
36969@@ -470,9 +484,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
36970 */
36971 err = -EFAULT;
36972 rq->cmd_len = cmdlen;
36973- if (copy_from_user(rq->cmd, sic->data, cmdlen))
36974+
36975+ if (rq->cmd != rq->__cmd)
36976+ cmdptr = rq->cmd;
36977+ else
36978+ cmdptr = tmpcmd;
36979+
36980+ if (copy_from_user(cmdptr, sic->data, cmdlen))
36981 goto error;
36982
36983+ if (rq->cmd != cmdptr)
36984+ memcpy(rq->cmd, cmdptr, cmdlen);
36985+
36986 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
36987 goto error;
36988
36989diff --git a/crypto/cryptd.c b/crypto/cryptd.c
36990index e592c90..c566114 100644
36991--- a/crypto/cryptd.c
36992+++ b/crypto/cryptd.c
36993@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
36994
36995 struct cryptd_blkcipher_request_ctx {
36996 crypto_completion_t complete;
36997-};
36998+} __no_const;
36999
37000 struct cryptd_hash_ctx {
37001 struct crypto_shash *child;
37002@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
37003
37004 struct cryptd_aead_request_ctx {
37005 crypto_completion_t complete;
37006-};
37007+} __no_const;
37008
37009 static void cryptd_queue_worker(struct work_struct *work);
37010
37011diff --git a/crypto/cts.c b/crypto/cts.c
37012index 042223f..133f087 100644
37013--- a/crypto/cts.c
37014+++ b/crypto/cts.c
37015@@ -202,7 +202,8 @@ static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
37016 /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
37017 memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
37018 /* 6. Decrypt En to create Pn-1 */
37019- memset(iv, 0, sizeof(iv));
37020+ memzero_explicit(iv, sizeof(iv));
37021+
37022 sg_set_buf(&sgsrc[0], s + bsize, bsize);
37023 sg_set_buf(&sgdst[0], d, bsize);
37024 err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
37025diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
37026index 309d345..1632720 100644
37027--- a/crypto/pcrypt.c
37028+++ b/crypto/pcrypt.c
37029@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
37030 int ret;
37031
37032 pinst->kobj.kset = pcrypt_kset;
37033- ret = kobject_add(&pinst->kobj, NULL, name);
37034+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
37035 if (!ret)
37036 kobject_uevent(&pinst->kobj, KOBJ_ADD);
37037
37038diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
37039index 4279480..7bb0474 100644
37040--- a/crypto/sha1_generic.c
37041+++ b/crypto/sha1_generic.c
37042@@ -64,7 +64,7 @@ int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
37043 src = data + done;
37044 } while (done + SHA1_BLOCK_SIZE <= len);
37045
37046- memset(temp, 0, sizeof(temp));
37047+ memzero_explicit(temp, sizeof(temp));
37048 partial = 0;
37049 }
37050 memcpy(sctx->buffer + partial, src, len - done);
37051diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
37052index 5433667..32c5e5e 100644
37053--- a/crypto/sha256_generic.c
37054+++ b/crypto/sha256_generic.c
37055@@ -210,10 +210,9 @@ static void sha256_transform(u32 *state, const u8 *input)
37056
37057 /* clear any sensitive info... */
37058 a = b = c = d = e = f = g = h = t1 = t2 = 0;
37059- memset(W, 0, 64 * sizeof(u32));
37060+ memzero_explicit(W, 64 * sizeof(u32));
37061 }
37062
37063-
37064 static int sha224_init(struct shash_desc *desc)
37065 {
37066 struct sha256_state *sctx = shash_desc_ctx(desc);
37067@@ -316,7 +315,7 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
37068 sha256_final(desc, D);
37069
37070 memcpy(hash, D, SHA224_DIGEST_SIZE);
37071- memset(D, 0, SHA256_DIGEST_SIZE);
37072+ memzero_explicit(D, SHA256_DIGEST_SIZE);
37073
37074 return 0;
37075 }
37076diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
37077index 6ed124f..04d295a 100644
37078--- a/crypto/sha512_generic.c
37079+++ b/crypto/sha512_generic.c
37080@@ -238,7 +238,7 @@ static int sha384_final(struct shash_desc *desc, u8 *hash)
37081 sha512_final(desc, D);
37082
37083 memcpy(hash, D, 48);
37084- memset(D, 0, 64);
37085+ memzero_explicit(D, 64);
37086
37087 return 0;
37088 }
37089diff --git a/crypto/tgr192.c b/crypto/tgr192.c
37090index 8740355..3c7af0d 100644
37091--- a/crypto/tgr192.c
37092+++ b/crypto/tgr192.c
37093@@ -612,7 +612,7 @@ static int tgr160_final(struct shash_desc *desc, u8 * out)
37094
37095 tgr192_final(desc, D);
37096 memcpy(out, D, TGR160_DIGEST_SIZE);
37097- memset(D, 0, TGR192_DIGEST_SIZE);
37098+ memzero_explicit(D, TGR192_DIGEST_SIZE);
37099
37100 return 0;
37101 }
37102@@ -623,7 +623,7 @@ static int tgr128_final(struct shash_desc *desc, u8 * out)
37103
37104 tgr192_final(desc, D);
37105 memcpy(out, D, TGR128_DIGEST_SIZE);
37106- memset(D, 0, TGR192_DIGEST_SIZE);
37107+ memzero_explicit(D, TGR192_DIGEST_SIZE);
37108
37109 return 0;
37110 }
37111diff --git a/crypto/vmac.c b/crypto/vmac.c
37112index 2eb11a3..d84c24b 100644
37113--- a/crypto/vmac.c
37114+++ b/crypto/vmac.c
37115@@ -613,7 +613,7 @@ static int vmac_final(struct shash_desc *pdesc, u8 *out)
37116 }
37117 mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
37118 memcpy(out, &mac, sizeof(vmac_t));
37119- memset(&mac, 0, sizeof(vmac_t));
37120+ memzero_explicit(&mac, sizeof(vmac_t));
37121 memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
37122 ctx->partial_size = 0;
37123 return 0;
37124diff --git a/crypto/wp512.c b/crypto/wp512.c
37125index 180f1d6..ec64e77 100644
37126--- a/crypto/wp512.c
37127+++ b/crypto/wp512.c
37128@@ -1102,8 +1102,8 @@ static int wp384_final(struct shash_desc *desc, u8 *out)
37129 u8 D[64];
37130
37131 wp512_final(desc, D);
37132- memcpy (out, D, WP384_DIGEST_SIZE);
37133- memset (D, 0, WP512_DIGEST_SIZE);
37134+ memcpy(out, D, WP384_DIGEST_SIZE);
37135+ memzero_explicit(D, WP512_DIGEST_SIZE);
37136
37137 return 0;
37138 }
37139@@ -1113,8 +1113,8 @@ static int wp256_final(struct shash_desc *desc, u8 *out)
37140 u8 D[64];
37141
37142 wp512_final(desc, D);
37143- memcpy (out, D, WP256_DIGEST_SIZE);
37144- memset (D, 0, WP512_DIGEST_SIZE);
37145+ memcpy(out, D, WP256_DIGEST_SIZE);
37146+ memzero_explicit(D, WP512_DIGEST_SIZE);
37147
37148 return 0;
37149 }
37150diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
37151index 6921c7f..78e1af7 100644
37152--- a/drivers/acpi/acpica/hwxfsleep.c
37153+++ b/drivers/acpi/acpica/hwxfsleep.c
37154@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
37155 /* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
37156
37157 static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
37158- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
37159- acpi_hw_extended_sleep},
37160- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
37161- acpi_hw_extended_wake_prep},
37162- {ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake), acpi_hw_extended_wake}
37163+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep),
37164+ .extended_function = acpi_hw_extended_sleep},
37165+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep),
37166+ .extended_function = acpi_hw_extended_wake_prep},
37167+ {.legacy_function = ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake),
37168+ .extended_function = acpi_hw_extended_wake}
37169 };
37170
37171 /*
37172diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
37173index 16129c7..8b675cd 100644
37174--- a/drivers/acpi/apei/apei-internal.h
37175+++ b/drivers/acpi/apei/apei-internal.h
37176@@ -19,7 +19,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
37177 struct apei_exec_ins_type {
37178 u32 flags;
37179 apei_exec_ins_func_t run;
37180-};
37181+} __do_const;
37182
37183 struct apei_exec_context {
37184 u32 ip;
37185diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
37186index fc5f780..e5ac91a 100644
37187--- a/drivers/acpi/apei/ghes.c
37188+++ b/drivers/acpi/apei/ghes.c
37189@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
37190 const struct acpi_hest_generic *generic,
37191 const struct acpi_hest_generic_status *estatus)
37192 {
37193- static atomic_t seqno;
37194+ static atomic_unchecked_t seqno;
37195 unsigned int curr_seqno;
37196 char pfx_seq[64];
37197
37198@@ -489,7 +489,7 @@ static void __ghes_print_estatus(const char *pfx,
37199 else
37200 pfx = KERN_ERR;
37201 }
37202- curr_seqno = atomic_inc_return(&seqno);
37203+ curr_seqno = atomic_inc_return_unchecked(&seqno);
37204 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
37205 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
37206 pfx_seq, generic->header.source_id);
37207diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
37208index a83e3c6..c3d617f 100644
37209--- a/drivers/acpi/bgrt.c
37210+++ b/drivers/acpi/bgrt.c
37211@@ -86,8 +86,10 @@ static int __init bgrt_init(void)
37212 if (!bgrt_image)
37213 return -ENODEV;
37214
37215- bin_attr_image.private = bgrt_image;
37216- bin_attr_image.size = bgrt_image_size;
37217+ pax_open_kernel();
37218+ *(void **)&bin_attr_image.private = bgrt_image;
37219+ *(size_t *)&bin_attr_image.size = bgrt_image_size;
37220+ pax_close_kernel();
37221
37222 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
37223 if (!bgrt_kobj)
37224diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
37225index 36eb42e..3b2f47e 100644
37226--- a/drivers/acpi/blacklist.c
37227+++ b/drivers/acpi/blacklist.c
37228@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
37229 u32 is_critical_error;
37230 };
37231
37232-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
37233+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
37234
37235 /*
37236 * POLICY: If *anything* doesn't work, put it on the blacklist.
37237@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
37238 return 0;
37239 }
37240
37241-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
37242+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
37243 {
37244 .callback = dmi_disable_osi_vista,
37245 .ident = "Fujitsu Siemens",
37246diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
37247index c68e724..e863008 100644
37248--- a/drivers/acpi/custom_method.c
37249+++ b/drivers/acpi/custom_method.c
37250@@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
37251 struct acpi_table_header table;
37252 acpi_status status;
37253
37254+#ifdef CONFIG_GRKERNSEC_KMEM
37255+ return -EPERM;
37256+#endif
37257+
37258 if (!(*ppos)) {
37259 /* parse the table header to get the table length */
37260 if (count <= sizeof(struct acpi_table_header))
37261diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
37262index 17f9ec5..d9a455e 100644
37263--- a/drivers/acpi/processor_idle.c
37264+++ b/drivers/acpi/processor_idle.c
37265@@ -952,7 +952,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
37266 {
37267 int i, count = CPUIDLE_DRIVER_STATE_START;
37268 struct acpi_processor_cx *cx;
37269- struct cpuidle_state *state;
37270+ cpuidle_state_no_const *state;
37271 struct cpuidle_driver *drv = &acpi_idle_driver;
37272
37273 if (!pr->flags.power_setup_done)
37274diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
37275index 38cb978..352c761 100644
37276--- a/drivers/acpi/sysfs.c
37277+++ b/drivers/acpi/sysfs.c
37278@@ -423,11 +423,11 @@ static u32 num_counters;
37279 static struct attribute **all_attrs;
37280 static u32 acpi_gpe_count;
37281
37282-static struct attribute_group interrupt_stats_attr_group = {
37283+static attribute_group_no_const interrupt_stats_attr_group = {
37284 .name = "interrupts",
37285 };
37286
37287-static struct kobj_attribute *counter_attrs;
37288+static kobj_attribute_no_const *counter_attrs;
37289
37290 static void delete_gpe_attr_array(void)
37291 {
37292diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
37293index b784e9d..a69a049 100644
37294--- a/drivers/ata/libahci.c
37295+++ b/drivers/ata/libahci.c
37296@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
37297 }
37298 EXPORT_SYMBOL_GPL(ahci_kick_engine);
37299
37300-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
37301+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
37302 struct ata_taskfile *tf, int is_cmd, u16 flags,
37303 unsigned long timeout_msec)
37304 {
37305diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
37306index 6f67490..f951ead 100644
37307--- a/drivers/ata/libata-core.c
37308+++ b/drivers/ata/libata-core.c
37309@@ -99,7 +99,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
37310 static void ata_dev_xfermask(struct ata_device *dev);
37311 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
37312
37313-atomic_t ata_print_id = ATOMIC_INIT(0);
37314+atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
37315
37316 struct ata_force_param {
37317 const char *name;
37318@@ -4797,7 +4797,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
37319 struct ata_port *ap;
37320 unsigned int tag;
37321
37322- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
37323+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
37324 ap = qc->ap;
37325
37326 qc->flags = 0;
37327@@ -4813,7 +4813,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
37328 struct ata_port *ap;
37329 struct ata_link *link;
37330
37331- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
37332+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
37333 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
37334 ap = qc->ap;
37335 link = qc->dev->link;
37336@@ -5917,6 +5917,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
37337 return;
37338
37339 spin_lock(&lock);
37340+ pax_open_kernel();
37341
37342 for (cur = ops->inherits; cur; cur = cur->inherits) {
37343 void **inherit = (void **)cur;
37344@@ -5930,8 +5931,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
37345 if (IS_ERR(*pp))
37346 *pp = NULL;
37347
37348- ops->inherits = NULL;
37349+ *(struct ata_port_operations **)&ops->inherits = NULL;
37350
37351+ pax_close_kernel();
37352 spin_unlock(&lock);
37353 }
37354
37355@@ -6127,7 +6129,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
37356
37357 /* give ports names and add SCSI hosts */
37358 for (i = 0; i < host->n_ports; i++) {
37359- host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
37360+ host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
37361 host->ports[i]->local_port_no = i + 1;
37362 }
37363
37364diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
37365index 0586f66..1a8f74a 100644
37366--- a/drivers/ata/libata-scsi.c
37367+++ b/drivers/ata/libata-scsi.c
37368@@ -4151,7 +4151,7 @@ int ata_sas_port_init(struct ata_port *ap)
37369
37370 if (rc)
37371 return rc;
37372- ap->print_id = atomic_inc_return(&ata_print_id);
37373+ ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
37374 return 0;
37375 }
37376 EXPORT_SYMBOL_GPL(ata_sas_port_init);
37377diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
37378index 5f4e0cc..ff2c347 100644
37379--- a/drivers/ata/libata.h
37380+++ b/drivers/ata/libata.h
37381@@ -53,7 +53,7 @@ enum {
37382 ATA_DNXFER_QUIET = (1 << 31),
37383 };
37384
37385-extern atomic_t ata_print_id;
37386+extern atomic_unchecked_t ata_print_id;
37387 extern int atapi_passthru16;
37388 extern int libata_fua;
37389 extern int libata_noacpi;
37390diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
37391index 4edb1a8..84e1658 100644
37392--- a/drivers/ata/pata_arasan_cf.c
37393+++ b/drivers/ata/pata_arasan_cf.c
37394@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
37395 /* Handle platform specific quirks */
37396 if (quirk) {
37397 if (quirk & CF_BROKEN_PIO) {
37398- ap->ops->set_piomode = NULL;
37399+ pax_open_kernel();
37400+ *(void **)&ap->ops->set_piomode = NULL;
37401+ pax_close_kernel();
37402 ap->pio_mask = 0;
37403 }
37404 if (quirk & CF_BROKEN_MWDMA)
37405diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
37406index f9b983a..887b9d8 100644
37407--- a/drivers/atm/adummy.c
37408+++ b/drivers/atm/adummy.c
37409@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
37410 vcc->pop(vcc, skb);
37411 else
37412 dev_kfree_skb_any(skb);
37413- atomic_inc(&vcc->stats->tx);
37414+ atomic_inc_unchecked(&vcc->stats->tx);
37415
37416 return 0;
37417 }
37418diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
37419index f1a9198..f466a4a 100644
37420--- a/drivers/atm/ambassador.c
37421+++ b/drivers/atm/ambassador.c
37422@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
37423 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
37424
37425 // VC layer stats
37426- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37427+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37428
37429 // free the descriptor
37430 kfree (tx_descr);
37431@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
37432 dump_skb ("<<<", vc, skb);
37433
37434 // VC layer stats
37435- atomic_inc(&atm_vcc->stats->rx);
37436+ atomic_inc_unchecked(&atm_vcc->stats->rx);
37437 __net_timestamp(skb);
37438 // end of our responsibility
37439 atm_vcc->push (atm_vcc, skb);
37440@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
37441 } else {
37442 PRINTK (KERN_INFO, "dropped over-size frame");
37443 // should we count this?
37444- atomic_inc(&atm_vcc->stats->rx_drop);
37445+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37446 }
37447
37448 } else {
37449@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
37450 }
37451
37452 if (check_area (skb->data, skb->len)) {
37453- atomic_inc(&atm_vcc->stats->tx_err);
37454+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
37455 return -ENOMEM; // ?
37456 }
37457
37458diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
37459index 480fa6f..947067c 100644
37460--- a/drivers/atm/atmtcp.c
37461+++ b/drivers/atm/atmtcp.c
37462@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
37463 if (vcc->pop) vcc->pop(vcc,skb);
37464 else dev_kfree_skb(skb);
37465 if (dev_data) return 0;
37466- atomic_inc(&vcc->stats->tx_err);
37467+ atomic_inc_unchecked(&vcc->stats->tx_err);
37468 return -ENOLINK;
37469 }
37470 size = skb->len+sizeof(struct atmtcp_hdr);
37471@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
37472 if (!new_skb) {
37473 if (vcc->pop) vcc->pop(vcc,skb);
37474 else dev_kfree_skb(skb);
37475- atomic_inc(&vcc->stats->tx_err);
37476+ atomic_inc_unchecked(&vcc->stats->tx_err);
37477 return -ENOBUFS;
37478 }
37479 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
37480@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
37481 if (vcc->pop) vcc->pop(vcc,skb);
37482 else dev_kfree_skb(skb);
37483 out_vcc->push(out_vcc,new_skb);
37484- atomic_inc(&vcc->stats->tx);
37485- atomic_inc(&out_vcc->stats->rx);
37486+ atomic_inc_unchecked(&vcc->stats->tx);
37487+ atomic_inc_unchecked(&out_vcc->stats->rx);
37488 return 0;
37489 }
37490
37491@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
37492 read_unlock(&vcc_sklist_lock);
37493 if (!out_vcc) {
37494 result = -EUNATCH;
37495- atomic_inc(&vcc->stats->tx_err);
37496+ atomic_inc_unchecked(&vcc->stats->tx_err);
37497 goto done;
37498 }
37499 skb_pull(skb,sizeof(struct atmtcp_hdr));
37500@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
37501 __net_timestamp(new_skb);
37502 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
37503 out_vcc->push(out_vcc,new_skb);
37504- atomic_inc(&vcc->stats->tx);
37505- atomic_inc(&out_vcc->stats->rx);
37506+ atomic_inc_unchecked(&vcc->stats->tx);
37507+ atomic_inc_unchecked(&out_vcc->stats->rx);
37508 done:
37509 if (vcc->pop) vcc->pop(vcc,skb);
37510 else dev_kfree_skb(skb);
37511diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
37512index d65975a..0b87e20 100644
37513--- a/drivers/atm/eni.c
37514+++ b/drivers/atm/eni.c
37515@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
37516 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
37517 vcc->dev->number);
37518 length = 0;
37519- atomic_inc(&vcc->stats->rx_err);
37520+ atomic_inc_unchecked(&vcc->stats->rx_err);
37521 }
37522 else {
37523 length = ATM_CELL_SIZE-1; /* no HEC */
37524@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
37525 size);
37526 }
37527 eff = length = 0;
37528- atomic_inc(&vcc->stats->rx_err);
37529+ atomic_inc_unchecked(&vcc->stats->rx_err);
37530 }
37531 else {
37532 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
37533@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
37534 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
37535 vcc->dev->number,vcc->vci,length,size << 2,descr);
37536 length = eff = 0;
37537- atomic_inc(&vcc->stats->rx_err);
37538+ atomic_inc_unchecked(&vcc->stats->rx_err);
37539 }
37540 }
37541 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
37542@@ -767,7 +767,7 @@ rx_dequeued++;
37543 vcc->push(vcc,skb);
37544 pushed++;
37545 }
37546- atomic_inc(&vcc->stats->rx);
37547+ atomic_inc_unchecked(&vcc->stats->rx);
37548 }
37549 wake_up(&eni_dev->rx_wait);
37550 }
37551@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
37552 PCI_DMA_TODEVICE);
37553 if (vcc->pop) vcc->pop(vcc,skb);
37554 else dev_kfree_skb_irq(skb);
37555- atomic_inc(&vcc->stats->tx);
37556+ atomic_inc_unchecked(&vcc->stats->tx);
37557 wake_up(&eni_dev->tx_wait);
37558 dma_complete++;
37559 }
37560diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
37561index 82f2ae0..f205c02 100644
37562--- a/drivers/atm/firestream.c
37563+++ b/drivers/atm/firestream.c
37564@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
37565 }
37566 }
37567
37568- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37569+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37570
37571 fs_dprintk (FS_DEBUG_TXMEM, "i");
37572 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
37573@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
37574 #endif
37575 skb_put (skb, qe->p1 & 0xffff);
37576 ATM_SKB(skb)->vcc = atm_vcc;
37577- atomic_inc(&atm_vcc->stats->rx);
37578+ atomic_inc_unchecked(&atm_vcc->stats->rx);
37579 __net_timestamp(skb);
37580 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
37581 atm_vcc->push (atm_vcc, skb);
37582@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
37583 kfree (pe);
37584 }
37585 if (atm_vcc)
37586- atomic_inc(&atm_vcc->stats->rx_drop);
37587+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37588 break;
37589 case 0x1f: /* Reassembly abort: no buffers. */
37590 /* Silently increment error counter. */
37591 if (atm_vcc)
37592- atomic_inc(&atm_vcc->stats->rx_drop);
37593+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
37594 break;
37595 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
37596 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
37597diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
37598index d4725fc..2d4ea65 100644
37599--- a/drivers/atm/fore200e.c
37600+++ b/drivers/atm/fore200e.c
37601@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
37602 #endif
37603 /* check error condition */
37604 if (*entry->status & STATUS_ERROR)
37605- atomic_inc(&vcc->stats->tx_err);
37606+ atomic_inc_unchecked(&vcc->stats->tx_err);
37607 else
37608- atomic_inc(&vcc->stats->tx);
37609+ atomic_inc_unchecked(&vcc->stats->tx);
37610 }
37611 }
37612
37613@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37614 if (skb == NULL) {
37615 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
37616
37617- atomic_inc(&vcc->stats->rx_drop);
37618+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37619 return -ENOMEM;
37620 }
37621
37622@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
37623
37624 dev_kfree_skb_any(skb);
37625
37626- atomic_inc(&vcc->stats->rx_drop);
37627+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37628 return -ENOMEM;
37629 }
37630
37631 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37632
37633 vcc->push(vcc, skb);
37634- atomic_inc(&vcc->stats->rx);
37635+ atomic_inc_unchecked(&vcc->stats->rx);
37636
37637 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
37638
37639@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
37640 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
37641 fore200e->atm_dev->number,
37642 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
37643- atomic_inc(&vcc->stats->rx_err);
37644+ atomic_inc_unchecked(&vcc->stats->rx_err);
37645 }
37646 }
37647
37648@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
37649 goto retry_here;
37650 }
37651
37652- atomic_inc(&vcc->stats->tx_err);
37653+ atomic_inc_unchecked(&vcc->stats->tx_err);
37654
37655 fore200e->tx_sat++;
37656 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
37657diff --git a/drivers/atm/he.c b/drivers/atm/he.c
37658index c39702b..785b73b 100644
37659--- a/drivers/atm/he.c
37660+++ b/drivers/atm/he.c
37661@@ -1689,7 +1689,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37662
37663 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
37664 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
37665- atomic_inc(&vcc->stats->rx_drop);
37666+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37667 goto return_host_buffers;
37668 }
37669
37670@@ -1716,7 +1716,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37671 RBRQ_LEN_ERR(he_dev->rbrq_head)
37672 ? "LEN_ERR" : "",
37673 vcc->vpi, vcc->vci);
37674- atomic_inc(&vcc->stats->rx_err);
37675+ atomic_inc_unchecked(&vcc->stats->rx_err);
37676 goto return_host_buffers;
37677 }
37678
37679@@ -1768,7 +1768,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
37680 vcc->push(vcc, skb);
37681 spin_lock(&he_dev->global_lock);
37682
37683- atomic_inc(&vcc->stats->rx);
37684+ atomic_inc_unchecked(&vcc->stats->rx);
37685
37686 return_host_buffers:
37687 ++pdus_assembled;
37688@@ -2094,7 +2094,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
37689 tpd->vcc->pop(tpd->vcc, tpd->skb);
37690 else
37691 dev_kfree_skb_any(tpd->skb);
37692- atomic_inc(&tpd->vcc->stats->tx_err);
37693+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
37694 }
37695 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
37696 return;
37697@@ -2506,7 +2506,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37698 vcc->pop(vcc, skb);
37699 else
37700 dev_kfree_skb_any(skb);
37701- atomic_inc(&vcc->stats->tx_err);
37702+ atomic_inc_unchecked(&vcc->stats->tx_err);
37703 return -EINVAL;
37704 }
37705
37706@@ -2517,7 +2517,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37707 vcc->pop(vcc, skb);
37708 else
37709 dev_kfree_skb_any(skb);
37710- atomic_inc(&vcc->stats->tx_err);
37711+ atomic_inc_unchecked(&vcc->stats->tx_err);
37712 return -EINVAL;
37713 }
37714 #endif
37715@@ -2529,7 +2529,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37716 vcc->pop(vcc, skb);
37717 else
37718 dev_kfree_skb_any(skb);
37719- atomic_inc(&vcc->stats->tx_err);
37720+ atomic_inc_unchecked(&vcc->stats->tx_err);
37721 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37722 return -ENOMEM;
37723 }
37724@@ -2571,7 +2571,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37725 vcc->pop(vcc, skb);
37726 else
37727 dev_kfree_skb_any(skb);
37728- atomic_inc(&vcc->stats->tx_err);
37729+ atomic_inc_unchecked(&vcc->stats->tx_err);
37730 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37731 return -ENOMEM;
37732 }
37733@@ -2602,7 +2602,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
37734 __enqueue_tpd(he_dev, tpd, cid);
37735 spin_unlock_irqrestore(&he_dev->global_lock, flags);
37736
37737- atomic_inc(&vcc->stats->tx);
37738+ atomic_inc_unchecked(&vcc->stats->tx);
37739
37740 return 0;
37741 }
37742diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
37743index 1dc0519..1aadaf7 100644
37744--- a/drivers/atm/horizon.c
37745+++ b/drivers/atm/horizon.c
37746@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
37747 {
37748 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
37749 // VC layer stats
37750- atomic_inc(&vcc->stats->rx);
37751+ atomic_inc_unchecked(&vcc->stats->rx);
37752 __net_timestamp(skb);
37753 // end of our responsibility
37754 vcc->push (vcc, skb);
37755@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
37756 dev->tx_iovec = NULL;
37757
37758 // VC layer stats
37759- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
37760+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
37761
37762 // free the skb
37763 hrz_kfree_skb (skb);
37764diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
37765index 2b24ed0..b3d6acc 100644
37766--- a/drivers/atm/idt77252.c
37767+++ b/drivers/atm/idt77252.c
37768@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
37769 else
37770 dev_kfree_skb(skb);
37771
37772- atomic_inc(&vcc->stats->tx);
37773+ atomic_inc_unchecked(&vcc->stats->tx);
37774 }
37775
37776 atomic_dec(&scq->used);
37777@@ -1072,13 +1072,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37778 if ((sb = dev_alloc_skb(64)) == NULL) {
37779 printk("%s: Can't allocate buffers for aal0.\n",
37780 card->name);
37781- atomic_add(i, &vcc->stats->rx_drop);
37782+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
37783 break;
37784 }
37785 if (!atm_charge(vcc, sb->truesize)) {
37786 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
37787 card->name);
37788- atomic_add(i - 1, &vcc->stats->rx_drop);
37789+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
37790 dev_kfree_skb(sb);
37791 break;
37792 }
37793@@ -1095,7 +1095,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37794 ATM_SKB(sb)->vcc = vcc;
37795 __net_timestamp(sb);
37796 vcc->push(vcc, sb);
37797- atomic_inc(&vcc->stats->rx);
37798+ atomic_inc_unchecked(&vcc->stats->rx);
37799
37800 cell += ATM_CELL_PAYLOAD;
37801 }
37802@@ -1132,13 +1132,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37803 "(CDC: %08x)\n",
37804 card->name, len, rpp->len, readl(SAR_REG_CDC));
37805 recycle_rx_pool_skb(card, rpp);
37806- atomic_inc(&vcc->stats->rx_err);
37807+ atomic_inc_unchecked(&vcc->stats->rx_err);
37808 return;
37809 }
37810 if (stat & SAR_RSQE_CRC) {
37811 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
37812 recycle_rx_pool_skb(card, rpp);
37813- atomic_inc(&vcc->stats->rx_err);
37814+ atomic_inc_unchecked(&vcc->stats->rx_err);
37815 return;
37816 }
37817 if (skb_queue_len(&rpp->queue) > 1) {
37818@@ -1149,7 +1149,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37819 RXPRINTK("%s: Can't alloc RX skb.\n",
37820 card->name);
37821 recycle_rx_pool_skb(card, rpp);
37822- atomic_inc(&vcc->stats->rx_err);
37823+ atomic_inc_unchecked(&vcc->stats->rx_err);
37824 return;
37825 }
37826 if (!atm_charge(vcc, skb->truesize)) {
37827@@ -1168,7 +1168,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37828 __net_timestamp(skb);
37829
37830 vcc->push(vcc, skb);
37831- atomic_inc(&vcc->stats->rx);
37832+ atomic_inc_unchecked(&vcc->stats->rx);
37833
37834 return;
37835 }
37836@@ -1190,7 +1190,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
37837 __net_timestamp(skb);
37838
37839 vcc->push(vcc, skb);
37840- atomic_inc(&vcc->stats->rx);
37841+ atomic_inc_unchecked(&vcc->stats->rx);
37842
37843 if (skb->truesize > SAR_FB_SIZE_3)
37844 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
37845@@ -1301,14 +1301,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
37846 if (vcc->qos.aal != ATM_AAL0) {
37847 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
37848 card->name, vpi, vci);
37849- atomic_inc(&vcc->stats->rx_drop);
37850+ atomic_inc_unchecked(&vcc->stats->rx_drop);
37851 goto drop;
37852 }
37853
37854 if ((sb = dev_alloc_skb(64)) == NULL) {
37855 printk("%s: Can't allocate buffers for AAL0.\n",
37856 card->name);
37857- atomic_inc(&vcc->stats->rx_err);
37858+ atomic_inc_unchecked(&vcc->stats->rx_err);
37859 goto drop;
37860 }
37861
37862@@ -1327,7 +1327,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
37863 ATM_SKB(sb)->vcc = vcc;
37864 __net_timestamp(sb);
37865 vcc->push(vcc, sb);
37866- atomic_inc(&vcc->stats->rx);
37867+ atomic_inc_unchecked(&vcc->stats->rx);
37868
37869 drop:
37870 skb_pull(queue, 64);
37871@@ -1952,13 +1952,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37872
37873 if (vc == NULL) {
37874 printk("%s: NULL connection in send().\n", card->name);
37875- atomic_inc(&vcc->stats->tx_err);
37876+ atomic_inc_unchecked(&vcc->stats->tx_err);
37877 dev_kfree_skb(skb);
37878 return -EINVAL;
37879 }
37880 if (!test_bit(VCF_TX, &vc->flags)) {
37881 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
37882- atomic_inc(&vcc->stats->tx_err);
37883+ atomic_inc_unchecked(&vcc->stats->tx_err);
37884 dev_kfree_skb(skb);
37885 return -EINVAL;
37886 }
37887@@ -1970,14 +1970,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37888 break;
37889 default:
37890 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
37891- atomic_inc(&vcc->stats->tx_err);
37892+ atomic_inc_unchecked(&vcc->stats->tx_err);
37893 dev_kfree_skb(skb);
37894 return -EINVAL;
37895 }
37896
37897 if (skb_shinfo(skb)->nr_frags != 0) {
37898 printk("%s: No scatter-gather yet.\n", card->name);
37899- atomic_inc(&vcc->stats->tx_err);
37900+ atomic_inc_unchecked(&vcc->stats->tx_err);
37901 dev_kfree_skb(skb);
37902 return -EINVAL;
37903 }
37904@@ -1985,7 +1985,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
37905
37906 err = queue_skb(card, vc, skb, oam);
37907 if (err) {
37908- atomic_inc(&vcc->stats->tx_err);
37909+ atomic_inc_unchecked(&vcc->stats->tx_err);
37910 dev_kfree_skb(skb);
37911 return err;
37912 }
37913@@ -2008,7 +2008,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
37914 skb = dev_alloc_skb(64);
37915 if (!skb) {
37916 printk("%s: Out of memory in send_oam().\n", card->name);
37917- atomic_inc(&vcc->stats->tx_err);
37918+ atomic_inc_unchecked(&vcc->stats->tx_err);
37919 return -ENOMEM;
37920 }
37921 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
37922diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
37923index 4217f29..88f547a 100644
37924--- a/drivers/atm/iphase.c
37925+++ b/drivers/atm/iphase.c
37926@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
37927 status = (u_short) (buf_desc_ptr->desc_mode);
37928 if (status & (RX_CER | RX_PTE | RX_OFL))
37929 {
37930- atomic_inc(&vcc->stats->rx_err);
37931+ atomic_inc_unchecked(&vcc->stats->rx_err);
37932 IF_ERR(printk("IA: bad packet, dropping it");)
37933 if (status & RX_CER) {
37934 IF_ERR(printk(" cause: packet CRC error\n");)
37935@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
37936 len = dma_addr - buf_addr;
37937 if (len > iadev->rx_buf_sz) {
37938 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
37939- atomic_inc(&vcc->stats->rx_err);
37940+ atomic_inc_unchecked(&vcc->stats->rx_err);
37941 goto out_free_desc;
37942 }
37943
37944@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37945 ia_vcc = INPH_IA_VCC(vcc);
37946 if (ia_vcc == NULL)
37947 {
37948- atomic_inc(&vcc->stats->rx_err);
37949+ atomic_inc_unchecked(&vcc->stats->rx_err);
37950 atm_return(vcc, skb->truesize);
37951 dev_kfree_skb_any(skb);
37952 goto INCR_DLE;
37953@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37954 if ((length > iadev->rx_buf_sz) || (length >
37955 (skb->len - sizeof(struct cpcs_trailer))))
37956 {
37957- atomic_inc(&vcc->stats->rx_err);
37958+ atomic_inc_unchecked(&vcc->stats->rx_err);
37959 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
37960 length, skb->len);)
37961 atm_return(vcc, skb->truesize);
37962@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
37963
37964 IF_RX(printk("rx_dle_intr: skb push");)
37965 vcc->push(vcc,skb);
37966- atomic_inc(&vcc->stats->rx);
37967+ atomic_inc_unchecked(&vcc->stats->rx);
37968 iadev->rx_pkt_cnt++;
37969 }
37970 INCR_DLE:
37971@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
37972 {
37973 struct k_sonet_stats *stats;
37974 stats = &PRIV(_ia_dev[board])->sonet_stats;
37975- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
37976- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
37977- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
37978- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
37979- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
37980- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
37981- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
37982- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
37983- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
37984+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
37985+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
37986+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
37987+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
37988+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
37989+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
37990+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
37991+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
37992+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
37993 }
37994 ia_cmds.status = 0;
37995 break;
37996@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
37997 if ((desc == 0) || (desc > iadev->num_tx_desc))
37998 {
37999 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
38000- atomic_inc(&vcc->stats->tx);
38001+ atomic_inc_unchecked(&vcc->stats->tx);
38002 if (vcc->pop)
38003 vcc->pop(vcc, skb);
38004 else
38005@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
38006 ATM_DESC(skb) = vcc->vci;
38007 skb_queue_tail(&iadev->tx_dma_q, skb);
38008
38009- atomic_inc(&vcc->stats->tx);
38010+ atomic_inc_unchecked(&vcc->stats->tx);
38011 iadev->tx_pkt_cnt++;
38012 /* Increment transaction counter */
38013 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
38014
38015 #if 0
38016 /* add flow control logic */
38017- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
38018+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
38019 if (iavcc->vc_desc_cnt > 10) {
38020 vcc->tx_quota = vcc->tx_quota * 3 / 4;
38021 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
38022diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
38023index fa7d7019..1e404c7 100644
38024--- a/drivers/atm/lanai.c
38025+++ b/drivers/atm/lanai.c
38026@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
38027 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
38028 lanai_endtx(lanai, lvcc);
38029 lanai_free_skb(lvcc->tx.atmvcc, skb);
38030- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
38031+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
38032 }
38033
38034 /* Try to fill the buffer - don't call unless there is backlog */
38035@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
38036 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
38037 __net_timestamp(skb);
38038 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
38039- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
38040+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
38041 out:
38042 lvcc->rx.buf.ptr = end;
38043 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
38044@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38045 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
38046 "vcc %d\n", lanai->number, (unsigned int) s, vci);
38047 lanai->stats.service_rxnotaal5++;
38048- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38049+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38050 return 0;
38051 }
38052 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
38053@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38054 int bytes;
38055 read_unlock(&vcc_sklist_lock);
38056 DPRINTK("got trashed rx pdu on vci %d\n", vci);
38057- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38058+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38059 lvcc->stats.x.aal5.service_trash++;
38060 bytes = (SERVICE_GET_END(s) * 16) -
38061 (((unsigned long) lvcc->rx.buf.ptr) -
38062@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38063 }
38064 if (s & SERVICE_STREAM) {
38065 read_unlock(&vcc_sklist_lock);
38066- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38067+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38068 lvcc->stats.x.aal5.service_stream++;
38069 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
38070 "PDU on VCI %d!\n", lanai->number, vci);
38071@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
38072 return 0;
38073 }
38074 DPRINTK("got rx crc error on vci %d\n", vci);
38075- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
38076+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
38077 lvcc->stats.x.aal5.service_rxcrc++;
38078 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
38079 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
38080diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
38081index 9988ac9..7c52585 100644
38082--- a/drivers/atm/nicstar.c
38083+++ b/drivers/atm/nicstar.c
38084@@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38085 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
38086 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
38087 card->index);
38088- atomic_inc(&vcc->stats->tx_err);
38089+ atomic_inc_unchecked(&vcc->stats->tx_err);
38090 dev_kfree_skb_any(skb);
38091 return -EINVAL;
38092 }
38093@@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38094 if (!vc->tx) {
38095 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
38096 card->index);
38097- atomic_inc(&vcc->stats->tx_err);
38098+ atomic_inc_unchecked(&vcc->stats->tx_err);
38099 dev_kfree_skb_any(skb);
38100 return -EINVAL;
38101 }
38102@@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38103 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
38104 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
38105 card->index);
38106- atomic_inc(&vcc->stats->tx_err);
38107+ atomic_inc_unchecked(&vcc->stats->tx_err);
38108 dev_kfree_skb_any(skb);
38109 return -EINVAL;
38110 }
38111
38112 if (skb_shinfo(skb)->nr_frags != 0) {
38113 printk("nicstar%d: No scatter-gather yet.\n", card->index);
38114- atomic_inc(&vcc->stats->tx_err);
38115+ atomic_inc_unchecked(&vcc->stats->tx_err);
38116 dev_kfree_skb_any(skb);
38117 return -EINVAL;
38118 }
38119@@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
38120 }
38121
38122 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
38123- atomic_inc(&vcc->stats->tx_err);
38124+ atomic_inc_unchecked(&vcc->stats->tx_err);
38125 dev_kfree_skb_any(skb);
38126 return -EIO;
38127 }
38128- atomic_inc(&vcc->stats->tx);
38129+ atomic_inc_unchecked(&vcc->stats->tx);
38130
38131 return 0;
38132 }
38133@@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38134 printk
38135 ("nicstar%d: Can't allocate buffers for aal0.\n",
38136 card->index);
38137- atomic_add(i, &vcc->stats->rx_drop);
38138+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
38139 break;
38140 }
38141 if (!atm_charge(vcc, sb->truesize)) {
38142 RXPRINTK
38143 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
38144 card->index);
38145- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
38146+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
38147 dev_kfree_skb_any(sb);
38148 break;
38149 }
38150@@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38151 ATM_SKB(sb)->vcc = vcc;
38152 __net_timestamp(sb);
38153 vcc->push(vcc, sb);
38154- atomic_inc(&vcc->stats->rx);
38155+ atomic_inc_unchecked(&vcc->stats->rx);
38156 cell += ATM_CELL_PAYLOAD;
38157 }
38158
38159@@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38160 if (iovb == NULL) {
38161 printk("nicstar%d: Out of iovec buffers.\n",
38162 card->index);
38163- atomic_inc(&vcc->stats->rx_drop);
38164+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38165 recycle_rx_buf(card, skb);
38166 return;
38167 }
38168@@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38169 small or large buffer itself. */
38170 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
38171 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
38172- atomic_inc(&vcc->stats->rx_err);
38173+ atomic_inc_unchecked(&vcc->stats->rx_err);
38174 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38175 NS_MAX_IOVECS);
38176 NS_PRV_IOVCNT(iovb) = 0;
38177@@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38178 ("nicstar%d: Expected a small buffer, and this is not one.\n",
38179 card->index);
38180 which_list(card, skb);
38181- atomic_inc(&vcc->stats->rx_err);
38182+ atomic_inc_unchecked(&vcc->stats->rx_err);
38183 recycle_rx_buf(card, skb);
38184 vc->rx_iov = NULL;
38185 recycle_iov_buf(card, iovb);
38186@@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38187 ("nicstar%d: Expected a large buffer, and this is not one.\n",
38188 card->index);
38189 which_list(card, skb);
38190- atomic_inc(&vcc->stats->rx_err);
38191+ atomic_inc_unchecked(&vcc->stats->rx_err);
38192 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38193 NS_PRV_IOVCNT(iovb));
38194 vc->rx_iov = NULL;
38195@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38196 printk(" - PDU size mismatch.\n");
38197 else
38198 printk(".\n");
38199- atomic_inc(&vcc->stats->rx_err);
38200+ atomic_inc_unchecked(&vcc->stats->rx_err);
38201 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
38202 NS_PRV_IOVCNT(iovb));
38203 vc->rx_iov = NULL;
38204@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38205 /* skb points to a small buffer */
38206 if (!atm_charge(vcc, skb->truesize)) {
38207 push_rxbufs(card, skb);
38208- atomic_inc(&vcc->stats->rx_drop);
38209+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38210 } else {
38211 skb_put(skb, len);
38212 dequeue_sm_buf(card, skb);
38213@@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38214 ATM_SKB(skb)->vcc = vcc;
38215 __net_timestamp(skb);
38216 vcc->push(vcc, skb);
38217- atomic_inc(&vcc->stats->rx);
38218+ atomic_inc_unchecked(&vcc->stats->rx);
38219 }
38220 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
38221 struct sk_buff *sb;
38222@@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38223 if (len <= NS_SMBUFSIZE) {
38224 if (!atm_charge(vcc, sb->truesize)) {
38225 push_rxbufs(card, sb);
38226- atomic_inc(&vcc->stats->rx_drop);
38227+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38228 } else {
38229 skb_put(sb, len);
38230 dequeue_sm_buf(card, sb);
38231@@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38232 ATM_SKB(sb)->vcc = vcc;
38233 __net_timestamp(sb);
38234 vcc->push(vcc, sb);
38235- atomic_inc(&vcc->stats->rx);
38236+ atomic_inc_unchecked(&vcc->stats->rx);
38237 }
38238
38239 push_rxbufs(card, skb);
38240@@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38241
38242 if (!atm_charge(vcc, skb->truesize)) {
38243 push_rxbufs(card, skb);
38244- atomic_inc(&vcc->stats->rx_drop);
38245+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38246 } else {
38247 dequeue_lg_buf(card, skb);
38248 #ifdef NS_USE_DESTRUCTORS
38249@@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38250 ATM_SKB(skb)->vcc = vcc;
38251 __net_timestamp(skb);
38252 vcc->push(vcc, skb);
38253- atomic_inc(&vcc->stats->rx);
38254+ atomic_inc_unchecked(&vcc->stats->rx);
38255 }
38256
38257 push_rxbufs(card, sb);
38258@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38259 printk
38260 ("nicstar%d: Out of huge buffers.\n",
38261 card->index);
38262- atomic_inc(&vcc->stats->rx_drop);
38263+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38264 recycle_iovec_rx_bufs(card,
38265 (struct iovec *)
38266 iovb->data,
38267@@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38268 card->hbpool.count++;
38269 } else
38270 dev_kfree_skb_any(hb);
38271- atomic_inc(&vcc->stats->rx_drop);
38272+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38273 } else {
38274 /* Copy the small buffer to the huge buffer */
38275 sb = (struct sk_buff *)iov->iov_base;
38276@@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
38277 #endif /* NS_USE_DESTRUCTORS */
38278 __net_timestamp(hb);
38279 vcc->push(vcc, hb);
38280- atomic_inc(&vcc->stats->rx);
38281+ atomic_inc_unchecked(&vcc->stats->rx);
38282 }
38283 }
38284
38285diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
38286index 7652e8d..db45069 100644
38287--- a/drivers/atm/solos-pci.c
38288+++ b/drivers/atm/solos-pci.c
38289@@ -838,7 +838,7 @@ static void solos_bh(unsigned long card_arg)
38290 }
38291 atm_charge(vcc, skb->truesize);
38292 vcc->push(vcc, skb);
38293- atomic_inc(&vcc->stats->rx);
38294+ atomic_inc_unchecked(&vcc->stats->rx);
38295 break;
38296
38297 case PKT_STATUS:
38298@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
38299 vcc = SKB_CB(oldskb)->vcc;
38300
38301 if (vcc) {
38302- atomic_inc(&vcc->stats->tx);
38303+ atomic_inc_unchecked(&vcc->stats->tx);
38304 solos_pop(vcc, oldskb);
38305 } else {
38306 dev_kfree_skb_irq(oldskb);
38307diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
38308index 0215934..ce9f5b1 100644
38309--- a/drivers/atm/suni.c
38310+++ b/drivers/atm/suni.c
38311@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
38312
38313
38314 #define ADD_LIMITED(s,v) \
38315- atomic_add((v),&stats->s); \
38316- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
38317+ atomic_add_unchecked((v),&stats->s); \
38318+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
38319
38320
38321 static void suni_hz(unsigned long from_timer)
38322diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
38323index 5120a96..e2572bd 100644
38324--- a/drivers/atm/uPD98402.c
38325+++ b/drivers/atm/uPD98402.c
38326@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
38327 struct sonet_stats tmp;
38328 int error = 0;
38329
38330- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
38331+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
38332 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
38333 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
38334 if (zero && !error) {
38335@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
38336
38337
38338 #define ADD_LIMITED(s,v) \
38339- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
38340- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
38341- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
38342+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
38343+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
38344+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
38345
38346
38347 static void stat_event(struct atm_dev *dev)
38348@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
38349 if (reason & uPD98402_INT_PFM) stat_event(dev);
38350 if (reason & uPD98402_INT_PCO) {
38351 (void) GET(PCOCR); /* clear interrupt cause */
38352- atomic_add(GET(HECCT),
38353+ atomic_add_unchecked(GET(HECCT),
38354 &PRIV(dev)->sonet_stats.uncorr_hcs);
38355 }
38356 if ((reason & uPD98402_INT_RFO) &&
38357@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
38358 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
38359 uPD98402_INT_LOS),PIMR); /* enable them */
38360 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
38361- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
38362- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
38363- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
38364+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
38365+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
38366+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
38367 return 0;
38368 }
38369
38370diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
38371index 969c3c2..9b72956 100644
38372--- a/drivers/atm/zatm.c
38373+++ b/drivers/atm/zatm.c
38374@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
38375 }
38376 if (!size) {
38377 dev_kfree_skb_irq(skb);
38378- if (vcc) atomic_inc(&vcc->stats->rx_err);
38379+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
38380 continue;
38381 }
38382 if (!atm_charge(vcc,skb->truesize)) {
38383@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
38384 skb->len = size;
38385 ATM_SKB(skb)->vcc = vcc;
38386 vcc->push(vcc,skb);
38387- atomic_inc(&vcc->stats->rx);
38388+ atomic_inc_unchecked(&vcc->stats->rx);
38389 }
38390 zout(pos & 0xffff,MTA(mbx));
38391 #if 0 /* probably a stupid idea */
38392@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
38393 skb_queue_head(&zatm_vcc->backlog,skb);
38394 break;
38395 }
38396- atomic_inc(&vcc->stats->tx);
38397+ atomic_inc_unchecked(&vcc->stats->tx);
38398 wake_up(&zatm_vcc->tx_wait);
38399 }
38400
38401diff --git a/drivers/base/bus.c b/drivers/base/bus.c
38402index 83e910a..b224a73 100644
38403--- a/drivers/base/bus.c
38404+++ b/drivers/base/bus.c
38405@@ -1124,7 +1124,7 @@ int subsys_interface_register(struct subsys_interface *sif)
38406 return -EINVAL;
38407
38408 mutex_lock(&subsys->p->mutex);
38409- list_add_tail(&sif->node, &subsys->p->interfaces);
38410+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
38411 if (sif->add_dev) {
38412 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
38413 while ((dev = subsys_dev_iter_next(&iter)))
38414@@ -1149,7 +1149,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
38415 subsys = sif->subsys;
38416
38417 mutex_lock(&subsys->p->mutex);
38418- list_del_init(&sif->node);
38419+ pax_list_del_init((struct list_head *)&sif->node);
38420 if (sif->remove_dev) {
38421 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
38422 while ((dev = subsys_dev_iter_next(&iter)))
38423diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
38424index 25798db..15f130e 100644
38425--- a/drivers/base/devtmpfs.c
38426+++ b/drivers/base/devtmpfs.c
38427@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
38428 if (!thread)
38429 return 0;
38430
38431- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
38432+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
38433 if (err)
38434 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
38435 else
38436@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
38437 *err = sys_unshare(CLONE_NEWNS);
38438 if (*err)
38439 goto out;
38440- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
38441+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
38442 if (*err)
38443 goto out;
38444- sys_chdir("/.."); /* will traverse into overmounted root */
38445- sys_chroot(".");
38446+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
38447+ sys_chroot((char __force_user *)".");
38448 complete(&setup_done);
38449 while (1) {
38450 spin_lock(&req_lock);
38451diff --git a/drivers/base/node.c b/drivers/base/node.c
38452index d51c49c..28908df 100644
38453--- a/drivers/base/node.c
38454+++ b/drivers/base/node.c
38455@@ -623,7 +623,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
38456 struct node_attr {
38457 struct device_attribute attr;
38458 enum node_states state;
38459-};
38460+} __do_const;
38461
38462 static ssize_t show_node_state(struct device *dev,
38463 struct device_attribute *attr, char *buf)
38464diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
38465index eee55c1..b8c9393 100644
38466--- a/drivers/base/power/domain.c
38467+++ b/drivers/base/power/domain.c
38468@@ -1821,9 +1821,9 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
38469
38470 if (dev->power.subsys_data->domain_data) {
38471 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
38472- gpd_data->ops = (struct gpd_dev_ops){ NULL };
38473+ memset(&gpd_data->ops, 0, sizeof(gpd_data->ops));
38474 if (clear_td)
38475- gpd_data->td = (struct gpd_timing_data){ 0 };
38476+ memset(&gpd_data->td, 0, sizeof(gpd_data->td));
38477
38478 if (--gpd_data->refcount == 0) {
38479 dev->power.subsys_data->domain_data = NULL;
38480@@ -1862,7 +1862,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
38481 {
38482 struct cpuidle_driver *cpuidle_drv;
38483 struct gpd_cpu_data *cpu_data;
38484- struct cpuidle_state *idle_state;
38485+ cpuidle_state_no_const *idle_state;
38486 int ret = 0;
38487
38488 if (IS_ERR_OR_NULL(genpd) || state < 0)
38489@@ -1930,7 +1930,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
38490 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
38491 {
38492 struct gpd_cpu_data *cpu_data;
38493- struct cpuidle_state *idle_state;
38494+ cpuidle_state_no_const *idle_state;
38495 int ret = 0;
38496
38497 if (IS_ERR_OR_NULL(genpd))
38498diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
38499index 95b181d1..c4f0e19 100644
38500--- a/drivers/base/power/sysfs.c
38501+++ b/drivers/base/power/sysfs.c
38502@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
38503 return -EIO;
38504 }
38505 }
38506- return sprintf(buf, p);
38507+ return sprintf(buf, "%s", p);
38508 }
38509
38510 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
38511diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
38512index eb1bd2e..2667d3a 100644
38513--- a/drivers/base/power/wakeup.c
38514+++ b/drivers/base/power/wakeup.c
38515@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
38516 * They need to be modified together atomically, so it's better to use one
38517 * atomic variable to hold them both.
38518 */
38519-static atomic_t combined_event_count = ATOMIC_INIT(0);
38520+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
38521
38522 #define IN_PROGRESS_BITS (sizeof(int) * 4)
38523 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
38524
38525 static void split_counters(unsigned int *cnt, unsigned int *inpr)
38526 {
38527- unsigned int comb = atomic_read(&combined_event_count);
38528+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
38529
38530 *cnt = (comb >> IN_PROGRESS_BITS);
38531 *inpr = comb & MAX_IN_PROGRESS;
38532@@ -401,7 +401,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
38533 ws->start_prevent_time = ws->last_time;
38534
38535 /* Increment the counter of events in progress. */
38536- cec = atomic_inc_return(&combined_event_count);
38537+ cec = atomic_inc_return_unchecked(&combined_event_count);
38538
38539 trace_wakeup_source_activate(ws->name, cec);
38540 }
38541@@ -527,7 +527,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
38542 * Increment the counter of registered wakeup events and decrement the
38543 * couter of wakeup events in progress simultaneously.
38544 */
38545- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
38546+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
38547 trace_wakeup_source_deactivate(ws->name, cec);
38548
38549 split_counters(&cnt, &inpr);
38550diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
38551index dbb8350..4762f4c 100644
38552--- a/drivers/base/syscore.c
38553+++ b/drivers/base/syscore.c
38554@@ -22,7 +22,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
38555 void register_syscore_ops(struct syscore_ops *ops)
38556 {
38557 mutex_lock(&syscore_ops_lock);
38558- list_add_tail(&ops->node, &syscore_ops_list);
38559+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
38560 mutex_unlock(&syscore_ops_lock);
38561 }
38562 EXPORT_SYMBOL_GPL(register_syscore_ops);
38563@@ -34,7 +34,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
38564 void unregister_syscore_ops(struct syscore_ops *ops)
38565 {
38566 mutex_lock(&syscore_ops_lock);
38567- list_del(&ops->node);
38568+ pax_list_del((struct list_head *)&ops->node);
38569 mutex_unlock(&syscore_ops_lock);
38570 }
38571 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
38572diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
38573index ff20f19..018f1da 100644
38574--- a/drivers/block/cciss.c
38575+++ b/drivers/block/cciss.c
38576@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
38577 while (!list_empty(&h->reqQ)) {
38578 c = list_entry(h->reqQ.next, CommandList_struct, list);
38579 /* can't do anything if fifo is full */
38580- if ((h->access.fifo_full(h))) {
38581+ if ((h->access->fifo_full(h))) {
38582 dev_warn(&h->pdev->dev, "fifo full\n");
38583 break;
38584 }
38585@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
38586 h->Qdepth--;
38587
38588 /* Tell the controller execute command */
38589- h->access.submit_command(h, c);
38590+ h->access->submit_command(h, c);
38591
38592 /* Put job onto the completed Q */
38593 addQ(&h->cmpQ, c);
38594@@ -3444,17 +3444,17 @@ startio:
38595
38596 static inline unsigned long get_next_completion(ctlr_info_t *h)
38597 {
38598- return h->access.command_completed(h);
38599+ return h->access->command_completed(h);
38600 }
38601
38602 static inline int interrupt_pending(ctlr_info_t *h)
38603 {
38604- return h->access.intr_pending(h);
38605+ return h->access->intr_pending(h);
38606 }
38607
38608 static inline long interrupt_not_for_us(ctlr_info_t *h)
38609 {
38610- return ((h->access.intr_pending(h) == 0) ||
38611+ return ((h->access->intr_pending(h) == 0) ||
38612 (h->interrupts_enabled == 0));
38613 }
38614
38615@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
38616 u32 a;
38617
38618 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
38619- return h->access.command_completed(h);
38620+ return h->access->command_completed(h);
38621
38622 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
38623 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
38624@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
38625 trans_support & CFGTBL_Trans_use_short_tags);
38626
38627 /* Change the access methods to the performant access methods */
38628- h->access = SA5_performant_access;
38629+ h->access = &SA5_performant_access;
38630 h->transMethod = CFGTBL_Trans_Performant;
38631
38632 return;
38633@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
38634 if (prod_index < 0)
38635 return -ENODEV;
38636 h->product_name = products[prod_index].product_name;
38637- h->access = *(products[prod_index].access);
38638+ h->access = products[prod_index].access;
38639
38640 if (cciss_board_disabled(h)) {
38641 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
38642@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
38643 }
38644
38645 /* make sure the board interrupts are off */
38646- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38647+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38648 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
38649 if (rc)
38650 goto clean2;
38651@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
38652 * fake ones to scoop up any residual completions.
38653 */
38654 spin_lock_irqsave(&h->lock, flags);
38655- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38656+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38657 spin_unlock_irqrestore(&h->lock, flags);
38658 free_irq(h->intr[h->intr_mode], h);
38659 rc = cciss_request_irq(h, cciss_msix_discard_completions,
38660@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
38661 dev_info(&h->pdev->dev, "Board READY.\n");
38662 dev_info(&h->pdev->dev,
38663 "Waiting for stale completions to drain.\n");
38664- h->access.set_intr_mask(h, CCISS_INTR_ON);
38665+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38666 msleep(10000);
38667- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38668+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38669
38670 rc = controller_reset_failed(h->cfgtable);
38671 if (rc)
38672@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
38673 cciss_scsi_setup(h);
38674
38675 /* Turn the interrupts on so we can service requests */
38676- h->access.set_intr_mask(h, CCISS_INTR_ON);
38677+ h->access->set_intr_mask(h, CCISS_INTR_ON);
38678
38679 /* Get the firmware version */
38680 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
38681@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
38682 kfree(flush_buf);
38683 if (return_code != IO_OK)
38684 dev_warn(&h->pdev->dev, "Error flushing cache\n");
38685- h->access.set_intr_mask(h, CCISS_INTR_OFF);
38686+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
38687 free_irq(h->intr[h->intr_mode], h);
38688 }
38689
38690diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
38691index 7fda30e..2f27946 100644
38692--- a/drivers/block/cciss.h
38693+++ b/drivers/block/cciss.h
38694@@ -101,7 +101,7 @@ struct ctlr_info
38695 /* information about each logical volume */
38696 drive_info_struct *drv[CISS_MAX_LUN];
38697
38698- struct access_method access;
38699+ struct access_method *access;
38700
38701 /* queue and queue Info */
38702 struct list_head reqQ;
38703@@ -402,27 +402,27 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
38704 }
38705
38706 static struct access_method SA5_access = {
38707- SA5_submit_command,
38708- SA5_intr_mask,
38709- SA5_fifo_full,
38710- SA5_intr_pending,
38711- SA5_completed,
38712+ .submit_command = SA5_submit_command,
38713+ .set_intr_mask = SA5_intr_mask,
38714+ .fifo_full = SA5_fifo_full,
38715+ .intr_pending = SA5_intr_pending,
38716+ .command_completed = SA5_completed,
38717 };
38718
38719 static struct access_method SA5B_access = {
38720- SA5_submit_command,
38721- SA5B_intr_mask,
38722- SA5_fifo_full,
38723- SA5B_intr_pending,
38724- SA5_completed,
38725+ .submit_command = SA5_submit_command,
38726+ .set_intr_mask = SA5B_intr_mask,
38727+ .fifo_full = SA5_fifo_full,
38728+ .intr_pending = SA5B_intr_pending,
38729+ .command_completed = SA5_completed,
38730 };
38731
38732 static struct access_method SA5_performant_access = {
38733- SA5_submit_command,
38734- SA5_performant_intr_mask,
38735- SA5_fifo_full,
38736- SA5_performant_intr_pending,
38737- SA5_performant_completed,
38738+ .submit_command = SA5_submit_command,
38739+ .set_intr_mask = SA5_performant_intr_mask,
38740+ .fifo_full = SA5_fifo_full,
38741+ .intr_pending = SA5_performant_intr_pending,
38742+ .command_completed = SA5_performant_completed,
38743 };
38744
38745 struct board_type {
38746diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
38747index 2b94403..fd6ad1f 100644
38748--- a/drivers/block/cpqarray.c
38749+++ b/drivers/block/cpqarray.c
38750@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38751 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
38752 goto Enomem4;
38753 }
38754- hba[i]->access.set_intr_mask(hba[i], 0);
38755+ hba[i]->access->set_intr_mask(hba[i], 0);
38756 if (request_irq(hba[i]->intr, do_ida_intr,
38757 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
38758 {
38759@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
38760 add_timer(&hba[i]->timer);
38761
38762 /* Enable IRQ now that spinlock and rate limit timer are set up */
38763- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38764+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
38765
38766 for(j=0; j<NWD; j++) {
38767 struct gendisk *disk = ida_gendisk[i][j];
38768@@ -694,7 +694,7 @@ DBGINFO(
38769 for(i=0; i<NR_PRODUCTS; i++) {
38770 if (board_id == products[i].board_id) {
38771 c->product_name = products[i].product_name;
38772- c->access = *(products[i].access);
38773+ c->access = products[i].access;
38774 break;
38775 }
38776 }
38777@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
38778 hba[ctlr]->intr = intr;
38779 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
38780 hba[ctlr]->product_name = products[j].product_name;
38781- hba[ctlr]->access = *(products[j].access);
38782+ hba[ctlr]->access = products[j].access;
38783 hba[ctlr]->ctlr = ctlr;
38784 hba[ctlr]->board_id = board_id;
38785 hba[ctlr]->pci_dev = NULL; /* not PCI */
38786@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
38787
38788 while((c = h->reqQ) != NULL) {
38789 /* Can't do anything if we're busy */
38790- if (h->access.fifo_full(h) == 0)
38791+ if (h->access->fifo_full(h) == 0)
38792 return;
38793
38794 /* Get the first entry from the request Q */
38795@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
38796 h->Qdepth--;
38797
38798 /* Tell the controller to do our bidding */
38799- h->access.submit_command(h, c);
38800+ h->access->submit_command(h, c);
38801
38802 /* Get onto the completion Q */
38803 addQ(&h->cmpQ, c);
38804@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38805 unsigned long flags;
38806 __u32 a,a1;
38807
38808- istat = h->access.intr_pending(h);
38809+ istat = h->access->intr_pending(h);
38810 /* Is this interrupt for us? */
38811 if (istat == 0)
38812 return IRQ_NONE;
38813@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
38814 */
38815 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
38816 if (istat & FIFO_NOT_EMPTY) {
38817- while((a = h->access.command_completed(h))) {
38818+ while((a = h->access->command_completed(h))) {
38819 a1 = a; a &= ~3;
38820 if ((c = h->cmpQ) == NULL)
38821 {
38822@@ -1448,11 +1448,11 @@ static int sendcmd(
38823 /*
38824 * Disable interrupt
38825 */
38826- info_p->access.set_intr_mask(info_p, 0);
38827+ info_p->access->set_intr_mask(info_p, 0);
38828 /* Make sure there is room in the command FIFO */
38829 /* Actually it should be completely empty at this time. */
38830 for (i = 200000; i > 0; i--) {
38831- temp = info_p->access.fifo_full(info_p);
38832+ temp = info_p->access->fifo_full(info_p);
38833 if (temp != 0) {
38834 break;
38835 }
38836@@ -1465,7 +1465,7 @@ DBG(
38837 /*
38838 * Send the cmd
38839 */
38840- info_p->access.submit_command(info_p, c);
38841+ info_p->access->submit_command(info_p, c);
38842 complete = pollcomplete(ctlr);
38843
38844 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
38845@@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
38846 * we check the new geometry. Then turn interrupts back on when
38847 * we're done.
38848 */
38849- host->access.set_intr_mask(host, 0);
38850+ host->access->set_intr_mask(host, 0);
38851 getgeometry(ctlr);
38852- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
38853+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
38854
38855 for(i=0; i<NWD; i++) {
38856 struct gendisk *disk = ida_gendisk[ctlr][i];
38857@@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
38858 /* Wait (up to 2 seconds) for a command to complete */
38859
38860 for (i = 200000; i > 0; i--) {
38861- done = hba[ctlr]->access.command_completed(hba[ctlr]);
38862+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
38863 if (done == 0) {
38864 udelay(10); /* a short fixed delay */
38865 } else
38866diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
38867index be73e9d..7fbf140 100644
38868--- a/drivers/block/cpqarray.h
38869+++ b/drivers/block/cpqarray.h
38870@@ -99,7 +99,7 @@ struct ctlr_info {
38871 drv_info_t drv[NWD];
38872 struct proc_dir_entry *proc;
38873
38874- struct access_method access;
38875+ struct access_method *access;
38876
38877 cmdlist_t *reqQ;
38878 cmdlist_t *cmpQ;
38879diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
38880index 426c97a..8c58607 100644
38881--- a/drivers/block/drbd/drbd_bitmap.c
38882+++ b/drivers/block/drbd/drbd_bitmap.c
38883@@ -1036,7 +1036,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
38884 submit_bio(rw, bio);
38885 /* this should not count as user activity and cause the
38886 * resync to throttle -- see drbd_rs_should_slow_down(). */
38887- atomic_add(len >> 9, &device->rs_sect_ev);
38888+ atomic_add_unchecked(len >> 9, &device->rs_sect_ev);
38889 }
38890 }
38891
38892diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
38893index 1a00001..c0d4253 100644
38894--- a/drivers/block/drbd/drbd_int.h
38895+++ b/drivers/block/drbd/drbd_int.h
38896@@ -387,7 +387,7 @@ struct drbd_epoch {
38897 struct drbd_connection *connection;
38898 struct list_head list;
38899 unsigned int barrier_nr;
38900- atomic_t epoch_size; /* increased on every request added. */
38901+ atomic_unchecked_t epoch_size; /* increased on every request added. */
38902 atomic_t active; /* increased on every req. added, and dec on every finished. */
38903 unsigned long flags;
38904 };
38905@@ -948,7 +948,7 @@ struct drbd_device {
38906 unsigned int al_tr_number;
38907 int al_tr_cycle;
38908 wait_queue_head_t seq_wait;
38909- atomic_t packet_seq;
38910+ atomic_unchecked_t packet_seq;
38911 unsigned int peer_seq;
38912 spinlock_t peer_seq_lock;
38913 unsigned long comm_bm_set; /* communicated number of set bits. */
38914@@ -957,8 +957,8 @@ struct drbd_device {
38915 struct mutex own_state_mutex;
38916 struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
38917 char congestion_reason; /* Why we where congested... */
38918- atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38919- atomic_t rs_sect_ev; /* for submitted resync data rate, both */
38920+ atomic_unchecked_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
38921+ atomic_unchecked_t rs_sect_ev; /* for submitted resync data rate, both */
38922 int rs_last_sect_ev; /* counter to compare with */
38923 int rs_last_events; /* counter of read or write "events" (unit sectors)
38924 * on the lower level device when we last looked. */
38925@@ -1569,7 +1569,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
38926 char __user *uoptval;
38927 int err;
38928
38929- uoptval = (char __user __force *)optval;
38930+ uoptval = (char __force_user *)optval;
38931
38932 set_fs(KERNEL_DS);
38933 if (level == SOL_SOCKET)
38934diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
38935index 04a14e0..5b8f0aa 100644
38936--- a/drivers/block/drbd/drbd_interval.c
38937+++ b/drivers/block/drbd/drbd_interval.c
38938@@ -67,9 +67,9 @@ static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
38939 }
38940
38941 static const struct rb_augment_callbacks augment_callbacks = {
38942- augment_propagate,
38943- augment_copy,
38944- augment_rotate,
38945+ .propagate = augment_propagate,
38946+ .copy = augment_copy,
38947+ .rotate = augment_rotate,
38948 };
38949
38950 /**
38951diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
38952index 9b465bb..00034ecf 100644
38953--- a/drivers/block/drbd/drbd_main.c
38954+++ b/drivers/block/drbd/drbd_main.c
38955@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
38956 p->sector = sector;
38957 p->block_id = block_id;
38958 p->blksize = blksize;
38959- p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
38960+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&peer_device->device->packet_seq));
38961 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
38962 }
38963
38964@@ -1634,7 +1634,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
38965 return -EIO;
38966 p->sector = cpu_to_be64(req->i.sector);
38967 p->block_id = (unsigned long)req;
38968- p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
38969+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&device->packet_seq));
38970 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
38971 if (device->state.conn >= C_SYNC_SOURCE &&
38972 device->state.conn <= C_PAUSED_SYNC_T)
38973@@ -1915,8 +1915,8 @@ void drbd_init_set_defaults(struct drbd_device *device)
38974 atomic_set(&device->unacked_cnt, 0);
38975 atomic_set(&device->local_cnt, 0);
38976 atomic_set(&device->pp_in_use_by_net, 0);
38977- atomic_set(&device->rs_sect_in, 0);
38978- atomic_set(&device->rs_sect_ev, 0);
38979+ atomic_set_unchecked(&device->rs_sect_in, 0);
38980+ atomic_set_unchecked(&device->rs_sect_ev, 0);
38981 atomic_set(&device->ap_in_flight, 0);
38982 atomic_set(&device->md_io.in_use, 0);
38983
38984@@ -2688,8 +2688,8 @@ void drbd_destroy_connection(struct kref *kref)
38985 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
38986 struct drbd_resource *resource = connection->resource;
38987
38988- if (atomic_read(&connection->current_epoch->epoch_size) != 0)
38989- drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
38990+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size) != 0)
38991+ drbd_err(connection, "epoch_size:%d\n", atomic_read_unchecked(&connection->current_epoch->epoch_size));
38992 kfree(connection->current_epoch);
38993
38994 idr_destroy(&connection->peer_devices);
38995diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
38996index 1cd47df..57c53c0 100644
38997--- a/drivers/block/drbd/drbd_nl.c
38998+++ b/drivers/block/drbd/drbd_nl.c
38999@@ -3645,13 +3645,13 @@ finish:
39000
39001 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
39002 {
39003- static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39004+ static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
39005 struct sk_buff *msg;
39006 struct drbd_genlmsghdr *d_out;
39007 unsigned seq;
39008 int err = -ENOMEM;
39009
39010- seq = atomic_inc_return(&drbd_genl_seq);
39011+ seq = atomic_inc_return_unchecked(&drbd_genl_seq);
39012 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
39013 if (!msg)
39014 goto failed;
39015diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
39016index 9342b8d..b6a6825 100644
39017--- a/drivers/block/drbd/drbd_receiver.c
39018+++ b/drivers/block/drbd/drbd_receiver.c
39019@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
39020 struct drbd_device *device = peer_device->device;
39021 int err;
39022
39023- atomic_set(&device->packet_seq, 0);
39024+ atomic_set_unchecked(&device->packet_seq, 0);
39025 device->peer_seq = 0;
39026
39027 device->state_mutex = peer_device->connection->agreed_pro_version < 100 ?
39028@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39029 do {
39030 next_epoch = NULL;
39031
39032- epoch_size = atomic_read(&epoch->epoch_size);
39033+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
39034
39035 switch (ev & ~EV_CLEANUP) {
39036 case EV_PUT:
39037@@ -1273,7 +1273,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connectio
39038 rv = FE_DESTROYED;
39039 } else {
39040 epoch->flags = 0;
39041- atomic_set(&epoch->epoch_size, 0);
39042+ atomic_set_unchecked(&epoch->epoch_size, 0);
39043 /* atomic_set(&epoch->active, 0); is already zero */
39044 if (rv == FE_STILL_LIVE)
39045 rv = FE_RECYCLED;
39046@@ -1550,7 +1550,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39047 conn_wait_active_ee_empty(connection);
39048 drbd_flush(connection);
39049
39050- if (atomic_read(&connection->current_epoch->epoch_size)) {
39051+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39052 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
39053 if (epoch)
39054 break;
39055@@ -1564,11 +1564,11 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
39056 }
39057
39058 epoch->flags = 0;
39059- atomic_set(&epoch->epoch_size, 0);
39060+ atomic_set_unchecked(&epoch->epoch_size, 0);
39061 atomic_set(&epoch->active, 0);
39062
39063 spin_lock(&connection->epoch_lock);
39064- if (atomic_read(&connection->current_epoch->epoch_size)) {
39065+ if (atomic_read_unchecked(&connection->current_epoch->epoch_size)) {
39066 list_add(&epoch->list, &connection->current_epoch->list);
39067 connection->current_epoch = epoch;
39068 connection->epochs++;
39069@@ -1802,7 +1802,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto
39070 list_add_tail(&peer_req->w.list, &device->sync_ee);
39071 spin_unlock_irq(&device->resource->req_lock);
39072
39073- atomic_add(pi->size >> 9, &device->rs_sect_ev);
39074+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_ev);
39075 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
39076 return 0;
39077
39078@@ -1900,7 +1900,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet
39079 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
39080 }
39081
39082- atomic_add(pi->size >> 9, &device->rs_sect_in);
39083+ atomic_add_unchecked(pi->size >> 9, &device->rs_sect_in);
39084
39085 return err;
39086 }
39087@@ -2290,7 +2290,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39088
39089 err = wait_for_and_update_peer_seq(peer_device, peer_seq);
39090 drbd_send_ack_dp(peer_device, P_NEG_ACK, p, pi->size);
39091- atomic_inc(&connection->current_epoch->epoch_size);
39092+ atomic_inc_unchecked(&connection->current_epoch->epoch_size);
39093 err2 = drbd_drain_block(peer_device, pi->size);
39094 if (!err)
39095 err = err2;
39096@@ -2334,7 +2334,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info *
39097
39098 spin_lock(&connection->epoch_lock);
39099 peer_req->epoch = connection->current_epoch;
39100- atomic_inc(&peer_req->epoch->epoch_size);
39101+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
39102 atomic_inc(&peer_req->epoch->active);
39103 spin_unlock(&connection->epoch_lock);
39104
39105@@ -2479,7 +2479,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
39106
39107 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
39108 (int)part_stat_read(&disk->part0, sectors[1]) -
39109- atomic_read(&device->rs_sect_ev);
39110+ atomic_read_unchecked(&device->rs_sect_ev);
39111
39112 if (atomic_read(&device->ap_actlog_cnt)
39113 || !device->rs_last_events || curr_events - device->rs_last_events > 64) {
39114@@ -2618,7 +2618,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
39115 device->use_csums = true;
39116 } else if (pi->cmd == P_OV_REPLY) {
39117 /* track progress, we may need to throttle */
39118- atomic_add(size >> 9, &device->rs_sect_in);
39119+ atomic_add_unchecked(size >> 9, &device->rs_sect_in);
39120 peer_req->w.cb = w_e_end_ov_reply;
39121 dec_rs_pending(device);
39122 /* drbd_rs_begin_io done when we sent this request,
39123@@ -2691,7 +2691,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
39124 goto out_free_e;
39125
39126 submit_for_resync:
39127- atomic_add(size >> 9, &device->rs_sect_ev);
39128+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
39129
39130 submit:
39131 update_receiver_timing_details(connection, drbd_submit_peer_request);
39132@@ -4564,7 +4564,7 @@ struct data_cmd {
39133 int expect_payload;
39134 size_t pkt_size;
39135 int (*fn)(struct drbd_connection *, struct packet_info *);
39136-};
39137+} __do_const;
39138
39139 static struct data_cmd drbd_cmd_handler[] = {
39140 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
39141@@ -4678,7 +4678,7 @@ static void conn_disconnect(struct drbd_connection *connection)
39142 if (!list_empty(&connection->current_epoch->list))
39143 drbd_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
39144 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
39145- atomic_set(&connection->current_epoch->epoch_size, 0);
39146+ atomic_set_unchecked(&connection->current_epoch->epoch_size, 0);
39147 connection->send.seen_any_write_yet = false;
39148
39149 drbd_info(connection, "Connection closed\n");
39150@@ -5182,7 +5182,7 @@ static int got_IsInSync(struct drbd_connection *connection, struct packet_info *
39151 put_ldev(device);
39152 }
39153 dec_rs_pending(device);
39154- atomic_add(blksize >> 9, &device->rs_sect_in);
39155+ atomic_add_unchecked(blksize >> 9, &device->rs_sect_in);
39156
39157 return 0;
39158 }
39159@@ -5470,7 +5470,7 @@ static int connection_finish_peer_reqs(struct drbd_connection *connection)
39160 struct asender_cmd {
39161 size_t pkt_size;
39162 int (*fn)(struct drbd_connection *connection, struct packet_info *);
39163-};
39164+} __do_const;
39165
39166 static struct asender_cmd asender_tbl[] = {
39167 [P_PING] = { 0, got_Ping },
39168diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
39169index 50776b3..1477c3f 100644
39170--- a/drivers/block/drbd/drbd_worker.c
39171+++ b/drivers/block/drbd/drbd_worker.c
39172@@ -408,7 +408,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector,
39173 list_add_tail(&peer_req->w.list, &device->read_ee);
39174 spin_unlock_irq(&device->resource->req_lock);
39175
39176- atomic_add(size >> 9, &device->rs_sect_ev);
39177+ atomic_add_unchecked(size >> 9, &device->rs_sect_ev);
39178 if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
39179 return 0;
39180
39181@@ -553,7 +553,7 @@ static int drbd_rs_number_requests(struct drbd_device *device)
39182 unsigned int sect_in; /* Number of sectors that came in since the last turn */
39183 int number, mxb;
39184
39185- sect_in = atomic_xchg(&device->rs_sect_in, 0);
39186+ sect_in = atomic_xchg_unchecked(&device->rs_sect_in, 0);
39187 device->rs_in_flight -= sect_in;
39188
39189 rcu_read_lock();
39190@@ -1594,8 +1594,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
39191 {
39192 struct fifo_buffer *plan;
39193
39194- atomic_set(&device->rs_sect_in, 0);
39195- atomic_set(&device->rs_sect_ev, 0);
39196+ atomic_set_unchecked(&device->rs_sect_in, 0);
39197+ atomic_set_unchecked(&device->rs_sect_ev, 0);
39198 device->rs_in_flight = 0;
39199
39200 /* Updating the RCU protected object in place is necessary since
39201diff --git a/drivers/block/loop.c b/drivers/block/loop.c
39202index 6cb1beb..bf490f7 100644
39203--- a/drivers/block/loop.c
39204+++ b/drivers/block/loop.c
39205@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
39206
39207 file_start_write(file);
39208 set_fs(get_ds());
39209- bw = file->f_op->write(file, buf, len, &pos);
39210+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
39211 set_fs(old_fs);
39212 file_end_write(file);
39213 if (likely(bw == len))
39214diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
39215index 02351e2..a9ea617 100644
39216--- a/drivers/block/nvme-core.c
39217+++ b/drivers/block/nvme-core.c
39218@@ -73,7 +73,6 @@ static LIST_HEAD(dev_list);
39219 static struct task_struct *nvme_thread;
39220 static struct workqueue_struct *nvme_workq;
39221 static wait_queue_head_t nvme_kthread_wait;
39222-static struct notifier_block nvme_nb;
39223
39224 static void nvme_reset_failed_dev(struct work_struct *ws);
39225
39226@@ -2925,6 +2924,10 @@ static struct pci_driver nvme_driver = {
39227 .err_handler = &nvme_err_handler,
39228 };
39229
39230+static struct notifier_block nvme_nb = {
39231+ .notifier_call = &nvme_cpu_notify,
39232+};
39233+
39234 static int __init nvme_init(void)
39235 {
39236 int result;
39237@@ -2941,7 +2944,6 @@ static int __init nvme_init(void)
39238 else if (result > 0)
39239 nvme_major = result;
39240
39241- nvme_nb.notifier_call = &nvme_cpu_notify;
39242 result = register_hotcpu_notifier(&nvme_nb);
39243 if (result)
39244 goto unregister_blkdev;
39245diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
39246index 758ac44..58087fd 100644
39247--- a/drivers/block/pktcdvd.c
39248+++ b/drivers/block/pktcdvd.c
39249@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
39250
39251 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
39252 {
39253- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
39254+ return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
39255 }
39256
39257 /*
39258@@ -1888,7 +1888,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
39259 return -EROFS;
39260 }
39261 pd->settings.fp = ti.fp;
39262- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
39263+ pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
39264
39265 if (ti.nwa_v) {
39266 pd->nwa = be32_to_cpu(ti.next_writable);
39267diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
39268index e5565fb..71be10b4 100644
39269--- a/drivers/block/smart1,2.h
39270+++ b/drivers/block/smart1,2.h
39271@@ -108,11 +108,11 @@ static unsigned long smart4_intr_pending(ctlr_info_t *h)
39272 }
39273
39274 static struct access_method smart4_access = {
39275- smart4_submit_command,
39276- smart4_intr_mask,
39277- smart4_fifo_full,
39278- smart4_intr_pending,
39279- smart4_completed,
39280+ .submit_command = smart4_submit_command,
39281+ .set_intr_mask = smart4_intr_mask,
39282+ .fifo_full = smart4_fifo_full,
39283+ .intr_pending = smart4_intr_pending,
39284+ .command_completed = smart4_completed,
39285 };
39286
39287 /*
39288@@ -144,11 +144,11 @@ static unsigned long smart2_intr_pending(ctlr_info_t *h)
39289 }
39290
39291 static struct access_method smart2_access = {
39292- smart2_submit_command,
39293- smart2_intr_mask,
39294- smart2_fifo_full,
39295- smart2_intr_pending,
39296- smart2_completed,
39297+ .submit_command = smart2_submit_command,
39298+ .set_intr_mask = smart2_intr_mask,
39299+ .fifo_full = smart2_fifo_full,
39300+ .intr_pending = smart2_intr_pending,
39301+ .command_completed = smart2_completed,
39302 };
39303
39304 /*
39305@@ -180,11 +180,11 @@ static unsigned long smart2e_intr_pending(ctlr_info_t *h)
39306 }
39307
39308 static struct access_method smart2e_access = {
39309- smart2e_submit_command,
39310- smart2e_intr_mask,
39311- smart2e_fifo_full,
39312- smart2e_intr_pending,
39313- smart2e_completed,
39314+ .submit_command = smart2e_submit_command,
39315+ .set_intr_mask = smart2e_intr_mask,
39316+ .fifo_full = smart2e_fifo_full,
39317+ .intr_pending = smart2e_intr_pending,
39318+ .command_completed = smart2e_completed,
39319 };
39320
39321 /*
39322@@ -270,9 +270,9 @@ static unsigned long smart1_intr_pending(ctlr_info_t *h)
39323 }
39324
39325 static struct access_method smart1_access = {
39326- smart1_submit_command,
39327- smart1_intr_mask,
39328- smart1_fifo_full,
39329- smart1_intr_pending,
39330- smart1_completed,
39331+ .submit_command = smart1_submit_command,
39332+ .set_intr_mask = smart1_intr_mask,
39333+ .fifo_full = smart1_fifo_full,
39334+ .intr_pending = smart1_intr_pending,
39335+ .command_completed = smart1_completed,
39336 };
39337diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
39338index f038dba..bb74c08 100644
39339--- a/drivers/bluetooth/btwilink.c
39340+++ b/drivers/bluetooth/btwilink.c
39341@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
39342
39343 static int bt_ti_probe(struct platform_device *pdev)
39344 {
39345- static struct ti_st *hst;
39346+ struct ti_st *hst;
39347 struct hci_dev *hdev;
39348 int err;
39349
39350diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
39351index 898b84b..86f74b9 100644
39352--- a/drivers/cdrom/cdrom.c
39353+++ b/drivers/cdrom/cdrom.c
39354@@ -610,7 +610,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
39355 ENSURE(reset, CDC_RESET);
39356 ENSURE(generic_packet, CDC_GENERIC_PACKET);
39357 cdi->mc_flags = 0;
39358- cdo->n_minors = 0;
39359 cdi->options = CDO_USE_FFLAGS;
39360
39361 if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
39362@@ -630,8 +629,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
39363 else
39364 cdi->cdda_method = CDDA_OLD;
39365
39366- if (!cdo->generic_packet)
39367- cdo->generic_packet = cdrom_dummy_generic_packet;
39368+ if (!cdo->generic_packet) {
39369+ pax_open_kernel();
39370+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
39371+ pax_close_kernel();
39372+ }
39373
39374 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
39375 mutex_lock(&cdrom_mutex);
39376@@ -652,7 +654,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
39377 if (cdi->exit)
39378 cdi->exit(cdi);
39379
39380- cdi->ops->n_minors--;
39381 cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
39382 }
39383
39384@@ -2126,7 +2127,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
39385 */
39386 nr = nframes;
39387 do {
39388- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
39389+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
39390 if (cgc.buffer)
39391 break;
39392
39393@@ -3434,7 +3435,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
39394 struct cdrom_device_info *cdi;
39395 int ret;
39396
39397- ret = scnprintf(info + *pos, max_size - *pos, header);
39398+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
39399 if (!ret)
39400 return 1;
39401
39402diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
39403index 584bc31..e64a12c 100644
39404--- a/drivers/cdrom/gdrom.c
39405+++ b/drivers/cdrom/gdrom.c
39406@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
39407 .audio_ioctl = gdrom_audio_ioctl,
39408 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
39409 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
39410- .n_minors = 1,
39411 };
39412
39413 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
39414diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
39415index 6e9f74a..50c7cea 100644
39416--- a/drivers/char/Kconfig
39417+++ b/drivers/char/Kconfig
39418@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
39419
39420 config DEVKMEM
39421 bool "/dev/kmem virtual device support"
39422- default y
39423+ default n
39424+ depends on !GRKERNSEC_KMEM
39425 help
39426 Say Y here if you want to support the /dev/kmem device. The
39427 /dev/kmem device is rarely used, but can be used for certain
39428@@ -577,6 +578,7 @@ config DEVPORT
39429 bool
39430 depends on !M68K
39431 depends on ISA || PCI
39432+ depends on !GRKERNSEC_KMEM
39433 default y
39434
39435 source "drivers/s390/char/Kconfig"
39436diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
39437index a48e05b..6bac831 100644
39438--- a/drivers/char/agp/compat_ioctl.c
39439+++ b/drivers/char/agp/compat_ioctl.c
39440@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
39441 return -ENOMEM;
39442 }
39443
39444- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
39445+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
39446 sizeof(*usegment) * ureserve.seg_count)) {
39447 kfree(usegment);
39448 kfree(ksegment);
39449diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
39450index 09f17eb..8531d2f 100644
39451--- a/drivers/char/agp/frontend.c
39452+++ b/drivers/char/agp/frontend.c
39453@@ -806,7 +806,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
39454 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
39455 return -EFAULT;
39456
39457- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
39458+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
39459 return -EFAULT;
39460
39461 client = agp_find_client_by_pid(reserve.pid);
39462@@ -836,7 +836,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
39463 if (segment == NULL)
39464 return -ENOMEM;
39465
39466- if (copy_from_user(segment, (void __user *) reserve.seg_list,
39467+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
39468 sizeof(struct agp_segment) * reserve.seg_count)) {
39469 kfree(segment);
39470 return -EFAULT;
39471diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
39472index 4f94375..413694e 100644
39473--- a/drivers/char/genrtc.c
39474+++ b/drivers/char/genrtc.c
39475@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
39476 switch (cmd) {
39477
39478 case RTC_PLL_GET:
39479+ memset(&pll, 0, sizeof(pll));
39480 if (get_rtc_pll(&pll))
39481 return -EINVAL;
39482 else
39483diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
39484index d5d4cd8..22d561d 100644
39485--- a/drivers/char/hpet.c
39486+++ b/drivers/char/hpet.c
39487@@ -575,7 +575,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
39488 }
39489
39490 static int
39491-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
39492+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
39493 struct hpet_info *info)
39494 {
39495 struct hpet_timer __iomem *timer;
39496diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
39497index 86fe45c..c0ea948 100644
39498--- a/drivers/char/hw_random/intel-rng.c
39499+++ b/drivers/char/hw_random/intel-rng.c
39500@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
39501
39502 if (no_fwh_detect)
39503 return -ENODEV;
39504- printk(warning);
39505+ printk("%s", warning);
39506 return -EBUSY;
39507 }
39508
39509diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
39510index e6db938..835e3a2 100644
39511--- a/drivers/char/ipmi/ipmi_msghandler.c
39512+++ b/drivers/char/ipmi/ipmi_msghandler.c
39513@@ -438,7 +438,7 @@ struct ipmi_smi {
39514 struct proc_dir_entry *proc_dir;
39515 char proc_dir_name[10];
39516
39517- atomic_t stats[IPMI_NUM_STATS];
39518+ atomic_unchecked_t stats[IPMI_NUM_STATS];
39519
39520 /*
39521 * run_to_completion duplicate of smb_info, smi_info
39522@@ -470,9 +470,9 @@ static LIST_HEAD(smi_watchers);
39523 static DEFINE_MUTEX(smi_watchers_mutex);
39524
39525 #define ipmi_inc_stat(intf, stat) \
39526- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
39527+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
39528 #define ipmi_get_stat(intf, stat) \
39529- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
39530+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
39531
39532 static int is_lan_addr(struct ipmi_addr *addr)
39533 {
39534@@ -2926,7 +2926,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
39535 INIT_LIST_HEAD(&intf->cmd_rcvrs);
39536 init_waitqueue_head(&intf->waitq);
39537 for (i = 0; i < IPMI_NUM_STATS; i++)
39538- atomic_set(&intf->stats[i], 0);
39539+ atomic_set_unchecked(&intf->stats[i], 0);
39540
39541 intf->proc_dir = NULL;
39542
39543diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
39544index 5d66568..c9d93c3 100644
39545--- a/drivers/char/ipmi/ipmi_si_intf.c
39546+++ b/drivers/char/ipmi/ipmi_si_intf.c
39547@@ -285,7 +285,7 @@ struct smi_info {
39548 unsigned char slave_addr;
39549
39550 /* Counters and things for the proc filesystem. */
39551- atomic_t stats[SI_NUM_STATS];
39552+ atomic_unchecked_t stats[SI_NUM_STATS];
39553
39554 struct task_struct *thread;
39555
39556@@ -294,9 +294,9 @@ struct smi_info {
39557 };
39558
39559 #define smi_inc_stat(smi, stat) \
39560- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
39561+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
39562 #define smi_get_stat(smi, stat) \
39563- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
39564+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
39565
39566 #define SI_MAX_PARMS 4
39567
39568@@ -3374,7 +3374,7 @@ static int try_smi_init(struct smi_info *new_smi)
39569 atomic_set(&new_smi->req_events, 0);
39570 new_smi->run_to_completion = false;
39571 for (i = 0; i < SI_NUM_STATS; i++)
39572- atomic_set(&new_smi->stats[i], 0);
39573+ atomic_set_unchecked(&new_smi->stats[i], 0);
39574
39575 new_smi->interrupt_disabled = true;
39576 atomic_set(&new_smi->stop_operation, 0);
39577diff --git a/drivers/char/mem.c b/drivers/char/mem.c
39578index 917403f..dddd899 100644
39579--- a/drivers/char/mem.c
39580+++ b/drivers/char/mem.c
39581@@ -18,6 +18,7 @@
39582 #include <linux/raw.h>
39583 #include <linux/tty.h>
39584 #include <linux/capability.h>
39585+#include <linux/security.h>
39586 #include <linux/ptrace.h>
39587 #include <linux/device.h>
39588 #include <linux/highmem.h>
39589@@ -36,6 +37,10 @@
39590
39591 #define DEVPORT_MINOR 4
39592
39593+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39594+extern const struct file_operations grsec_fops;
39595+#endif
39596+
39597 static inline unsigned long size_inside_page(unsigned long start,
39598 unsigned long size)
39599 {
39600@@ -67,9 +72,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39601
39602 while (cursor < to) {
39603 if (!devmem_is_allowed(pfn)) {
39604+#ifdef CONFIG_GRKERNSEC_KMEM
39605+ gr_handle_mem_readwrite(from, to);
39606+#else
39607 printk(KERN_INFO
39608 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
39609 current->comm, from, to);
39610+#endif
39611 return 0;
39612 }
39613 cursor += PAGE_SIZE;
39614@@ -77,6 +86,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39615 }
39616 return 1;
39617 }
39618+#elif defined(CONFIG_GRKERNSEC_KMEM)
39619+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39620+{
39621+ return 0;
39622+}
39623 #else
39624 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
39625 {
39626@@ -122,6 +136,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
39627
39628 while (count > 0) {
39629 unsigned long remaining;
39630+ char *temp;
39631
39632 sz = size_inside_page(p, count);
39633
39634@@ -137,7 +152,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
39635 if (!ptr)
39636 return -EFAULT;
39637
39638- remaining = copy_to_user(buf, ptr, sz);
39639+#ifdef CONFIG_PAX_USERCOPY
39640+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39641+ if (!temp) {
39642+ unxlate_dev_mem_ptr(p, ptr);
39643+ return -ENOMEM;
39644+ }
39645+ memcpy(temp, ptr, sz);
39646+#else
39647+ temp = ptr;
39648+#endif
39649+
39650+ remaining = copy_to_user(buf, temp, sz);
39651+
39652+#ifdef CONFIG_PAX_USERCOPY
39653+ kfree(temp);
39654+#endif
39655+
39656 unxlate_dev_mem_ptr(p, ptr);
39657 if (remaining)
39658 return -EFAULT;
39659@@ -369,9 +400,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39660 size_t count, loff_t *ppos)
39661 {
39662 unsigned long p = *ppos;
39663- ssize_t low_count, read, sz;
39664+ ssize_t low_count, read, sz, err = 0;
39665 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
39666- int err = 0;
39667
39668 read = 0;
39669 if (p < (unsigned long) high_memory) {
39670@@ -393,6 +423,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39671 }
39672 #endif
39673 while (low_count > 0) {
39674+ char *temp;
39675+
39676 sz = size_inside_page(p, low_count);
39677
39678 /*
39679@@ -402,7 +434,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
39680 */
39681 kbuf = xlate_dev_kmem_ptr((char *)p);
39682
39683- if (copy_to_user(buf, kbuf, sz))
39684+#ifdef CONFIG_PAX_USERCOPY
39685+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
39686+ if (!temp)
39687+ return -ENOMEM;
39688+ memcpy(temp, kbuf, sz);
39689+#else
39690+ temp = kbuf;
39691+#endif
39692+
39693+ err = copy_to_user(buf, temp, sz);
39694+
39695+#ifdef CONFIG_PAX_USERCOPY
39696+ kfree(temp);
39697+#endif
39698+
39699+ if (err)
39700 return -EFAULT;
39701 buf += sz;
39702 p += sz;
39703@@ -827,6 +874,9 @@ static const struct memdev {
39704 #ifdef CONFIG_PRINTK
39705 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
39706 #endif
39707+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39708+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
39709+#endif
39710 };
39711
39712 static int memory_open(struct inode *inode, struct file *filp)
39713@@ -898,7 +948,7 @@ static int __init chr_dev_init(void)
39714 continue;
39715
39716 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
39717- NULL, devlist[minor].name);
39718+ NULL, "%s", devlist[minor].name);
39719 }
39720
39721 return tty_init();
39722diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
39723index 9df78e2..01ba9ae 100644
39724--- a/drivers/char/nvram.c
39725+++ b/drivers/char/nvram.c
39726@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
39727
39728 spin_unlock_irq(&rtc_lock);
39729
39730- if (copy_to_user(buf, contents, tmp - contents))
39731+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
39732 return -EFAULT;
39733
39734 *ppos = i;
39735diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
39736index 0ea9986..e7b07e4 100644
39737--- a/drivers/char/pcmcia/synclink_cs.c
39738+++ b/drivers/char/pcmcia/synclink_cs.c
39739@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39740
39741 if (debug_level >= DEBUG_LEVEL_INFO)
39742 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
39743- __FILE__, __LINE__, info->device_name, port->count);
39744+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
39745
39746 if (tty_port_close_start(port, tty, filp) == 0)
39747 goto cleanup;
39748@@ -2363,7 +2363,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
39749 cleanup:
39750 if (debug_level >= DEBUG_LEVEL_INFO)
39751 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
39752- tty->driver->name, port->count);
39753+ tty->driver->name, atomic_read(&port->count));
39754 }
39755
39756 /* Wait until the transmitter is empty.
39757@@ -2505,7 +2505,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39758
39759 if (debug_level >= DEBUG_LEVEL_INFO)
39760 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
39761- __FILE__, __LINE__, tty->driver->name, port->count);
39762+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
39763
39764 /* If port is closing, signal caller to try again */
39765 if (port->flags & ASYNC_CLOSING){
39766@@ -2525,11 +2525,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
39767 goto cleanup;
39768 }
39769 spin_lock(&port->lock);
39770- port->count++;
39771+ atomic_inc(&port->count);
39772 spin_unlock(&port->lock);
39773 spin_unlock_irqrestore(&info->netlock, flags);
39774
39775- if (port->count == 1) {
39776+ if (atomic_read(&port->count) == 1) {
39777 /* 1st open on this device, init hardware */
39778 retval = startup(info, tty);
39779 if (retval < 0)
39780@@ -3918,7 +3918,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
39781 unsigned short new_crctype;
39782
39783 /* return error if TTY interface open */
39784- if (info->port.count)
39785+ if (atomic_read(&info->port.count))
39786 return -EBUSY;
39787
39788 switch (encoding)
39789@@ -4022,7 +4022,7 @@ static int hdlcdev_open(struct net_device *dev)
39790
39791 /* arbitrate between network and tty opens */
39792 spin_lock_irqsave(&info->netlock, flags);
39793- if (info->port.count != 0 || info->netcount != 0) {
39794+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
39795 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
39796 spin_unlock_irqrestore(&info->netlock, flags);
39797 return -EBUSY;
39798@@ -4112,7 +4112,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39799 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
39800
39801 /* return error if TTY interface open */
39802- if (info->port.count)
39803+ if (atomic_read(&info->port.count))
39804 return -EBUSY;
39805
39806 if (cmd != SIOCWANDEV)
39807diff --git a/drivers/char/random.c b/drivers/char/random.c
39808index 8c86a95..7c499f3 100644
39809--- a/drivers/char/random.c
39810+++ b/drivers/char/random.c
39811@@ -289,9 +289,6 @@
39812 /*
39813 * To allow fractional bits to be tracked, the entropy_count field is
39814 * denominated in units of 1/8th bits.
39815- *
39816- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
39817- * credit_entropy_bits() needs to be 64 bits wide.
39818 */
39819 #define ENTROPY_SHIFT 3
39820 #define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
39821@@ -439,9 +436,9 @@ struct entropy_store {
39822 };
39823
39824 static void push_to_pool(struct work_struct *work);
39825-static __u32 input_pool_data[INPUT_POOL_WORDS];
39826-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
39827-static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
39828+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
39829+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39830+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
39831
39832 static struct entropy_store input_pool = {
39833 .poolinfo = &poolinfo_table[0],
39834@@ -635,7 +632,7 @@ retry:
39835 /* The +2 corresponds to the /4 in the denominator */
39836
39837 do {
39838- unsigned int anfrac = min(pnfrac, pool_size/2);
39839+ u64 anfrac = min(pnfrac, pool_size/2);
39840 unsigned int add =
39841 ((pool_size - entropy_count)*anfrac*3) >> s;
39842
39843@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
39844
39845 extract_buf(r, tmp);
39846 i = min_t(int, nbytes, EXTRACT_SIZE);
39847- if (copy_to_user(buf, tmp, i)) {
39848+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
39849 ret = -EFAULT;
39850 break;
39851 }
39852@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
39853 static int proc_do_uuid(struct ctl_table *table, int write,
39854 void __user *buffer, size_t *lenp, loff_t *ppos)
39855 {
39856- struct ctl_table fake_table;
39857+ ctl_table_no_const fake_table;
39858 unsigned char buf[64], tmp_uuid[16], *uuid;
39859
39860 uuid = table->data;
39861@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
39862 static int proc_do_entropy(struct ctl_table *table, int write,
39863 void __user *buffer, size_t *lenp, loff_t *ppos)
39864 {
39865- struct ctl_table fake_table;
39866+ ctl_table_no_const fake_table;
39867 int entropy_count;
39868
39869 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
39870diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
39871index 7cc1fe22..b602d6b 100644
39872--- a/drivers/char/sonypi.c
39873+++ b/drivers/char/sonypi.c
39874@@ -54,6 +54,7 @@
39875
39876 #include <asm/uaccess.h>
39877 #include <asm/io.h>
39878+#include <asm/local.h>
39879
39880 #include <linux/sonypi.h>
39881
39882@@ -490,7 +491,7 @@ static struct sonypi_device {
39883 spinlock_t fifo_lock;
39884 wait_queue_head_t fifo_proc_list;
39885 struct fasync_struct *fifo_async;
39886- int open_count;
39887+ local_t open_count;
39888 int model;
39889 struct input_dev *input_jog_dev;
39890 struct input_dev *input_key_dev;
39891@@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
39892 static int sonypi_misc_release(struct inode *inode, struct file *file)
39893 {
39894 mutex_lock(&sonypi_device.lock);
39895- sonypi_device.open_count--;
39896+ local_dec(&sonypi_device.open_count);
39897 mutex_unlock(&sonypi_device.lock);
39898 return 0;
39899 }
39900@@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
39901 {
39902 mutex_lock(&sonypi_device.lock);
39903 /* Flush input queue on first open */
39904- if (!sonypi_device.open_count)
39905+ if (!local_read(&sonypi_device.open_count))
39906 kfifo_reset(&sonypi_device.fifo);
39907- sonypi_device.open_count++;
39908+ local_inc(&sonypi_device.open_count);
39909 mutex_unlock(&sonypi_device.lock);
39910
39911 return 0;
39912diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
39913index 565a947..dcdc06e 100644
39914--- a/drivers/char/tpm/tpm_acpi.c
39915+++ b/drivers/char/tpm/tpm_acpi.c
39916@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
39917 virt = acpi_os_map_iomem(start, len);
39918 if (!virt) {
39919 kfree(log->bios_event_log);
39920+ log->bios_event_log = NULL;
39921 printk("%s: ERROR - Unable to map memory\n", __func__);
39922 return -EIO;
39923 }
39924
39925- memcpy_fromio(log->bios_event_log, virt, len);
39926+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
39927
39928 acpi_os_unmap_iomem(virt, len);
39929 return 0;
39930diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
39931index 3a56a13..f8cbd25 100644
39932--- a/drivers/char/tpm/tpm_eventlog.c
39933+++ b/drivers/char/tpm/tpm_eventlog.c
39934@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
39935 event = addr;
39936
39937 if ((event->event_type == 0 && event->event_size == 0) ||
39938- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
39939+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
39940 return NULL;
39941
39942 return addr;
39943@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
39944 return NULL;
39945
39946 if ((event->event_type == 0 && event->event_size == 0) ||
39947- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
39948+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
39949 return NULL;
39950
39951 (*pos)++;
39952@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
39953 int i;
39954
39955 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
39956- seq_putc(m, data[i]);
39957+ if (!seq_putc(m, data[i]))
39958+ return -EFAULT;
39959
39960 return 0;
39961 }
39962diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
39963index b585b47..488f43e 100644
39964--- a/drivers/char/virtio_console.c
39965+++ b/drivers/char/virtio_console.c
39966@@ -684,7 +684,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
39967 if (to_user) {
39968 ssize_t ret;
39969
39970- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
39971+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
39972 if (ret)
39973 return -EFAULT;
39974 } else {
39975@@ -787,7 +787,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
39976 if (!port_has_data(port) && !port->host_connected)
39977 return 0;
39978
39979- return fill_readbuf(port, ubuf, count, true);
39980+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
39981 }
39982
39983 static int wait_port_writable(struct port *port, bool nonblock)
39984diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
39985index b9355da..9611f4e 100644
39986--- a/drivers/clk/clk-composite.c
39987+++ b/drivers/clk/clk-composite.c
39988@@ -191,7 +191,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
39989 struct clk *clk;
39990 struct clk_init_data init;
39991 struct clk_composite *composite;
39992- struct clk_ops *clk_composite_ops;
39993+ clk_ops_no_const *clk_composite_ops;
39994
39995 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
39996 if (!composite) {
39997diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
39998index dd3a78c..386d49c 100644
39999--- a/drivers/clk/socfpga/clk-gate.c
40000+++ b/drivers/clk/socfpga/clk-gate.c
40001@@ -22,6 +22,7 @@
40002 #include <linux/mfd/syscon.h>
40003 #include <linux/of.h>
40004 #include <linux/regmap.h>
40005+#include <asm/pgtable.h>
40006
40007 #include "clk.h"
40008
40009@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
40010 return 0;
40011 }
40012
40013-static struct clk_ops gateclk_ops = {
40014+static clk_ops_no_const gateclk_ops __read_only = {
40015 .prepare = socfpga_clk_prepare,
40016 .recalc_rate = socfpga_clk_recalc_rate,
40017 .get_parent = socfpga_clk_get_parent,
40018@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
40019 socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
40020 socfpga_clk->hw.bit_idx = clk_gate[1];
40021
40022- gateclk_ops.enable = clk_gate_ops.enable;
40023- gateclk_ops.disable = clk_gate_ops.disable;
40024+ pax_open_kernel();
40025+ *(void **)&gateclk_ops.enable = clk_gate_ops.enable;
40026+ *(void **)&gateclk_ops.disable = clk_gate_ops.disable;
40027+ pax_close_kernel();
40028 }
40029
40030 rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
40031diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
40032index de6da95..c98278b 100644
40033--- a/drivers/clk/socfpga/clk-pll.c
40034+++ b/drivers/clk/socfpga/clk-pll.c
40035@@ -21,6 +21,7 @@
40036 #include <linux/io.h>
40037 #include <linux/of.h>
40038 #include <linux/of_address.h>
40039+#include <asm/pgtable.h>
40040
40041 #include "clk.h"
40042
40043@@ -76,7 +77,7 @@ static u8 clk_pll_get_parent(struct clk_hw *hwclk)
40044 CLK_MGR_PLL_CLK_SRC_MASK;
40045 }
40046
40047-static struct clk_ops clk_pll_ops = {
40048+static clk_ops_no_const clk_pll_ops __read_only = {
40049 .recalc_rate = clk_pll_recalc_rate,
40050 .get_parent = clk_pll_get_parent,
40051 };
40052@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
40053 pll_clk->hw.hw.init = &init;
40054
40055 pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
40056- clk_pll_ops.enable = clk_gate_ops.enable;
40057- clk_pll_ops.disable = clk_gate_ops.disable;
40058+ pax_open_kernel();
40059+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
40060+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
40061+ pax_close_kernel();
40062
40063 clk = clk_register(NULL, &pll_clk->hw.hw);
40064 if (WARN_ON(IS_ERR(clk))) {
40065diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
40066index b0c18ed..1713a80 100644
40067--- a/drivers/cpufreq/acpi-cpufreq.c
40068+++ b/drivers/cpufreq/acpi-cpufreq.c
40069@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40070 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
40071 per_cpu(acfreq_data, cpu) = data;
40072
40073- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
40074- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40075+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
40076+ pax_open_kernel();
40077+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
40078+ pax_close_kernel();
40079+ }
40080
40081 result = acpi_processor_register_performance(data->acpi_data, cpu);
40082 if (result)
40083@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
40084 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
40085 break;
40086 case ACPI_ADR_SPACE_FIXED_HARDWARE:
40087- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40088+ pax_open_kernel();
40089+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
40090+ pax_close_kernel();
40091 break;
40092 default:
40093 break;
40094@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
40095 if (!msrs)
40096 return;
40097
40098- acpi_cpufreq_driver.boost_supported = true;
40099- acpi_cpufreq_driver.boost_enabled = boost_state(0);
40100+ pax_open_kernel();
40101+ *(bool *)&acpi_cpufreq_driver.boost_supported = true;
40102+ *(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
40103+ pax_close_kernel();
40104
40105 cpu_notifier_register_begin();
40106
40107diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
40108index 07c8276..38bd07c 100644
40109--- a/drivers/cpufreq/cpufreq.c
40110+++ b/drivers/cpufreq/cpufreq.c
40111@@ -2107,7 +2107,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
40112 }
40113
40114 mutex_lock(&cpufreq_governor_mutex);
40115- list_del(&governor->governor_list);
40116+ pax_list_del(&governor->governor_list);
40117 mutex_unlock(&cpufreq_governor_mutex);
40118 return;
40119 }
40120@@ -2323,7 +2323,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
40121 return NOTIFY_OK;
40122 }
40123
40124-static struct notifier_block __refdata cpufreq_cpu_notifier = {
40125+static struct notifier_block cpufreq_cpu_notifier = {
40126 .notifier_call = cpufreq_cpu_callback,
40127 };
40128
40129@@ -2363,13 +2363,17 @@ int cpufreq_boost_trigger_state(int state)
40130 return 0;
40131
40132 write_lock_irqsave(&cpufreq_driver_lock, flags);
40133- cpufreq_driver->boost_enabled = state;
40134+ pax_open_kernel();
40135+ *(bool *)&cpufreq_driver->boost_enabled = state;
40136+ pax_close_kernel();
40137 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40138
40139 ret = cpufreq_driver->set_boost(state);
40140 if (ret) {
40141 write_lock_irqsave(&cpufreq_driver_lock, flags);
40142- cpufreq_driver->boost_enabled = !state;
40143+ pax_open_kernel();
40144+ *(bool *)&cpufreq_driver->boost_enabled = !state;
40145+ pax_close_kernel();
40146 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
40147
40148 pr_err("%s: Cannot %s BOOST\n",
40149@@ -2426,8 +2430,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40150
40151 pr_debug("trying to register driver %s\n", driver_data->name);
40152
40153- if (driver_data->setpolicy)
40154- driver_data->flags |= CPUFREQ_CONST_LOOPS;
40155+ if (driver_data->setpolicy) {
40156+ pax_open_kernel();
40157+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
40158+ pax_close_kernel();
40159+ }
40160
40161 write_lock_irqsave(&cpufreq_driver_lock, flags);
40162 if (cpufreq_driver) {
40163@@ -2442,8 +2449,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
40164 * Check if driver provides function to enable boost -
40165 * if not, use cpufreq_boost_set_sw as default
40166 */
40167- if (!cpufreq_driver->set_boost)
40168- cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40169+ if (!cpufreq_driver->set_boost) {
40170+ pax_open_kernel();
40171+ *(void **)&cpufreq_driver->set_boost = cpufreq_boost_set_sw;
40172+ pax_close_kernel();
40173+ }
40174
40175 ret = cpufreq_sysfs_create_file(&boost.attr);
40176 if (ret) {
40177diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
40178index 1b44496..b80ff5e 100644
40179--- a/drivers/cpufreq/cpufreq_governor.c
40180+++ b/drivers/cpufreq/cpufreq_governor.c
40181@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40182 struct dbs_data *dbs_data;
40183 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
40184 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
40185- struct od_ops *od_ops = NULL;
40186+ const struct od_ops *od_ops = NULL;
40187 struct od_dbs_tuners *od_tuners = NULL;
40188 struct cs_dbs_tuners *cs_tuners = NULL;
40189 struct cpu_dbs_common_info *cpu_cdbs;
40190@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40191
40192 if ((cdata->governor == GOV_CONSERVATIVE) &&
40193 (!policy->governor->initialized)) {
40194- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40195+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40196
40197 cpufreq_register_notifier(cs_ops->notifier_block,
40198 CPUFREQ_TRANSITION_NOTIFIER);
40199@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
40200
40201 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
40202 (policy->governor->initialized == 1)) {
40203- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40204+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
40205
40206 cpufreq_unregister_notifier(cs_ops->notifier_block,
40207 CPUFREQ_TRANSITION_NOTIFIER);
40208diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
40209index cc401d1..8197340 100644
40210--- a/drivers/cpufreq/cpufreq_governor.h
40211+++ b/drivers/cpufreq/cpufreq_governor.h
40212@@ -212,7 +212,7 @@ struct common_dbs_data {
40213 void (*exit)(struct dbs_data *dbs_data);
40214
40215 /* Governor specific ops, see below */
40216- void *gov_ops;
40217+ const void *gov_ops;
40218 };
40219
40220 /* Governor Per policy data */
40221@@ -232,7 +232,7 @@ struct od_ops {
40222 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
40223 unsigned int freq_next, unsigned int relation);
40224 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
40225-};
40226+} __no_const;
40227
40228 struct cs_ops {
40229 struct notifier_block *notifier_block;
40230diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
40231index ad3f38f..8f086cd 100644
40232--- a/drivers/cpufreq/cpufreq_ondemand.c
40233+++ b/drivers/cpufreq/cpufreq_ondemand.c
40234@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
40235
40236 define_get_cpu_dbs_routines(od_cpu_dbs_info);
40237
40238-static struct od_ops od_ops = {
40239+static struct od_ops od_ops __read_only = {
40240 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
40241 .powersave_bias_target = generic_powersave_bias_target,
40242 .freq_increase = dbs_freq_increase,
40243@@ -579,14 +579,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
40244 (struct cpufreq_policy *, unsigned int, unsigned int),
40245 unsigned int powersave_bias)
40246 {
40247- od_ops.powersave_bias_target = f;
40248+ pax_open_kernel();
40249+ *(void **)&od_ops.powersave_bias_target = f;
40250+ pax_close_kernel();
40251 od_set_powersave_bias(powersave_bias);
40252 }
40253 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
40254
40255 void od_unregister_powersave_bias_handler(void)
40256 {
40257- od_ops.powersave_bias_target = generic_powersave_bias_target;
40258+ pax_open_kernel();
40259+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
40260+ pax_close_kernel();
40261 od_set_powersave_bias(0);
40262 }
40263 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
40264diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
40265index 27bb6d3..4cf595c 100644
40266--- a/drivers/cpufreq/intel_pstate.c
40267+++ b/drivers/cpufreq/intel_pstate.c
40268@@ -133,10 +133,10 @@ struct pstate_funcs {
40269 struct cpu_defaults {
40270 struct pstate_adjust_policy pid_policy;
40271 struct pstate_funcs funcs;
40272-};
40273+} __do_const;
40274
40275 static struct pstate_adjust_policy pid_params;
40276-static struct pstate_funcs pstate_funcs;
40277+static struct pstate_funcs *pstate_funcs;
40278
40279 struct perf_limits {
40280 int no_turbo;
40281@@ -594,18 +594,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
40282
40283 cpu->pstate.current_pstate = pstate;
40284
40285- pstate_funcs.set(cpu, pstate);
40286+ pstate_funcs->set(cpu, pstate);
40287 }
40288
40289 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
40290 {
40291- cpu->pstate.min_pstate = pstate_funcs.get_min();
40292- cpu->pstate.max_pstate = pstate_funcs.get_max();
40293- cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
40294- cpu->pstate.scaling = pstate_funcs.get_scaling();
40295+ cpu->pstate.min_pstate = pstate_funcs->get_min();
40296+ cpu->pstate.max_pstate = pstate_funcs->get_max();
40297+ cpu->pstate.turbo_pstate = pstate_funcs->get_turbo();
40298+ cpu->pstate.scaling = pstate_funcs->get_scaling();
40299
40300- if (pstate_funcs.get_vid)
40301- pstate_funcs.get_vid(cpu);
40302+ if (pstate_funcs->get_vid)
40303+ pstate_funcs->get_vid(cpu);
40304 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
40305 }
40306
40307@@ -875,9 +875,9 @@ static int intel_pstate_msrs_not_valid(void)
40308 rdmsrl(MSR_IA32_APERF, aperf);
40309 rdmsrl(MSR_IA32_MPERF, mperf);
40310
40311- if (!pstate_funcs.get_max() ||
40312- !pstate_funcs.get_min() ||
40313- !pstate_funcs.get_turbo())
40314+ if (!pstate_funcs->get_max() ||
40315+ !pstate_funcs->get_min() ||
40316+ !pstate_funcs->get_turbo())
40317 return -ENODEV;
40318
40319 rdmsrl(MSR_IA32_APERF, tmp);
40320@@ -891,7 +891,7 @@ static int intel_pstate_msrs_not_valid(void)
40321 return 0;
40322 }
40323
40324-static void copy_pid_params(struct pstate_adjust_policy *policy)
40325+static void copy_pid_params(const struct pstate_adjust_policy *policy)
40326 {
40327 pid_params.sample_rate_ms = policy->sample_rate_ms;
40328 pid_params.p_gain_pct = policy->p_gain_pct;
40329@@ -903,12 +903,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
40330
40331 static void copy_cpu_funcs(struct pstate_funcs *funcs)
40332 {
40333- pstate_funcs.get_max = funcs->get_max;
40334- pstate_funcs.get_min = funcs->get_min;
40335- pstate_funcs.get_turbo = funcs->get_turbo;
40336- pstate_funcs.get_scaling = funcs->get_scaling;
40337- pstate_funcs.set = funcs->set;
40338- pstate_funcs.get_vid = funcs->get_vid;
40339+ pstate_funcs = funcs;
40340 }
40341
40342 #if IS_ENABLED(CONFIG_ACPI)
40343diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
40344index 529cfd9..0e28fff 100644
40345--- a/drivers/cpufreq/p4-clockmod.c
40346+++ b/drivers/cpufreq/p4-clockmod.c
40347@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
40348 case 0x0F: /* Core Duo */
40349 case 0x16: /* Celeron Core */
40350 case 0x1C: /* Atom */
40351- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40352+ pax_open_kernel();
40353+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40354+ pax_close_kernel();
40355 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
40356 case 0x0D: /* Pentium M (Dothan) */
40357- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40358+ pax_open_kernel();
40359+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40360+ pax_close_kernel();
40361 /* fall through */
40362 case 0x09: /* Pentium M (Banias) */
40363 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
40364@@ -149,7 +153,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
40365
40366 /* on P-4s, the TSC runs with constant frequency independent whether
40367 * throttling is active or not. */
40368- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40369+ pax_open_kernel();
40370+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
40371+ pax_close_kernel();
40372
40373 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
40374 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
40375diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
40376index 9bb42ba..b01b4a2 100644
40377--- a/drivers/cpufreq/sparc-us3-cpufreq.c
40378+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
40379@@ -18,14 +18,12 @@
40380 #include <asm/head.h>
40381 #include <asm/timer.h>
40382
40383-static struct cpufreq_driver *cpufreq_us3_driver;
40384-
40385 struct us3_freq_percpu_info {
40386 struct cpufreq_frequency_table table[4];
40387 };
40388
40389 /* Indexed by cpu number. */
40390-static struct us3_freq_percpu_info *us3_freq_table;
40391+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
40392
40393 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
40394 * in the Safari config register.
40395@@ -156,16 +154,27 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
40396
40397 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
40398 {
40399- if (cpufreq_us3_driver)
40400- us3_freq_target(policy, 0);
40401+ us3_freq_target(policy, 0);
40402
40403 return 0;
40404 }
40405
40406+static int __init us3_freq_init(void);
40407+static void __exit us3_freq_exit(void);
40408+
40409+static struct cpufreq_driver cpufreq_us3_driver = {
40410+ .init = us3_freq_cpu_init,
40411+ .verify = cpufreq_generic_frequency_table_verify,
40412+ .target_index = us3_freq_target,
40413+ .get = us3_freq_get,
40414+ .exit = us3_freq_cpu_exit,
40415+ .name = "UltraSPARC-III",
40416+
40417+};
40418+
40419 static int __init us3_freq_init(void)
40420 {
40421 unsigned long manuf, impl, ver;
40422- int ret;
40423
40424 if (tlb_type != cheetah && tlb_type != cheetah_plus)
40425 return -ENODEV;
40426@@ -178,55 +187,15 @@ static int __init us3_freq_init(void)
40427 (impl == CHEETAH_IMPL ||
40428 impl == CHEETAH_PLUS_IMPL ||
40429 impl == JAGUAR_IMPL ||
40430- impl == PANTHER_IMPL)) {
40431- struct cpufreq_driver *driver;
40432-
40433- ret = -ENOMEM;
40434- driver = kzalloc(sizeof(*driver), GFP_KERNEL);
40435- if (!driver)
40436- goto err_out;
40437-
40438- us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
40439- GFP_KERNEL);
40440- if (!us3_freq_table)
40441- goto err_out;
40442-
40443- driver->init = us3_freq_cpu_init;
40444- driver->verify = cpufreq_generic_frequency_table_verify;
40445- driver->target_index = us3_freq_target;
40446- driver->get = us3_freq_get;
40447- driver->exit = us3_freq_cpu_exit;
40448- strcpy(driver->name, "UltraSPARC-III");
40449-
40450- cpufreq_us3_driver = driver;
40451- ret = cpufreq_register_driver(driver);
40452- if (ret)
40453- goto err_out;
40454-
40455- return 0;
40456-
40457-err_out:
40458- if (driver) {
40459- kfree(driver);
40460- cpufreq_us3_driver = NULL;
40461- }
40462- kfree(us3_freq_table);
40463- us3_freq_table = NULL;
40464- return ret;
40465- }
40466+ impl == PANTHER_IMPL))
40467+ return cpufreq_register_driver(&cpufreq_us3_driver);
40468
40469 return -ENODEV;
40470 }
40471
40472 static void __exit us3_freq_exit(void)
40473 {
40474- if (cpufreq_us3_driver) {
40475- cpufreq_unregister_driver(cpufreq_us3_driver);
40476- kfree(cpufreq_us3_driver);
40477- cpufreq_us3_driver = NULL;
40478- kfree(us3_freq_table);
40479- us3_freq_table = NULL;
40480- }
40481+ cpufreq_unregister_driver(&cpufreq_us3_driver);
40482 }
40483
40484 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
40485diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
40486index 7d4a315..21bb886 100644
40487--- a/drivers/cpufreq/speedstep-centrino.c
40488+++ b/drivers/cpufreq/speedstep-centrino.c
40489@@ -351,8 +351,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
40490 !cpu_has(cpu, X86_FEATURE_EST))
40491 return -ENODEV;
40492
40493- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
40494- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
40495+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
40496+ pax_open_kernel();
40497+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
40498+ pax_close_kernel();
40499+ }
40500
40501 if (policy->cpu != 0)
40502 return -ENODEV;
40503diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
40504index e431d11..d0b997e 100644
40505--- a/drivers/cpuidle/driver.c
40506+++ b/drivers/cpuidle/driver.c
40507@@ -194,7 +194,7 @@ static int poll_idle(struct cpuidle_device *dev,
40508
40509 static void poll_idle_init(struct cpuidle_driver *drv)
40510 {
40511- struct cpuidle_state *state = &drv->states[0];
40512+ cpuidle_state_no_const *state = &drv->states[0];
40513
40514 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
40515 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
40516diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
40517index ca89412..a7b9c49 100644
40518--- a/drivers/cpuidle/governor.c
40519+++ b/drivers/cpuidle/governor.c
40520@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
40521 mutex_lock(&cpuidle_lock);
40522 if (__cpuidle_find_governor(gov->name) == NULL) {
40523 ret = 0;
40524- list_add_tail(&gov->governor_list, &cpuidle_governors);
40525+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
40526 if (!cpuidle_curr_governor ||
40527 cpuidle_curr_governor->rating < gov->rating)
40528 cpuidle_switch_governor(gov);
40529diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
40530index 97c5903..023ad23 100644
40531--- a/drivers/cpuidle/sysfs.c
40532+++ b/drivers/cpuidle/sysfs.c
40533@@ -135,7 +135,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
40534 NULL
40535 };
40536
40537-static struct attribute_group cpuidle_attr_group = {
40538+static attribute_group_no_const cpuidle_attr_group = {
40539 .attrs = cpuidle_default_attrs,
40540 .name = "cpuidle",
40541 };
40542diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
40543index 8d2a772..33826c9 100644
40544--- a/drivers/crypto/hifn_795x.c
40545+++ b/drivers/crypto/hifn_795x.c
40546@@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
40547 MODULE_PARM_DESC(hifn_pll_ref,
40548 "PLL reference clock (pci[freq] or ext[freq], default ext)");
40549
40550-static atomic_t hifn_dev_number;
40551+static atomic_unchecked_t hifn_dev_number;
40552
40553 #define ACRYPTO_OP_DECRYPT 0
40554 #define ACRYPTO_OP_ENCRYPT 1
40555@@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
40556 goto err_out_disable_pci_device;
40557
40558 snprintf(name, sizeof(name), "hifn%d",
40559- atomic_inc_return(&hifn_dev_number)-1);
40560+ atomic_inc_return_unchecked(&hifn_dev_number)-1);
40561
40562 err = pci_request_regions(pdev, name);
40563 if (err)
40564diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
40565index 9f90369..bfcacdb 100644
40566--- a/drivers/devfreq/devfreq.c
40567+++ b/drivers/devfreq/devfreq.c
40568@@ -673,7 +673,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
40569 goto err_out;
40570 }
40571
40572- list_add(&governor->node, &devfreq_governor_list);
40573+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
40574
40575 list_for_each_entry(devfreq, &devfreq_list, node) {
40576 int ret = 0;
40577@@ -761,7 +761,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
40578 }
40579 }
40580
40581- list_del(&governor->node);
40582+ pax_list_del((struct list_head *)&governor->node);
40583 err_out:
40584 mutex_unlock(&devfreq_list_lock);
40585
40586diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
40587index 42d4974..2714f36 100644
40588--- a/drivers/dma/sh/shdma-base.c
40589+++ b/drivers/dma/sh/shdma-base.c
40590@@ -228,8 +228,8 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
40591 schan->slave_id = -EINVAL;
40592 }
40593
40594- schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
40595- sdev->desc_size, GFP_KERNEL);
40596+ schan->desc = kcalloc(sdev->desc_size,
40597+ NR_DESCS_PER_CHANNEL, GFP_KERNEL);
40598 if (!schan->desc) {
40599 ret = -ENOMEM;
40600 goto edescalloc;
40601diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
40602index 58eb857..d7e42c8 100644
40603--- a/drivers/dma/sh/shdmac.c
40604+++ b/drivers/dma/sh/shdmac.c
40605@@ -513,7 +513,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
40606 return ret;
40607 }
40608
40609-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
40610+static struct notifier_block sh_dmae_nmi_notifier = {
40611 .notifier_call = sh_dmae_nmi_handler,
40612
40613 /* Run before NMI debug handler and KGDB */
40614diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
40615index 592af5f..bb1d583 100644
40616--- a/drivers/edac/edac_device.c
40617+++ b/drivers/edac/edac_device.c
40618@@ -477,9 +477,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
40619 */
40620 int edac_device_alloc_index(void)
40621 {
40622- static atomic_t device_indexes = ATOMIC_INIT(0);
40623+ static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
40624
40625- return atomic_inc_return(&device_indexes) - 1;
40626+ return atomic_inc_return_unchecked(&device_indexes) - 1;
40627 }
40628 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
40629
40630diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
40631index a6cd361..7bdbf53 100644
40632--- a/drivers/edac/edac_mc_sysfs.c
40633+++ b/drivers/edac/edac_mc_sysfs.c
40634@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
40635 struct dev_ch_attribute {
40636 struct device_attribute attr;
40637 int channel;
40638-};
40639+} __do_const;
40640
40641 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
40642 struct dev_ch_attribute dev_attr_legacy_##_name = \
40643@@ -1011,14 +1011,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
40644 }
40645
40646 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
40647+ pax_open_kernel();
40648 if (mci->get_sdram_scrub_rate) {
40649- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40650- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40651+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
40652+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
40653 }
40654 if (mci->set_sdram_scrub_rate) {
40655- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40656- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40657+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
40658+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
40659 }
40660+ pax_close_kernel();
40661 err = device_create_file(&mci->dev,
40662 &dev_attr_sdram_scrub_rate);
40663 if (err) {
40664diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
40665index 2cf44b4d..6dd2dc7 100644
40666--- a/drivers/edac/edac_pci.c
40667+++ b/drivers/edac/edac_pci.c
40668@@ -29,7 +29,7 @@
40669
40670 static DEFINE_MUTEX(edac_pci_ctls_mutex);
40671 static LIST_HEAD(edac_pci_list);
40672-static atomic_t pci_indexes = ATOMIC_INIT(0);
40673+static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
40674
40675 /*
40676 * edac_pci_alloc_ctl_info
40677@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
40678 */
40679 int edac_pci_alloc_index(void)
40680 {
40681- return atomic_inc_return(&pci_indexes) - 1;
40682+ return atomic_inc_return_unchecked(&pci_indexes) - 1;
40683 }
40684 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
40685
40686diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
40687index e8658e4..22746d6 100644
40688--- a/drivers/edac/edac_pci_sysfs.c
40689+++ b/drivers/edac/edac_pci_sysfs.c
40690@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
40691 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
40692 static int edac_pci_poll_msec = 1000; /* one second workq period */
40693
40694-static atomic_t pci_parity_count = ATOMIC_INIT(0);
40695-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
40696+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
40697+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
40698
40699 static struct kobject *edac_pci_top_main_kobj;
40700 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
40701@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
40702 void *value;
40703 ssize_t(*show) (void *, char *);
40704 ssize_t(*store) (void *, const char *, size_t);
40705-};
40706+} __do_const;
40707
40708 /* Set of show/store abstract level functions for PCI Parity object */
40709 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
40710@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40711 edac_printk(KERN_CRIT, EDAC_PCI,
40712 "Signaled System Error on %s\n",
40713 pci_name(dev));
40714- atomic_inc(&pci_nonparity_count);
40715+ atomic_inc_unchecked(&pci_nonparity_count);
40716 }
40717
40718 if (status & (PCI_STATUS_PARITY)) {
40719@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40720 "Master Data Parity Error on %s\n",
40721 pci_name(dev));
40722
40723- atomic_inc(&pci_parity_count);
40724+ atomic_inc_unchecked(&pci_parity_count);
40725 }
40726
40727 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40728@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40729 "Detected Parity Error on %s\n",
40730 pci_name(dev));
40731
40732- atomic_inc(&pci_parity_count);
40733+ atomic_inc_unchecked(&pci_parity_count);
40734 }
40735 }
40736
40737@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40738 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
40739 "Signaled System Error on %s\n",
40740 pci_name(dev));
40741- atomic_inc(&pci_nonparity_count);
40742+ atomic_inc_unchecked(&pci_nonparity_count);
40743 }
40744
40745 if (status & (PCI_STATUS_PARITY)) {
40746@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40747 "Master Data Parity Error on "
40748 "%s\n", pci_name(dev));
40749
40750- atomic_inc(&pci_parity_count);
40751+ atomic_inc_unchecked(&pci_parity_count);
40752 }
40753
40754 if (status & (PCI_STATUS_DETECTED_PARITY)) {
40755@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
40756 "Detected Parity Error on %s\n",
40757 pci_name(dev));
40758
40759- atomic_inc(&pci_parity_count);
40760+ atomic_inc_unchecked(&pci_parity_count);
40761 }
40762 }
40763 }
40764@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
40765 if (!check_pci_errors)
40766 return;
40767
40768- before_count = atomic_read(&pci_parity_count);
40769+ before_count = atomic_read_unchecked(&pci_parity_count);
40770
40771 /* scan all PCI devices looking for a Parity Error on devices and
40772 * bridges.
40773@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
40774 /* Only if operator has selected panic on PCI Error */
40775 if (edac_pci_get_panic_on_pe()) {
40776 /* If the count is different 'after' from 'before' */
40777- if (before_count != atomic_read(&pci_parity_count))
40778+ if (before_count != atomic_read_unchecked(&pci_parity_count))
40779 panic("EDAC: PCI Parity Error");
40780 }
40781 }
40782diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
40783index 51b7e3a..aa8a3e8 100644
40784--- a/drivers/edac/mce_amd.h
40785+++ b/drivers/edac/mce_amd.h
40786@@ -77,7 +77,7 @@ struct amd_decoder_ops {
40787 bool (*mc0_mce)(u16, u8);
40788 bool (*mc1_mce)(u16, u8);
40789 bool (*mc2_mce)(u16, u8);
40790-};
40791+} __no_const;
40792
40793 void amd_report_gart_errors(bool);
40794 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
40795diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
40796index 57ea7f4..af06b76 100644
40797--- a/drivers/firewire/core-card.c
40798+++ b/drivers/firewire/core-card.c
40799@@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
40800 const struct fw_card_driver *driver,
40801 struct device *device)
40802 {
40803- static atomic_t index = ATOMIC_INIT(-1);
40804+ static atomic_unchecked_t index = ATOMIC_INIT(-1);
40805
40806- card->index = atomic_inc_return(&index);
40807+ card->index = atomic_inc_return_unchecked(&index);
40808 card->driver = driver;
40809 card->device = device;
40810 card->current_tlabel = 0;
40811@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
40812
40813 void fw_core_remove_card(struct fw_card *card)
40814 {
40815- struct fw_card_driver dummy_driver = dummy_driver_template;
40816+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
40817
40818 card->driver->update_phy_reg(card, 4,
40819 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
40820diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
40821index 2c6d5e1..a2cca6b 100644
40822--- a/drivers/firewire/core-device.c
40823+++ b/drivers/firewire/core-device.c
40824@@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
40825 struct config_rom_attribute {
40826 struct device_attribute attr;
40827 u32 key;
40828-};
40829+} __do_const;
40830
40831 static ssize_t show_immediate(struct device *dev,
40832 struct device_attribute *dattr, char *buf)
40833diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
40834index eb6935c..3cc2bfa 100644
40835--- a/drivers/firewire/core-transaction.c
40836+++ b/drivers/firewire/core-transaction.c
40837@@ -38,6 +38,7 @@
40838 #include <linux/timer.h>
40839 #include <linux/types.h>
40840 #include <linux/workqueue.h>
40841+#include <linux/sched.h>
40842
40843 #include <asm/byteorder.h>
40844
40845diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
40846index e1480ff6..1a429bd 100644
40847--- a/drivers/firewire/core.h
40848+++ b/drivers/firewire/core.h
40849@@ -111,6 +111,7 @@ struct fw_card_driver {
40850
40851 int (*stop_iso)(struct fw_iso_context *ctx);
40852 };
40853+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
40854
40855 void fw_card_initialize(struct fw_card *card,
40856 const struct fw_card_driver *driver, struct device *device);
40857diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
40858index a66a321..f6caf20 100644
40859--- a/drivers/firewire/ohci.c
40860+++ b/drivers/firewire/ohci.c
40861@@ -2056,10 +2056,12 @@ static void bus_reset_work(struct work_struct *work)
40862 be32_to_cpu(ohci->next_header));
40863 }
40864
40865+#ifndef CONFIG_GRKERNSEC
40866 if (param_remote_dma) {
40867 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
40868 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
40869 }
40870+#endif
40871
40872 spin_unlock_irq(&ohci->lock);
40873
40874@@ -2591,8 +2593,10 @@ static int ohci_enable_phys_dma(struct fw_card *card,
40875 unsigned long flags;
40876 int n, ret = 0;
40877
40878+#ifndef CONFIG_GRKERNSEC
40879 if (param_remote_dma)
40880 return 0;
40881+#endif
40882
40883 /*
40884 * FIXME: Make sure this bitmask is cleared when we clear the busReset
40885diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
40886index 94a58a0..f5eba42 100644
40887--- a/drivers/firmware/dmi-id.c
40888+++ b/drivers/firmware/dmi-id.c
40889@@ -16,7 +16,7 @@
40890 struct dmi_device_attribute{
40891 struct device_attribute dev_attr;
40892 int field;
40893-};
40894+} __do_const;
40895 #define to_dmi_dev_attr(_dev_attr) \
40896 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
40897
40898diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
40899index 17afc51..0ef90cd 100644
40900--- a/drivers/firmware/dmi_scan.c
40901+++ b/drivers/firmware/dmi_scan.c
40902@@ -835,7 +835,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
40903 if (buf == NULL)
40904 return -1;
40905
40906- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
40907+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
40908
40909 dmi_unmap(buf);
40910 return 0;
40911diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
40912index 5b53d61..72cee96 100644
40913--- a/drivers/firmware/efi/cper.c
40914+++ b/drivers/firmware/efi/cper.c
40915@@ -44,12 +44,12 @@ static char rcd_decode_str[CPER_REC_LEN];
40916 */
40917 u64 cper_next_record_id(void)
40918 {
40919- static atomic64_t seq;
40920+ static atomic64_unchecked_t seq;
40921
40922- if (!atomic64_read(&seq))
40923- atomic64_set(&seq, ((u64)get_seconds()) << 32);
40924+ if (!atomic64_read_unchecked(&seq))
40925+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
40926
40927- return atomic64_inc_return(&seq);
40928+ return atomic64_inc_return_unchecked(&seq);
40929 }
40930 EXPORT_SYMBOL_GPL(cper_next_record_id);
40931
40932diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
40933index 64ecbb5..d921eb3 100644
40934--- a/drivers/firmware/efi/efi.c
40935+++ b/drivers/firmware/efi/efi.c
40936@@ -126,14 +126,16 @@ static struct attribute_group efi_subsys_attr_group = {
40937 };
40938
40939 static struct efivars generic_efivars;
40940-static struct efivar_operations generic_ops;
40941+static efivar_operations_no_const generic_ops __read_only;
40942
40943 static int generic_ops_register(void)
40944 {
40945- generic_ops.get_variable = efi.get_variable;
40946- generic_ops.set_variable = efi.set_variable;
40947- generic_ops.get_next_variable = efi.get_next_variable;
40948- generic_ops.query_variable_store = efi_query_variable_store;
40949+ pax_open_kernel();
40950+ *(void **)&generic_ops.get_variable = efi.get_variable;
40951+ *(void **)&generic_ops.set_variable = efi.set_variable;
40952+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
40953+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
40954+ pax_close_kernel();
40955
40956 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
40957 }
40958diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
40959index f256ecd..387dcb1 100644
40960--- a/drivers/firmware/efi/efivars.c
40961+++ b/drivers/firmware/efi/efivars.c
40962@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
40963 static int
40964 create_efivars_bin_attributes(void)
40965 {
40966- struct bin_attribute *attr;
40967+ bin_attribute_no_const *attr;
40968 int error;
40969
40970 /* new_var */
40971diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
40972index 2f569aa..c95f4fb 100644
40973--- a/drivers/firmware/google/memconsole.c
40974+++ b/drivers/firmware/google/memconsole.c
40975@@ -155,7 +155,10 @@ static int __init memconsole_init(void)
40976 if (!found_memconsole())
40977 return -ENODEV;
40978
40979- memconsole_bin_attr.size = memconsole_length;
40980+ pax_open_kernel();
40981+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
40982+ pax_close_kernel();
40983+
40984 return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
40985 }
40986
40987diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
40988index fe49ec3..1ade794 100644
40989--- a/drivers/gpio/gpio-em.c
40990+++ b/drivers/gpio/gpio-em.c
40991@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
40992 struct em_gio_priv *p;
40993 struct resource *io[2], *irq[2];
40994 struct gpio_chip *gpio_chip;
40995- struct irq_chip *irq_chip;
40996+ irq_chip_no_const *irq_chip;
40997 const char *name = dev_name(&pdev->dev);
40998 int ret;
40999
41000diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
41001index 3784e81..73637b5 100644
41002--- a/drivers/gpio/gpio-ich.c
41003+++ b/drivers/gpio/gpio-ich.c
41004@@ -94,7 +94,7 @@ struct ichx_desc {
41005 * this option allows driver caching written output values
41006 */
41007 bool use_outlvl_cache;
41008-};
41009+} __do_const;
41010
41011 static struct {
41012 spinlock_t lock;
41013diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
41014index bf6c094..6573caf 100644
41015--- a/drivers/gpio/gpio-rcar.c
41016+++ b/drivers/gpio/gpio-rcar.c
41017@@ -357,7 +357,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
41018 struct gpio_rcar_priv *p;
41019 struct resource *io, *irq;
41020 struct gpio_chip *gpio_chip;
41021- struct irq_chip *irq_chip;
41022+ irq_chip_no_const *irq_chip;
41023 struct device *dev = &pdev->dev;
41024 const char *name = dev_name(dev);
41025 int ret;
41026diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
41027index dbf28fa..04dad4e 100644
41028--- a/drivers/gpio/gpio-vr41xx.c
41029+++ b/drivers/gpio/gpio-vr41xx.c
41030@@ -224,7 +224,7 @@ static int giu_get_irq(unsigned int irq)
41031 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
41032 maskl, pendl, maskh, pendh);
41033
41034- atomic_inc(&irq_err_count);
41035+ atomic_inc_unchecked(&irq_err_count);
41036
41037 return -EINVAL;
41038 }
41039diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
41040index c68d037..2f4f9a9 100644
41041--- a/drivers/gpio/gpiolib.c
41042+++ b/drivers/gpio/gpiolib.c
41043@@ -529,8 +529,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
41044 }
41045
41046 if (gpiochip->irqchip) {
41047- gpiochip->irqchip->irq_request_resources = NULL;
41048- gpiochip->irqchip->irq_release_resources = NULL;
41049+ pax_open_kernel();
41050+ *(void **)&gpiochip->irqchip->irq_request_resources = NULL;
41051+ *(void **)&gpiochip->irqchip->irq_release_resources = NULL;
41052+ pax_close_kernel();
41053 gpiochip->irqchip = NULL;
41054 }
41055 }
41056@@ -596,8 +598,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
41057 gpiochip->irqchip = NULL;
41058 return -EINVAL;
41059 }
41060- irqchip->irq_request_resources = gpiochip_irq_reqres;
41061- irqchip->irq_release_resources = gpiochip_irq_relres;
41062+
41063+ pax_open_kernel();
41064+ *(void **)&irqchip->irq_request_resources = gpiochip_irq_reqres;
41065+ *(void **)&irqchip->irq_release_resources = gpiochip_irq_relres;
41066+ pax_close_kernel();
41067
41068 /*
41069 * Prepare the mapping since the irqchip shall be orthogonal to
41070diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
41071index 90e7730..3b41807 100644
41072--- a/drivers/gpu/drm/drm_crtc.c
41073+++ b/drivers/gpu/drm/drm_crtc.c
41074@@ -3861,7 +3861,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
41075 goto done;
41076 }
41077
41078- if (copy_to_user(&enum_ptr[copied].name,
41079+ if (copy_to_user(enum_ptr[copied].name,
41080 &prop_enum->name, DRM_PROP_NAME_LEN)) {
41081 ret = -EFAULT;
41082 goto done;
41083diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
41084index 3242e20..7e4f621 100644
41085--- a/drivers/gpu/drm/drm_drv.c
41086+++ b/drivers/gpu/drm/drm_drv.c
41087@@ -463,7 +463,7 @@ void drm_unplug_dev(struct drm_device *dev)
41088
41089 drm_device_set_unplugged(dev);
41090
41091- if (dev->open_count == 0) {
41092+ if (local_read(&dev->open_count) == 0) {
41093 drm_put_dev(dev);
41094 }
41095 mutex_unlock(&drm_global_mutex);
41096diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
41097index 79d5221..7ff73496 100644
41098--- a/drivers/gpu/drm/drm_fops.c
41099+++ b/drivers/gpu/drm/drm_fops.c
41100@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
41101 return PTR_ERR(minor);
41102
41103 dev = minor->dev;
41104- if (!dev->open_count++)
41105+ if (local_inc_return(&dev->open_count) == 1)
41106 need_setup = 1;
41107
41108 /* share address_space across all char-devs of a single device */
41109@@ -106,7 +106,7 @@ int drm_open(struct inode *inode, struct file *filp)
41110 return 0;
41111
41112 err_undo:
41113- dev->open_count--;
41114+ local_dec(&dev->open_count);
41115 drm_minor_release(minor);
41116 return retcode;
41117 }
41118@@ -384,7 +384,7 @@ int drm_release(struct inode *inode, struct file *filp)
41119
41120 mutex_lock(&drm_global_mutex);
41121
41122- DRM_DEBUG("open_count = %d\n", dev->open_count);
41123+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
41124
41125 mutex_lock(&dev->struct_mutex);
41126 list_del(&file_priv->lhead);
41127@@ -397,10 +397,10 @@ int drm_release(struct inode *inode, struct file *filp)
41128 * Begin inline drm_release
41129 */
41130
41131- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
41132+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
41133 task_pid_nr(current),
41134 (long)old_encode_dev(file_priv->minor->kdev->devt),
41135- dev->open_count);
41136+ local_read(&dev->open_count));
41137
41138 /* Release any auth tokens that might point to this file_priv,
41139 (do that under the drm_global_mutex) */
41140@@ -471,7 +471,7 @@ int drm_release(struct inode *inode, struct file *filp)
41141 * End inline drm_release
41142 */
41143
41144- if (!--dev->open_count) {
41145+ if (local_dec_and_test(&dev->open_count)) {
41146 retcode = drm_lastclose(dev);
41147 if (drm_device_is_unplugged(dev))
41148 drm_put_dev(dev);
41149diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
41150index 3d2e91c..d31c4c9 100644
41151--- a/drivers/gpu/drm/drm_global.c
41152+++ b/drivers/gpu/drm/drm_global.c
41153@@ -36,7 +36,7 @@
41154 struct drm_global_item {
41155 struct mutex mutex;
41156 void *object;
41157- int refcount;
41158+ atomic_t refcount;
41159 };
41160
41161 static struct drm_global_item glob[DRM_GLOBAL_NUM];
41162@@ -49,7 +49,7 @@ void drm_global_init(void)
41163 struct drm_global_item *item = &glob[i];
41164 mutex_init(&item->mutex);
41165 item->object = NULL;
41166- item->refcount = 0;
41167+ atomic_set(&item->refcount, 0);
41168 }
41169 }
41170
41171@@ -59,7 +59,7 @@ void drm_global_release(void)
41172 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
41173 struct drm_global_item *item = &glob[i];
41174 BUG_ON(item->object != NULL);
41175- BUG_ON(item->refcount != 0);
41176+ BUG_ON(atomic_read(&item->refcount) != 0);
41177 }
41178 }
41179
41180@@ -69,7 +69,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41181 struct drm_global_item *item = &glob[ref->global_type];
41182
41183 mutex_lock(&item->mutex);
41184- if (item->refcount == 0) {
41185+ if (atomic_read(&item->refcount) == 0) {
41186 item->object = kzalloc(ref->size, GFP_KERNEL);
41187 if (unlikely(item->object == NULL)) {
41188 ret = -ENOMEM;
41189@@ -82,7 +82,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
41190 goto out_err;
41191
41192 }
41193- ++item->refcount;
41194+ atomic_inc(&item->refcount);
41195 ref->object = item->object;
41196 mutex_unlock(&item->mutex);
41197 return 0;
41198@@ -98,9 +98,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
41199 struct drm_global_item *item = &glob[ref->global_type];
41200
41201 mutex_lock(&item->mutex);
41202- BUG_ON(item->refcount == 0);
41203+ BUG_ON(atomic_read(&item->refcount) == 0);
41204 BUG_ON(ref->object != item->object);
41205- if (--item->refcount == 0) {
41206+ if (atomic_dec_and_test(&item->refcount)) {
41207 ref->release(ref);
41208 item->object = NULL;
41209 }
41210diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
41211index ecaf0fa..a49cee9 100644
41212--- a/drivers/gpu/drm/drm_info.c
41213+++ b/drivers/gpu/drm/drm_info.c
41214@@ -73,10 +73,13 @@ int drm_vm_info(struct seq_file *m, void *data)
41215 struct drm_local_map *map;
41216 struct drm_map_list *r_list;
41217
41218- /* Hardcoded from _DRM_FRAME_BUFFER,
41219- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
41220- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
41221- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
41222+ static const char * const types[] = {
41223+ [_DRM_FRAME_BUFFER] = "FB",
41224+ [_DRM_REGISTERS] = "REG",
41225+ [_DRM_SHM] = "SHM",
41226+ [_DRM_AGP] = "AGP",
41227+ [_DRM_SCATTER_GATHER] = "SG",
41228+ [_DRM_CONSISTENT] = "PCI"};
41229 const char *type;
41230 int i;
41231
41232@@ -87,7 +90,7 @@ int drm_vm_info(struct seq_file *m, void *data)
41233 map = r_list->map;
41234 if (!map)
41235 continue;
41236- if (map->type < 0 || map->type > 5)
41237+ if (map->type >= ARRAY_SIZE(types))
41238 type = "??";
41239 else
41240 type = types[map->type];
41241@@ -259,7 +262,11 @@ int drm_vma_info(struct seq_file *m, void *data)
41242 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
41243 vma->vm_flags & VM_LOCKED ? 'l' : '-',
41244 vma->vm_flags & VM_IO ? 'i' : '-',
41245+#ifdef CONFIG_GRKERNSEC_HIDESYM
41246+ 0);
41247+#else
41248 vma->vm_pgoff);
41249+#endif
41250
41251 #if defined(__i386__)
41252 pgprot = pgprot_val(vma->vm_page_prot);
41253diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
41254index 2f4c4343..dd12cd2 100644
41255--- a/drivers/gpu/drm/drm_ioc32.c
41256+++ b/drivers/gpu/drm/drm_ioc32.c
41257@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
41258 request = compat_alloc_user_space(nbytes);
41259 if (!access_ok(VERIFY_WRITE, request, nbytes))
41260 return -EFAULT;
41261- list = (struct drm_buf_desc *) (request + 1);
41262+ list = (struct drm_buf_desc __user *) (request + 1);
41263
41264 if (__put_user(count, &request->count)
41265 || __put_user(list, &request->list))
41266@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
41267 request = compat_alloc_user_space(nbytes);
41268 if (!access_ok(VERIFY_WRITE, request, nbytes))
41269 return -EFAULT;
41270- list = (struct drm_buf_pub *) (request + 1);
41271+ list = (struct drm_buf_pub __user *) (request + 1);
41272
41273 if (__put_user(count, &request->count)
41274 || __put_user(list, &request->list))
41275@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
41276 return 0;
41277 }
41278
41279-drm_ioctl_compat_t *drm_compat_ioctls[] = {
41280+drm_ioctl_compat_t drm_compat_ioctls[] = {
41281 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
41282 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
41283 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
41284@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
41285 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41286 {
41287 unsigned int nr = DRM_IOCTL_NR(cmd);
41288- drm_ioctl_compat_t *fn;
41289 int ret;
41290
41291 /* Assume that ioctls without an explicit compat routine will just
41292@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41293 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
41294 return drm_ioctl(filp, cmd, arg);
41295
41296- fn = drm_compat_ioctls[nr];
41297-
41298- if (fn != NULL)
41299- ret = (*fn) (filp, cmd, arg);
41300+ if (drm_compat_ioctls[nr] != NULL)
41301+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
41302 else
41303 ret = drm_ioctl(filp, cmd, arg);
41304
41305diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
41306index 40be746..fd78faf 100644
41307--- a/drivers/gpu/drm/drm_ioctl.c
41308+++ b/drivers/gpu/drm/drm_ioctl.c
41309@@ -642,7 +642,7 @@ long drm_ioctl(struct file *filp,
41310 struct drm_file *file_priv = filp->private_data;
41311 struct drm_device *dev;
41312 const struct drm_ioctl_desc *ioctl = NULL;
41313- drm_ioctl_t *func;
41314+ drm_ioctl_no_const_t func;
41315 unsigned int nr = DRM_IOCTL_NR(cmd);
41316 int retcode = -EINVAL;
41317 char stack_kdata[128];
41318diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
41319index d4d16ed..8fb0b51 100644
41320--- a/drivers/gpu/drm/i810/i810_drv.h
41321+++ b/drivers/gpu/drm/i810/i810_drv.h
41322@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
41323 int page_flipping;
41324
41325 wait_queue_head_t irq_queue;
41326- atomic_t irq_received;
41327- atomic_t irq_emitted;
41328+ atomic_unchecked_t irq_received;
41329+ atomic_unchecked_t irq_emitted;
41330
41331 int front_offset;
41332 } drm_i810_private_t;
41333diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
41334index 2d23e57..1c61d41 100644
41335--- a/drivers/gpu/drm/i915/i915_dma.c
41336+++ b/drivers/gpu/drm/i915/i915_dma.c
41337@@ -1292,7 +1292,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
41338 * locking inversion with the driver load path. And the access here is
41339 * completely racy anyway. So don't bother with locking for now.
41340 */
41341- return dev->open_count == 0;
41342+ return local_read(&dev->open_count) == 0;
41343 }
41344
41345 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
41346diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41347index 60998fc..3b244bc 100644
41348--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41349+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
41350@@ -891,9 +891,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
41351
41352 static int
41353 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
41354- int count)
41355+ unsigned int count)
41356 {
41357- int i;
41358+ unsigned int i;
41359 unsigned relocs_total = 0;
41360 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
41361
41362diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
41363index 2e0613e..a8b94d9 100644
41364--- a/drivers/gpu/drm/i915/i915_ioc32.c
41365+++ b/drivers/gpu/drm/i915/i915_ioc32.c
41366@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
41367 (unsigned long)request);
41368 }
41369
41370-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41371+static drm_ioctl_compat_t i915_compat_ioctls[] = {
41372 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
41373 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
41374 [DRM_I915_GETPARAM] = compat_i915_getparam,
41375@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
41376 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41377 {
41378 unsigned int nr = DRM_IOCTL_NR(cmd);
41379- drm_ioctl_compat_t *fn = NULL;
41380 int ret;
41381
41382 if (nr < DRM_COMMAND_BASE)
41383 return drm_compat_ioctl(filp, cmd, arg);
41384
41385- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
41386- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
41387-
41388- if (fn != NULL)
41389+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls)) {
41390+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
41391 ret = (*fn) (filp, cmd, arg);
41392- else
41393+ } else
41394 ret = drm_ioctl(filp, cmd, arg);
41395
41396 return ret;
41397diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
41398index 7bd17b3..ffa0a11 100644
41399--- a/drivers/gpu/drm/i915/intel_display.c
41400+++ b/drivers/gpu/drm/i915/intel_display.c
41401@@ -12441,13 +12441,13 @@ struct intel_quirk {
41402 int subsystem_vendor;
41403 int subsystem_device;
41404 void (*hook)(struct drm_device *dev);
41405-};
41406+} __do_const;
41407
41408 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
41409 struct intel_dmi_quirk {
41410 void (*hook)(struct drm_device *dev);
41411 const struct dmi_system_id (*dmi_id_list)[];
41412-};
41413+} __do_const;
41414
41415 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41416 {
41417@@ -12455,18 +12455,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
41418 return 1;
41419 }
41420
41421-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41422+static const struct dmi_system_id intel_dmi_quirks_table[] = {
41423 {
41424- .dmi_id_list = &(const struct dmi_system_id[]) {
41425- {
41426- .callback = intel_dmi_reverse_brightness,
41427- .ident = "NCR Corporation",
41428- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41429- DMI_MATCH(DMI_PRODUCT_NAME, ""),
41430- },
41431- },
41432- { } /* terminating entry */
41433+ .callback = intel_dmi_reverse_brightness,
41434+ .ident = "NCR Corporation",
41435+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
41436+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
41437 },
41438+ },
41439+ { } /* terminating entry */
41440+};
41441+
41442+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
41443+ {
41444+ .dmi_id_list = &intel_dmi_quirks_table,
41445 .hook = quirk_invert_brightness,
41446 },
41447 };
41448diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
41449index fe45321..836fdca 100644
41450--- a/drivers/gpu/drm/mga/mga_drv.h
41451+++ b/drivers/gpu/drm/mga/mga_drv.h
41452@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
41453 u32 clear_cmd;
41454 u32 maccess;
41455
41456- atomic_t vbl_received; /**< Number of vblanks received. */
41457+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
41458 wait_queue_head_t fence_queue;
41459- atomic_t last_fence_retired;
41460+ atomic_unchecked_t last_fence_retired;
41461 u32 next_fence_to_post;
41462
41463 unsigned int fb_cpp;
41464diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
41465index 729bfd5..ead8823 100644
41466--- a/drivers/gpu/drm/mga/mga_ioc32.c
41467+++ b/drivers/gpu/drm/mga/mga_ioc32.c
41468@@ -190,7 +190,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
41469 return 0;
41470 }
41471
41472-drm_ioctl_compat_t *mga_compat_ioctls[] = {
41473+drm_ioctl_compat_t mga_compat_ioctls[] = {
41474 [DRM_MGA_INIT] = compat_mga_init,
41475 [DRM_MGA_GETPARAM] = compat_mga_getparam,
41476 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
41477@@ -208,18 +208,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
41478 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41479 {
41480 unsigned int nr = DRM_IOCTL_NR(cmd);
41481- drm_ioctl_compat_t *fn = NULL;
41482 int ret;
41483
41484 if (nr < DRM_COMMAND_BASE)
41485 return drm_compat_ioctl(filp, cmd, arg);
41486
41487- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
41488- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
41489-
41490- if (fn != NULL)
41491+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls)) {
41492+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
41493 ret = (*fn) (filp, cmd, arg);
41494- else
41495+ } else
41496 ret = drm_ioctl(filp, cmd, arg);
41497
41498 return ret;
41499diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
41500index 1b071b8..de8601a 100644
41501--- a/drivers/gpu/drm/mga/mga_irq.c
41502+++ b/drivers/gpu/drm/mga/mga_irq.c
41503@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
41504 if (crtc != 0)
41505 return 0;
41506
41507- return atomic_read(&dev_priv->vbl_received);
41508+ return atomic_read_unchecked(&dev_priv->vbl_received);
41509 }
41510
41511
41512@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41513 /* VBLANK interrupt */
41514 if (status & MGA_VLINEPEN) {
41515 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
41516- atomic_inc(&dev_priv->vbl_received);
41517+ atomic_inc_unchecked(&dev_priv->vbl_received);
41518 drm_handle_vblank(dev, 0);
41519 handled = 1;
41520 }
41521@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(int irq, void *arg)
41522 if ((prim_start & ~0x03) != (prim_end & ~0x03))
41523 MGA_WRITE(MGA_PRIMEND, prim_end);
41524
41525- atomic_inc(&dev_priv->last_fence_retired);
41526+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
41527 wake_up(&dev_priv->fence_queue);
41528 handled = 1;
41529 }
41530@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
41531 * using fences.
41532 */
41533 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
41534- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
41535+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
41536 - *sequence) <= (1 << 23)));
41537
41538 *sequence = cur_fence;
41539diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
41540index dae2c96..324dbe4 100644
41541--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
41542+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
41543@@ -963,7 +963,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
41544 struct bit_table {
41545 const char id;
41546 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
41547-};
41548+} __no_const;
41549
41550 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
41551
41552diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
41553index b02b024..aed7bad 100644
41554--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
41555+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
41556@@ -119,7 +119,6 @@ struct nouveau_drm {
41557 struct drm_global_reference mem_global_ref;
41558 struct ttm_bo_global_ref bo_global_ref;
41559 struct ttm_bo_device bdev;
41560- atomic_t validate_sequence;
41561 int (*move)(struct nouveau_channel *,
41562 struct ttm_buffer_object *,
41563 struct ttm_mem_reg *, struct ttm_mem_reg *);
41564diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41565index 462679a..88e32a7 100644
41566--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41567+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
41568@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
41569 unsigned long arg)
41570 {
41571 unsigned int nr = DRM_IOCTL_NR(cmd);
41572- drm_ioctl_compat_t *fn = NULL;
41573+ drm_ioctl_compat_t fn = NULL;
41574 int ret;
41575
41576 if (nr < DRM_COMMAND_BASE)
41577diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41578index 53874b7..1db0a68 100644
41579--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
41580+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
41581@@ -127,11 +127,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41582 }
41583
41584 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
41585- nouveau_vram_manager_init,
41586- nouveau_vram_manager_fini,
41587- nouveau_vram_manager_new,
41588- nouveau_vram_manager_del,
41589- nouveau_vram_manager_debug
41590+ .init = nouveau_vram_manager_init,
41591+ .takedown = nouveau_vram_manager_fini,
41592+ .get_node = nouveau_vram_manager_new,
41593+ .put_node = nouveau_vram_manager_del,
41594+ .debug = nouveau_vram_manager_debug
41595 };
41596
41597 static int
41598@@ -196,11 +196,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41599 }
41600
41601 const struct ttm_mem_type_manager_func nouveau_gart_manager = {
41602- nouveau_gart_manager_init,
41603- nouveau_gart_manager_fini,
41604- nouveau_gart_manager_new,
41605- nouveau_gart_manager_del,
41606- nouveau_gart_manager_debug
41607+ .init = nouveau_gart_manager_init,
41608+ .takedown = nouveau_gart_manager_fini,
41609+ .get_node = nouveau_gart_manager_new,
41610+ .put_node = nouveau_gart_manager_del,
41611+ .debug = nouveau_gart_manager_debug
41612 };
41613
41614 /*XXX*/
41615@@ -270,11 +270,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
41616 }
41617
41618 const struct ttm_mem_type_manager_func nv04_gart_manager = {
41619- nv04_gart_manager_init,
41620- nv04_gart_manager_fini,
41621- nv04_gart_manager_new,
41622- nv04_gart_manager_del,
41623- nv04_gart_manager_debug
41624+ .init = nv04_gart_manager_init,
41625+ .takedown = nv04_gart_manager_fini,
41626+ .get_node = nv04_gart_manager_new,
41627+ .put_node = nv04_gart_manager_del,
41628+ .debug = nv04_gart_manager_debug
41629 };
41630
41631 int
41632diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
41633index c7592ec..dd45ebc 100644
41634--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
41635+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
41636@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
41637 * locking inversion with the driver load path. And the access here is
41638 * completely racy anyway. So don't bother with locking for now.
41639 */
41640- return dev->open_count == 0;
41641+ return local_read(&dev->open_count) == 0;
41642 }
41643
41644 static const struct vga_switcheroo_client_ops
41645diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
41646index eb89653..613cf71 100644
41647--- a/drivers/gpu/drm/qxl/qxl_cmd.c
41648+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
41649@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
41650 int ret;
41651
41652 mutex_lock(&qdev->async_io_mutex);
41653- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41654+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41655 if (qdev->last_sent_io_cmd > irq_num) {
41656 if (intr)
41657 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41658- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41659+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41660 else
41661 ret = wait_event_timeout(qdev->io_cmd_event,
41662- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41663+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41664 /* 0 is timeout, just bail the "hw" has gone away */
41665 if (ret <= 0)
41666 goto out;
41667- irq_num = atomic_read(&qdev->irq_received_io_cmd);
41668+ irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
41669 }
41670 outb(val, addr);
41671 qdev->last_sent_io_cmd = irq_num + 1;
41672 if (intr)
41673 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
41674- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41675+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41676 else
41677 ret = wait_event_timeout(qdev->io_cmd_event,
41678- atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41679+ atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
41680 out:
41681 if (ret > 0)
41682 ret = 0;
41683diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
41684index c3c2bbd..bc3c0fb 100644
41685--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
41686+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
41687@@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
41688 struct drm_info_node *node = (struct drm_info_node *) m->private;
41689 struct qxl_device *qdev = node->minor->dev->dev_private;
41690
41691- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
41692- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
41693- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
41694- seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
41695+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
41696+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
41697+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
41698+ seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
41699 seq_printf(m, "%d\n", qdev->irq_received_error);
41700 return 0;
41701 }
41702diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
41703index 36ed40b..0397633 100644
41704--- a/drivers/gpu/drm/qxl/qxl_drv.h
41705+++ b/drivers/gpu/drm/qxl/qxl_drv.h
41706@@ -290,10 +290,10 @@ struct qxl_device {
41707 unsigned int last_sent_io_cmd;
41708
41709 /* interrupt handling */
41710- atomic_t irq_received;
41711- atomic_t irq_received_display;
41712- atomic_t irq_received_cursor;
41713- atomic_t irq_received_io_cmd;
41714+ atomic_unchecked_t irq_received;
41715+ atomic_unchecked_t irq_received_display;
41716+ atomic_unchecked_t irq_received_cursor;
41717+ atomic_unchecked_t irq_received_io_cmd;
41718 unsigned irq_received_error;
41719 wait_queue_head_t display_event;
41720 wait_queue_head_t cursor_event;
41721diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
41722index b110883..dd06418 100644
41723--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
41724+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
41725@@ -181,7 +181,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41726
41727 /* TODO copy slow path code from i915 */
41728 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
41729- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
41730+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void __force_user *)(unsigned long)cmd->command, cmd->command_size);
41731
41732 {
41733 struct qxl_drawable *draw = fb_cmd;
41734@@ -201,7 +201,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
41735 struct drm_qxl_reloc reloc;
41736
41737 if (copy_from_user(&reloc,
41738- &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
41739+ &((struct drm_qxl_reloc __force_user *)(uintptr_t)cmd->relocs)[i],
41740 sizeof(reloc))) {
41741 ret = -EFAULT;
41742 goto out_free_bos;
41743@@ -294,10 +294,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
41744
41745 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
41746
41747- struct drm_qxl_command *commands =
41748- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
41749+ struct drm_qxl_command __user *commands =
41750+ (struct drm_qxl_command __user *)(uintptr_t)execbuffer->commands;
41751
41752- if (copy_from_user(&user_cmd, &commands[cmd_num],
41753+ if (copy_from_user(&user_cmd, (struct drm_qxl_command __force_user *)&commands[cmd_num],
41754 sizeof(user_cmd)))
41755 return -EFAULT;
41756
41757diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
41758index 0bf1e20..42a7310 100644
41759--- a/drivers/gpu/drm/qxl/qxl_irq.c
41760+++ b/drivers/gpu/drm/qxl/qxl_irq.c
41761@@ -36,19 +36,19 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
41762 if (!pending)
41763 return IRQ_NONE;
41764
41765- atomic_inc(&qdev->irq_received);
41766+ atomic_inc_unchecked(&qdev->irq_received);
41767
41768 if (pending & QXL_INTERRUPT_DISPLAY) {
41769- atomic_inc(&qdev->irq_received_display);
41770+ atomic_inc_unchecked(&qdev->irq_received_display);
41771 wake_up_all(&qdev->display_event);
41772 qxl_queue_garbage_collect(qdev, false);
41773 }
41774 if (pending & QXL_INTERRUPT_CURSOR) {
41775- atomic_inc(&qdev->irq_received_cursor);
41776+ atomic_inc_unchecked(&qdev->irq_received_cursor);
41777 wake_up_all(&qdev->cursor_event);
41778 }
41779 if (pending & QXL_INTERRUPT_IO_CMD) {
41780- atomic_inc(&qdev->irq_received_io_cmd);
41781+ atomic_inc_unchecked(&qdev->irq_received_io_cmd);
41782 wake_up_all(&qdev->io_cmd_event);
41783 }
41784 if (pending & QXL_INTERRUPT_ERROR) {
41785@@ -85,10 +85,10 @@ int qxl_irq_init(struct qxl_device *qdev)
41786 init_waitqueue_head(&qdev->io_cmd_event);
41787 INIT_WORK(&qdev->client_monitors_config_work,
41788 qxl_client_monitors_config_work_func);
41789- atomic_set(&qdev->irq_received, 0);
41790- atomic_set(&qdev->irq_received_display, 0);
41791- atomic_set(&qdev->irq_received_cursor, 0);
41792- atomic_set(&qdev->irq_received_io_cmd, 0);
41793+ atomic_set_unchecked(&qdev->irq_received, 0);
41794+ atomic_set_unchecked(&qdev->irq_received_display, 0);
41795+ atomic_set_unchecked(&qdev->irq_received_cursor, 0);
41796+ atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
41797 qdev->irq_received_error = 0;
41798 ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
41799 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
41800diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
41801index 71a1bae..cb1f103 100644
41802--- a/drivers/gpu/drm/qxl/qxl_ttm.c
41803+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
41804@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
41805 }
41806 }
41807
41808-static struct vm_operations_struct qxl_ttm_vm_ops;
41809+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
41810 static const struct vm_operations_struct *ttm_vm_ops;
41811
41812 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41813@@ -145,8 +145,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
41814 return r;
41815 if (unlikely(ttm_vm_ops == NULL)) {
41816 ttm_vm_ops = vma->vm_ops;
41817+ pax_open_kernel();
41818 qxl_ttm_vm_ops = *ttm_vm_ops;
41819 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
41820+ pax_close_kernel();
41821 }
41822 vma->vm_ops = &qxl_ttm_vm_ops;
41823 return 0;
41824@@ -555,25 +557,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
41825 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
41826 {
41827 #if defined(CONFIG_DEBUG_FS)
41828- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
41829- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
41830- unsigned i;
41831+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
41832+ {
41833+ .name = "qxl_mem_mm",
41834+ .show = &qxl_mm_dump_table,
41835+ },
41836+ {
41837+ .name = "qxl_surf_mm",
41838+ .show = &qxl_mm_dump_table,
41839+ }
41840+ };
41841
41842- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
41843- if (i == 0)
41844- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
41845- else
41846- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
41847- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
41848- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
41849- qxl_mem_types_list[i].driver_features = 0;
41850- if (i == 0)
41851- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41852- else
41853- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41854+ pax_open_kernel();
41855+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
41856+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
41857+ pax_close_kernel();
41858
41859- }
41860- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
41861+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
41862 #else
41863 return 0;
41864 #endif
41865diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
41866index 59459fe..be26b31 100644
41867--- a/drivers/gpu/drm/r128/r128_cce.c
41868+++ b/drivers/gpu/drm/r128/r128_cce.c
41869@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
41870
41871 /* GH: Simple idle check.
41872 */
41873- atomic_set(&dev_priv->idle_count, 0);
41874+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41875
41876 /* We don't support anything other than bus-mastering ring mode,
41877 * but the ring can be in either AGP or PCI space for the ring
41878diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
41879index 5bf3f5f..7000661 100644
41880--- a/drivers/gpu/drm/r128/r128_drv.h
41881+++ b/drivers/gpu/drm/r128/r128_drv.h
41882@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
41883 int is_pci;
41884 unsigned long cce_buffers_offset;
41885
41886- atomic_t idle_count;
41887+ atomic_unchecked_t idle_count;
41888
41889 int page_flipping;
41890 int current_page;
41891 u32 crtc_offset;
41892 u32 crtc_offset_cntl;
41893
41894- atomic_t vbl_received;
41895+ atomic_unchecked_t vbl_received;
41896
41897 u32 color_fmt;
41898 unsigned int front_offset;
41899diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
41900index 663f38c..c689495 100644
41901--- a/drivers/gpu/drm/r128/r128_ioc32.c
41902+++ b/drivers/gpu/drm/r128/r128_ioc32.c
41903@@ -178,7 +178,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
41904 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
41905 }
41906
41907-drm_ioctl_compat_t *r128_compat_ioctls[] = {
41908+drm_ioctl_compat_t r128_compat_ioctls[] = {
41909 [DRM_R128_INIT] = compat_r128_init,
41910 [DRM_R128_DEPTH] = compat_r128_depth,
41911 [DRM_R128_STIPPLE] = compat_r128_stipple,
41912@@ -197,18 +197,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
41913 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
41914 {
41915 unsigned int nr = DRM_IOCTL_NR(cmd);
41916- drm_ioctl_compat_t *fn = NULL;
41917 int ret;
41918
41919 if (nr < DRM_COMMAND_BASE)
41920 return drm_compat_ioctl(filp, cmd, arg);
41921
41922- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
41923- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41924-
41925- if (fn != NULL)
41926+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls)) {
41927+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
41928 ret = (*fn) (filp, cmd, arg);
41929- else
41930+ } else
41931 ret = drm_ioctl(filp, cmd, arg);
41932
41933 return ret;
41934diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
41935index c2ae496..30b5993 100644
41936--- a/drivers/gpu/drm/r128/r128_irq.c
41937+++ b/drivers/gpu/drm/r128/r128_irq.c
41938@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
41939 if (crtc != 0)
41940 return 0;
41941
41942- return atomic_read(&dev_priv->vbl_received);
41943+ return atomic_read_unchecked(&dev_priv->vbl_received);
41944 }
41945
41946 irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41947@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(int irq, void *arg)
41948 /* VBLANK interrupt */
41949 if (status & R128_CRTC_VBLANK_INT) {
41950 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
41951- atomic_inc(&dev_priv->vbl_received);
41952+ atomic_inc_unchecked(&dev_priv->vbl_received);
41953 drm_handle_vblank(dev, 0);
41954 return IRQ_HANDLED;
41955 }
41956diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
41957index 575e986..66e62ca 100644
41958--- a/drivers/gpu/drm/r128/r128_state.c
41959+++ b/drivers/gpu/drm/r128/r128_state.c
41960@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
41961
41962 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
41963 {
41964- if (atomic_read(&dev_priv->idle_count) == 0)
41965+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
41966 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
41967 else
41968- atomic_set(&dev_priv->idle_count, 0);
41969+ atomic_set_unchecked(&dev_priv->idle_count, 0);
41970 }
41971
41972 #endif
41973diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
41974index 4a85bb6..aaea819 100644
41975--- a/drivers/gpu/drm/radeon/mkregtable.c
41976+++ b/drivers/gpu/drm/radeon/mkregtable.c
41977@@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
41978 regex_t mask_rex;
41979 regmatch_t match[4];
41980 char buf[1024];
41981- size_t end;
41982+ long end;
41983 int len;
41984 int done = 0;
41985 int r;
41986 unsigned o;
41987 struct offset *offset;
41988 char last_reg_s[10];
41989- int last_reg;
41990+ unsigned long last_reg;
41991
41992 if (regcomp
41993 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
41994diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
41995index 5d4416f..80b7fc4 100644
41996--- a/drivers/gpu/drm/radeon/radeon_device.c
41997+++ b/drivers/gpu/drm/radeon/radeon_device.c
41998@@ -1214,7 +1214,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
41999 * locking inversion with the driver load path. And the access here is
42000 * completely racy anyway. So don't bother with locking for now.
42001 */
42002- return dev->open_count == 0;
42003+ return local_read(&dev->open_count) == 0;
42004 }
42005
42006 static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
42007diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
42008index dafd812..1bf20c7 100644
42009--- a/drivers/gpu/drm/radeon/radeon_drv.h
42010+++ b/drivers/gpu/drm/radeon/radeon_drv.h
42011@@ -262,7 +262,7 @@ typedef struct drm_radeon_private {
42012
42013 /* SW interrupt */
42014 wait_queue_head_t swi_queue;
42015- atomic_t swi_emitted;
42016+ atomic_unchecked_t swi_emitted;
42017 int vblank_crtc;
42018 uint32_t irq_enable_reg;
42019 uint32_t r500_disp_irq_reg;
42020diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
42021index 0b98ea1..0881827 100644
42022--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
42023+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
42024@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42025 request = compat_alloc_user_space(sizeof(*request));
42026 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
42027 || __put_user(req32.param, &request->param)
42028- || __put_user((void __user *)(unsigned long)req32.value,
42029+ || __put_user((unsigned long)req32.value,
42030 &request->value))
42031 return -EFAULT;
42032
42033@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
42034 #define compat_radeon_cp_setparam NULL
42035 #endif /* X86_64 || IA64 */
42036
42037-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42038+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
42039 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
42040 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
42041 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
42042@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
42043 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
42044 {
42045 unsigned int nr = DRM_IOCTL_NR(cmd);
42046- drm_ioctl_compat_t *fn = NULL;
42047 int ret;
42048
42049 if (nr < DRM_COMMAND_BASE)
42050 return drm_compat_ioctl(filp, cmd, arg);
42051
42052- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
42053- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42054-
42055- if (fn != NULL)
42056+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls)) {
42057+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
42058 ret = (*fn) (filp, cmd, arg);
42059- else
42060+ } else
42061 ret = drm_ioctl(filp, cmd, arg);
42062
42063 return ret;
42064diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
42065index 244b19b..c19226d 100644
42066--- a/drivers/gpu/drm/radeon/radeon_irq.c
42067+++ b/drivers/gpu/drm/radeon/radeon_irq.c
42068@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
42069 unsigned int ret;
42070 RING_LOCALS;
42071
42072- atomic_inc(&dev_priv->swi_emitted);
42073- ret = atomic_read(&dev_priv->swi_emitted);
42074+ atomic_inc_unchecked(&dev_priv->swi_emitted);
42075+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
42076
42077 BEGIN_RING(4);
42078 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
42079@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
42080 drm_radeon_private_t *dev_priv =
42081 (drm_radeon_private_t *) dev->dev_private;
42082
42083- atomic_set(&dev_priv->swi_emitted, 0);
42084+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
42085 init_waitqueue_head(&dev_priv->swi_queue);
42086
42087 dev->max_vblank_count = 0x001fffff;
42088diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
42089index 23bb64f..69d7234 100644
42090--- a/drivers/gpu/drm/radeon/radeon_state.c
42091+++ b/drivers/gpu/drm/radeon/radeon_state.c
42092@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
42093 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
42094 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
42095
42096- if (copy_from_user(&depth_boxes, clear->depth_boxes,
42097+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || copy_from_user(&depth_boxes, clear->depth_boxes,
42098 sarea_priv->nbox * sizeof(depth_boxes[0])))
42099 return -EFAULT;
42100
42101@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
42102 {
42103 drm_radeon_private_t *dev_priv = dev->dev_private;
42104 drm_radeon_getparam_t *param = data;
42105- int value;
42106+ int value = 0;
42107
42108 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
42109
42110diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
42111index 72afe82..056a57a 100644
42112--- a/drivers/gpu/drm/radeon/radeon_ttm.c
42113+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
42114@@ -801,7 +801,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
42115 man->size = size >> PAGE_SHIFT;
42116 }
42117
42118-static struct vm_operations_struct radeon_ttm_vm_ops;
42119+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
42120 static const struct vm_operations_struct *ttm_vm_ops = NULL;
42121
42122 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42123@@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
42124 }
42125 if (unlikely(ttm_vm_ops == NULL)) {
42126 ttm_vm_ops = vma->vm_ops;
42127+ pax_open_kernel();
42128 radeon_ttm_vm_ops = *ttm_vm_ops;
42129 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
42130+ pax_close_kernel();
42131 }
42132 vma->vm_ops = &radeon_ttm_vm_ops;
42133 return 0;
42134diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
42135index 6553fd2..aecd29c 100644
42136--- a/drivers/gpu/drm/tegra/dc.c
42137+++ b/drivers/gpu/drm/tegra/dc.c
42138@@ -1243,7 +1243,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
42139 }
42140
42141 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
42142- dc->debugfs_files[i].data = dc;
42143+ *(void **)&dc->debugfs_files[i].data = dc;
42144
42145 err = drm_debugfs_create_files(dc->debugfs_files,
42146 ARRAY_SIZE(debugfs_files),
42147diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
42148index f787445..2df2c65 100644
42149--- a/drivers/gpu/drm/tegra/dsi.c
42150+++ b/drivers/gpu/drm/tegra/dsi.c
42151@@ -41,7 +41,7 @@ struct tegra_dsi {
42152 struct clk *clk_lp;
42153 struct clk *clk;
42154
42155- struct drm_info_list *debugfs_files;
42156+ drm_info_list_no_const *debugfs_files;
42157 struct drm_minor *minor;
42158 struct dentry *debugfs;
42159
42160diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
42161index ffe2654..03c7b1c 100644
42162--- a/drivers/gpu/drm/tegra/hdmi.c
42163+++ b/drivers/gpu/drm/tegra/hdmi.c
42164@@ -60,7 +60,7 @@ struct tegra_hdmi {
42165 bool stereo;
42166 bool dvi;
42167
42168- struct drm_info_list *debugfs_files;
42169+ drm_info_list_no_const *debugfs_files;
42170 struct drm_minor *minor;
42171 struct dentry *debugfs;
42172 };
42173diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42174index 9e103a48..0e117f3 100644
42175--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
42176+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
42177@@ -147,10 +147,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
42178 }
42179
42180 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
42181- ttm_bo_man_init,
42182- ttm_bo_man_takedown,
42183- ttm_bo_man_get_node,
42184- ttm_bo_man_put_node,
42185- ttm_bo_man_debug
42186+ .init = ttm_bo_man_init,
42187+ .takedown = ttm_bo_man_takedown,
42188+ .get_node = ttm_bo_man_get_node,
42189+ .put_node = ttm_bo_man_put_node,
42190+ .debug = ttm_bo_man_debug
42191 };
42192 EXPORT_SYMBOL(ttm_bo_manager_func);
42193diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
42194index dbc2def..0a9f710 100644
42195--- a/drivers/gpu/drm/ttm/ttm_memory.c
42196+++ b/drivers/gpu/drm/ttm/ttm_memory.c
42197@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
42198 zone->glob = glob;
42199 glob->zone_kernel = zone;
42200 ret = kobject_init_and_add(
42201- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42202+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42203 if (unlikely(ret != 0)) {
42204 kobject_put(&zone->kobj);
42205 return ret;
42206@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
42207 zone->glob = glob;
42208 glob->zone_dma32 = zone;
42209 ret = kobject_init_and_add(
42210- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
42211+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
42212 if (unlikely(ret != 0)) {
42213 kobject_put(&zone->kobj);
42214 return ret;
42215diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
42216index d1da339..829235e 100644
42217--- a/drivers/gpu/drm/udl/udl_fb.c
42218+++ b/drivers/gpu/drm/udl/udl_fb.c
42219@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
42220 fb_deferred_io_cleanup(info);
42221 kfree(info->fbdefio);
42222 info->fbdefio = NULL;
42223- info->fbops->fb_mmap = udl_fb_mmap;
42224 }
42225
42226 pr_warn("released /dev/fb%d user=%d count=%d\n",
42227diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
42228index ad02732..144f5ed 100644
42229--- a/drivers/gpu/drm/via/via_drv.h
42230+++ b/drivers/gpu/drm/via/via_drv.h
42231@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
42232 typedef uint32_t maskarray_t[5];
42233
42234 typedef struct drm_via_irq {
42235- atomic_t irq_received;
42236+ atomic_unchecked_t irq_received;
42237 uint32_t pending_mask;
42238 uint32_t enable_mask;
42239 wait_queue_head_t irq_queue;
42240@@ -75,7 +75,7 @@ typedef struct drm_via_private {
42241 struct timeval last_vblank;
42242 int last_vblank_valid;
42243 unsigned usec_per_vblank;
42244- atomic_t vbl_received;
42245+ atomic_unchecked_t vbl_received;
42246 drm_via_state_t hc_state;
42247 char pci_buf[VIA_PCI_BUF_SIZE];
42248 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
42249diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
42250index 1319433..a993b0c 100644
42251--- a/drivers/gpu/drm/via/via_irq.c
42252+++ b/drivers/gpu/drm/via/via_irq.c
42253@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
42254 if (crtc != 0)
42255 return 0;
42256
42257- return atomic_read(&dev_priv->vbl_received);
42258+ return atomic_read_unchecked(&dev_priv->vbl_received);
42259 }
42260
42261 irqreturn_t via_driver_irq_handler(int irq, void *arg)
42262@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42263
42264 status = VIA_READ(VIA_REG_INTERRUPT);
42265 if (status & VIA_IRQ_VBLANK_PENDING) {
42266- atomic_inc(&dev_priv->vbl_received);
42267- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
42268+ atomic_inc_unchecked(&dev_priv->vbl_received);
42269+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
42270 do_gettimeofday(&cur_vblank);
42271 if (dev_priv->last_vblank_valid) {
42272 dev_priv->usec_per_vblank =
42273@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42274 dev_priv->last_vblank = cur_vblank;
42275 dev_priv->last_vblank_valid = 1;
42276 }
42277- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
42278+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
42279 DRM_DEBUG("US per vblank is: %u\n",
42280 dev_priv->usec_per_vblank);
42281 }
42282@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
42283
42284 for (i = 0; i < dev_priv->num_irqs; ++i) {
42285 if (status & cur_irq->pending_mask) {
42286- atomic_inc(&cur_irq->irq_received);
42287+ atomic_inc_unchecked(&cur_irq->irq_received);
42288 wake_up(&cur_irq->irq_queue);
42289 handled = 1;
42290 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
42291@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
42292 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42293 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
42294 masks[irq][4]));
42295- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
42296+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
42297 } else {
42298 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
42299 (((cur_irq_sequence =
42300- atomic_read(&cur_irq->irq_received)) -
42301+ atomic_read_unchecked(&cur_irq->irq_received)) -
42302 *sequence) <= (1 << 23)));
42303 }
42304 *sequence = cur_irq_sequence;
42305@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
42306 }
42307
42308 for (i = 0; i < dev_priv->num_irqs; ++i) {
42309- atomic_set(&cur_irq->irq_received, 0);
42310+ atomic_set_unchecked(&cur_irq->irq_received, 0);
42311 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
42312 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
42313 init_waitqueue_head(&cur_irq->irq_queue);
42314@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
42315 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
42316 case VIA_IRQ_RELATIVE:
42317 irqwait->request.sequence +=
42318- atomic_read(&cur_irq->irq_received);
42319+ atomic_read_unchecked(&cur_irq->irq_received);
42320 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
42321 case VIA_IRQ_ABSOLUTE:
42322 break;
42323diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42324index 99f7317..33a835b 100644
42325--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42326+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
42327@@ -447,7 +447,7 @@ struct vmw_private {
42328 * Fencing and IRQs.
42329 */
42330
42331- atomic_t marker_seq;
42332+ atomic_unchecked_t marker_seq;
42333 wait_queue_head_t fence_queue;
42334 wait_queue_head_t fifo_queue;
42335 int fence_queue_waiters; /* Protected by hw_mutex */
42336diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42337index 6eae14d..aa311b3 100644
42338--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42339+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
42340@@ -154,7 +154,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
42341 (unsigned int) min,
42342 (unsigned int) fifo->capabilities);
42343
42344- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42345+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
42346 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
42347 vmw_marker_queue_init(&fifo->marker_queue);
42348 return vmw_fifo_send_fence(dev_priv, &dummy);
42349@@ -373,7 +373,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
42350 if (reserveable)
42351 iowrite32(bytes, fifo_mem +
42352 SVGA_FIFO_RESERVED);
42353- return fifo_mem + (next_cmd >> 2);
42354+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
42355 } else {
42356 need_bounce = true;
42357 }
42358@@ -493,7 +493,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42359
42360 fm = vmw_fifo_reserve(dev_priv, bytes);
42361 if (unlikely(fm == NULL)) {
42362- *seqno = atomic_read(&dev_priv->marker_seq);
42363+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42364 ret = -ENOMEM;
42365 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
42366 false, 3*HZ);
42367@@ -501,7 +501,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
42368 }
42369
42370 do {
42371- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
42372+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
42373 } while (*seqno == 0);
42374
42375 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
42376diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42377index 26f8bdd..90a0008 100644
42378--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42379+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
42380@@ -165,9 +165,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
42381 }
42382
42383 const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
42384- vmw_gmrid_man_init,
42385- vmw_gmrid_man_takedown,
42386- vmw_gmrid_man_get_node,
42387- vmw_gmrid_man_put_node,
42388- vmw_gmrid_man_debug
42389+ .init = vmw_gmrid_man_init,
42390+ .takedown = vmw_gmrid_man_takedown,
42391+ .get_node = vmw_gmrid_man_get_node,
42392+ .put_node = vmw_gmrid_man_put_node,
42393+ .debug = vmw_gmrid_man_debug
42394 };
42395diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42396index 37881ec..319065d 100644
42397--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42398+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
42399@@ -235,7 +235,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
42400 int ret;
42401
42402 num_clips = arg->num_clips;
42403- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42404+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42405
42406 if (unlikely(num_clips == 0))
42407 return 0;
42408@@ -318,7 +318,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
42409 int ret;
42410
42411 num_clips = arg->num_clips;
42412- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
42413+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
42414
42415 if (unlikely(num_clips == 0))
42416 return 0;
42417diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42418index 0c42376..6febe77 100644
42419--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42420+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
42421@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
42422 * emitted. Then the fence is stale and signaled.
42423 */
42424
42425- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
42426+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
42427 > VMW_FENCE_WRAP);
42428
42429 return ret;
42430@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
42431
42432 if (fifo_idle)
42433 down_read(&fifo_state->rwsem);
42434- signal_seq = atomic_read(&dev_priv->marker_seq);
42435+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
42436 ret = 0;
42437
42438 for (;;) {
42439diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42440index efd1ffd..0ae13ca 100644
42441--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42442+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
42443@@ -135,7 +135,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
42444 while (!vmw_lag_lt(queue, us)) {
42445 spin_lock(&queue->lock);
42446 if (list_empty(&queue->head))
42447- seqno = atomic_read(&dev_priv->marker_seq);
42448+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
42449 else {
42450 marker = list_first_entry(&queue->head,
42451 struct vmw_marker, head);
42452diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
42453index 37ac7b5..d52a5c9 100644
42454--- a/drivers/gpu/vga/vga_switcheroo.c
42455+++ b/drivers/gpu/vga/vga_switcheroo.c
42456@@ -644,7 +644,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
42457
42458 /* this version is for the case where the power switch is separate
42459 to the device being powered down. */
42460-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
42461+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
42462 {
42463 /* copy over all the bus versions */
42464 if (dev->bus && dev->bus->pm) {
42465@@ -695,7 +695,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
42466 return ret;
42467 }
42468
42469-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
42470+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
42471 {
42472 /* copy over all the bus versions */
42473 if (dev->bus && dev->bus->pm) {
42474diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
42475index 12b6e67..ddd983c 100644
42476--- a/drivers/hid/hid-core.c
42477+++ b/drivers/hid/hid-core.c
42478@@ -2500,7 +2500,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
42479
42480 int hid_add_device(struct hid_device *hdev)
42481 {
42482- static atomic_t id = ATOMIC_INIT(0);
42483+ static atomic_unchecked_t id = ATOMIC_INIT(0);
42484 int ret;
42485
42486 if (WARN_ON(hdev->status & HID_STAT_ADDED))
42487@@ -2542,7 +2542,7 @@ int hid_add_device(struct hid_device *hdev)
42488 /* XXX hack, any other cleaner solution after the driver core
42489 * is converted to allow more than 20 bytes as the device name? */
42490 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
42491- hdev->vendor, hdev->product, atomic_inc_return(&id));
42492+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
42493
42494 hid_debug_register(hdev, dev_name(&hdev->dev));
42495 ret = device_add(&hdev->dev);
42496diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
42497index 9bf8637..f462416 100644
42498--- a/drivers/hid/hid-logitech-dj.c
42499+++ b/drivers/hid/hid-logitech-dj.c
42500@@ -682,6 +682,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
42501 * device (via hid_input_report() ) and return 1 so hid-core does not do
42502 * anything else with it.
42503 */
42504+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
42505+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
42506+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
42507+ __func__, dj_report->device_index);
42508+ return false;
42509+ }
42510
42511 /* case 1) */
42512 if (data[0] != REPORT_ID_DJ_SHORT)
42513diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
42514index c13fb5b..55a3802 100644
42515--- a/drivers/hid/hid-wiimote-debug.c
42516+++ b/drivers/hid/hid-wiimote-debug.c
42517@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
42518 else if (size == 0)
42519 return -EIO;
42520
42521- if (copy_to_user(u, buf, size))
42522+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
42523 return -EFAULT;
42524
42525 *off += size;
42526diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
42527index 0cb92e3..c7d453d 100644
42528--- a/drivers/hid/uhid.c
42529+++ b/drivers/hid/uhid.c
42530@@ -47,7 +47,7 @@ struct uhid_device {
42531 struct mutex report_lock;
42532 wait_queue_head_t report_wait;
42533 atomic_t report_done;
42534- atomic_t report_id;
42535+ atomic_unchecked_t report_id;
42536 struct uhid_event report_buf;
42537 };
42538
42539@@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
42540
42541 spin_lock_irqsave(&uhid->qlock, flags);
42542 ev->type = UHID_FEATURE;
42543- ev->u.feature.id = atomic_inc_return(&uhid->report_id);
42544+ ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
42545 ev->u.feature.rnum = rnum;
42546 ev->u.feature.rtype = report_type;
42547
42548@@ -538,7 +538,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
42549 spin_lock_irqsave(&uhid->qlock, flags);
42550
42551 /* id for old report; drop it silently */
42552- if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
42553+ if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
42554 goto unlock;
42555 if (atomic_read(&uhid->report_done))
42556 goto unlock;
42557diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
42558index 19bad59..ca24eaf 100644
42559--- a/drivers/hv/channel.c
42560+++ b/drivers/hv/channel.c
42561@@ -366,8 +366,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
42562 unsigned long flags;
42563 int ret = 0;
42564
42565- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
42566- atomic_inc(&vmbus_connection.next_gpadl_handle);
42567+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
42568+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
42569
42570 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
42571 if (ret)
42572diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
42573index 3e4235c..877d0e5 100644
42574--- a/drivers/hv/hv.c
42575+++ b/drivers/hv/hv.c
42576@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
42577 u64 output_address = (output) ? virt_to_phys(output) : 0;
42578 u32 output_address_hi = output_address >> 32;
42579 u32 output_address_lo = output_address & 0xFFFFFFFF;
42580- void *hypercall_page = hv_context.hypercall_page;
42581+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
42582
42583 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
42584 "=a"(hv_status_lo) : "d" (control_hi),
42585@@ -156,7 +156,7 @@ int hv_init(void)
42586 /* See if the hypercall page is already set */
42587 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
42588
42589- virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);
42590+ virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
42591
42592 if (!virtaddr)
42593 goto cleanup;
42594diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
42595index 5e90c5d..d8fcefb 100644
42596--- a/drivers/hv/hv_balloon.c
42597+++ b/drivers/hv/hv_balloon.c
42598@@ -470,7 +470,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
42599
42600 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
42601 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
42602-static atomic_t trans_id = ATOMIC_INIT(0);
42603+static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
42604
42605 static int dm_ring_size = (5 * PAGE_SIZE);
42606
42607@@ -893,7 +893,7 @@ static void hot_add_req(struct work_struct *dummy)
42608 pr_info("Memory hot add failed\n");
42609
42610 dm->state = DM_INITIALIZED;
42611- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42612+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42613 vmbus_sendpacket(dm->dev->channel, &resp,
42614 sizeof(struct dm_hot_add_response),
42615 (unsigned long)NULL,
42616@@ -973,7 +973,7 @@ static void post_status(struct hv_dynmem_device *dm)
42617 memset(&status, 0, sizeof(struct dm_status));
42618 status.hdr.type = DM_STATUS_REPORT;
42619 status.hdr.size = sizeof(struct dm_status);
42620- status.hdr.trans_id = atomic_inc_return(&trans_id);
42621+ status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42622
42623 /*
42624 * The host expects the guest to report free memory.
42625@@ -993,7 +993,7 @@ static void post_status(struct hv_dynmem_device *dm)
42626 * send the status. This can happen if we were interrupted
42627 * after we picked our transaction ID.
42628 */
42629- if (status.hdr.trans_id != atomic_read(&trans_id))
42630+ if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
42631 return;
42632
42633 /*
42634@@ -1129,7 +1129,7 @@ static void balloon_up(struct work_struct *dummy)
42635 */
42636
42637 do {
42638- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
42639+ bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42640 ret = vmbus_sendpacket(dm_device.dev->channel,
42641 bl_resp,
42642 bl_resp->hdr.size,
42643@@ -1175,7 +1175,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
42644
42645 memset(&resp, 0, sizeof(struct dm_unballoon_response));
42646 resp.hdr.type = DM_UNBALLOON_RESPONSE;
42647- resp.hdr.trans_id = atomic_inc_return(&trans_id);
42648+ resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42649 resp.hdr.size = sizeof(struct dm_unballoon_response);
42650
42651 vmbus_sendpacket(dm_device.dev->channel, &resp,
42652@@ -1239,7 +1239,7 @@ static void version_resp(struct hv_dynmem_device *dm,
42653 memset(&version_req, 0, sizeof(struct dm_version_request));
42654 version_req.hdr.type = DM_VERSION_REQUEST;
42655 version_req.hdr.size = sizeof(struct dm_version_request);
42656- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42657+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42658 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
42659 version_req.is_last_attempt = 1;
42660
42661@@ -1409,7 +1409,7 @@ static int balloon_probe(struct hv_device *dev,
42662 memset(&version_req, 0, sizeof(struct dm_version_request));
42663 version_req.hdr.type = DM_VERSION_REQUEST;
42664 version_req.hdr.size = sizeof(struct dm_version_request);
42665- version_req.hdr.trans_id = atomic_inc_return(&trans_id);
42666+ version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42667 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
42668 version_req.is_last_attempt = 0;
42669
42670@@ -1440,7 +1440,7 @@ static int balloon_probe(struct hv_device *dev,
42671 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
42672 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
42673 cap_msg.hdr.size = sizeof(struct dm_capabilities);
42674- cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
42675+ cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
42676
42677 cap_msg.caps.cap_bits.balloon = 1;
42678 cap_msg.caps.cap_bits.hot_add = 1;
42679diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
42680index c386d8d..d6004c4 100644
42681--- a/drivers/hv/hyperv_vmbus.h
42682+++ b/drivers/hv/hyperv_vmbus.h
42683@@ -611,7 +611,7 @@ enum vmbus_connect_state {
42684 struct vmbus_connection {
42685 enum vmbus_connect_state conn_state;
42686
42687- atomic_t next_gpadl_handle;
42688+ atomic_unchecked_t next_gpadl_handle;
42689
42690 /*
42691 * Represents channel interrupts. Each bit position represents a
42692diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
42693index 4d6b269..2e23b86 100644
42694--- a/drivers/hv/vmbus_drv.c
42695+++ b/drivers/hv/vmbus_drv.c
42696@@ -807,10 +807,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
42697 {
42698 int ret = 0;
42699
42700- static atomic_t device_num = ATOMIC_INIT(0);
42701+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42702
42703 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
42704- atomic_inc_return(&device_num));
42705+ atomic_inc_return_unchecked(&device_num));
42706
42707 child_device_obj->device.bus = &hv_bus;
42708 child_device_obj->device.parent = &hv_acpi_dev->dev;
42709diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
42710index 579bdf9..75118b5 100644
42711--- a/drivers/hwmon/acpi_power_meter.c
42712+++ b/drivers/hwmon/acpi_power_meter.c
42713@@ -116,7 +116,7 @@ struct sensor_template {
42714 struct device_attribute *devattr,
42715 const char *buf, size_t count);
42716 int index;
42717-};
42718+} __do_const;
42719
42720 /* Averaging interval */
42721 static int update_avg_interval(struct acpi_power_meter_resource *resource)
42722@@ -631,7 +631,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
42723 struct sensor_template *attrs)
42724 {
42725 struct device *dev = &resource->acpi_dev->dev;
42726- struct sensor_device_attribute *sensors =
42727+ sensor_device_attribute_no_const *sensors =
42728 &resource->sensors[resource->num_sensors];
42729 int res = 0;
42730
42731diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
42732index 3288f13..71cfb4e 100644
42733--- a/drivers/hwmon/applesmc.c
42734+++ b/drivers/hwmon/applesmc.c
42735@@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
42736 {
42737 struct applesmc_node_group *grp;
42738 struct applesmc_dev_attr *node;
42739- struct attribute *attr;
42740+ attribute_no_const *attr;
42741 int ret, i;
42742
42743 for (grp = groups; grp->format; grp++) {
42744diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
42745index cccef87..06ce8ec 100644
42746--- a/drivers/hwmon/asus_atk0110.c
42747+++ b/drivers/hwmon/asus_atk0110.c
42748@@ -147,10 +147,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
42749 struct atk_sensor_data {
42750 struct list_head list;
42751 struct atk_data *data;
42752- struct device_attribute label_attr;
42753- struct device_attribute input_attr;
42754- struct device_attribute limit1_attr;
42755- struct device_attribute limit2_attr;
42756+ device_attribute_no_const label_attr;
42757+ device_attribute_no_const input_attr;
42758+ device_attribute_no_const limit1_attr;
42759+ device_attribute_no_const limit2_attr;
42760 char label_attr_name[ATTR_NAME_SIZE];
42761 char input_attr_name[ATTR_NAME_SIZE];
42762 char limit1_attr_name[ATTR_NAME_SIZE];
42763@@ -270,7 +270,7 @@ static ssize_t atk_name_show(struct device *dev,
42764 static struct device_attribute atk_name_attr =
42765 __ATTR(name, 0444, atk_name_show, NULL);
42766
42767-static void atk_init_attribute(struct device_attribute *attr, char *name,
42768+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
42769 sysfs_show_func show)
42770 {
42771 sysfs_attr_init(&attr->attr);
42772diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
42773index d76f0b7..55ae976 100644
42774--- a/drivers/hwmon/coretemp.c
42775+++ b/drivers/hwmon/coretemp.c
42776@@ -784,7 +784,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
42777 return NOTIFY_OK;
42778 }
42779
42780-static struct notifier_block coretemp_cpu_notifier __refdata = {
42781+static struct notifier_block coretemp_cpu_notifier = {
42782 .notifier_call = coretemp_cpu_callback,
42783 };
42784
42785diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
42786index 7a8a6fb..015c1fd 100644
42787--- a/drivers/hwmon/ibmaem.c
42788+++ b/drivers/hwmon/ibmaem.c
42789@@ -924,7 +924,7 @@ static int aem_register_sensors(struct aem_data *data,
42790 struct aem_rw_sensor_template *rw)
42791 {
42792 struct device *dev = &data->pdev->dev;
42793- struct sensor_device_attribute *sensors = data->sensors;
42794+ sensor_device_attribute_no_const *sensors = data->sensors;
42795 int err;
42796
42797 /* Set up read-only sensors */
42798diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
42799index 14c82da..09b25d7 100644
42800--- a/drivers/hwmon/iio_hwmon.c
42801+++ b/drivers/hwmon/iio_hwmon.c
42802@@ -61,7 +61,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
42803 {
42804 struct device *dev = &pdev->dev;
42805 struct iio_hwmon_state *st;
42806- struct sensor_device_attribute *a;
42807+ sensor_device_attribute_no_const *a;
42808 int ret, i;
42809 int in_i = 1, temp_i = 1, curr_i = 1;
42810 enum iio_chan_type type;
42811diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
42812index 7710f46..427a28d 100644
42813--- a/drivers/hwmon/nct6683.c
42814+++ b/drivers/hwmon/nct6683.c
42815@@ -397,11 +397,11 @@ static struct attribute_group *
42816 nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42817 int repeat)
42818 {
42819- struct sensor_device_attribute_2 *a2;
42820- struct sensor_device_attribute *a;
42821+ sensor_device_attribute_2_no_const *a2;
42822+ sensor_device_attribute_no_const *a;
42823 struct sensor_device_template **t;
42824 struct sensor_device_attr_u *su;
42825- struct attribute_group *group;
42826+ attribute_group_no_const *group;
42827 struct attribute **attrs;
42828 int i, j, count;
42829
42830diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
42831index 504cbdd..35d6f25 100644
42832--- a/drivers/hwmon/nct6775.c
42833+++ b/drivers/hwmon/nct6775.c
42834@@ -943,10 +943,10 @@ static struct attribute_group *
42835 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
42836 int repeat)
42837 {
42838- struct attribute_group *group;
42839+ attribute_group_no_const *group;
42840 struct sensor_device_attr_u *su;
42841- struct sensor_device_attribute *a;
42842- struct sensor_device_attribute_2 *a2;
42843+ sensor_device_attribute_no_const *a;
42844+ sensor_device_attribute_2_no_const *a2;
42845 struct attribute **attrs;
42846 struct sensor_device_template **t;
42847 int i, count;
42848diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
42849index 291d11f..3f0dbbd 100644
42850--- a/drivers/hwmon/pmbus/pmbus_core.c
42851+++ b/drivers/hwmon/pmbus/pmbus_core.c
42852@@ -783,7 +783,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
42853 return 0;
42854 }
42855
42856-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42857+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
42858 const char *name,
42859 umode_t mode,
42860 ssize_t (*show)(struct device *dev,
42861@@ -800,7 +800,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
42862 dev_attr->store = store;
42863 }
42864
42865-static void pmbus_attr_init(struct sensor_device_attribute *a,
42866+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
42867 const char *name,
42868 umode_t mode,
42869 ssize_t (*show)(struct device *dev,
42870@@ -822,7 +822,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
42871 u16 reg, u8 mask)
42872 {
42873 struct pmbus_boolean *boolean;
42874- struct sensor_device_attribute *a;
42875+ sensor_device_attribute_no_const *a;
42876
42877 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
42878 if (!boolean)
42879@@ -847,7 +847,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
42880 bool update, bool readonly)
42881 {
42882 struct pmbus_sensor *sensor;
42883- struct device_attribute *a;
42884+ device_attribute_no_const *a;
42885
42886 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
42887 if (!sensor)
42888@@ -878,7 +878,7 @@ static int pmbus_add_label(struct pmbus_data *data,
42889 const char *lstring, int index)
42890 {
42891 struct pmbus_label *label;
42892- struct device_attribute *a;
42893+ device_attribute_no_const *a;
42894
42895 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
42896 if (!label)
42897diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
42898index 97cd45a..ac54d8b 100644
42899--- a/drivers/hwmon/sht15.c
42900+++ b/drivers/hwmon/sht15.c
42901@@ -169,7 +169,7 @@ struct sht15_data {
42902 int supply_uv;
42903 bool supply_uv_valid;
42904 struct work_struct update_supply_work;
42905- atomic_t interrupt_handled;
42906+ atomic_unchecked_t interrupt_handled;
42907 };
42908
42909 /**
42910@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
42911 ret = gpio_direction_input(data->pdata->gpio_data);
42912 if (ret)
42913 return ret;
42914- atomic_set(&data->interrupt_handled, 0);
42915+ atomic_set_unchecked(&data->interrupt_handled, 0);
42916
42917 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42918 if (gpio_get_value(data->pdata->gpio_data) == 0) {
42919 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
42920 /* Only relevant if the interrupt hasn't occurred. */
42921- if (!atomic_read(&data->interrupt_handled))
42922+ if (!atomic_read_unchecked(&data->interrupt_handled))
42923 schedule_work(&data->read_work);
42924 }
42925 ret = wait_event_timeout(data->wait_queue,
42926@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
42927
42928 /* First disable the interrupt */
42929 disable_irq_nosync(irq);
42930- atomic_inc(&data->interrupt_handled);
42931+ atomic_inc_unchecked(&data->interrupt_handled);
42932 /* Then schedule a reading work struct */
42933 if (data->state != SHT15_READING_NOTHING)
42934 schedule_work(&data->read_work);
42935@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
42936 * If not, then start the interrupt again - care here as could
42937 * have gone low in meantime so verify it hasn't!
42938 */
42939- atomic_set(&data->interrupt_handled, 0);
42940+ atomic_set_unchecked(&data->interrupt_handled, 0);
42941 enable_irq(gpio_to_irq(data->pdata->gpio_data));
42942 /* If still not occurred or another handler was scheduled */
42943 if (gpio_get_value(data->pdata->gpio_data)
42944- || atomic_read(&data->interrupt_handled))
42945+ || atomic_read_unchecked(&data->interrupt_handled))
42946 return;
42947 }
42948
42949diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
42950index 8df43c5..b07b91d 100644
42951--- a/drivers/hwmon/via-cputemp.c
42952+++ b/drivers/hwmon/via-cputemp.c
42953@@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
42954 return NOTIFY_OK;
42955 }
42956
42957-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
42958+static struct notifier_block via_cputemp_cpu_notifier = {
42959 .notifier_call = via_cputemp_cpu_callback,
42960 };
42961
42962diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
42963index 41fc683..a39cfea 100644
42964--- a/drivers/i2c/busses/i2c-amd756-s4882.c
42965+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
42966@@ -43,7 +43,7 @@
42967 extern struct i2c_adapter amd756_smbus;
42968
42969 static struct i2c_adapter *s4882_adapter;
42970-static struct i2c_algorithm *s4882_algo;
42971+static i2c_algorithm_no_const *s4882_algo;
42972
42973 /* Wrapper access functions for multiplexed SMBus */
42974 static DEFINE_MUTEX(amd756_lock);
42975diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
42976index b19a310..d6eece0 100644
42977--- a/drivers/i2c/busses/i2c-diolan-u2c.c
42978+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
42979@@ -98,7 +98,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
42980 /* usb layer */
42981
42982 /* Send command to device, and get response. */
42983-static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42984+static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
42985 {
42986 int ret = 0;
42987 int actual;
42988diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
42989index b170bdf..3c76427 100644
42990--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
42991+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
42992@@ -41,7 +41,7 @@
42993 extern struct i2c_adapter *nforce2_smbus;
42994
42995 static struct i2c_adapter *s4985_adapter;
42996-static struct i2c_algorithm *s4985_algo;
42997+static i2c_algorithm_no_const *s4985_algo;
42998
42999 /* Wrapper access functions for multiplexed SMBus */
43000 static DEFINE_MUTEX(nforce2_lock);
43001diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
43002index 80b47e8..1a6040d9 100644
43003--- a/drivers/i2c/i2c-dev.c
43004+++ b/drivers/i2c/i2c-dev.c
43005@@ -277,7 +277,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
43006 break;
43007 }
43008
43009- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
43010+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
43011 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
43012 if (IS_ERR(rdwr_pa[i].buf)) {
43013 res = PTR_ERR(rdwr_pa[i].buf);
43014diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
43015index 0b510ba..4fbb5085 100644
43016--- a/drivers/ide/ide-cd.c
43017+++ b/drivers/ide/ide-cd.c
43018@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
43019 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
43020 if ((unsigned long)buf & alignment
43021 || blk_rq_bytes(rq) & q->dma_pad_mask
43022- || object_is_on_stack(buf))
43023+ || object_starts_on_stack(buf))
43024 drive->dma = 0;
43025 }
43026 }
43027diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
43028index af3e76d..96dfe5e 100644
43029--- a/drivers/iio/industrialio-core.c
43030+++ b/drivers/iio/industrialio-core.c
43031@@ -555,7 +555,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
43032 }
43033
43034 static
43035-int __iio_device_attr_init(struct device_attribute *dev_attr,
43036+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
43037 const char *postfix,
43038 struct iio_chan_spec const *chan,
43039 ssize_t (*readfunc)(struct device *dev,
43040diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
43041index e28a494..f7c2671 100644
43042--- a/drivers/infiniband/core/cm.c
43043+++ b/drivers/infiniband/core/cm.c
43044@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
43045
43046 struct cm_counter_group {
43047 struct kobject obj;
43048- atomic_long_t counter[CM_ATTR_COUNT];
43049+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
43050 };
43051
43052 struct cm_counter_attribute {
43053@@ -1398,7 +1398,7 @@ static void cm_dup_req_handler(struct cm_work *work,
43054 struct ib_mad_send_buf *msg = NULL;
43055 int ret;
43056
43057- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43058+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43059 counter[CM_REQ_COUNTER]);
43060
43061 /* Quick state check to discard duplicate REQs. */
43062@@ -1785,7 +1785,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
43063 if (!cm_id_priv)
43064 return;
43065
43066- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43067+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43068 counter[CM_REP_COUNTER]);
43069 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
43070 if (ret)
43071@@ -1952,7 +1952,7 @@ static int cm_rtu_handler(struct cm_work *work)
43072 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
43073 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
43074 spin_unlock_irq(&cm_id_priv->lock);
43075- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43076+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43077 counter[CM_RTU_COUNTER]);
43078 goto out;
43079 }
43080@@ -2135,7 +2135,7 @@ static int cm_dreq_handler(struct cm_work *work)
43081 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
43082 dreq_msg->local_comm_id);
43083 if (!cm_id_priv) {
43084- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43085+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43086 counter[CM_DREQ_COUNTER]);
43087 cm_issue_drep(work->port, work->mad_recv_wc);
43088 return -EINVAL;
43089@@ -2160,7 +2160,7 @@ static int cm_dreq_handler(struct cm_work *work)
43090 case IB_CM_MRA_REP_RCVD:
43091 break;
43092 case IB_CM_TIMEWAIT:
43093- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43094+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43095 counter[CM_DREQ_COUNTER]);
43096 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43097 goto unlock;
43098@@ -2174,7 +2174,7 @@ static int cm_dreq_handler(struct cm_work *work)
43099 cm_free_msg(msg);
43100 goto deref;
43101 case IB_CM_DREQ_RCVD:
43102- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43103+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43104 counter[CM_DREQ_COUNTER]);
43105 goto unlock;
43106 default:
43107@@ -2541,7 +2541,7 @@ static int cm_mra_handler(struct cm_work *work)
43108 ib_modify_mad(cm_id_priv->av.port->mad_agent,
43109 cm_id_priv->msg, timeout)) {
43110 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
43111- atomic_long_inc(&work->port->
43112+ atomic_long_inc_unchecked(&work->port->
43113 counter_group[CM_RECV_DUPLICATES].
43114 counter[CM_MRA_COUNTER]);
43115 goto out;
43116@@ -2550,7 +2550,7 @@ static int cm_mra_handler(struct cm_work *work)
43117 break;
43118 case IB_CM_MRA_REQ_RCVD:
43119 case IB_CM_MRA_REP_RCVD:
43120- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43121+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43122 counter[CM_MRA_COUNTER]);
43123 /* fall through */
43124 default:
43125@@ -2712,7 +2712,7 @@ static int cm_lap_handler(struct cm_work *work)
43126 case IB_CM_LAP_IDLE:
43127 break;
43128 case IB_CM_MRA_LAP_SENT:
43129- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43130+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43131 counter[CM_LAP_COUNTER]);
43132 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
43133 goto unlock;
43134@@ -2728,7 +2728,7 @@ static int cm_lap_handler(struct cm_work *work)
43135 cm_free_msg(msg);
43136 goto deref;
43137 case IB_CM_LAP_RCVD:
43138- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43139+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43140 counter[CM_LAP_COUNTER]);
43141 goto unlock;
43142 default:
43143@@ -3012,7 +3012,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
43144 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
43145 if (cur_cm_id_priv) {
43146 spin_unlock_irq(&cm.lock);
43147- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
43148+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
43149 counter[CM_SIDR_REQ_COUNTER]);
43150 goto out; /* Duplicate message. */
43151 }
43152@@ -3224,10 +3224,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
43153 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
43154 msg->retries = 1;
43155
43156- atomic_long_add(1 + msg->retries,
43157+ atomic_long_add_unchecked(1 + msg->retries,
43158 &port->counter_group[CM_XMIT].counter[attr_index]);
43159 if (msg->retries)
43160- atomic_long_add(msg->retries,
43161+ atomic_long_add_unchecked(msg->retries,
43162 &port->counter_group[CM_XMIT_RETRIES].
43163 counter[attr_index]);
43164
43165@@ -3437,7 +3437,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
43166 }
43167
43168 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
43169- atomic_long_inc(&port->counter_group[CM_RECV].
43170+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
43171 counter[attr_id - CM_ATTR_ID_OFFSET]);
43172
43173 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
43174@@ -3668,7 +3668,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
43175 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
43176
43177 return sprintf(buf, "%ld\n",
43178- atomic_long_read(&group->counter[cm_attr->index]));
43179+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
43180 }
43181
43182 static const struct sysfs_ops cm_counter_ops = {
43183diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
43184index 9f5ad7c..588cd84 100644
43185--- a/drivers/infiniband/core/fmr_pool.c
43186+++ b/drivers/infiniband/core/fmr_pool.c
43187@@ -98,8 +98,8 @@ struct ib_fmr_pool {
43188
43189 struct task_struct *thread;
43190
43191- atomic_t req_ser;
43192- atomic_t flush_ser;
43193+ atomic_unchecked_t req_ser;
43194+ atomic_unchecked_t flush_ser;
43195
43196 wait_queue_head_t force_wait;
43197 };
43198@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43199 struct ib_fmr_pool *pool = pool_ptr;
43200
43201 do {
43202- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
43203+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
43204 ib_fmr_batch_release(pool);
43205
43206- atomic_inc(&pool->flush_ser);
43207+ atomic_inc_unchecked(&pool->flush_ser);
43208 wake_up_interruptible(&pool->force_wait);
43209
43210 if (pool->flush_function)
43211@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
43212 }
43213
43214 set_current_state(TASK_INTERRUPTIBLE);
43215- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
43216+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
43217 !kthread_should_stop())
43218 schedule();
43219 __set_current_state(TASK_RUNNING);
43220@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
43221 pool->dirty_watermark = params->dirty_watermark;
43222 pool->dirty_len = 0;
43223 spin_lock_init(&pool->pool_lock);
43224- atomic_set(&pool->req_ser, 0);
43225- atomic_set(&pool->flush_ser, 0);
43226+ atomic_set_unchecked(&pool->req_ser, 0);
43227+ atomic_set_unchecked(&pool->flush_ser, 0);
43228 init_waitqueue_head(&pool->force_wait);
43229
43230 pool->thread = kthread_run(ib_fmr_cleanup_thread,
43231@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
43232 }
43233 spin_unlock_irq(&pool->pool_lock);
43234
43235- serial = atomic_inc_return(&pool->req_ser);
43236+ serial = atomic_inc_return_unchecked(&pool->req_ser);
43237 wake_up_process(pool->thread);
43238
43239 if (wait_event_interruptible(pool->force_wait,
43240- atomic_read(&pool->flush_ser) - serial >= 0))
43241+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
43242 return -EINTR;
43243
43244 return 0;
43245@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
43246 } else {
43247 list_add_tail(&fmr->list, &pool->dirty_list);
43248 if (++pool->dirty_len >= pool->dirty_watermark) {
43249- atomic_inc(&pool->req_ser);
43250+ atomic_inc_unchecked(&pool->req_ser);
43251 wake_up_process(pool->thread);
43252 }
43253 }
43254diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
43255index ec7a298..8742e59 100644
43256--- a/drivers/infiniband/hw/cxgb4/mem.c
43257+++ b/drivers/infiniband/hw/cxgb4/mem.c
43258@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43259 int err;
43260 struct fw_ri_tpte tpt;
43261 u32 stag_idx;
43262- static atomic_t key;
43263+ static atomic_unchecked_t key;
43264
43265 if (c4iw_fatal_error(rdev))
43266 return -EIO;
43267@@ -270,7 +270,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
43268 if (rdev->stats.stag.cur > rdev->stats.stag.max)
43269 rdev->stats.stag.max = rdev->stats.stag.cur;
43270 mutex_unlock(&rdev->stats.lock);
43271- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
43272+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
43273 }
43274 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
43275 __func__, stag_state, type, pdid, stag_idx);
43276diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
43277index 79b3dbc..96e5fcc 100644
43278--- a/drivers/infiniband/hw/ipath/ipath_rc.c
43279+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
43280@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43281 struct ib_atomic_eth *ateth;
43282 struct ipath_ack_entry *e;
43283 u64 vaddr;
43284- atomic64_t *maddr;
43285+ atomic64_unchecked_t *maddr;
43286 u64 sdata;
43287 u32 rkey;
43288 u8 next;
43289@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
43290 IB_ACCESS_REMOTE_ATOMIC)))
43291 goto nack_acc_unlck;
43292 /* Perform atomic OP and save result. */
43293- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43294+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43295 sdata = be64_to_cpu(ateth->swap_data);
43296 e = &qp->s_ack_queue[qp->r_head_ack_queue];
43297 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
43298- (u64) atomic64_add_return(sdata, maddr) - sdata :
43299+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43300 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43301 be64_to_cpu(ateth->compare_data),
43302 sdata);
43303diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
43304index 1f95bba..9530f87 100644
43305--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
43306+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
43307@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
43308 unsigned long flags;
43309 struct ib_wc wc;
43310 u64 sdata;
43311- atomic64_t *maddr;
43312+ atomic64_unchecked_t *maddr;
43313 enum ib_wc_status send_status;
43314
43315 /*
43316@@ -382,11 +382,11 @@ again:
43317 IB_ACCESS_REMOTE_ATOMIC)))
43318 goto acc_err;
43319 /* Perform atomic OP and save result. */
43320- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
43321+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
43322 sdata = wqe->wr.wr.atomic.compare_add;
43323 *(u64 *) sqp->s_sge.sge.vaddr =
43324 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
43325- (u64) atomic64_add_return(sdata, maddr) - sdata :
43326+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
43327 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
43328 sdata, wqe->wr.wr.atomic.swap);
43329 goto send_comp;
43330diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
43331index 82a7dd8..8fb6ba6 100644
43332--- a/drivers/infiniband/hw/mlx4/mad.c
43333+++ b/drivers/infiniband/hw/mlx4/mad.c
43334@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
43335
43336 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
43337 {
43338- return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
43339+ return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
43340 cpu_to_be64(0xff00000000000000LL);
43341 }
43342
43343diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
43344index ed327e6..ca1739e0 100644
43345--- a/drivers/infiniband/hw/mlx4/mcg.c
43346+++ b/drivers/infiniband/hw/mlx4/mcg.c
43347@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
43348 {
43349 char name[20];
43350
43351- atomic_set(&ctx->tid, 0);
43352+ atomic_set_unchecked(&ctx->tid, 0);
43353 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
43354 ctx->mcg_wq = create_singlethread_workqueue(name);
43355 if (!ctx->mcg_wq)
43356diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43357index 6eb743f..a7b0f6d 100644
43358--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
43359+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
43360@@ -426,7 +426,7 @@ struct mlx4_ib_demux_ctx {
43361 struct list_head mcg_mgid0_list;
43362 struct workqueue_struct *mcg_wq;
43363 struct mlx4_ib_demux_pv_ctx **tun;
43364- atomic_t tid;
43365+ atomic_unchecked_t tid;
43366 int flushing; /* flushing the work queue */
43367 };
43368
43369diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
43370index 9d3e5c1..6f166df 100644
43371--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
43372+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
43373@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
43374 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
43375 }
43376
43377-int mthca_QUERY_FW(struct mthca_dev *dev)
43378+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
43379 {
43380 struct mthca_mailbox *mailbox;
43381 u32 *outbox;
43382@@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43383 CMD_TIME_CLASS_B);
43384 }
43385
43386-int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43387+int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43388 int num_mtt)
43389 {
43390 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
43391@@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
43392 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
43393 }
43394
43395-int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43396+int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
43397 int eq_num)
43398 {
43399 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
43400@@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
43401 CMD_TIME_CLASS_B);
43402 }
43403
43404-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43405+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
43406 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
43407 void *in_mad, void *response_mad)
43408 {
43409diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
43410index ded76c1..0cf0a08 100644
43411--- a/drivers/infiniband/hw/mthca/mthca_main.c
43412+++ b/drivers/infiniband/hw/mthca/mthca_main.c
43413@@ -692,7 +692,7 @@ err_close:
43414 return err;
43415 }
43416
43417-static int mthca_setup_hca(struct mthca_dev *dev)
43418+static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
43419 {
43420 int err;
43421
43422diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
43423index ed9a989..6aa5dc2 100644
43424--- a/drivers/infiniband/hw/mthca/mthca_mr.c
43425+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
43426@@ -81,7 +81,7 @@ struct mthca_mpt_entry {
43427 * through the bitmaps)
43428 */
43429
43430-static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43431+static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
43432 {
43433 int o;
43434 int m;
43435@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
43436 return key;
43437 }
43438
43439-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43440+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
43441 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
43442 {
43443 struct mthca_mailbox *mailbox;
43444@@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
43445 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
43446 }
43447
43448-int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43449+int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
43450 u64 *buffer_list, int buffer_size_shift,
43451 int list_len, u64 iova, u64 total_size,
43452 u32 access, struct mthca_mr *mr)
43453diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
43454index 415f8e1..e34214e 100644
43455--- a/drivers/infiniband/hw/mthca/mthca_provider.c
43456+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
43457@@ -764,7 +764,7 @@ unlock:
43458 return 0;
43459 }
43460
43461-static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43462+static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
43463 {
43464 struct mthca_dev *dev = to_mdev(ibcq->device);
43465 struct mthca_cq *cq = to_mcq(ibcq);
43466diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
43467index 3b2a6dc..bce26ff 100644
43468--- a/drivers/infiniband/hw/nes/nes.c
43469+++ b/drivers/infiniband/hw/nes/nes.c
43470@@ -97,7 +97,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
43471 LIST_HEAD(nes_adapter_list);
43472 static LIST_HEAD(nes_dev_list);
43473
43474-atomic_t qps_destroyed;
43475+atomic_unchecked_t qps_destroyed;
43476
43477 static unsigned int ee_flsh_adapter;
43478 static unsigned int sysfs_nonidx_addr;
43479@@ -278,7 +278,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
43480 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
43481 struct nes_adapter *nesadapter = nesdev->nesadapter;
43482
43483- atomic_inc(&qps_destroyed);
43484+ atomic_inc_unchecked(&qps_destroyed);
43485
43486 /* Free the control structures */
43487
43488diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
43489index bd9d132..70d84f4 100644
43490--- a/drivers/infiniband/hw/nes/nes.h
43491+++ b/drivers/infiniband/hw/nes/nes.h
43492@@ -180,17 +180,17 @@ extern unsigned int nes_debug_level;
43493 extern unsigned int wqm_quanta;
43494 extern struct list_head nes_adapter_list;
43495
43496-extern atomic_t cm_connects;
43497-extern atomic_t cm_accepts;
43498-extern atomic_t cm_disconnects;
43499-extern atomic_t cm_closes;
43500-extern atomic_t cm_connecteds;
43501-extern atomic_t cm_connect_reqs;
43502-extern atomic_t cm_rejects;
43503-extern atomic_t mod_qp_timouts;
43504-extern atomic_t qps_created;
43505-extern atomic_t qps_destroyed;
43506-extern atomic_t sw_qps_destroyed;
43507+extern atomic_unchecked_t cm_connects;
43508+extern atomic_unchecked_t cm_accepts;
43509+extern atomic_unchecked_t cm_disconnects;
43510+extern atomic_unchecked_t cm_closes;
43511+extern atomic_unchecked_t cm_connecteds;
43512+extern atomic_unchecked_t cm_connect_reqs;
43513+extern atomic_unchecked_t cm_rejects;
43514+extern atomic_unchecked_t mod_qp_timouts;
43515+extern atomic_unchecked_t qps_created;
43516+extern atomic_unchecked_t qps_destroyed;
43517+extern atomic_unchecked_t sw_qps_destroyed;
43518 extern u32 mh_detected;
43519 extern u32 mh_pauses_sent;
43520 extern u32 cm_packets_sent;
43521@@ -199,16 +199,16 @@ extern u32 cm_packets_created;
43522 extern u32 cm_packets_received;
43523 extern u32 cm_packets_dropped;
43524 extern u32 cm_packets_retrans;
43525-extern atomic_t cm_listens_created;
43526-extern atomic_t cm_listens_destroyed;
43527+extern atomic_unchecked_t cm_listens_created;
43528+extern atomic_unchecked_t cm_listens_destroyed;
43529 extern u32 cm_backlog_drops;
43530-extern atomic_t cm_loopbacks;
43531-extern atomic_t cm_nodes_created;
43532-extern atomic_t cm_nodes_destroyed;
43533-extern atomic_t cm_accel_dropped_pkts;
43534-extern atomic_t cm_resets_recvd;
43535-extern atomic_t pau_qps_created;
43536-extern atomic_t pau_qps_destroyed;
43537+extern atomic_unchecked_t cm_loopbacks;
43538+extern atomic_unchecked_t cm_nodes_created;
43539+extern atomic_unchecked_t cm_nodes_destroyed;
43540+extern atomic_unchecked_t cm_accel_dropped_pkts;
43541+extern atomic_unchecked_t cm_resets_recvd;
43542+extern atomic_unchecked_t pau_qps_created;
43543+extern atomic_unchecked_t pau_qps_destroyed;
43544
43545 extern u32 int_mod_timer_init;
43546 extern u32 int_mod_cq_depth_256;
43547diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
43548index 6f09a72..cf4399d 100644
43549--- a/drivers/infiniband/hw/nes/nes_cm.c
43550+++ b/drivers/infiniband/hw/nes/nes_cm.c
43551@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
43552 u32 cm_packets_retrans;
43553 u32 cm_packets_created;
43554 u32 cm_packets_received;
43555-atomic_t cm_listens_created;
43556-atomic_t cm_listens_destroyed;
43557+atomic_unchecked_t cm_listens_created;
43558+atomic_unchecked_t cm_listens_destroyed;
43559 u32 cm_backlog_drops;
43560-atomic_t cm_loopbacks;
43561-atomic_t cm_nodes_created;
43562-atomic_t cm_nodes_destroyed;
43563-atomic_t cm_accel_dropped_pkts;
43564-atomic_t cm_resets_recvd;
43565+atomic_unchecked_t cm_loopbacks;
43566+atomic_unchecked_t cm_nodes_created;
43567+atomic_unchecked_t cm_nodes_destroyed;
43568+atomic_unchecked_t cm_accel_dropped_pkts;
43569+atomic_unchecked_t cm_resets_recvd;
43570
43571 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
43572 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
43573@@ -135,28 +135,28 @@ static void record_ird_ord(struct nes_cm_node *, u16, u16);
43574 /* instance of function pointers for client API */
43575 /* set address of this instance to cm_core->cm_ops at cm_core alloc */
43576 static struct nes_cm_ops nes_cm_api = {
43577- mini_cm_accelerated,
43578- mini_cm_listen,
43579- mini_cm_del_listen,
43580- mini_cm_connect,
43581- mini_cm_close,
43582- mini_cm_accept,
43583- mini_cm_reject,
43584- mini_cm_recv_pkt,
43585- mini_cm_dealloc_core,
43586- mini_cm_get,
43587- mini_cm_set
43588+ .accelerated = mini_cm_accelerated,
43589+ .listen = mini_cm_listen,
43590+ .stop_listener = mini_cm_del_listen,
43591+ .connect = mini_cm_connect,
43592+ .close = mini_cm_close,
43593+ .accept = mini_cm_accept,
43594+ .reject = mini_cm_reject,
43595+ .recv_pkt = mini_cm_recv_pkt,
43596+ .destroy_cm_core = mini_cm_dealloc_core,
43597+ .get = mini_cm_get,
43598+ .set = mini_cm_set
43599 };
43600
43601 static struct nes_cm_core *g_cm_core;
43602
43603-atomic_t cm_connects;
43604-atomic_t cm_accepts;
43605-atomic_t cm_disconnects;
43606-atomic_t cm_closes;
43607-atomic_t cm_connecteds;
43608-atomic_t cm_connect_reqs;
43609-atomic_t cm_rejects;
43610+atomic_unchecked_t cm_connects;
43611+atomic_unchecked_t cm_accepts;
43612+atomic_unchecked_t cm_disconnects;
43613+atomic_unchecked_t cm_closes;
43614+atomic_unchecked_t cm_connecteds;
43615+atomic_unchecked_t cm_connect_reqs;
43616+atomic_unchecked_t cm_rejects;
43617
43618 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
43619 {
43620@@ -1436,7 +1436,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
43621 kfree(listener);
43622 listener = NULL;
43623 ret = 0;
43624- atomic_inc(&cm_listens_destroyed);
43625+ atomic_inc_unchecked(&cm_listens_destroyed);
43626 } else {
43627 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
43628 }
43629@@ -1637,7 +1637,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
43630 cm_node->rem_mac);
43631
43632 add_hte_node(cm_core, cm_node);
43633- atomic_inc(&cm_nodes_created);
43634+ atomic_inc_unchecked(&cm_nodes_created);
43635
43636 return cm_node;
43637 }
43638@@ -1698,7 +1698,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
43639 }
43640
43641 atomic_dec(&cm_core->node_cnt);
43642- atomic_inc(&cm_nodes_destroyed);
43643+ atomic_inc_unchecked(&cm_nodes_destroyed);
43644 nesqp = cm_node->nesqp;
43645 if (nesqp) {
43646 nesqp->cm_node = NULL;
43647@@ -1762,7 +1762,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
43648
43649 static void drop_packet(struct sk_buff *skb)
43650 {
43651- atomic_inc(&cm_accel_dropped_pkts);
43652+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43653 dev_kfree_skb_any(skb);
43654 }
43655
43656@@ -1825,7 +1825,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
43657 {
43658
43659 int reset = 0; /* whether to send reset in case of err.. */
43660- atomic_inc(&cm_resets_recvd);
43661+ atomic_inc_unchecked(&cm_resets_recvd);
43662 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
43663 " refcnt=%d\n", cm_node, cm_node->state,
43664 atomic_read(&cm_node->ref_count));
43665@@ -2492,7 +2492,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
43666 rem_ref_cm_node(cm_node->cm_core, cm_node);
43667 return NULL;
43668 }
43669- atomic_inc(&cm_loopbacks);
43670+ atomic_inc_unchecked(&cm_loopbacks);
43671 loopbackremotenode->loopbackpartner = cm_node;
43672 loopbackremotenode->tcp_cntxt.rcv_wscale =
43673 NES_CM_DEFAULT_RCV_WND_SCALE;
43674@@ -2773,7 +2773,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
43675 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
43676 else {
43677 rem_ref_cm_node(cm_core, cm_node);
43678- atomic_inc(&cm_accel_dropped_pkts);
43679+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
43680 dev_kfree_skb_any(skb);
43681 }
43682 break;
43683@@ -3081,7 +3081,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43684
43685 if ((cm_id) && (cm_id->event_handler)) {
43686 if (issue_disconn) {
43687- atomic_inc(&cm_disconnects);
43688+ atomic_inc_unchecked(&cm_disconnects);
43689 cm_event.event = IW_CM_EVENT_DISCONNECT;
43690 cm_event.status = disconn_status;
43691 cm_event.local_addr = cm_id->local_addr;
43692@@ -3103,7 +3103,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
43693 }
43694
43695 if (issue_close) {
43696- atomic_inc(&cm_closes);
43697+ atomic_inc_unchecked(&cm_closes);
43698 nes_disconnect(nesqp, 1);
43699
43700 cm_id->provider_data = nesqp;
43701@@ -3241,7 +3241,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43702
43703 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
43704 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
43705- atomic_inc(&cm_accepts);
43706+ atomic_inc_unchecked(&cm_accepts);
43707
43708 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
43709 netdev_refcnt_read(nesvnic->netdev));
43710@@ -3439,7 +3439,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
43711 struct nes_cm_core *cm_core;
43712 u8 *start_buff;
43713
43714- atomic_inc(&cm_rejects);
43715+ atomic_inc_unchecked(&cm_rejects);
43716 cm_node = (struct nes_cm_node *)cm_id->provider_data;
43717 loopback = cm_node->loopbackpartner;
43718 cm_core = cm_node->cm_core;
43719@@ -3504,7 +3504,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
43720 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
43721 ntohs(laddr->sin_port));
43722
43723- atomic_inc(&cm_connects);
43724+ atomic_inc_unchecked(&cm_connects);
43725 nesqp->active_conn = 1;
43726
43727 /* cache the cm_id in the qp */
43728@@ -3649,7 +3649,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
43729 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
43730 return err;
43731 }
43732- atomic_inc(&cm_listens_created);
43733+ atomic_inc_unchecked(&cm_listens_created);
43734 }
43735
43736 cm_id->add_ref(cm_id);
43737@@ -3756,7 +3756,7 @@ static void cm_event_connected(struct nes_cm_event *event)
43738
43739 if (nesqp->destroyed)
43740 return;
43741- atomic_inc(&cm_connecteds);
43742+ atomic_inc_unchecked(&cm_connecteds);
43743 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
43744 " local port 0x%04X. jiffies = %lu.\n",
43745 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
43746@@ -3941,7 +3941,7 @@ static void cm_event_reset(struct nes_cm_event *event)
43747
43748 cm_id->add_ref(cm_id);
43749 ret = cm_id->event_handler(cm_id, &cm_event);
43750- atomic_inc(&cm_closes);
43751+ atomic_inc_unchecked(&cm_closes);
43752 cm_event.event = IW_CM_EVENT_CLOSE;
43753 cm_event.status = 0;
43754 cm_event.provider_data = cm_id->provider_data;
43755@@ -3981,7 +3981,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
43756 return;
43757 cm_id = cm_node->cm_id;
43758
43759- atomic_inc(&cm_connect_reqs);
43760+ atomic_inc_unchecked(&cm_connect_reqs);
43761 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43762 cm_node, cm_id, jiffies);
43763
43764@@ -4030,7 +4030,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
43765 return;
43766 cm_id = cm_node->cm_id;
43767
43768- atomic_inc(&cm_connect_reqs);
43769+ atomic_inc_unchecked(&cm_connect_reqs);
43770 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
43771 cm_node, cm_id, jiffies);
43772
43773diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
43774index 4166452..fc952c3 100644
43775--- a/drivers/infiniband/hw/nes/nes_mgt.c
43776+++ b/drivers/infiniband/hw/nes/nes_mgt.c
43777@@ -40,8 +40,8 @@
43778 #include "nes.h"
43779 #include "nes_mgt.h"
43780
43781-atomic_t pau_qps_created;
43782-atomic_t pau_qps_destroyed;
43783+atomic_unchecked_t pau_qps_created;
43784+atomic_unchecked_t pau_qps_destroyed;
43785
43786 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
43787 {
43788@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
43789 {
43790 struct sk_buff *skb;
43791 unsigned long flags;
43792- atomic_inc(&pau_qps_destroyed);
43793+ atomic_inc_unchecked(&pau_qps_destroyed);
43794
43795 /* Free packets that have not yet been forwarded */
43796 /* Lock is acquired by skb_dequeue when removing the skb */
43797@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
43798 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
43799 skb_queue_head_init(&nesqp->pau_list);
43800 spin_lock_init(&nesqp->pau_lock);
43801- atomic_inc(&pau_qps_created);
43802+ atomic_inc_unchecked(&pau_qps_created);
43803 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
43804 }
43805
43806diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
43807index 49eb511..a774366 100644
43808--- a/drivers/infiniband/hw/nes/nes_nic.c
43809+++ b/drivers/infiniband/hw/nes/nes_nic.c
43810@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
43811 target_stat_values[++index] = mh_detected;
43812 target_stat_values[++index] = mh_pauses_sent;
43813 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
43814- target_stat_values[++index] = atomic_read(&cm_connects);
43815- target_stat_values[++index] = atomic_read(&cm_accepts);
43816- target_stat_values[++index] = atomic_read(&cm_disconnects);
43817- target_stat_values[++index] = atomic_read(&cm_connecteds);
43818- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
43819- target_stat_values[++index] = atomic_read(&cm_rejects);
43820- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
43821- target_stat_values[++index] = atomic_read(&qps_created);
43822- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
43823- target_stat_values[++index] = atomic_read(&qps_destroyed);
43824- target_stat_values[++index] = atomic_read(&cm_closes);
43825+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
43826+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
43827+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
43828+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
43829+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
43830+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
43831+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
43832+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
43833+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
43834+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
43835+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
43836 target_stat_values[++index] = cm_packets_sent;
43837 target_stat_values[++index] = cm_packets_bounced;
43838 target_stat_values[++index] = cm_packets_created;
43839 target_stat_values[++index] = cm_packets_received;
43840 target_stat_values[++index] = cm_packets_dropped;
43841 target_stat_values[++index] = cm_packets_retrans;
43842- target_stat_values[++index] = atomic_read(&cm_listens_created);
43843- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
43844+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
43845+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
43846 target_stat_values[++index] = cm_backlog_drops;
43847- target_stat_values[++index] = atomic_read(&cm_loopbacks);
43848- target_stat_values[++index] = atomic_read(&cm_nodes_created);
43849- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
43850- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
43851- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
43852+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
43853+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
43854+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
43855+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
43856+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
43857 target_stat_values[++index] = nesadapter->free_4kpbl;
43858 target_stat_values[++index] = nesadapter->free_256pbl;
43859 target_stat_values[++index] = int_mod_timer_init;
43860 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
43861 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
43862 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
43863- target_stat_values[++index] = atomic_read(&pau_qps_created);
43864- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
43865+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
43866+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
43867 }
43868
43869 /**
43870diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
43871index fef067c..6a25ccd 100644
43872--- a/drivers/infiniband/hw/nes/nes_verbs.c
43873+++ b/drivers/infiniband/hw/nes/nes_verbs.c
43874@@ -46,9 +46,9 @@
43875
43876 #include <rdma/ib_umem.h>
43877
43878-atomic_t mod_qp_timouts;
43879-atomic_t qps_created;
43880-atomic_t sw_qps_destroyed;
43881+atomic_unchecked_t mod_qp_timouts;
43882+atomic_unchecked_t qps_created;
43883+atomic_unchecked_t sw_qps_destroyed;
43884
43885 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
43886
43887@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
43888 if (init_attr->create_flags)
43889 return ERR_PTR(-EINVAL);
43890
43891- atomic_inc(&qps_created);
43892+ atomic_inc_unchecked(&qps_created);
43893 switch (init_attr->qp_type) {
43894 case IB_QPT_RC:
43895 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
43896@@ -1468,7 +1468,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
43897 struct iw_cm_event cm_event;
43898 int ret = 0;
43899
43900- atomic_inc(&sw_qps_destroyed);
43901+ atomic_inc_unchecked(&sw_qps_destroyed);
43902 nesqp->destroyed = 1;
43903
43904 /* Blow away the connection if it exists. */
43905diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
43906index c00ae09..04e91be 100644
43907--- a/drivers/infiniband/hw/qib/qib.h
43908+++ b/drivers/infiniband/hw/qib/qib.h
43909@@ -52,6 +52,7 @@
43910 #include <linux/kref.h>
43911 #include <linux/sched.h>
43912 #include <linux/kthread.h>
43913+#include <linux/slab.h>
43914
43915 #include "qib_common.h"
43916 #include "qib_verbs.h"
43917diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
43918index de05545..b535322 100644
43919--- a/drivers/input/evdev.c
43920+++ b/drivers/input/evdev.c
43921@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file)
43922
43923 err_free_client:
43924 evdev_detach_client(evdev, client);
43925- kfree(client);
43926+ kvfree(client);
43927 return error;
43928 }
43929
43930diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
43931index 24c41ba..102d71f 100644
43932--- a/drivers/input/gameport/gameport.c
43933+++ b/drivers/input/gameport/gameport.c
43934@@ -490,14 +490,14 @@ EXPORT_SYMBOL(gameport_set_phys);
43935 */
43936 static void gameport_init_port(struct gameport *gameport)
43937 {
43938- static atomic_t gameport_no = ATOMIC_INIT(0);
43939+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
43940
43941 __module_get(THIS_MODULE);
43942
43943 mutex_init(&gameport->drv_mutex);
43944 device_initialize(&gameport->dev);
43945 dev_set_name(&gameport->dev, "gameport%lu",
43946- (unsigned long)atomic_inc_return(&gameport_no) - 1);
43947+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
43948 gameport->dev.bus = &gameport_bus;
43949 gameport->dev.release = gameport_release_port;
43950 if (gameport->parent)
43951diff --git a/drivers/input/input.c b/drivers/input/input.c
43952index 29ca0bb..f4bc2e3 100644
43953--- a/drivers/input/input.c
43954+++ b/drivers/input/input.c
43955@@ -1774,7 +1774,7 @@ EXPORT_SYMBOL_GPL(input_class);
43956 */
43957 struct input_dev *input_allocate_device(void)
43958 {
43959- static atomic_t input_no = ATOMIC_INIT(0);
43960+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
43961 struct input_dev *dev;
43962
43963 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
43964@@ -1789,7 +1789,7 @@ struct input_dev *input_allocate_device(void)
43965 INIT_LIST_HEAD(&dev->node);
43966
43967 dev_set_name(&dev->dev, "input%ld",
43968- (unsigned long) atomic_inc_return(&input_no) - 1);
43969+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
43970
43971 __module_get(THIS_MODULE);
43972 }
43973diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
43974index 4a95b22..874c182 100644
43975--- a/drivers/input/joystick/sidewinder.c
43976+++ b/drivers/input/joystick/sidewinder.c
43977@@ -30,6 +30,7 @@
43978 #include <linux/kernel.h>
43979 #include <linux/module.h>
43980 #include <linux/slab.h>
43981+#include <linux/sched.h>
43982 #include <linux/input.h>
43983 #include <linux/gameport.h>
43984 #include <linux/jiffies.h>
43985diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
43986index e65d9c0..ad3942e 100644
43987--- a/drivers/input/joystick/xpad.c
43988+++ b/drivers/input/joystick/xpad.c
43989@@ -850,7 +850,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
43990
43991 static int xpad_led_probe(struct usb_xpad *xpad)
43992 {
43993- static atomic_t led_seq = ATOMIC_INIT(0);
43994+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
43995 long led_no;
43996 struct xpad_led *led;
43997 struct led_classdev *led_cdev;
43998@@ -863,7 +863,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
43999 if (!led)
44000 return -ENOMEM;
44001
44002- led_no = (long)atomic_inc_return(&led_seq) - 1;
44003+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
44004
44005 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
44006 led->xpad = xpad;
44007diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
44008index 719410f..1896169 100644
44009--- a/drivers/input/misc/ims-pcu.c
44010+++ b/drivers/input/misc/ims-pcu.c
44011@@ -1851,7 +1851,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
44012
44013 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44014 {
44015- static atomic_t device_no = ATOMIC_INIT(0);
44016+ static atomic_unchecked_t device_no = ATOMIC_INIT(0);
44017
44018 const struct ims_pcu_device_info *info;
44019 int error;
44020@@ -1882,7 +1882,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
44021 }
44022
44023 /* Device appears to be operable, complete initialization */
44024- pcu->device_no = atomic_inc_return(&device_no) - 1;
44025+ pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
44026
44027 /*
44028 * PCU-B devices, both GEN_1 and GEN_2 do not have OFN sensor
44029diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
44030index 2f0b39d..7370f13 100644
44031--- a/drivers/input/mouse/psmouse.h
44032+++ b/drivers/input/mouse/psmouse.h
44033@@ -116,7 +116,7 @@ struct psmouse_attribute {
44034 ssize_t (*set)(struct psmouse *psmouse, void *data,
44035 const char *buf, size_t count);
44036 bool protect;
44037-};
44038+} __do_const;
44039 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
44040
44041 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
44042diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
44043index b604564..3f14ae4 100644
44044--- a/drivers/input/mousedev.c
44045+++ b/drivers/input/mousedev.c
44046@@ -744,7 +744,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
44047
44048 spin_unlock_irq(&client->packet_lock);
44049
44050- if (copy_to_user(buffer, data, count))
44051+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
44052 return -EFAULT;
44053
44054 return count;
44055diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
44056index b29134d..394deb0 100644
44057--- a/drivers/input/serio/serio.c
44058+++ b/drivers/input/serio/serio.c
44059@@ -514,7 +514,7 @@ static void serio_release_port(struct device *dev)
44060 */
44061 static void serio_init_port(struct serio *serio)
44062 {
44063- static atomic_t serio_no = ATOMIC_INIT(0);
44064+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
44065
44066 __module_get(THIS_MODULE);
44067
44068@@ -525,7 +525,7 @@ static void serio_init_port(struct serio *serio)
44069 mutex_init(&serio->drv_mutex);
44070 device_initialize(&serio->dev);
44071 dev_set_name(&serio->dev, "serio%ld",
44072- (long)atomic_inc_return(&serio_no) - 1);
44073+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
44074 serio->dev.bus = &serio_bus;
44075 serio->dev.release = serio_release_port;
44076 serio->dev.groups = serio_device_attr_groups;
44077diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
44078index c9a02fe..0debc75 100644
44079--- a/drivers/input/serio/serio_raw.c
44080+++ b/drivers/input/serio/serio_raw.c
44081@@ -292,7 +292,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
44082
44083 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44084 {
44085- static atomic_t serio_raw_no = ATOMIC_INIT(0);
44086+ static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
44087 struct serio_raw *serio_raw;
44088 int err;
44089
44090@@ -303,7 +303,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
44091 }
44092
44093 snprintf(serio_raw->name, sizeof(serio_raw->name),
44094- "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
44095+ "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
44096 kref_init(&serio_raw->kref);
44097 INIT_LIST_HEAD(&serio_raw->client_list);
44098 init_waitqueue_head(&serio_raw->wait);
44099diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
44100index a83cc2a..64462e6 100644
44101--- a/drivers/iommu/arm-smmu.c
44102+++ b/drivers/iommu/arm-smmu.c
44103@@ -921,7 +921,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44104 cfg->irptndx = cfg->cbndx;
44105 }
44106
44107- ACCESS_ONCE(smmu_domain->smmu) = smmu;
44108+ ACCESS_ONCE_RW(smmu_domain->smmu) = smmu;
44109 arm_smmu_init_context_bank(smmu_domain);
44110 spin_unlock_irqrestore(&smmu_domain->lock, flags);
44111
44112diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
44113index 33c4395..e06447e 100644
44114--- a/drivers/iommu/irq_remapping.c
44115+++ b/drivers/iommu/irq_remapping.c
44116@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
44117 void panic_if_irq_remap(const char *msg)
44118 {
44119 if (irq_remapping_enabled)
44120- panic(msg);
44121+ panic("%s", msg);
44122 }
44123
44124 static void ir_ack_apic_edge(struct irq_data *data)
44125@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
44126
44127 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
44128 {
44129- chip->irq_print_chip = ir_print_prefix;
44130- chip->irq_ack = ir_ack_apic_edge;
44131- chip->irq_eoi = ir_ack_apic_level;
44132- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44133+ pax_open_kernel();
44134+ *(void **)&chip->irq_print_chip = ir_print_prefix;
44135+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
44136+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
44137+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
44138+ pax_close_kernel();
44139 }
44140
44141 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
44142diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
44143index dda6dbc..f9adebb 100644
44144--- a/drivers/irqchip/irq-gic.c
44145+++ b/drivers/irqchip/irq-gic.c
44146@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
44147 * Supported arch specific GIC irq extension.
44148 * Default make them NULL.
44149 */
44150-struct irq_chip gic_arch_extn = {
44151+irq_chip_no_const gic_arch_extn = {
44152 .irq_eoi = NULL,
44153 .irq_mask = NULL,
44154 .irq_unmask = NULL,
44155@@ -312,7 +312,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
44156 chained_irq_exit(chip, desc);
44157 }
44158
44159-static struct irq_chip gic_chip = {
44160+static irq_chip_no_const gic_chip __read_only = {
44161 .name = "GIC",
44162 .irq_mask = gic_mask_irq,
44163 .irq_unmask = gic_unmask_irq,
44164diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
44165index 8777065..a4a9967 100644
44166--- a/drivers/irqchip/irq-renesas-irqc.c
44167+++ b/drivers/irqchip/irq-renesas-irqc.c
44168@@ -151,7 +151,7 @@ static int irqc_probe(struct platform_device *pdev)
44169 struct irqc_priv *p;
44170 struct resource *io;
44171 struct resource *irq;
44172- struct irq_chip *irq_chip;
44173+ irq_chip_no_const *irq_chip;
44174 const char *name = dev_name(&pdev->dev);
44175 int ret;
44176 int k;
44177diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
44178index 6a2df32..dc962f1 100644
44179--- a/drivers/isdn/capi/capi.c
44180+++ b/drivers/isdn/capi/capi.c
44181@@ -81,8 +81,8 @@ struct capiminor {
44182
44183 struct capi20_appl *ap;
44184 u32 ncci;
44185- atomic_t datahandle;
44186- atomic_t msgid;
44187+ atomic_unchecked_t datahandle;
44188+ atomic_unchecked_t msgid;
44189
44190 struct tty_port port;
44191 int ttyinstop;
44192@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
44193 capimsg_setu16(s, 2, mp->ap->applid);
44194 capimsg_setu8 (s, 4, CAPI_DATA_B3);
44195 capimsg_setu8 (s, 5, CAPI_RESP);
44196- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
44197+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
44198 capimsg_setu32(s, 8, mp->ncci);
44199 capimsg_setu16(s, 12, datahandle);
44200 }
44201@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
44202 mp->outbytes -= len;
44203 spin_unlock_bh(&mp->outlock);
44204
44205- datahandle = atomic_inc_return(&mp->datahandle);
44206+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
44207 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
44208 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44209 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
44210 capimsg_setu16(skb->data, 2, mp->ap->applid);
44211 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
44212 capimsg_setu8 (skb->data, 5, CAPI_REQ);
44213- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
44214+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
44215 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
44216 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
44217 capimsg_setu16(skb->data, 16, len); /* Data length */
44218diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
44219index b7ae0a0..04590fa 100644
44220--- a/drivers/isdn/gigaset/bas-gigaset.c
44221+++ b/drivers/isdn/gigaset/bas-gigaset.c
44222@@ -2565,22 +2565,22 @@ static int gigaset_post_reset(struct usb_interface *intf)
44223
44224
44225 static const struct gigaset_ops gigops = {
44226- gigaset_write_cmd,
44227- gigaset_write_room,
44228- gigaset_chars_in_buffer,
44229- gigaset_brkchars,
44230- gigaset_init_bchannel,
44231- gigaset_close_bchannel,
44232- gigaset_initbcshw,
44233- gigaset_freebcshw,
44234- gigaset_reinitbcshw,
44235- gigaset_initcshw,
44236- gigaset_freecshw,
44237- gigaset_set_modem_ctrl,
44238- gigaset_baud_rate,
44239- gigaset_set_line_ctrl,
44240- gigaset_isoc_send_skb,
44241- gigaset_isoc_input,
44242+ .write_cmd = gigaset_write_cmd,
44243+ .write_room = gigaset_write_room,
44244+ .chars_in_buffer = gigaset_chars_in_buffer,
44245+ .brkchars = gigaset_brkchars,
44246+ .init_bchannel = gigaset_init_bchannel,
44247+ .close_bchannel = gigaset_close_bchannel,
44248+ .initbcshw = gigaset_initbcshw,
44249+ .freebcshw = gigaset_freebcshw,
44250+ .reinitbcshw = gigaset_reinitbcshw,
44251+ .initcshw = gigaset_initcshw,
44252+ .freecshw = gigaset_freecshw,
44253+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44254+ .baud_rate = gigaset_baud_rate,
44255+ .set_line_ctrl = gigaset_set_line_ctrl,
44256+ .send_skb = gigaset_isoc_send_skb,
44257+ .handle_input = gigaset_isoc_input,
44258 };
44259
44260 /* bas_gigaset_init
44261diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
44262index 600c79b..3752bab 100644
44263--- a/drivers/isdn/gigaset/interface.c
44264+++ b/drivers/isdn/gigaset/interface.c
44265@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
44266 }
44267 tty->driver_data = cs;
44268
44269- ++cs->port.count;
44270+ atomic_inc(&cs->port.count);
44271
44272- if (cs->port.count == 1) {
44273+ if (atomic_read(&cs->port.count) == 1) {
44274 tty_port_tty_set(&cs->port, tty);
44275 cs->port.low_latency = 1;
44276 }
44277@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
44278
44279 if (!cs->connected)
44280 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
44281- else if (!cs->port.count)
44282+ else if (!atomic_read(&cs->port.count))
44283 dev_warn(cs->dev, "%s: device not opened\n", __func__);
44284- else if (!--cs->port.count)
44285+ else if (!atomic_dec_return(&cs->port.count))
44286 tty_port_tty_set(&cs->port, NULL);
44287
44288 mutex_unlock(&cs->mutex);
44289diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
44290index 8c91fd5..14f13ce 100644
44291--- a/drivers/isdn/gigaset/ser-gigaset.c
44292+++ b/drivers/isdn/gigaset/ser-gigaset.c
44293@@ -453,22 +453,22 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
44294 }
44295
44296 static const struct gigaset_ops ops = {
44297- gigaset_write_cmd,
44298- gigaset_write_room,
44299- gigaset_chars_in_buffer,
44300- gigaset_brkchars,
44301- gigaset_init_bchannel,
44302- gigaset_close_bchannel,
44303- gigaset_initbcshw,
44304- gigaset_freebcshw,
44305- gigaset_reinitbcshw,
44306- gigaset_initcshw,
44307- gigaset_freecshw,
44308- gigaset_set_modem_ctrl,
44309- gigaset_baud_rate,
44310- gigaset_set_line_ctrl,
44311- gigaset_m10x_send_skb, /* asyncdata.c */
44312- gigaset_m10x_input, /* asyncdata.c */
44313+ .write_cmd = gigaset_write_cmd,
44314+ .write_room = gigaset_write_room,
44315+ .chars_in_buffer = gigaset_chars_in_buffer,
44316+ .brkchars = gigaset_brkchars,
44317+ .init_bchannel = gigaset_init_bchannel,
44318+ .close_bchannel = gigaset_close_bchannel,
44319+ .initbcshw = gigaset_initbcshw,
44320+ .freebcshw = gigaset_freebcshw,
44321+ .reinitbcshw = gigaset_reinitbcshw,
44322+ .initcshw = gigaset_initcshw,
44323+ .freecshw = gigaset_freecshw,
44324+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44325+ .baud_rate = gigaset_baud_rate,
44326+ .set_line_ctrl = gigaset_set_line_ctrl,
44327+ .send_skb = gigaset_m10x_send_skb, /* asyncdata.c */
44328+ .handle_input = gigaset_m10x_input, /* asyncdata.c */
44329 };
44330
44331
44332diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
44333index d0a41cb..b953e50 100644
44334--- a/drivers/isdn/gigaset/usb-gigaset.c
44335+++ b/drivers/isdn/gigaset/usb-gigaset.c
44336@@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
44337 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
44338 memcpy(cs->hw.usb->bchars, buf, 6);
44339 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
44340- 0, 0, &buf, 6, 2000);
44341+ 0, 0, buf, 6, 2000);
44342 }
44343
44344 static void gigaset_freebcshw(struct bc_state *bcs)
44345@@ -869,22 +869,22 @@ static int gigaset_pre_reset(struct usb_interface *intf)
44346 }
44347
44348 static const struct gigaset_ops ops = {
44349- gigaset_write_cmd,
44350- gigaset_write_room,
44351- gigaset_chars_in_buffer,
44352- gigaset_brkchars,
44353- gigaset_init_bchannel,
44354- gigaset_close_bchannel,
44355- gigaset_initbcshw,
44356- gigaset_freebcshw,
44357- gigaset_reinitbcshw,
44358- gigaset_initcshw,
44359- gigaset_freecshw,
44360- gigaset_set_modem_ctrl,
44361- gigaset_baud_rate,
44362- gigaset_set_line_ctrl,
44363- gigaset_m10x_send_skb,
44364- gigaset_m10x_input,
44365+ .write_cmd = gigaset_write_cmd,
44366+ .write_room = gigaset_write_room,
44367+ .chars_in_buffer = gigaset_chars_in_buffer,
44368+ .brkchars = gigaset_brkchars,
44369+ .init_bchannel = gigaset_init_bchannel,
44370+ .close_bchannel = gigaset_close_bchannel,
44371+ .initbcshw = gigaset_initbcshw,
44372+ .freebcshw = gigaset_freebcshw,
44373+ .reinitbcshw = gigaset_reinitbcshw,
44374+ .initcshw = gigaset_initcshw,
44375+ .freecshw = gigaset_freecshw,
44376+ .set_modem_ctrl = gigaset_set_modem_ctrl,
44377+ .baud_rate = gigaset_baud_rate,
44378+ .set_line_ctrl = gigaset_set_line_ctrl,
44379+ .send_skb = gigaset_m10x_send_skb,
44380+ .handle_input = gigaset_m10x_input,
44381 };
44382
44383 /*
44384diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
44385index 4d9b195..455075c 100644
44386--- a/drivers/isdn/hardware/avm/b1.c
44387+++ b/drivers/isdn/hardware/avm/b1.c
44388@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
44389 }
44390 if (left) {
44391 if (t4file->user) {
44392- if (copy_from_user(buf, dp, left))
44393+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44394 return -EFAULT;
44395 } else {
44396 memcpy(buf, dp, left);
44397@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
44398 }
44399 if (left) {
44400 if (config->user) {
44401- if (copy_from_user(buf, dp, left))
44402+ if (left > sizeof buf || copy_from_user(buf, dp, left))
44403 return -EFAULT;
44404 } else {
44405 memcpy(buf, dp, left);
44406diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
44407index 9b856e1..fa03c92 100644
44408--- a/drivers/isdn/i4l/isdn_common.c
44409+++ b/drivers/isdn/i4l/isdn_common.c
44410@@ -1654,6 +1654,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
44411 } else
44412 return -EINVAL;
44413 case IIOCDBGVAR:
44414+ if (!capable(CAP_SYS_RAWIO))
44415+ return -EPERM;
44416 if (arg) {
44417 if (copy_to_user(argp, &dev, sizeof(ulong)))
44418 return -EFAULT;
44419diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
44420index 91d5730..336523e 100644
44421--- a/drivers/isdn/i4l/isdn_concap.c
44422+++ b/drivers/isdn/i4l/isdn_concap.c
44423@@ -80,9 +80,9 @@ static int isdn_concap_dl_disconn_req(struct concap_proto *concap)
44424 }
44425
44426 struct concap_device_ops isdn_concap_reliable_dl_dops = {
44427- &isdn_concap_dl_data_req,
44428- &isdn_concap_dl_connect_req,
44429- &isdn_concap_dl_disconn_req
44430+ .data_req = &isdn_concap_dl_data_req,
44431+ .connect_req = &isdn_concap_dl_connect_req,
44432+ .disconn_req = &isdn_concap_dl_disconn_req
44433 };
44434
44435 /* The following should better go into a dedicated source file such that
44436diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
44437index 3c5f249..5fac4d0 100644
44438--- a/drivers/isdn/i4l/isdn_tty.c
44439+++ b/drivers/isdn/i4l/isdn_tty.c
44440@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
44441
44442 #ifdef ISDN_DEBUG_MODEM_OPEN
44443 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
44444- port->count);
44445+ atomic_read(&port->count));
44446 #endif
44447- port->count++;
44448+ atomic_inc(&port->count);
44449 port->tty = tty;
44450 /*
44451 * Start up serial port
44452@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44453 #endif
44454 return;
44455 }
44456- if ((tty->count == 1) && (port->count != 1)) {
44457+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
44458 /*
44459 * Uh, oh. tty->count is 1, which means that the tty
44460 * structure will be freed. Info->count should always
44461@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
44462 * serial port won't be shutdown.
44463 */
44464 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
44465- "info->count is %d\n", port->count);
44466- port->count = 1;
44467+ "info->count is %d\n", atomic_read(&port->count));
44468+ atomic_set(&port->count, 1);
44469 }
44470- if (--port->count < 0) {
44471+ if (atomic_dec_return(&port->count) < 0) {
44472 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
44473- info->line, port->count);
44474- port->count = 0;
44475+ info->line, atomic_read(&port->count));
44476+ atomic_set(&port->count, 0);
44477 }
44478- if (port->count) {
44479+ if (atomic_read(&port->count)) {
44480 #ifdef ISDN_DEBUG_MODEM_OPEN
44481 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
44482 #endif
44483@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
44484 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
44485 return;
44486 isdn_tty_shutdown(info);
44487- port->count = 0;
44488+ atomic_set(&port->count, 0);
44489 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44490 port->tty = NULL;
44491 wake_up_interruptible(&port->open_wait);
44492@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
44493 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
44494 modem_info *info = &dev->mdm.info[i];
44495
44496- if (info->port.count == 0)
44497+ if (atomic_read(&info->port.count) == 0)
44498 continue;
44499 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
44500 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
44501diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
44502index e2d4e58..40cd045 100644
44503--- a/drivers/isdn/i4l/isdn_x25iface.c
44504+++ b/drivers/isdn/i4l/isdn_x25iface.c
44505@@ -53,14 +53,14 @@ static int isdn_x25iface_disconn_ind(struct concap_proto *);
44506
44507
44508 static struct concap_proto_ops ix25_pops = {
44509- &isdn_x25iface_proto_new,
44510- &isdn_x25iface_proto_del,
44511- &isdn_x25iface_proto_restart,
44512- &isdn_x25iface_proto_close,
44513- &isdn_x25iface_xmit,
44514- &isdn_x25iface_receive,
44515- &isdn_x25iface_connect_ind,
44516- &isdn_x25iface_disconn_ind
44517+ .proto_new = &isdn_x25iface_proto_new,
44518+ .proto_del = &isdn_x25iface_proto_del,
44519+ .restart = &isdn_x25iface_proto_restart,
44520+ .close = &isdn_x25iface_proto_close,
44521+ .encap_and_xmit = &isdn_x25iface_xmit,
44522+ .data_ind = &isdn_x25iface_receive,
44523+ .connect_ind = &isdn_x25iface_connect_ind,
44524+ .disconn_ind = &isdn_x25iface_disconn_ind
44525 };
44526
44527 /* error message helper function */
44528diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
44529index 6a7447c..cae33fe 100644
44530--- a/drivers/isdn/icn/icn.c
44531+++ b/drivers/isdn/icn/icn.c
44532@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
44533 if (count > len)
44534 count = len;
44535 if (user) {
44536- if (copy_from_user(msg, buf, count))
44537+ if (count > sizeof msg || copy_from_user(msg, buf, count))
44538 return -EFAULT;
44539 } else
44540 memcpy(msg, buf, count);
44541diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
44542index a4f05c5..1433bc5 100644
44543--- a/drivers/isdn/mISDN/dsp_cmx.c
44544+++ b/drivers/isdn/mISDN/dsp_cmx.c
44545@@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
44546 static u16 dsp_count; /* last sample count */
44547 static int dsp_count_valid; /* if we have last sample count */
44548
44549-void
44550+void __intentional_overflow(-1)
44551 dsp_cmx_send(void *arg)
44552 {
44553 struct dsp_conf *conf;
44554diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
44555index f58a354..fbae176 100644
44556--- a/drivers/leds/leds-clevo-mail.c
44557+++ b/drivers/leds/leds-clevo-mail.c
44558@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
44559 * detected as working, but in reality it is not) as low as
44560 * possible.
44561 */
44562-static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
44563+static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
44564 {
44565 .callback = clevo_mail_led_dmi_callback,
44566 .ident = "Clevo D410J",
44567diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
44568index 046cb70..6b20d39 100644
44569--- a/drivers/leds/leds-ss4200.c
44570+++ b/drivers/leds/leds-ss4200.c
44571@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
44572 * detected as working, but in reality it is not) as low as
44573 * possible.
44574 */
44575-static struct dmi_system_id nas_led_whitelist[] __initdata = {
44576+static struct dmi_system_id nas_led_whitelist[] __initconst = {
44577 {
44578 .callback = ss4200_led_dmi_callback,
44579 .ident = "Intel SS4200-E",
44580diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
44581index 6590558..a74c5dd 100644
44582--- a/drivers/lguest/core.c
44583+++ b/drivers/lguest/core.c
44584@@ -96,9 +96,17 @@ static __init int map_switcher(void)
44585 * The end address needs +1 because __get_vm_area allocates an
44586 * extra guard page, so we need space for that.
44587 */
44588+
44589+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
44590+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44591+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
44592+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44593+#else
44594 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
44595 VM_ALLOC, switcher_addr, switcher_addr
44596 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
44597+#endif
44598+
44599 if (!switcher_vma) {
44600 err = -ENOMEM;
44601 printk("lguest: could not map switcher pages high\n");
44602@@ -121,7 +129,7 @@ static __init int map_switcher(void)
44603 * Now the Switcher is mapped at the right address, we can't fail!
44604 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
44605 */
44606- memcpy(switcher_vma->addr, start_switcher_text,
44607+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
44608 end_switcher_text - start_switcher_text);
44609
44610 printk(KERN_INFO "lguest: mapped switcher at %p\n",
44611diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
44612index e8b55c3..3514c37 100644
44613--- a/drivers/lguest/page_tables.c
44614+++ b/drivers/lguest/page_tables.c
44615@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
44616 /*:*/
44617
44618 #ifdef CONFIG_X86_PAE
44619-static void release_pmd(pmd_t *spmd)
44620+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
44621 {
44622 /* If the entry's not present, there's nothing to release. */
44623 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
44624diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
44625index 922a1ac..9dd0c2a 100644
44626--- a/drivers/lguest/x86/core.c
44627+++ b/drivers/lguest/x86/core.c
44628@@ -59,7 +59,7 @@ static struct {
44629 /* Offset from where switcher.S was compiled to where we've copied it */
44630 static unsigned long switcher_offset(void)
44631 {
44632- return switcher_addr - (unsigned long)start_switcher_text;
44633+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
44634 }
44635
44636 /* This cpu's struct lguest_pages (after the Switcher text page) */
44637@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
44638 * These copies are pretty cheap, so we do them unconditionally: */
44639 /* Save the current Host top-level page directory.
44640 */
44641+
44642+#ifdef CONFIG_PAX_PER_CPU_PGD
44643+ pages->state.host_cr3 = read_cr3();
44644+#else
44645 pages->state.host_cr3 = __pa(current->mm->pgd);
44646+#endif
44647+
44648 /*
44649 * Set up the Guest's page tables to see this CPU's pages (and no
44650 * other CPU's pages).
44651@@ -477,7 +483,7 @@ void __init lguest_arch_host_init(void)
44652 * compiled-in switcher code and the high-mapped copy we just made.
44653 */
44654 for (i = 0; i < IDT_ENTRIES; i++)
44655- default_idt_entries[i] += switcher_offset();
44656+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
44657
44658 /*
44659 * Set up the Switcher's per-cpu areas.
44660@@ -560,7 +566,7 @@ void __init lguest_arch_host_init(void)
44661 * it will be undisturbed when we switch. To change %cs and jump we
44662 * need this structure to feed to Intel's "lcall" instruction.
44663 */
44664- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
44665+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
44666 lguest_entry.segment = LGUEST_CS;
44667
44668 /*
44669diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
44670index 40634b0..4f5855e 100644
44671--- a/drivers/lguest/x86/switcher_32.S
44672+++ b/drivers/lguest/x86/switcher_32.S
44673@@ -87,6 +87,7 @@
44674 #include <asm/page.h>
44675 #include <asm/segment.h>
44676 #include <asm/lguest.h>
44677+#include <asm/processor-flags.h>
44678
44679 // We mark the start of the code to copy
44680 // It's placed in .text tho it's never run here
44681@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
44682 // Changes type when we load it: damn Intel!
44683 // For after we switch over our page tables
44684 // That entry will be read-only: we'd crash.
44685+
44686+#ifdef CONFIG_PAX_KERNEXEC
44687+ mov %cr0, %edx
44688+ xor $X86_CR0_WP, %edx
44689+ mov %edx, %cr0
44690+#endif
44691+
44692 movl $(GDT_ENTRY_TSS*8), %edx
44693 ltr %dx
44694
44695@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
44696 // Let's clear it again for our return.
44697 // The GDT descriptor of the Host
44698 // Points to the table after two "size" bytes
44699- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
44700+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
44701 // Clear "used" from type field (byte 5, bit 2)
44702- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
44703+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
44704+
44705+#ifdef CONFIG_PAX_KERNEXEC
44706+ mov %cr0, %eax
44707+ xor $X86_CR0_WP, %eax
44708+ mov %eax, %cr0
44709+#endif
44710
44711 // Once our page table's switched, the Guest is live!
44712 // The Host fades as we run this final step.
44713@@ -295,13 +309,12 @@ deliver_to_host:
44714 // I consulted gcc, and it gave
44715 // These instructions, which I gladly credit:
44716 leal (%edx,%ebx,8), %eax
44717- movzwl (%eax),%edx
44718- movl 4(%eax), %eax
44719- xorw %ax, %ax
44720- orl %eax, %edx
44721+ movl 4(%eax), %edx
44722+ movw (%eax), %dx
44723 // Now the address of the handler's in %edx
44724 // We call it now: its "iret" drops us home.
44725- jmp *%edx
44726+ ljmp $__KERNEL_CS, $1f
44727+1: jmp *%edx
44728
44729 // Every interrupt can come to us here
44730 // But we must truly tell each apart.
44731diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
44732index a08e3ee..df8ade2 100644
44733--- a/drivers/md/bcache/closure.h
44734+++ b/drivers/md/bcache/closure.h
44735@@ -238,7 +238,7 @@ static inline void closure_set_stopped(struct closure *cl)
44736 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
44737 struct workqueue_struct *wq)
44738 {
44739- BUG_ON(object_is_on_stack(cl));
44740+ BUG_ON(object_starts_on_stack(cl));
44741 closure_set_ip(cl);
44742 cl->fn = fn;
44743 cl->wq = wq;
44744diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
44745index 67f8b31..9418f2b 100644
44746--- a/drivers/md/bitmap.c
44747+++ b/drivers/md/bitmap.c
44748@@ -1775,7 +1775,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
44749 chunk_kb ? "KB" : "B");
44750 if (bitmap->storage.file) {
44751 seq_printf(seq, ", file: ");
44752- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
44753+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
44754 }
44755
44756 seq_printf(seq, "\n");
44757diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
44758index 5152142..623d141 100644
44759--- a/drivers/md/dm-ioctl.c
44760+++ b/drivers/md/dm-ioctl.c
44761@@ -1769,7 +1769,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
44762 cmd == DM_LIST_VERSIONS_CMD)
44763 return 0;
44764
44765- if ((cmd == DM_DEV_CREATE_CMD)) {
44766+ if (cmd == DM_DEV_CREATE_CMD) {
44767 if (!*param->name) {
44768 DMWARN("name not supplied when creating device");
44769 return -EINVAL;
44770diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
44771index 7dfdb5c..4caada6 100644
44772--- a/drivers/md/dm-raid1.c
44773+++ b/drivers/md/dm-raid1.c
44774@@ -40,7 +40,7 @@ enum dm_raid1_error {
44775
44776 struct mirror {
44777 struct mirror_set *ms;
44778- atomic_t error_count;
44779+ atomic_unchecked_t error_count;
44780 unsigned long error_type;
44781 struct dm_dev *dev;
44782 sector_t offset;
44783@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
44784 struct mirror *m;
44785
44786 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
44787- if (!atomic_read(&m->error_count))
44788+ if (!atomic_read_unchecked(&m->error_count))
44789 return m;
44790
44791 return NULL;
44792@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
44793 * simple way to tell if a device has encountered
44794 * errors.
44795 */
44796- atomic_inc(&m->error_count);
44797+ atomic_inc_unchecked(&m->error_count);
44798
44799 if (test_and_set_bit(error_type, &m->error_type))
44800 return;
44801@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
44802 struct mirror *m = get_default_mirror(ms);
44803
44804 do {
44805- if (likely(!atomic_read(&m->error_count)))
44806+ if (likely(!atomic_read_unchecked(&m->error_count)))
44807 return m;
44808
44809 if (m-- == ms->mirror)
44810@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
44811 {
44812 struct mirror *default_mirror = get_default_mirror(m->ms);
44813
44814- return !atomic_read(&default_mirror->error_count);
44815+ return !atomic_read_unchecked(&default_mirror->error_count);
44816 }
44817
44818 static int mirror_available(struct mirror_set *ms, struct bio *bio)
44819@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
44820 */
44821 if (likely(region_in_sync(ms, region, 1)))
44822 m = choose_mirror(ms, bio->bi_iter.bi_sector);
44823- else if (m && atomic_read(&m->error_count))
44824+ else if (m && atomic_read_unchecked(&m->error_count))
44825 m = NULL;
44826
44827 if (likely(m))
44828@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
44829 }
44830
44831 ms->mirror[mirror].ms = ms;
44832- atomic_set(&(ms->mirror[mirror].error_count), 0);
44833+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
44834 ms->mirror[mirror].error_type = 0;
44835 ms->mirror[mirror].offset = offset;
44836
44837@@ -1342,7 +1342,7 @@ static void mirror_resume(struct dm_target *ti)
44838 */
44839 static char device_status_char(struct mirror *m)
44840 {
44841- if (!atomic_read(&(m->error_count)))
44842+ if (!atomic_read_unchecked(&(m->error_count)))
44843 return 'A';
44844
44845 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
44846diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
44847index 28a9012..9c0f6a5 100644
44848--- a/drivers/md/dm-stats.c
44849+++ b/drivers/md/dm-stats.c
44850@@ -382,7 +382,7 @@ do_sync_free:
44851 synchronize_rcu_expedited();
44852 dm_stat_free(&s->rcu_head);
44853 } else {
44854- ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
44855+ ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
44856 call_rcu(&s->rcu_head, dm_stat_free);
44857 }
44858 return 0;
44859@@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
44860 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
44861 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
44862 ));
44863- ACCESS_ONCE(last->last_sector) = end_sector;
44864- ACCESS_ONCE(last->last_rw) = bi_rw;
44865+ ACCESS_ONCE_RW(last->last_sector) = end_sector;
44866+ ACCESS_ONCE_RW(last->last_rw) = bi_rw;
44867 }
44868
44869 rcu_read_lock();
44870diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
44871index d1600d2..4c3af3a 100644
44872--- a/drivers/md/dm-stripe.c
44873+++ b/drivers/md/dm-stripe.c
44874@@ -21,7 +21,7 @@ struct stripe {
44875 struct dm_dev *dev;
44876 sector_t physical_start;
44877
44878- atomic_t error_count;
44879+ atomic_unchecked_t error_count;
44880 };
44881
44882 struct stripe_c {
44883@@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
44884 kfree(sc);
44885 return r;
44886 }
44887- atomic_set(&(sc->stripe[i].error_count), 0);
44888+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
44889 }
44890
44891 ti->private = sc;
44892@@ -330,7 +330,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
44893 DMEMIT("%d ", sc->stripes);
44894 for (i = 0; i < sc->stripes; i++) {
44895 DMEMIT("%s ", sc->stripe[i].dev->name);
44896- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
44897+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
44898 'D' : 'A';
44899 }
44900 buffer[i] = '\0';
44901@@ -375,8 +375,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
44902 */
44903 for (i = 0; i < sc->stripes; i++)
44904 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
44905- atomic_inc(&(sc->stripe[i].error_count));
44906- if (atomic_read(&(sc->stripe[i].error_count)) <
44907+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
44908+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
44909 DM_IO_ERROR_THRESHOLD)
44910 schedule_work(&sc->trigger_event);
44911 }
44912diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
44913index f9c6cb8..e272df6 100644
44914--- a/drivers/md/dm-table.c
44915+++ b/drivers/md/dm-table.c
44916@@ -274,7 +274,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
44917 static int open_dev(struct dm_dev_internal *d, dev_t dev,
44918 struct mapped_device *md)
44919 {
44920- static char *_claim_ptr = "I belong to device-mapper";
44921+ static char _claim_ptr[] = "I belong to device-mapper";
44922 struct block_device *bdev;
44923
44924 int r;
44925@@ -342,7 +342,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
44926 if (!dev_size)
44927 return 0;
44928
44929- if ((start >= dev_size) || (start + len > dev_size)) {
44930+ if ((start >= dev_size) || (len > dev_size - start)) {
44931 DMWARN("%s: %s too small for target: "
44932 "start=%llu, len=%llu, dev_size=%llu",
44933 dm_device_name(ti->table->md), bdevname(bdev, b),
44934diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
44935index e9d33ad..dae9880d 100644
44936--- a/drivers/md/dm-thin-metadata.c
44937+++ b/drivers/md/dm-thin-metadata.c
44938@@ -404,7 +404,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44939 {
44940 pmd->info.tm = pmd->tm;
44941 pmd->info.levels = 2;
44942- pmd->info.value_type.context = pmd->data_sm;
44943+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44944 pmd->info.value_type.size = sizeof(__le64);
44945 pmd->info.value_type.inc = data_block_inc;
44946 pmd->info.value_type.dec = data_block_dec;
44947@@ -423,7 +423,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
44948
44949 pmd->bl_info.tm = pmd->tm;
44950 pmd->bl_info.levels = 1;
44951- pmd->bl_info.value_type.context = pmd->data_sm;
44952+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
44953 pmd->bl_info.value_type.size = sizeof(__le64);
44954 pmd->bl_info.value_type.inc = data_block_inc;
44955 pmd->bl_info.value_type.dec = data_block_dec;
44956diff --git a/drivers/md/dm.c b/drivers/md/dm.c
44957index 32b958d..34011e8 100644
44958--- a/drivers/md/dm.c
44959+++ b/drivers/md/dm.c
44960@@ -180,9 +180,9 @@ struct mapped_device {
44961 /*
44962 * Event handling.
44963 */
44964- atomic_t event_nr;
44965+ atomic_unchecked_t event_nr;
44966 wait_queue_head_t eventq;
44967- atomic_t uevent_seq;
44968+ atomic_unchecked_t uevent_seq;
44969 struct list_head uevent_list;
44970 spinlock_t uevent_lock; /* Protect access to uevent_list */
44971
44972@@ -1952,8 +1952,8 @@ static struct mapped_device *alloc_dev(int minor)
44973 spin_lock_init(&md->deferred_lock);
44974 atomic_set(&md->holders, 1);
44975 atomic_set(&md->open_count, 0);
44976- atomic_set(&md->event_nr, 0);
44977- atomic_set(&md->uevent_seq, 0);
44978+ atomic_set_unchecked(&md->event_nr, 0);
44979+ atomic_set_unchecked(&md->uevent_seq, 0);
44980 INIT_LIST_HEAD(&md->uevent_list);
44981 spin_lock_init(&md->uevent_lock);
44982
44983@@ -2107,7 +2107,7 @@ static void event_callback(void *context)
44984
44985 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
44986
44987- atomic_inc(&md->event_nr);
44988+ atomic_inc_unchecked(&md->event_nr);
44989 wake_up(&md->eventq);
44990 }
44991
44992@@ -2800,18 +2800,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
44993
44994 uint32_t dm_next_uevent_seq(struct mapped_device *md)
44995 {
44996- return atomic_add_return(1, &md->uevent_seq);
44997+ return atomic_add_return_unchecked(1, &md->uevent_seq);
44998 }
44999
45000 uint32_t dm_get_event_nr(struct mapped_device *md)
45001 {
45002- return atomic_read(&md->event_nr);
45003+ return atomic_read_unchecked(&md->event_nr);
45004 }
45005
45006 int dm_wait_event(struct mapped_device *md, int event_nr)
45007 {
45008 return wait_event_interruptible(md->eventq,
45009- (event_nr != atomic_read(&md->event_nr)));
45010+ (event_nr != atomic_read_unchecked(&md->event_nr)));
45011 }
45012
45013 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
45014diff --git a/drivers/md/md.c b/drivers/md/md.c
45015index b7f603c..723d2bd 100644
45016--- a/drivers/md/md.c
45017+++ b/drivers/md/md.c
45018@@ -194,10 +194,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
45019 * start build, activate spare
45020 */
45021 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
45022-static atomic_t md_event_count;
45023+static atomic_unchecked_t md_event_count;
45024 void md_new_event(struct mddev *mddev)
45025 {
45026- atomic_inc(&md_event_count);
45027+ atomic_inc_unchecked(&md_event_count);
45028 wake_up(&md_event_waiters);
45029 }
45030 EXPORT_SYMBOL_GPL(md_new_event);
45031@@ -207,7 +207,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
45032 */
45033 static void md_new_event_inintr(struct mddev *mddev)
45034 {
45035- atomic_inc(&md_event_count);
45036+ atomic_inc_unchecked(&md_event_count);
45037 wake_up(&md_event_waiters);
45038 }
45039
45040@@ -1462,7 +1462,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
45041 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
45042 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
45043 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
45044- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45045+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
45046
45047 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
45048 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
45049@@ -1713,7 +1713,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
45050 else
45051 sb->resync_offset = cpu_to_le64(0);
45052
45053- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
45054+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
45055
45056 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
45057 sb->size = cpu_to_le64(mddev->dev_sectors);
45058@@ -2725,7 +2725,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
45059 static ssize_t
45060 errors_show(struct md_rdev *rdev, char *page)
45061 {
45062- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
45063+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
45064 }
45065
45066 static ssize_t
45067@@ -2734,7 +2734,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
45068 char *e;
45069 unsigned long n = simple_strtoul(buf, &e, 10);
45070 if (*buf && (*e == 0 || *e == '\n')) {
45071- atomic_set(&rdev->corrected_errors, n);
45072+ atomic_set_unchecked(&rdev->corrected_errors, n);
45073 return len;
45074 }
45075 return -EINVAL;
45076@@ -3183,8 +3183,8 @@ int md_rdev_init(struct md_rdev *rdev)
45077 rdev->sb_loaded = 0;
45078 rdev->bb_page = NULL;
45079 atomic_set(&rdev->nr_pending, 0);
45080- atomic_set(&rdev->read_errors, 0);
45081- atomic_set(&rdev->corrected_errors, 0);
45082+ atomic_set_unchecked(&rdev->read_errors, 0);
45083+ atomic_set_unchecked(&rdev->corrected_errors, 0);
45084
45085 INIT_LIST_HEAD(&rdev->same_set);
45086 init_waitqueue_head(&rdev->blocked_wait);
45087@@ -7072,7 +7072,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
45088
45089 spin_unlock(&pers_lock);
45090 seq_printf(seq, "\n");
45091- seq->poll_event = atomic_read(&md_event_count);
45092+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45093 return 0;
45094 }
45095 if (v == (void*)2) {
45096@@ -7175,7 +7175,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
45097 return error;
45098
45099 seq = file->private_data;
45100- seq->poll_event = atomic_read(&md_event_count);
45101+ seq->poll_event = atomic_read_unchecked(&md_event_count);
45102 return error;
45103 }
45104
45105@@ -7192,7 +7192,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
45106 /* always allow read */
45107 mask = POLLIN | POLLRDNORM;
45108
45109- if (seq->poll_event != atomic_read(&md_event_count))
45110+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
45111 mask |= POLLERR | POLLPRI;
45112 return mask;
45113 }
45114@@ -7236,7 +7236,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
45115 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
45116 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
45117 (int)part_stat_read(&disk->part0, sectors[1]) -
45118- atomic_read(&disk->sync_io);
45119+ atomic_read_unchecked(&disk->sync_io);
45120 /* sync IO will cause sync_io to increase before the disk_stats
45121 * as sync_io is counted when a request starts, and
45122 * disk_stats is counted when it completes.
45123diff --git a/drivers/md/md.h b/drivers/md/md.h
45124index a49d991..3582bb7 100644
45125--- a/drivers/md/md.h
45126+++ b/drivers/md/md.h
45127@@ -94,13 +94,13 @@ struct md_rdev {
45128 * only maintained for arrays that
45129 * support hot removal
45130 */
45131- atomic_t read_errors; /* number of consecutive read errors that
45132+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
45133 * we have tried to ignore.
45134 */
45135 struct timespec last_read_error; /* monotonic time since our
45136 * last read error
45137 */
45138- atomic_t corrected_errors; /* number of corrected read errors,
45139+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
45140 * for reporting to userspace and storing
45141 * in superblock.
45142 */
45143@@ -449,7 +449,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
45144
45145 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
45146 {
45147- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45148+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
45149 }
45150
45151 struct md_personality
45152diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
45153index 786b689..ea8c956 100644
45154--- a/drivers/md/persistent-data/dm-space-map-metadata.c
45155+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
45156@@ -679,7 +679,7 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
45157 * Flick into a mode where all blocks get allocated in the new area.
45158 */
45159 smm->begin = old_len;
45160- memcpy(sm, &bootstrap_ops, sizeof(*sm));
45161+ memcpy((void *)sm, &bootstrap_ops, sizeof(*sm));
45162
45163 /*
45164 * Extend.
45165@@ -710,7 +710,7 @@ out:
45166 /*
45167 * Switch back to normal behaviour.
45168 */
45169- memcpy(sm, &ops, sizeof(*sm));
45170+ memcpy((void *)sm, &ops, sizeof(*sm));
45171 return r;
45172 }
45173
45174diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
45175index 3e6d115..ffecdeb 100644
45176--- a/drivers/md/persistent-data/dm-space-map.h
45177+++ b/drivers/md/persistent-data/dm-space-map.h
45178@@ -71,6 +71,7 @@ struct dm_space_map {
45179 dm_sm_threshold_fn fn,
45180 void *context);
45181 };
45182+typedef struct dm_space_map __no_const dm_space_map_no_const;
45183
45184 /*----------------------------------------------------------------*/
45185
45186diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
45187index 55de4f6..b1c57fe 100644
45188--- a/drivers/md/raid1.c
45189+++ b/drivers/md/raid1.c
45190@@ -1936,7 +1936,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
45191 if (r1_sync_page_io(rdev, sect, s,
45192 bio->bi_io_vec[idx].bv_page,
45193 READ) != 0)
45194- atomic_add(s, &rdev->corrected_errors);
45195+ atomic_add_unchecked(s, &rdev->corrected_errors);
45196 }
45197 sectors -= s;
45198 sect += s;
45199@@ -2170,7 +2170,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
45200 !test_bit(Faulty, &rdev->flags)) {
45201 if (r1_sync_page_io(rdev, sect, s,
45202 conf->tmppage, READ)) {
45203- atomic_add(s, &rdev->corrected_errors);
45204+ atomic_add_unchecked(s, &rdev->corrected_errors);
45205 printk(KERN_INFO
45206 "md/raid1:%s: read error corrected "
45207 "(%d sectors at %llu on %s)\n",
45208diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
45209index 6703751..187af1e 100644
45210--- a/drivers/md/raid10.c
45211+++ b/drivers/md/raid10.c
45212@@ -1948,7 +1948,7 @@ static void end_sync_read(struct bio *bio, int error)
45213 /* The write handler will notice the lack of
45214 * R10BIO_Uptodate and record any errors etc
45215 */
45216- atomic_add(r10_bio->sectors,
45217+ atomic_add_unchecked(r10_bio->sectors,
45218 &conf->mirrors[d].rdev->corrected_errors);
45219
45220 /* for reconstruct, we always reschedule after a read.
45221@@ -2306,7 +2306,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45222 {
45223 struct timespec cur_time_mon;
45224 unsigned long hours_since_last;
45225- unsigned int read_errors = atomic_read(&rdev->read_errors);
45226+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
45227
45228 ktime_get_ts(&cur_time_mon);
45229
45230@@ -2328,9 +2328,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
45231 * overflowing the shift of read_errors by hours_since_last.
45232 */
45233 if (hours_since_last >= 8 * sizeof(read_errors))
45234- atomic_set(&rdev->read_errors, 0);
45235+ atomic_set_unchecked(&rdev->read_errors, 0);
45236 else
45237- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
45238+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
45239 }
45240
45241 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
45242@@ -2384,8 +2384,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45243 return;
45244
45245 check_decay_read_errors(mddev, rdev);
45246- atomic_inc(&rdev->read_errors);
45247- if (atomic_read(&rdev->read_errors) > max_read_errors) {
45248+ atomic_inc_unchecked(&rdev->read_errors);
45249+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
45250 char b[BDEVNAME_SIZE];
45251 bdevname(rdev->bdev, b);
45252
45253@@ -2393,7 +2393,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45254 "md/raid10:%s: %s: Raid device exceeded "
45255 "read_error threshold [cur %d:max %d]\n",
45256 mdname(mddev), b,
45257- atomic_read(&rdev->read_errors), max_read_errors);
45258+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
45259 printk(KERN_NOTICE
45260 "md/raid10:%s: %s: Failing raid device\n",
45261 mdname(mddev), b);
45262@@ -2548,7 +2548,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
45263 sect +
45264 choose_data_offset(r10_bio, rdev)),
45265 bdevname(rdev->bdev, b));
45266- atomic_add(s, &rdev->corrected_errors);
45267+ atomic_add_unchecked(s, &rdev->corrected_errors);
45268 }
45269
45270 rdev_dec_pending(rdev, mddev);
45271diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
45272index 9f0fbec..991e7a1 100644
45273--- a/drivers/md/raid5.c
45274+++ b/drivers/md/raid5.c
45275@@ -1735,6 +1735,10 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
45276 return 1;
45277 }
45278
45279+#ifdef CONFIG_GRKERNSEC_HIDESYM
45280+static atomic_unchecked_t raid5_cache_id = ATOMIC_INIT(0);
45281+#endif
45282+
45283 static int grow_stripes(struct r5conf *conf, int num)
45284 {
45285 struct kmem_cache *sc;
45286@@ -1746,7 +1750,11 @@ static int grow_stripes(struct r5conf *conf, int num)
45287 "raid%d-%s", conf->level, mdname(conf->mddev));
45288 else
45289 sprintf(conf->cache_name[0],
45290+#ifdef CONFIG_GRKERNSEC_HIDESYM
45291+ "raid%d-%08lx", conf->level, atomic_inc_return_unchecked(&raid5_cache_id));
45292+#else
45293 "raid%d-%p", conf->level, conf->mddev);
45294+#endif
45295 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
45296
45297 conf->active_name = 0;
45298@@ -2022,21 +2030,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
45299 mdname(conf->mddev), STRIPE_SECTORS,
45300 (unsigned long long)s,
45301 bdevname(rdev->bdev, b));
45302- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
45303+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
45304 clear_bit(R5_ReadError, &sh->dev[i].flags);
45305 clear_bit(R5_ReWrite, &sh->dev[i].flags);
45306 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
45307 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
45308
45309- if (atomic_read(&rdev->read_errors))
45310- atomic_set(&rdev->read_errors, 0);
45311+ if (atomic_read_unchecked(&rdev->read_errors))
45312+ atomic_set_unchecked(&rdev->read_errors, 0);
45313 } else {
45314 const char *bdn = bdevname(rdev->bdev, b);
45315 int retry = 0;
45316 int set_bad = 0;
45317
45318 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
45319- atomic_inc(&rdev->read_errors);
45320+ atomic_inc_unchecked(&rdev->read_errors);
45321 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
45322 printk_ratelimited(
45323 KERN_WARNING
45324@@ -2064,7 +2072,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
45325 mdname(conf->mddev),
45326 (unsigned long long)s,
45327 bdn);
45328- } else if (atomic_read(&rdev->read_errors)
45329+ } else if (atomic_read_unchecked(&rdev->read_errors)
45330 > conf->max_nr_stripes)
45331 printk(KERN_WARNING
45332 "md/raid:%s: Too many read errors, failing device %s.\n",
45333diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
45334index 983db75..ef9248c 100644
45335--- a/drivers/media/dvb-core/dvbdev.c
45336+++ b/drivers/media/dvb-core/dvbdev.c
45337@@ -185,7 +185,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
45338 const struct dvb_device *template, void *priv, int type)
45339 {
45340 struct dvb_device *dvbdev;
45341- struct file_operations *dvbdevfops;
45342+ file_operations_no_const *dvbdevfops;
45343 struct device *clsdev;
45344 int minor;
45345 int id;
45346diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
45347index 539f4db..cdd403b 100644
45348--- a/drivers/media/dvb-frontends/af9033.h
45349+++ b/drivers/media/dvb-frontends/af9033.h
45350@@ -82,7 +82,7 @@ struct af9033_ops {
45351 int (*pid_filter_ctrl)(struct dvb_frontend *fe, int onoff);
45352 int (*pid_filter)(struct dvb_frontend *fe, int index, u16 pid,
45353 int onoff);
45354-};
45355+} __no_const;
45356
45357
45358 #if IS_ENABLED(CONFIG_DVB_AF9033)
45359diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
45360index 9b6c3bb..baeb5c7 100644
45361--- a/drivers/media/dvb-frontends/dib3000.h
45362+++ b/drivers/media/dvb-frontends/dib3000.h
45363@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
45364 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
45365 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
45366 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
45367-};
45368+} __no_const;
45369
45370 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
45371 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
45372diff --git a/drivers/media/dvb-frontends/dib7000p.h b/drivers/media/dvb-frontends/dib7000p.h
45373index 1fea0e9..321ce8f 100644
45374--- a/drivers/media/dvb-frontends/dib7000p.h
45375+++ b/drivers/media/dvb-frontends/dib7000p.h
45376@@ -64,7 +64,7 @@ struct dib7000p_ops {
45377 int (*get_adc_power)(struct dvb_frontend *fe);
45378 int (*slave_reset)(struct dvb_frontend *fe);
45379 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg);
45380-};
45381+} __no_const;
45382
45383 #if IS_ENABLED(CONFIG_DVB_DIB7000P)
45384 void *dib7000p_attach(struct dib7000p_ops *ops);
45385diff --git a/drivers/media/dvb-frontends/dib8000.h b/drivers/media/dvb-frontends/dib8000.h
45386index 84cc103..5780c54 100644
45387--- a/drivers/media/dvb-frontends/dib8000.h
45388+++ b/drivers/media/dvb-frontends/dib8000.h
45389@@ -61,7 +61,7 @@ struct dib8000_ops {
45390 int (*pid_filter_ctrl)(struct dvb_frontend *fe, u8 onoff);
45391 int (*pid_filter)(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff);
45392 struct dvb_frontend *(*init)(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg);
45393-};
45394+} __no_const;
45395
45396 #if IS_ENABLED(CONFIG_DVB_DIB8000)
45397 void *dib8000_attach(struct dib8000_ops *ops);
45398diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
45399index ed8cb90..5ef7f79 100644
45400--- a/drivers/media/pci/cx88/cx88-video.c
45401+++ b/drivers/media/pci/cx88/cx88-video.c
45402@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
45403
45404 /* ------------------------------------------------------------------ */
45405
45406-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45407-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45408-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45409+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45410+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45411+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
45412
45413 module_param_array(video_nr, int, NULL, 0444);
45414 module_param_array(vbi_nr, int, NULL, 0444);
45415diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
45416index 802642d..5534900 100644
45417--- a/drivers/media/pci/ivtv/ivtv-driver.c
45418+++ b/drivers/media/pci/ivtv/ivtv-driver.c
45419@@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
45420 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
45421
45422 /* ivtv instance counter */
45423-static atomic_t ivtv_instance = ATOMIC_INIT(0);
45424+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
45425
45426 /* Parameter declarations */
45427 static int cardtype[IVTV_MAX_CARDS];
45428diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
45429index 172583d..0f806f4 100644
45430--- a/drivers/media/pci/solo6x10/solo6x10-core.c
45431+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
45432@@ -430,7 +430,7 @@ static void solo_device_release(struct device *dev)
45433
45434 static int solo_sysfs_init(struct solo_dev *solo_dev)
45435 {
45436- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
45437+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
45438 struct device *dev = &solo_dev->dev;
45439 const char *driver;
45440 int i;
45441diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
45442index c7141f2..5301fec 100644
45443--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
45444+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
45445@@ -351,7 +351,7 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev)
45446
45447 int solo_g723_init(struct solo_dev *solo_dev)
45448 {
45449- static struct snd_device_ops ops = { NULL };
45450+ static struct snd_device_ops ops = { };
45451 struct snd_card *card;
45452 struct snd_kcontrol_new kctl;
45453 char name[32];
45454diff --git a/drivers/media/pci/solo6x10/solo6x10-p2m.c b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45455index 8c84846..27b4f83 100644
45456--- a/drivers/media/pci/solo6x10/solo6x10-p2m.c
45457+++ b/drivers/media/pci/solo6x10/solo6x10-p2m.c
45458@@ -73,7 +73,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
45459
45460 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
45461 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
45462- p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
45463+ p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
45464 if (p2m_id < 0)
45465 p2m_id = -p2m_id;
45466 }
45467diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
45468index c6154b0..73e4ae9 100644
45469--- a/drivers/media/pci/solo6x10/solo6x10.h
45470+++ b/drivers/media/pci/solo6x10/solo6x10.h
45471@@ -219,7 +219,7 @@ struct solo_dev {
45472
45473 /* P2M DMA Engine */
45474 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
45475- atomic_t p2m_count;
45476+ atomic_unchecked_t p2m_count;
45477 int p2m_jiffies;
45478 unsigned int p2m_timeouts;
45479
45480diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
45481index 2d177fa..5b925a1 100644
45482--- a/drivers/media/platform/omap/omap_vout.c
45483+++ b/drivers/media/platform/omap/omap_vout.c
45484@@ -63,7 +63,6 @@ enum omap_vout_channels {
45485 OMAP_VIDEO2,
45486 };
45487
45488-static struct videobuf_queue_ops video_vbq_ops;
45489 /* Variables configurable through module params*/
45490 static u32 video1_numbuffers = 3;
45491 static u32 video2_numbuffers = 3;
45492@@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file)
45493 {
45494 struct videobuf_queue *q;
45495 struct omap_vout_device *vout = NULL;
45496+ static struct videobuf_queue_ops video_vbq_ops = {
45497+ .buf_setup = omap_vout_buffer_setup,
45498+ .buf_prepare = omap_vout_buffer_prepare,
45499+ .buf_release = omap_vout_buffer_release,
45500+ .buf_queue = omap_vout_buffer_queue,
45501+ };
45502
45503 vout = video_drvdata(file);
45504 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
45505@@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file)
45506 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
45507
45508 q = &vout->vbq;
45509- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
45510- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
45511- video_vbq_ops.buf_release = omap_vout_buffer_release;
45512- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
45513 spin_lock_init(&vout->vbq_lock);
45514
45515 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
45516diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
45517index fb2acc5..a2fcbdc4 100644
45518--- a/drivers/media/platform/s5p-tv/mixer.h
45519+++ b/drivers/media/platform/s5p-tv/mixer.h
45520@@ -156,7 +156,7 @@ struct mxr_layer {
45521 /** layer index (unique identifier) */
45522 int idx;
45523 /** callbacks for layer methods */
45524- struct mxr_layer_ops ops;
45525+ struct mxr_layer_ops *ops;
45526 /** format array */
45527 const struct mxr_format **fmt_array;
45528 /** size of format array */
45529diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45530index 74344c7..a39e70e 100644
45531--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45532+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
45533@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
45534 {
45535 struct mxr_layer *layer;
45536 int ret;
45537- struct mxr_layer_ops ops = {
45538+ static struct mxr_layer_ops ops = {
45539 .release = mxr_graph_layer_release,
45540 .buffer_set = mxr_graph_buffer_set,
45541 .stream_set = mxr_graph_stream_set,
45542diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
45543index b713403..53cb5ad 100644
45544--- a/drivers/media/platform/s5p-tv/mixer_reg.c
45545+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
45546@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
45547 layer->update_buf = next;
45548 }
45549
45550- layer->ops.buffer_set(layer, layer->update_buf);
45551+ layer->ops->buffer_set(layer, layer->update_buf);
45552
45553 if (done && done != layer->shadow_buf)
45554 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
45555diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
45556index b4d2696..91df48e 100644
45557--- a/drivers/media/platform/s5p-tv/mixer_video.c
45558+++ b/drivers/media/platform/s5p-tv/mixer_video.c
45559@@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
45560 layer->geo.src.height = layer->geo.src.full_height;
45561
45562 mxr_geometry_dump(mdev, &layer->geo);
45563- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45564+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45565 mxr_geometry_dump(mdev, &layer->geo);
45566 }
45567
45568@@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
45569 layer->geo.dst.full_width = mbus_fmt.width;
45570 layer->geo.dst.full_height = mbus_fmt.height;
45571 layer->geo.dst.field = mbus_fmt.field;
45572- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45573+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
45574
45575 mxr_geometry_dump(mdev, &layer->geo);
45576 }
45577@@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
45578 /* set source size to highest accepted value */
45579 geo->src.full_width = max(geo->dst.full_width, pix->width);
45580 geo->src.full_height = max(geo->dst.full_height, pix->height);
45581- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45582+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45583 mxr_geometry_dump(mdev, &layer->geo);
45584 /* set cropping to total visible screen */
45585 geo->src.width = pix->width;
45586@@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
45587 geo->src.x_offset = 0;
45588 geo->src.y_offset = 0;
45589 /* assure consistency of geometry */
45590- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45591+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
45592 mxr_geometry_dump(mdev, &layer->geo);
45593 /* set full size to lowest possible value */
45594 geo->src.full_width = 0;
45595 geo->src.full_height = 0;
45596- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45597+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
45598 mxr_geometry_dump(mdev, &layer->geo);
45599
45600 /* returning results */
45601@@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
45602 target->width = s->r.width;
45603 target->height = s->r.height;
45604
45605- layer->ops.fix_geometry(layer, stage, s->flags);
45606+ layer->ops->fix_geometry(layer, stage, s->flags);
45607
45608 /* retrieve update selection rectangle */
45609 res.left = target->x_offset;
45610@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
45611 mxr_output_get(mdev);
45612
45613 mxr_layer_update_output(layer);
45614- layer->ops.format_set(layer);
45615+ layer->ops->format_set(layer);
45616 /* enabling layer in hardware */
45617 spin_lock_irqsave(&layer->enq_slock, flags);
45618 layer->state = MXR_LAYER_STREAMING;
45619 spin_unlock_irqrestore(&layer->enq_slock, flags);
45620
45621- layer->ops.stream_set(layer, MXR_ENABLE);
45622+ layer->ops->stream_set(layer, MXR_ENABLE);
45623 mxr_streamer_get(mdev);
45624
45625 return 0;
45626@@ -1030,7 +1030,7 @@ static void stop_streaming(struct vb2_queue *vq)
45627 spin_unlock_irqrestore(&layer->enq_slock, flags);
45628
45629 /* disabling layer in hardware */
45630- layer->ops.stream_set(layer, MXR_DISABLE);
45631+ layer->ops->stream_set(layer, MXR_DISABLE);
45632 /* remove one streamer */
45633 mxr_streamer_put(mdev);
45634 /* allow changes in output configuration */
45635@@ -1068,8 +1068,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
45636
45637 void mxr_layer_release(struct mxr_layer *layer)
45638 {
45639- if (layer->ops.release)
45640- layer->ops.release(layer);
45641+ if (layer->ops->release)
45642+ layer->ops->release(layer);
45643 }
45644
45645 void mxr_base_layer_release(struct mxr_layer *layer)
45646@@ -1095,7 +1095,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
45647
45648 layer->mdev = mdev;
45649 layer->idx = idx;
45650- layer->ops = *ops;
45651+ layer->ops = ops;
45652
45653 spin_lock_init(&layer->enq_slock);
45654 INIT_LIST_HEAD(&layer->enq_list);
45655diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45656index c9388c4..ce71ece 100644
45657--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45658+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
45659@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
45660 {
45661 struct mxr_layer *layer;
45662 int ret;
45663- struct mxr_layer_ops ops = {
45664+ static struct mxr_layer_ops ops = {
45665 .release = mxr_vp_layer_release,
45666 .buffer_set = mxr_vp_buffer_set,
45667 .stream_set = mxr_vp_stream_set,
45668diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
45669index 8033371..de5bca0 100644
45670--- a/drivers/media/platform/vivi.c
45671+++ b/drivers/media/platform/vivi.c
45672@@ -58,8 +58,8 @@ MODULE_AUTHOR("Mauro Carvalho Chehab, Ted Walther and John Sokol");
45673 MODULE_LICENSE("Dual BSD/GPL");
45674 MODULE_VERSION(VIVI_VERSION);
45675
45676-static unsigned video_nr = -1;
45677-module_param(video_nr, uint, 0644);
45678+static int video_nr = -1;
45679+module_param(video_nr, int, 0644);
45680 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
45681
45682 static unsigned n_devs = 1;
45683diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
45684index 82affae..42833ec 100644
45685--- a/drivers/media/radio/radio-cadet.c
45686+++ b/drivers/media/radio/radio-cadet.c
45687@@ -333,6 +333,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45688 unsigned char readbuf[RDS_BUFFER];
45689 int i = 0;
45690
45691+ if (count > RDS_BUFFER)
45692+ return -EFAULT;
45693 mutex_lock(&dev->lock);
45694 if (dev->rdsstat == 0)
45695 cadet_start_rds(dev);
45696@@ -349,8 +351,9 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
45697 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
45698 mutex_unlock(&dev->lock);
45699
45700- if (i && copy_to_user(data, readbuf, i))
45701- return -EFAULT;
45702+ if (i > sizeof(readbuf) || (i && copy_to_user(data, readbuf, i)))
45703+ i = -EFAULT;
45704+
45705 return i;
45706 }
45707
45708diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
45709index 5236035..c622c74 100644
45710--- a/drivers/media/radio/radio-maxiradio.c
45711+++ b/drivers/media/radio/radio-maxiradio.c
45712@@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
45713 /* TEA5757 pin mappings */
45714 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
45715
45716-static atomic_t maxiradio_instance = ATOMIC_INIT(0);
45717+static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
45718
45719 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
45720 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
45721diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
45722index 050b3bb..79f62b9 100644
45723--- a/drivers/media/radio/radio-shark.c
45724+++ b/drivers/media/radio/radio-shark.c
45725@@ -79,7 +79,7 @@ struct shark_device {
45726 u32 last_val;
45727 };
45728
45729-static atomic_t shark_instance = ATOMIC_INIT(0);
45730+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45731
45732 static void shark_write_val(struct snd_tea575x *tea, u32 val)
45733 {
45734diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
45735index 8654e0d..0608a64 100644
45736--- a/drivers/media/radio/radio-shark2.c
45737+++ b/drivers/media/radio/radio-shark2.c
45738@@ -74,7 +74,7 @@ struct shark_device {
45739 u8 *transfer_buffer;
45740 };
45741
45742-static atomic_t shark_instance = ATOMIC_INIT(0);
45743+static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
45744
45745 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
45746 {
45747diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
45748index 633022b..7f10754 100644
45749--- a/drivers/media/radio/radio-si476x.c
45750+++ b/drivers/media/radio/radio-si476x.c
45751@@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
45752 struct si476x_radio *radio;
45753 struct v4l2_ctrl *ctrl;
45754
45755- static atomic_t instance = ATOMIC_INIT(0);
45756+ static atomic_unchecked_t instance = ATOMIC_INIT(0);
45757
45758 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
45759 if (!radio)
45760diff --git a/drivers/media/usb/dvb-usb/cinergyT2-core.c b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45761index 9fd1527..8927230 100644
45762--- a/drivers/media/usb/dvb-usb/cinergyT2-core.c
45763+++ b/drivers/media/usb/dvb-usb/cinergyT2-core.c
45764@@ -50,29 +50,73 @@ static struct dvb_usb_device_properties cinergyt2_properties;
45765
45766 static int cinergyt2_streaming_ctrl(struct dvb_usb_adapter *adap, int enable)
45767 {
45768- char buf[] = { CINERGYT2_EP1_CONTROL_STREAM_TRANSFER, enable ? 1 : 0 };
45769- char result[64];
45770- return dvb_usb_generic_rw(adap->dev, buf, sizeof(buf), result,
45771- sizeof(result), 0);
45772+ char *buf;
45773+ char *result;
45774+ int retval;
45775+
45776+ buf = kmalloc(2, GFP_KERNEL);
45777+ if (buf == NULL)
45778+ return -ENOMEM;
45779+ result = kmalloc(64, GFP_KERNEL);
45780+ if (result == NULL) {
45781+ kfree(buf);
45782+ return -ENOMEM;
45783+ }
45784+
45785+ buf[0] = CINERGYT2_EP1_CONTROL_STREAM_TRANSFER;
45786+ buf[1] = enable ? 1 : 0;
45787+
45788+ retval = dvb_usb_generic_rw(adap->dev, buf, 2, result, 64, 0);
45789+
45790+ kfree(buf);
45791+ kfree(result);
45792+ return retval;
45793 }
45794
45795 static int cinergyt2_power_ctrl(struct dvb_usb_device *d, int enable)
45796 {
45797- char buf[] = { CINERGYT2_EP1_SLEEP_MODE, enable ? 0 : 1 };
45798- char state[3];
45799- return dvb_usb_generic_rw(d, buf, sizeof(buf), state, sizeof(state), 0);
45800+ char *buf;
45801+ char *state;
45802+ int retval;
45803+
45804+ buf = kmalloc(2, GFP_KERNEL);
45805+ if (buf == NULL)
45806+ return -ENOMEM;
45807+ state = kmalloc(3, GFP_KERNEL);
45808+ if (state == NULL) {
45809+ kfree(buf);
45810+ return -ENOMEM;
45811+ }
45812+
45813+ buf[0] = CINERGYT2_EP1_SLEEP_MODE;
45814+ buf[1] = enable ? 1 : 0;
45815+
45816+ retval = dvb_usb_generic_rw(d, buf, 2, state, 3, 0);
45817+
45818+ kfree(buf);
45819+ kfree(state);
45820+ return retval;
45821 }
45822
45823 static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45824 {
45825- char query[] = { CINERGYT2_EP1_GET_FIRMWARE_VERSION };
45826- char state[3];
45827+ char *query;
45828+ char *state;
45829 int ret;
45830+ query = kmalloc(1, GFP_KERNEL);
45831+ if (query == NULL)
45832+ return -ENOMEM;
45833+ state = kmalloc(3, GFP_KERNEL);
45834+ if (state == NULL) {
45835+ kfree(query);
45836+ return -ENOMEM;
45837+ }
45838+
45839+ query[0] = CINERGYT2_EP1_GET_FIRMWARE_VERSION;
45840
45841 adap->fe_adap[0].fe = cinergyt2_fe_attach(adap->dev);
45842
45843- ret = dvb_usb_generic_rw(adap->dev, query, sizeof(query), state,
45844- sizeof(state), 0);
45845+ ret = dvb_usb_generic_rw(adap->dev, query, 1, state, 3, 0);
45846 if (ret < 0) {
45847 deb_rc("cinergyt2_power_ctrl() Failed to retrieve sleep "
45848 "state info\n");
45849@@ -80,7 +124,8 @@ static int cinergyt2_frontend_attach(struct dvb_usb_adapter *adap)
45850
45851 /* Copy this pointer as we are gonna need it in the release phase */
45852 cinergyt2_usb_device = adap->dev;
45853-
45854+ kfree(query);
45855+ kfree(state);
45856 return 0;
45857 }
45858
45859@@ -141,12 +186,23 @@ static int repeatable_keys[] = {
45860 static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45861 {
45862 struct cinergyt2_state *st = d->priv;
45863- u8 key[5] = {0, 0, 0, 0, 0}, cmd = CINERGYT2_EP1_GET_RC_EVENTS;
45864+ u8 *key, *cmd;
45865 int i;
45866
45867+ cmd = kmalloc(1, GFP_KERNEL);
45868+ if (cmd == NULL)
45869+ return -EINVAL;
45870+ key = kzalloc(5, GFP_KERNEL);
45871+ if (key == NULL) {
45872+ kfree(cmd);
45873+ return -EINVAL;
45874+ }
45875+
45876+ cmd[0] = CINERGYT2_EP1_GET_RC_EVENTS;
45877+
45878 *state = REMOTE_NO_KEY_PRESSED;
45879
45880- dvb_usb_generic_rw(d, &cmd, 1, key, sizeof(key), 0);
45881+ dvb_usb_generic_rw(d, cmd, 1, key, 5, 0);
45882 if (key[4] == 0xff) {
45883 /* key repeat */
45884 st->rc_counter++;
45885@@ -157,12 +213,12 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45886 *event = d->last_event;
45887 deb_rc("repeat key, event %x\n",
45888 *event);
45889- return 0;
45890+ goto out;
45891 }
45892 }
45893 deb_rc("repeated key (non repeatable)\n");
45894 }
45895- return 0;
45896+ goto out;
45897 }
45898
45899 /* hack to pass checksum on the custom field */
45900@@ -174,6 +230,9 @@ static int cinergyt2_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
45901
45902 deb_rc("key: %*ph\n", 5, key);
45903 }
45904+out:
45905+ kfree(cmd);
45906+ kfree(key);
45907 return 0;
45908 }
45909
45910diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45911index c890fe4..f9b2ae6 100644
45912--- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45913+++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c
45914@@ -145,103 +145,176 @@ static int cinergyt2_fe_read_status(struct dvb_frontend *fe,
45915 fe_status_t *status)
45916 {
45917 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45918- struct dvbt_get_status_msg result;
45919- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45920+ struct dvbt_get_status_msg *result;
45921+ u8 *cmd;
45922 int ret;
45923
45924- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&result,
45925- sizeof(result), 0);
45926+ cmd = kmalloc(1, GFP_KERNEL);
45927+ if (cmd == NULL)
45928+ return -ENOMEM;
45929+ result = kmalloc(sizeof(*result), GFP_KERNEL);
45930+ if (result == NULL) {
45931+ kfree(cmd);
45932+ return -ENOMEM;
45933+ }
45934+
45935+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45936+
45937+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)result,
45938+ sizeof(*result), 0);
45939 if (ret < 0)
45940- return ret;
45941+ goto out;
45942
45943 *status = 0;
45944
45945- if (0xffff - le16_to_cpu(result.gain) > 30)
45946+ if (0xffff - le16_to_cpu(result->gain) > 30)
45947 *status |= FE_HAS_SIGNAL;
45948- if (result.lock_bits & (1 << 6))
45949+ if (result->lock_bits & (1 << 6))
45950 *status |= FE_HAS_LOCK;
45951- if (result.lock_bits & (1 << 5))
45952+ if (result->lock_bits & (1 << 5))
45953 *status |= FE_HAS_SYNC;
45954- if (result.lock_bits & (1 << 4))
45955+ if (result->lock_bits & (1 << 4))
45956 *status |= FE_HAS_CARRIER;
45957- if (result.lock_bits & (1 << 1))
45958+ if (result->lock_bits & (1 << 1))
45959 *status |= FE_HAS_VITERBI;
45960
45961 if ((*status & (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC)) !=
45962 (FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC))
45963 *status &= ~FE_HAS_LOCK;
45964
45965- return 0;
45966+out:
45967+ kfree(cmd);
45968+ kfree(result);
45969+ return ret;
45970 }
45971
45972 static int cinergyt2_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
45973 {
45974 struct cinergyt2_fe_state *state = fe->demodulator_priv;
45975- struct dvbt_get_status_msg status;
45976- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
45977+ struct dvbt_get_status_msg *status;
45978+ char *cmd;
45979 int ret;
45980
45981- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
45982- sizeof(status), 0);
45983+ cmd = kmalloc(1, GFP_KERNEL);
45984+ if (cmd == NULL)
45985+ return -ENOMEM;
45986+ status = kmalloc(sizeof(*status), GFP_KERNEL);
45987+ if (status == NULL) {
45988+ kfree(cmd);
45989+ return -ENOMEM;
45990+ }
45991+
45992+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
45993+
45994+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
45995+ sizeof(*status), 0);
45996 if (ret < 0)
45997- return ret;
45998+ goto out;
45999
46000- *ber = le32_to_cpu(status.viterbi_error_rate);
46001+ *ber = le32_to_cpu(status->viterbi_error_rate);
46002+out:
46003+ kfree(cmd);
46004+ kfree(status);
46005 return 0;
46006 }
46007
46008 static int cinergyt2_fe_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
46009 {
46010 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46011- struct dvbt_get_status_msg status;
46012- u8 cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46013+ struct dvbt_get_status_msg *status;
46014+ u8 *cmd;
46015 int ret;
46016
46017- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (u8 *)&status,
46018- sizeof(status), 0);
46019+ cmd = kmalloc(1, GFP_KERNEL);
46020+ if (cmd == NULL)
46021+ return -ENOMEM;
46022+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46023+ if (status == NULL) {
46024+ kfree(cmd);
46025+ return -ENOMEM;
46026+ }
46027+
46028+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46029+
46030+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (u8 *)status,
46031+ sizeof(*status), 0);
46032 if (ret < 0) {
46033 err("cinergyt2_fe_read_unc_blocks() Failed! (Error=%d)\n",
46034 ret);
46035- return ret;
46036+ goto out;
46037 }
46038- *unc = le32_to_cpu(status.uncorrected_block_count);
46039- return 0;
46040+ *unc = le32_to_cpu(status->uncorrected_block_count);
46041+
46042+out:
46043+ kfree(cmd);
46044+ kfree(status);
46045+ return ret;
46046 }
46047
46048 static int cinergyt2_fe_read_signal_strength(struct dvb_frontend *fe,
46049 u16 *strength)
46050 {
46051 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46052- struct dvbt_get_status_msg status;
46053- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46054+ struct dvbt_get_status_msg *status;
46055+ char *cmd;
46056 int ret;
46057
46058- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46059- sizeof(status), 0);
46060+ cmd = kmalloc(1, GFP_KERNEL);
46061+ if (cmd == NULL)
46062+ return -ENOMEM;
46063+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46064+ if (status == NULL) {
46065+ kfree(cmd);
46066+ return -ENOMEM;
46067+ }
46068+
46069+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46070+
46071+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46072+ sizeof(*status), 0);
46073 if (ret < 0) {
46074 err("cinergyt2_fe_read_signal_strength() Failed!"
46075 " (Error=%d)\n", ret);
46076- return ret;
46077+ goto out;
46078 }
46079- *strength = (0xffff - le16_to_cpu(status.gain));
46080+ *strength = (0xffff - le16_to_cpu(status->gain));
46081+
46082+out:
46083+ kfree(cmd);
46084+ kfree(status);
46085 return 0;
46086 }
46087
46088 static int cinergyt2_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
46089 {
46090 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46091- struct dvbt_get_status_msg status;
46092- char cmd[] = { CINERGYT2_EP1_GET_TUNER_STATUS };
46093+ struct dvbt_get_status_msg *status;
46094+ char *cmd;
46095 int ret;
46096
46097- ret = dvb_usb_generic_rw(state->d, cmd, sizeof(cmd), (char *)&status,
46098- sizeof(status), 0);
46099+ cmd = kmalloc(1, GFP_KERNEL);
46100+ if (cmd == NULL)
46101+ return -ENOMEM;
46102+ status = kmalloc(sizeof(*status), GFP_KERNEL);
46103+ if (status == NULL) {
46104+ kfree(cmd);
46105+ return -ENOMEM;
46106+ }
46107+
46108+ cmd[0] = CINERGYT2_EP1_GET_TUNER_STATUS;
46109+
46110+ ret = dvb_usb_generic_rw(state->d, cmd, 1, (char *)status,
46111+ sizeof(*status), 0);
46112 if (ret < 0) {
46113 err("cinergyt2_fe_read_snr() Failed! (Error=%d)\n", ret);
46114- return ret;
46115+ goto out;
46116 }
46117- *snr = (status.snr << 8) | status.snr;
46118- return 0;
46119+ *snr = (status->snr << 8) | status->snr;
46120+
46121+out:
46122+ kfree(cmd);
46123+ kfree(status);
46124+ return ret;
46125 }
46126
46127 static int cinergyt2_fe_init(struct dvb_frontend *fe)
46128@@ -266,35 +339,46 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
46129 {
46130 struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
46131 struct cinergyt2_fe_state *state = fe->demodulator_priv;
46132- struct dvbt_set_parameters_msg param;
46133- char result[2];
46134+ struct dvbt_set_parameters_msg *param;
46135+ char *result;
46136 int err;
46137
46138- param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46139- param.tps = cpu_to_le16(compute_tps(fep));
46140- param.freq = cpu_to_le32(fep->frequency / 1000);
46141- param.flags = 0;
46142+ result = kmalloc(2, GFP_KERNEL);
46143+ if (result == NULL)
46144+ return -ENOMEM;
46145+ param = kmalloc(sizeof(*param), GFP_KERNEL);
46146+ if (param == NULL) {
46147+ kfree(result);
46148+ return -ENOMEM;
46149+ }
46150+
46151+ param->cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS;
46152+ param->tps = cpu_to_le16(compute_tps(fep));
46153+ param->freq = cpu_to_le32(fep->frequency / 1000);
46154+ param->flags = 0;
46155
46156 switch (fep->bandwidth_hz) {
46157 default:
46158 case 8000000:
46159- param.bandwidth = 8;
46160+ param->bandwidth = 8;
46161 break;
46162 case 7000000:
46163- param.bandwidth = 7;
46164+ param->bandwidth = 7;
46165 break;
46166 case 6000000:
46167- param.bandwidth = 6;
46168+ param->bandwidth = 6;
46169 break;
46170 }
46171
46172 err = dvb_usb_generic_rw(state->d,
46173- (char *)&param, sizeof(param),
46174- result, sizeof(result), 0);
46175+ (char *)param, sizeof(*param),
46176+ result, 2, 0);
46177 if (err < 0)
46178 err("cinergyt2_fe_set_frontend() Failed! err=%d\n", err);
46179
46180- return (err < 0) ? err : 0;
46181+ kfree(result);
46182+ kfree(param);
46183+ return err;
46184 }
46185
46186 static void cinergyt2_fe_release(struct dvb_frontend *fe)
46187diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46188index 733a7ff..f8b52e3 100644
46189--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46190+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
46191@@ -35,42 +35,57 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
46192
46193 int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
46194 {
46195- struct hexline hx;
46196- u8 reset;
46197+ struct hexline *hx;
46198+ u8 *reset;
46199 int ret,pos=0;
46200
46201+ reset = kmalloc(1, GFP_KERNEL);
46202+ if (reset == NULL)
46203+ return -ENOMEM;
46204+
46205+ hx = kmalloc(sizeof(struct hexline), GFP_KERNEL);
46206+ if (hx == NULL) {
46207+ kfree(reset);
46208+ return -ENOMEM;
46209+ }
46210+
46211 /* stop the CPU */
46212- reset = 1;
46213- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
46214+ reset[0] = 1;
46215+ if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1)) != 1)
46216 err("could not stop the USB controller CPU.");
46217
46218- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
46219- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
46220- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
46221+ while ((ret = dvb_usb_get_hexline(fw,hx,&pos)) > 0) {
46222+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx->addr,hx->len,hx->chk);
46223+ ret = usb_cypress_writemem(udev,hx->addr,hx->data,hx->len);
46224
46225- if (ret != hx.len) {
46226+ if (ret != hx->len) {
46227 err("error while transferring firmware "
46228 "(transferred size: %d, block size: %d)",
46229- ret,hx.len);
46230+ ret,hx->len);
46231 ret = -EINVAL;
46232 break;
46233 }
46234 }
46235 if (ret < 0) {
46236 err("firmware download failed at %d with %d",pos,ret);
46237+ kfree(reset);
46238+ kfree(hx);
46239 return ret;
46240 }
46241
46242 if (ret == 0) {
46243 /* restart the CPU */
46244- reset = 0;
46245- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
46246+ reset[0] = 0;
46247+ if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,reset,1) != 1) {
46248 err("could not restart the USB controller CPU.");
46249 ret = -EINVAL;
46250 }
46251 } else
46252 ret = -EIO;
46253
46254+ kfree(reset);
46255+ kfree(hx);
46256+
46257 return ret;
46258 }
46259 EXPORT_SYMBOL(usb_cypress_load_firmware);
46260diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
46261index 2add8c5..c33b854 100644
46262--- a/drivers/media/usb/dvb-usb/dw2102.c
46263+++ b/drivers/media/usb/dvb-usb/dw2102.c
46264@@ -118,7 +118,7 @@ struct su3000_state {
46265
46266 struct s6x0_state {
46267 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
46268-};
46269+} __no_const;
46270
46271 /* debug */
46272 static int dvb_usb_dw2102_debug;
46273diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
46274index 6b0b8b6b..4038398 100644
46275--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
46276+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
46277@@ -87,8 +87,11 @@ struct technisat_usb2_state {
46278 static int technisat_usb2_i2c_access(struct usb_device *udev,
46279 u8 device_addr, u8 *tx, u8 txlen, u8 *rx, u8 rxlen)
46280 {
46281- u8 b[64];
46282- int ret, actual_length;
46283+ u8 *b = kmalloc(64, GFP_KERNEL);
46284+ int ret, actual_length, error = 0;
46285+
46286+ if (b == NULL)
46287+ return -ENOMEM;
46288
46289 deb_i2c("i2c-access: %02x, tx: ", device_addr);
46290 debug_dump(tx, txlen, deb_i2c);
46291@@ -121,7 +124,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46292
46293 if (ret < 0) {
46294 err("i2c-error: out failed %02x = %d", device_addr, ret);
46295- return -ENODEV;
46296+ error = -ENODEV;
46297+ goto out;
46298 }
46299
46300 ret = usb_bulk_msg(udev,
46301@@ -129,7 +133,8 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46302 b, 64, &actual_length, 1000);
46303 if (ret < 0) {
46304 err("i2c-error: in failed %02x = %d", device_addr, ret);
46305- return -ENODEV;
46306+ error = -ENODEV;
46307+ goto out;
46308 }
46309
46310 if (b[0] != I2C_STATUS_OK) {
46311@@ -137,8 +142,10 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46312 /* handle tuner-i2c-nak */
46313 if (!(b[0] == I2C_STATUS_NAK &&
46314 device_addr == 0x60
46315- /* && device_is_technisat_usb2 */))
46316- return -ENODEV;
46317+ /* && device_is_technisat_usb2 */)) {
46318+ error = -ENODEV;
46319+ goto out;
46320+ }
46321 }
46322
46323 deb_i2c("status: %d, ", b[0]);
46324@@ -152,7 +159,9 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
46325
46326 deb_i2c("\n");
46327
46328- return 0;
46329+out:
46330+ kfree(b);
46331+ return error;
46332 }
46333
46334 static int technisat_usb2_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
46335@@ -224,14 +233,16 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46336 {
46337 int ret;
46338
46339- u8 led[8] = {
46340- red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46341- 0
46342- };
46343+ u8 *led = kzalloc(8, GFP_KERNEL);
46344+
46345+ if (led == NULL)
46346+ return -ENOMEM;
46347
46348 if (disable_led_control && state != TECH_LED_OFF)
46349 return 0;
46350
46351+ led[0] = red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST;
46352+
46353 switch (state) {
46354 case TECH_LED_ON:
46355 led[1] = 0x82;
46356@@ -263,16 +274,22 @@ static int technisat_usb2_set_led(struct dvb_usb_device *d, int red, enum techni
46357 red ? SET_RED_LED_VENDOR_REQUEST : SET_GREEN_LED_VENDOR_REQUEST,
46358 USB_TYPE_VENDOR | USB_DIR_OUT,
46359 0, 0,
46360- led, sizeof(led), 500);
46361+ led, 8, 500);
46362
46363 mutex_unlock(&d->i2c_mutex);
46364+
46365+ kfree(led);
46366+
46367 return ret;
46368 }
46369
46370 static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 green)
46371 {
46372 int ret;
46373- u8 b = 0;
46374+ u8 *b = kzalloc(1, GFP_KERNEL);
46375+
46376+ if (b == NULL)
46377+ return -ENOMEM;
46378
46379 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
46380 return -EAGAIN;
46381@@ -281,10 +298,12 @@ static int technisat_usb2_set_led_timer(struct dvb_usb_device *d, u8 red, u8 gre
46382 SET_LED_TIMER_DIVIDER_VENDOR_REQUEST,
46383 USB_TYPE_VENDOR | USB_DIR_OUT,
46384 (red << 8) | green, 0,
46385- &b, 1, 500);
46386+ b, 1, 500);
46387
46388 mutex_unlock(&d->i2c_mutex);
46389
46390+ kfree(b);
46391+
46392 return ret;
46393 }
46394
46395@@ -328,7 +347,7 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46396 struct dvb_usb_device_description **desc, int *cold)
46397 {
46398 int ret;
46399- u8 version[3];
46400+ u8 *version = kmalloc(3, GFP_KERNEL);
46401
46402 /* first select the interface */
46403 if (usb_set_interface(udev, 0, 1) != 0)
46404@@ -338,11 +357,14 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46405
46406 *cold = 0; /* by default do not download a firmware - just in case something is wrong */
46407
46408+ if (version == NULL)
46409+ return 0;
46410+
46411 ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
46412 GET_VERSION_INFO_VENDOR_REQUEST,
46413 USB_TYPE_VENDOR | USB_DIR_IN,
46414 0, 0,
46415- version, sizeof(version), 500);
46416+ version, 3, 500);
46417
46418 if (ret < 0)
46419 *cold = 1;
46420@@ -351,6 +373,8 @@ static int technisat_usb2_identify_state(struct usb_device *udev,
46421 *cold = 0;
46422 }
46423
46424+ kfree(version);
46425+
46426 return 0;
46427 }
46428
46429@@ -591,10 +615,15 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
46430
46431 static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46432 {
46433- u8 buf[62], *b;
46434+ u8 *buf, *b;
46435 int ret;
46436 struct ir_raw_event ev;
46437
46438+ buf = kmalloc(62, GFP_KERNEL);
46439+
46440+ if (buf == NULL)
46441+ return -ENOMEM;
46442+
46443 buf[0] = GET_IR_DATA_VENDOR_REQUEST;
46444 buf[1] = 0x08;
46445 buf[2] = 0x8f;
46446@@ -617,16 +646,20 @@ static int technisat_usb2_get_ir(struct dvb_usb_device *d)
46447 GET_IR_DATA_VENDOR_REQUEST,
46448 USB_TYPE_VENDOR | USB_DIR_IN,
46449 0x8080, 0,
46450- buf, sizeof(buf), 500);
46451+ buf, 62, 500);
46452
46453 unlock:
46454 mutex_unlock(&d->i2c_mutex);
46455
46456- if (ret < 0)
46457+ if (ret < 0) {
46458+ kfree(buf);
46459 return ret;
46460+ }
46461
46462- if (ret == 1)
46463+ if (ret == 1) {
46464+ kfree(buf);
46465 return 0; /* no key pressed */
46466+ }
46467
46468 /* decoding */
46469 b = buf+1;
46470@@ -653,6 +686,8 @@ unlock:
46471
46472 ir_raw_event_handle(d->rc_dev);
46473
46474+ kfree(buf);
46475+
46476 return 1;
46477 }
46478
46479diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46480index cca6c2f..77b9a18 100644
46481--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46482+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
46483@@ -328,7 +328,7 @@ struct v4l2_buffer32 {
46484 __u32 reserved;
46485 };
46486
46487-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
46488+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
46489 enum v4l2_memory memory)
46490 {
46491 void __user *up_pln;
46492@@ -357,7 +357,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
46493 return 0;
46494 }
46495
46496-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
46497+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
46498 enum v4l2_memory memory)
46499 {
46500 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
46501@@ -427,7 +427,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46502 * by passing a very big num_planes value */
46503 uplane = compat_alloc_user_space(num_planes *
46504 sizeof(struct v4l2_plane));
46505- kp->m.planes = uplane;
46506+ kp->m.planes = (struct v4l2_plane __force_kernel *)uplane;
46507
46508 while (--num_planes >= 0) {
46509 ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
46510@@ -498,7 +498,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
46511 if (num_planes == 0)
46512 return 0;
46513
46514- uplane = kp->m.planes;
46515+ uplane = (struct v4l2_plane __force_user *)kp->m.planes;
46516 if (get_user(p, &up->m.planes))
46517 return -EFAULT;
46518 uplane32 = compat_ptr(p);
46519@@ -562,7 +562,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_frame
46520 get_user(kp->flags, &up->flags) ||
46521 copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
46522 return -EFAULT;
46523- kp->base = compat_ptr(tmp);
46524+ kp->base = (void __force_kernel *)compat_ptr(tmp);
46525 return 0;
46526 }
46527
46528@@ -667,7 +667,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46529 n * sizeof(struct v4l2_ext_control32)))
46530 return -EFAULT;
46531 kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
46532- kp->controls = kcontrols;
46533+ kp->controls = (struct v4l2_ext_control __force_kernel *)kcontrols;
46534 while (--n >= 0) {
46535 if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
46536 return -EFAULT;
46537@@ -689,7 +689,7 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
46538 static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
46539 {
46540 struct v4l2_ext_control32 __user *ucontrols;
46541- struct v4l2_ext_control __user *kcontrols = kp->controls;
46542+ struct v4l2_ext_control __user *kcontrols = (struct v4l2_ext_control __force_user *)kp->controls;
46543 int n = kp->count;
46544 compat_caddr_t p;
46545
46546@@ -783,7 +783,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
46547 put_user(kp->start_block, &up->start_block) ||
46548 put_user(kp->blocks, &up->blocks) ||
46549 put_user(tmp, &up->edid) ||
46550- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
46551+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
46552 return -EFAULT;
46553 return 0;
46554 }
46555diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
46556index 015f92a..59e311e 100644
46557--- a/drivers/media/v4l2-core/v4l2-device.c
46558+++ b/drivers/media/v4l2-core/v4l2-device.c
46559@@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
46560 EXPORT_SYMBOL_GPL(v4l2_device_put);
46561
46562 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
46563- atomic_t *instance)
46564+ atomic_unchecked_t *instance)
46565 {
46566- int num = atomic_inc_return(instance) - 1;
46567+ int num = atomic_inc_return_unchecked(instance) - 1;
46568 int len = strlen(basename);
46569
46570 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
46571diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
46572index d15e167..337f374 100644
46573--- a/drivers/media/v4l2-core/v4l2-ioctl.c
46574+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
46575@@ -2142,7 +2142,8 @@ struct v4l2_ioctl_info {
46576 struct file *file, void *fh, void *p);
46577 } u;
46578 void (*debug)(const void *arg, bool write_only);
46579-};
46580+} __do_const;
46581+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
46582
46583 /* This control needs a priority check */
46584 #define INFO_FL_PRIO (1 << 0)
46585@@ -2326,7 +2327,7 @@ static long __video_do_ioctl(struct file *file,
46586 struct video_device *vfd = video_devdata(file);
46587 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
46588 bool write_only = false;
46589- struct v4l2_ioctl_info default_info;
46590+ v4l2_ioctl_info_no_const default_info;
46591 const struct v4l2_ioctl_info *info;
46592 void *fh = file->private_data;
46593 struct v4l2_fh *vfh = NULL;
46594@@ -2413,7 +2414,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46595 ret = -EINVAL;
46596 break;
46597 }
46598- *user_ptr = (void __user *)buf->m.planes;
46599+ *user_ptr = (void __force_user *)buf->m.planes;
46600 *kernel_ptr = (void **)&buf->m.planes;
46601 *array_size = sizeof(struct v4l2_plane) * buf->length;
46602 ret = 1;
46603@@ -2430,7 +2431,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46604 ret = -EINVAL;
46605 break;
46606 }
46607- *user_ptr = (void __user *)edid->edid;
46608+ *user_ptr = (void __force_user *)edid->edid;
46609 *kernel_ptr = (void **)&edid->edid;
46610 *array_size = edid->blocks * 128;
46611 ret = 1;
46612@@ -2448,7 +2449,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
46613 ret = -EINVAL;
46614 break;
46615 }
46616- *user_ptr = (void __user *)ctrls->controls;
46617+ *user_ptr = (void __force_user *)ctrls->controls;
46618 *kernel_ptr = (void **)&ctrls->controls;
46619 *array_size = sizeof(struct v4l2_ext_control)
46620 * ctrls->count;
46621@@ -2549,7 +2550,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
46622 }
46623
46624 if (has_array_args) {
46625- *kernel_ptr = (void __force *)user_ptr;
46626+ *kernel_ptr = (void __force_kernel *)user_ptr;
46627 if (copy_to_user(user_ptr, mbuf, array_size))
46628 err = -EFAULT;
46629 goto out_array_args;
46630diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
46631index a896d94..a5d56b1 100644
46632--- a/drivers/message/fusion/mptbase.c
46633+++ b/drivers/message/fusion/mptbase.c
46634@@ -6752,8 +6752,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46635 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
46636 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
46637
46638+#ifdef CONFIG_GRKERNSEC_HIDESYM
46639+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
46640+#else
46641 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
46642 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
46643+#endif
46644+
46645 /*
46646 * Rounding UP to nearest 4-kB boundary here...
46647 */
46648@@ -6766,7 +6771,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
46649 ioc->facts.GlobalCredits);
46650
46651 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
46652+#ifdef CONFIG_GRKERNSEC_HIDESYM
46653+ NULL, NULL);
46654+#else
46655 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
46656+#endif
46657 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
46658 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
46659 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
46660diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
46661index 0707fa2..70ca794 100644
46662--- a/drivers/message/fusion/mptsas.c
46663+++ b/drivers/message/fusion/mptsas.c
46664@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
46665 return 0;
46666 }
46667
46668+static inline void
46669+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46670+{
46671+ if (phy_info->port_details) {
46672+ phy_info->port_details->rphy = rphy;
46673+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46674+ ioc->name, rphy));
46675+ }
46676+
46677+ if (rphy) {
46678+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46679+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46680+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46681+ ioc->name, rphy, rphy->dev.release));
46682+ }
46683+}
46684+
46685 /* no mutex */
46686 static void
46687 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
46688@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
46689 return NULL;
46690 }
46691
46692-static inline void
46693-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
46694-{
46695- if (phy_info->port_details) {
46696- phy_info->port_details->rphy = rphy;
46697- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
46698- ioc->name, rphy));
46699- }
46700-
46701- if (rphy) {
46702- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
46703- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
46704- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
46705- ioc->name, rphy, rphy->dev.release));
46706- }
46707-}
46708-
46709 static inline struct sas_port *
46710 mptsas_get_port(struct mptsas_phyinfo *phy_info)
46711 {
46712diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
46713index b7d87cd..3fb36da 100644
46714--- a/drivers/message/i2o/i2o_proc.c
46715+++ b/drivers/message/i2o/i2o_proc.c
46716@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
46717 "Array Controller Device"
46718 };
46719
46720-static char *chtostr(char *tmp, u8 *chars, int n)
46721-{
46722- tmp[0] = 0;
46723- return strncat(tmp, (char *)chars, n);
46724-}
46725-
46726 static int i2o_report_query_status(struct seq_file *seq, int block_status,
46727 char *group)
46728 {
46729@@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
46730 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
46731 {
46732 struct i2o_controller *c = (struct i2o_controller *)seq->private;
46733- static u32 work32[5];
46734- static u8 *work8 = (u8 *) work32;
46735- static u16 *work16 = (u16 *) work32;
46736+ u32 work32[5];
46737+ u8 *work8 = (u8 *) work32;
46738+ u16 *work16 = (u16 *) work32;
46739 int token;
46740 u32 hwcap;
46741
46742@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46743 } *result;
46744
46745 i2o_exec_execute_ddm_table ddm_table;
46746- char tmp[28 + 1];
46747
46748 result = kmalloc(sizeof(*result), GFP_KERNEL);
46749 if (!result)
46750@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
46751
46752 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
46753 seq_printf(seq, "%-#8x", ddm_table.module_id);
46754- seq_printf(seq, "%-29s",
46755- chtostr(tmp, ddm_table.module_name_version, 28));
46756+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
46757 seq_printf(seq, "%9d ", ddm_table.data_size);
46758 seq_printf(seq, "%8d", ddm_table.code_size);
46759
46760@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46761
46762 i2o_driver_result_table *result;
46763 i2o_driver_store_table *dst;
46764- char tmp[28 + 1];
46765
46766 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
46767 if (result == NULL)
46768@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
46769
46770 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
46771 seq_printf(seq, "%-#8x", dst->module_id);
46772- seq_printf(seq, "%-29s",
46773- chtostr(tmp, dst->module_name_version, 28));
46774- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
46775+ seq_printf(seq, "%-.28s", dst->module_name_version);
46776+ seq_printf(seq, "%-.8s", dst->date);
46777 seq_printf(seq, "%8d ", dst->module_size);
46778 seq_printf(seq, "%8d ", dst->mpb_size);
46779 seq_printf(seq, "0x%04x", dst->module_flags);
46780@@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
46781 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46782 {
46783 struct i2o_device *d = (struct i2o_device *)seq->private;
46784- static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46785+ u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
46786 // == (allow) 512d bytes (max)
46787- static u16 *work16 = (u16 *) work32;
46788+ u16 *work16 = (u16 *) work32;
46789 int token;
46790- char tmp[16 + 1];
46791
46792 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
46793
46794@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
46795 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
46796 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
46797 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
46798- seq_printf(seq, "Vendor info : %s\n",
46799- chtostr(tmp, (u8 *) (work32 + 2), 16));
46800- seq_printf(seq, "Product info : %s\n",
46801- chtostr(tmp, (u8 *) (work32 + 6), 16));
46802- seq_printf(seq, "Description : %s\n",
46803- chtostr(tmp, (u8 *) (work32 + 10), 16));
46804- seq_printf(seq, "Product rev. : %s\n",
46805- chtostr(tmp, (u8 *) (work32 + 14), 8));
46806+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
46807+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
46808+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
46809+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
46810
46811 seq_printf(seq, "Serial number : ");
46812 print_serial_number(seq, (u8 *) (work32 + 16),
46813@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46814 u8 pad[256]; // allow up to 256 byte (max) serial number
46815 } result;
46816
46817- char tmp[24 + 1];
46818-
46819 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
46820
46821 if (token < 0) {
46822@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
46823 }
46824
46825 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
46826- seq_printf(seq, "Module name : %s\n",
46827- chtostr(tmp, result.module_name, 24));
46828- seq_printf(seq, "Module revision : %s\n",
46829- chtostr(tmp, result.module_rev, 8));
46830+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
46831+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
46832
46833 seq_printf(seq, "Serial number : ");
46834 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
46835@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46836 u8 instance_number[4];
46837 } result;
46838
46839- char tmp[64 + 1];
46840-
46841 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
46842
46843 if (token < 0) {
46844@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46845 return 0;
46846 }
46847
46848- seq_printf(seq, "Device name : %s\n",
46849- chtostr(tmp, result.device_name, 64));
46850- seq_printf(seq, "Service name : %s\n",
46851- chtostr(tmp, result.service_name, 64));
46852- seq_printf(seq, "Physical name : %s\n",
46853- chtostr(tmp, result.physical_location, 64));
46854- seq_printf(seq, "Instance number : %s\n",
46855- chtostr(tmp, result.instance_number, 4));
46856+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
46857+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
46858+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
46859+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
46860
46861 return 0;
46862 }
46863@@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
46864 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
46865 {
46866 struct i2o_device *d = (struct i2o_device *)seq->private;
46867- static u32 work32[12];
46868- static u16 *work16 = (u16 *) work32;
46869- static u8 *work8 = (u8 *) work32;
46870+ u32 work32[12];
46871+ u16 *work16 = (u16 *) work32;
46872+ u8 *work8 = (u8 *) work32;
46873 int token;
46874
46875 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
46876diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
46877index 92752fb..a7494f6 100644
46878--- a/drivers/message/i2o/iop.c
46879+++ b/drivers/message/i2o/iop.c
46880@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
46881
46882 spin_lock_irqsave(&c->context_list_lock, flags);
46883
46884- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
46885- atomic_inc(&c->context_list_counter);
46886+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
46887+ atomic_inc_unchecked(&c->context_list_counter);
46888
46889- entry->context = atomic_read(&c->context_list_counter);
46890+ entry->context = atomic_read_unchecked(&c->context_list_counter);
46891
46892 list_add(&entry->list, &c->context_list);
46893
46894@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
46895
46896 #if BITS_PER_LONG == 64
46897 spin_lock_init(&c->context_list_lock);
46898- atomic_set(&c->context_list_counter, 0);
46899+ atomic_set_unchecked(&c->context_list_counter, 0);
46900 INIT_LIST_HEAD(&c->context_list);
46901 #endif
46902
46903diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
46904index b2c7e3b..85aa4764 100644
46905--- a/drivers/mfd/ab8500-debugfs.c
46906+++ b/drivers/mfd/ab8500-debugfs.c
46907@@ -100,7 +100,7 @@ static int irq_last;
46908 static u32 *irq_count;
46909 static int num_irqs;
46910
46911-static struct device_attribute **dev_attr;
46912+static device_attribute_no_const **dev_attr;
46913 static char **event_name;
46914
46915 static u8 avg_sample = SAMPLE_16;
46916diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
46917index ecbe78e..b2ca870 100644
46918--- a/drivers/mfd/max8925-i2c.c
46919+++ b/drivers/mfd/max8925-i2c.c
46920@@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
46921 const struct i2c_device_id *id)
46922 {
46923 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
46924- static struct max8925_chip *chip;
46925+ struct max8925_chip *chip;
46926 struct device_node *node = client->dev.of_node;
46927
46928 if (node && !pdata) {
46929diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
46930index f243e75..322176c 100644
46931--- a/drivers/mfd/tps65910.c
46932+++ b/drivers/mfd/tps65910.c
46933@@ -230,7 +230,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
46934 struct tps65910_platform_data *pdata)
46935 {
46936 int ret = 0;
46937- static struct regmap_irq_chip *tps6591x_irqs_chip;
46938+ struct regmap_irq_chip *tps6591x_irqs_chip;
46939
46940 if (!irq) {
46941 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
46942diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
46943index b1dabba..24a88f2 100644
46944--- a/drivers/mfd/twl4030-irq.c
46945+++ b/drivers/mfd/twl4030-irq.c
46946@@ -34,6 +34,7 @@
46947 #include <linux/of.h>
46948 #include <linux/irqdomain.h>
46949 #include <linux/i2c/twl.h>
46950+#include <asm/pgtable.h>
46951
46952 #include "twl-core.h"
46953
46954@@ -725,10 +726,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
46955 * Install an irq handler for each of the SIH modules;
46956 * clone dummy irq_chip since PIH can't *do* anything
46957 */
46958- twl4030_irq_chip = dummy_irq_chip;
46959- twl4030_irq_chip.name = "twl4030";
46960+ pax_open_kernel();
46961+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
46962+ *(const char **)&twl4030_irq_chip.name = "twl4030";
46963
46964- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46965+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
46966+ pax_close_kernel();
46967
46968 for (i = irq_base; i < irq_end; i++) {
46969 irq_set_chip_and_handler(i, &twl4030_irq_chip,
46970diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
46971index 464419b..64bae8d 100644
46972--- a/drivers/misc/c2port/core.c
46973+++ b/drivers/misc/c2port/core.c
46974@@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
46975 goto error_idr_alloc;
46976 c2dev->id = ret;
46977
46978- bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46979+ pax_open_kernel();
46980+ *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
46981+ pax_close_kernel();
46982
46983 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
46984 "c2port%d", c2dev->id);
46985diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
46986index 3f2b625..945e179 100644
46987--- a/drivers/misc/eeprom/sunxi_sid.c
46988+++ b/drivers/misc/eeprom/sunxi_sid.c
46989@@ -126,7 +126,9 @@ static int sunxi_sid_probe(struct platform_device *pdev)
46990
46991 platform_set_drvdata(pdev, sid_data);
46992
46993- sid_bin_attr.size = sid_data->keysize;
46994+ pax_open_kernel();
46995+ *(size_t *)&sid_bin_attr.size = sid_data->keysize;
46996+ pax_close_kernel();
46997 if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
46998 return -ENODEV;
46999
47000diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
47001index 36f5d52..32311c3 100644
47002--- a/drivers/misc/kgdbts.c
47003+++ b/drivers/misc/kgdbts.c
47004@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
47005 char before[BREAK_INSTR_SIZE];
47006 char after[BREAK_INSTR_SIZE];
47007
47008- probe_kernel_read(before, (char *)kgdbts_break_test,
47009+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
47010 BREAK_INSTR_SIZE);
47011 init_simple_test();
47012 ts.tst = plant_and_detach_test;
47013@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
47014 /* Activate test with initial breakpoint */
47015 if (!is_early)
47016 kgdb_breakpoint();
47017- probe_kernel_read(after, (char *)kgdbts_break_test,
47018+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
47019 BREAK_INSTR_SIZE);
47020 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
47021 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
47022diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
47023index 3ef4627..8d00486 100644
47024--- a/drivers/misc/lis3lv02d/lis3lv02d.c
47025+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
47026@@ -497,7 +497,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
47027 * the lid is closed. This leads to interrupts as soon as a little move
47028 * is done.
47029 */
47030- atomic_inc(&lis3->count);
47031+ atomic_inc_unchecked(&lis3->count);
47032
47033 wake_up_interruptible(&lis3->misc_wait);
47034 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
47035@@ -583,7 +583,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
47036 if (lis3->pm_dev)
47037 pm_runtime_get_sync(lis3->pm_dev);
47038
47039- atomic_set(&lis3->count, 0);
47040+ atomic_set_unchecked(&lis3->count, 0);
47041 return 0;
47042 }
47043
47044@@ -615,7 +615,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
47045 add_wait_queue(&lis3->misc_wait, &wait);
47046 while (true) {
47047 set_current_state(TASK_INTERRUPTIBLE);
47048- data = atomic_xchg(&lis3->count, 0);
47049+ data = atomic_xchg_unchecked(&lis3->count, 0);
47050 if (data)
47051 break;
47052
47053@@ -656,7 +656,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
47054 struct lis3lv02d, miscdev);
47055
47056 poll_wait(file, &lis3->misc_wait, wait);
47057- if (atomic_read(&lis3->count))
47058+ if (atomic_read_unchecked(&lis3->count))
47059 return POLLIN | POLLRDNORM;
47060 return 0;
47061 }
47062diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
47063index c439c82..1f20f57 100644
47064--- a/drivers/misc/lis3lv02d/lis3lv02d.h
47065+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
47066@@ -297,7 +297,7 @@ struct lis3lv02d {
47067 struct input_polled_dev *idev; /* input device */
47068 struct platform_device *pdev; /* platform device */
47069 struct regulator_bulk_data regulators[2];
47070- atomic_t count; /* interrupt count after last read */
47071+ atomic_unchecked_t count; /* interrupt count after last read */
47072 union axis_conversion ac; /* hw -> logical axis */
47073 int mapped_btns[3];
47074
47075diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
47076index 2f30bad..c4c13d0 100644
47077--- a/drivers/misc/sgi-gru/gruhandles.c
47078+++ b/drivers/misc/sgi-gru/gruhandles.c
47079@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
47080 unsigned long nsec;
47081
47082 nsec = CLKS2NSEC(clks);
47083- atomic_long_inc(&mcs_op_statistics[op].count);
47084- atomic_long_add(nsec, &mcs_op_statistics[op].total);
47085+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
47086+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
47087 if (mcs_op_statistics[op].max < nsec)
47088 mcs_op_statistics[op].max = nsec;
47089 }
47090diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
47091index 4f76359..cdfcb2e 100644
47092--- a/drivers/misc/sgi-gru/gruprocfs.c
47093+++ b/drivers/misc/sgi-gru/gruprocfs.c
47094@@ -32,9 +32,9 @@
47095
47096 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
47097
47098-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
47099+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
47100 {
47101- unsigned long val = atomic_long_read(v);
47102+ unsigned long val = atomic_long_read_unchecked(v);
47103
47104 seq_printf(s, "%16lu %s\n", val, id);
47105 }
47106@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
47107
47108 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
47109 for (op = 0; op < mcsop_last; op++) {
47110- count = atomic_long_read(&mcs_op_statistics[op].count);
47111- total = atomic_long_read(&mcs_op_statistics[op].total);
47112+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
47113+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
47114 max = mcs_op_statistics[op].max;
47115 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
47116 count ? total / count : 0, max);
47117diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
47118index 5c3ce24..4915ccb 100644
47119--- a/drivers/misc/sgi-gru/grutables.h
47120+++ b/drivers/misc/sgi-gru/grutables.h
47121@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
47122 * GRU statistics.
47123 */
47124 struct gru_stats_s {
47125- atomic_long_t vdata_alloc;
47126- atomic_long_t vdata_free;
47127- atomic_long_t gts_alloc;
47128- atomic_long_t gts_free;
47129- atomic_long_t gms_alloc;
47130- atomic_long_t gms_free;
47131- atomic_long_t gts_double_allocate;
47132- atomic_long_t assign_context;
47133- atomic_long_t assign_context_failed;
47134- atomic_long_t free_context;
47135- atomic_long_t load_user_context;
47136- atomic_long_t load_kernel_context;
47137- atomic_long_t lock_kernel_context;
47138- atomic_long_t unlock_kernel_context;
47139- atomic_long_t steal_user_context;
47140- atomic_long_t steal_kernel_context;
47141- atomic_long_t steal_context_failed;
47142- atomic_long_t nopfn;
47143- atomic_long_t asid_new;
47144- atomic_long_t asid_next;
47145- atomic_long_t asid_wrap;
47146- atomic_long_t asid_reuse;
47147- atomic_long_t intr;
47148- atomic_long_t intr_cbr;
47149- atomic_long_t intr_tfh;
47150- atomic_long_t intr_spurious;
47151- atomic_long_t intr_mm_lock_failed;
47152- atomic_long_t call_os;
47153- atomic_long_t call_os_wait_queue;
47154- atomic_long_t user_flush_tlb;
47155- atomic_long_t user_unload_context;
47156- atomic_long_t user_exception;
47157- atomic_long_t set_context_option;
47158- atomic_long_t check_context_retarget_intr;
47159- atomic_long_t check_context_unload;
47160- atomic_long_t tlb_dropin;
47161- atomic_long_t tlb_preload_page;
47162- atomic_long_t tlb_dropin_fail_no_asid;
47163- atomic_long_t tlb_dropin_fail_upm;
47164- atomic_long_t tlb_dropin_fail_invalid;
47165- atomic_long_t tlb_dropin_fail_range_active;
47166- atomic_long_t tlb_dropin_fail_idle;
47167- atomic_long_t tlb_dropin_fail_fmm;
47168- atomic_long_t tlb_dropin_fail_no_exception;
47169- atomic_long_t tfh_stale_on_fault;
47170- atomic_long_t mmu_invalidate_range;
47171- atomic_long_t mmu_invalidate_page;
47172- atomic_long_t flush_tlb;
47173- atomic_long_t flush_tlb_gru;
47174- atomic_long_t flush_tlb_gru_tgh;
47175- atomic_long_t flush_tlb_gru_zero_asid;
47176+ atomic_long_unchecked_t vdata_alloc;
47177+ atomic_long_unchecked_t vdata_free;
47178+ atomic_long_unchecked_t gts_alloc;
47179+ atomic_long_unchecked_t gts_free;
47180+ atomic_long_unchecked_t gms_alloc;
47181+ atomic_long_unchecked_t gms_free;
47182+ atomic_long_unchecked_t gts_double_allocate;
47183+ atomic_long_unchecked_t assign_context;
47184+ atomic_long_unchecked_t assign_context_failed;
47185+ atomic_long_unchecked_t free_context;
47186+ atomic_long_unchecked_t load_user_context;
47187+ atomic_long_unchecked_t load_kernel_context;
47188+ atomic_long_unchecked_t lock_kernel_context;
47189+ atomic_long_unchecked_t unlock_kernel_context;
47190+ atomic_long_unchecked_t steal_user_context;
47191+ atomic_long_unchecked_t steal_kernel_context;
47192+ atomic_long_unchecked_t steal_context_failed;
47193+ atomic_long_unchecked_t nopfn;
47194+ atomic_long_unchecked_t asid_new;
47195+ atomic_long_unchecked_t asid_next;
47196+ atomic_long_unchecked_t asid_wrap;
47197+ atomic_long_unchecked_t asid_reuse;
47198+ atomic_long_unchecked_t intr;
47199+ atomic_long_unchecked_t intr_cbr;
47200+ atomic_long_unchecked_t intr_tfh;
47201+ atomic_long_unchecked_t intr_spurious;
47202+ atomic_long_unchecked_t intr_mm_lock_failed;
47203+ atomic_long_unchecked_t call_os;
47204+ atomic_long_unchecked_t call_os_wait_queue;
47205+ atomic_long_unchecked_t user_flush_tlb;
47206+ atomic_long_unchecked_t user_unload_context;
47207+ atomic_long_unchecked_t user_exception;
47208+ atomic_long_unchecked_t set_context_option;
47209+ atomic_long_unchecked_t check_context_retarget_intr;
47210+ atomic_long_unchecked_t check_context_unload;
47211+ atomic_long_unchecked_t tlb_dropin;
47212+ atomic_long_unchecked_t tlb_preload_page;
47213+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
47214+ atomic_long_unchecked_t tlb_dropin_fail_upm;
47215+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
47216+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
47217+ atomic_long_unchecked_t tlb_dropin_fail_idle;
47218+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
47219+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
47220+ atomic_long_unchecked_t tfh_stale_on_fault;
47221+ atomic_long_unchecked_t mmu_invalidate_range;
47222+ atomic_long_unchecked_t mmu_invalidate_page;
47223+ atomic_long_unchecked_t flush_tlb;
47224+ atomic_long_unchecked_t flush_tlb_gru;
47225+ atomic_long_unchecked_t flush_tlb_gru_tgh;
47226+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
47227
47228- atomic_long_t copy_gpa;
47229- atomic_long_t read_gpa;
47230+ atomic_long_unchecked_t copy_gpa;
47231+ atomic_long_unchecked_t read_gpa;
47232
47233- atomic_long_t mesq_receive;
47234- atomic_long_t mesq_receive_none;
47235- atomic_long_t mesq_send;
47236- atomic_long_t mesq_send_failed;
47237- atomic_long_t mesq_noop;
47238- atomic_long_t mesq_send_unexpected_error;
47239- atomic_long_t mesq_send_lb_overflow;
47240- atomic_long_t mesq_send_qlimit_reached;
47241- atomic_long_t mesq_send_amo_nacked;
47242- atomic_long_t mesq_send_put_nacked;
47243- atomic_long_t mesq_page_overflow;
47244- atomic_long_t mesq_qf_locked;
47245- atomic_long_t mesq_qf_noop_not_full;
47246- atomic_long_t mesq_qf_switch_head_failed;
47247- atomic_long_t mesq_qf_unexpected_error;
47248- atomic_long_t mesq_noop_unexpected_error;
47249- atomic_long_t mesq_noop_lb_overflow;
47250- atomic_long_t mesq_noop_qlimit_reached;
47251- atomic_long_t mesq_noop_amo_nacked;
47252- atomic_long_t mesq_noop_put_nacked;
47253- atomic_long_t mesq_noop_page_overflow;
47254+ atomic_long_unchecked_t mesq_receive;
47255+ atomic_long_unchecked_t mesq_receive_none;
47256+ atomic_long_unchecked_t mesq_send;
47257+ atomic_long_unchecked_t mesq_send_failed;
47258+ atomic_long_unchecked_t mesq_noop;
47259+ atomic_long_unchecked_t mesq_send_unexpected_error;
47260+ atomic_long_unchecked_t mesq_send_lb_overflow;
47261+ atomic_long_unchecked_t mesq_send_qlimit_reached;
47262+ atomic_long_unchecked_t mesq_send_amo_nacked;
47263+ atomic_long_unchecked_t mesq_send_put_nacked;
47264+ atomic_long_unchecked_t mesq_page_overflow;
47265+ atomic_long_unchecked_t mesq_qf_locked;
47266+ atomic_long_unchecked_t mesq_qf_noop_not_full;
47267+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
47268+ atomic_long_unchecked_t mesq_qf_unexpected_error;
47269+ atomic_long_unchecked_t mesq_noop_unexpected_error;
47270+ atomic_long_unchecked_t mesq_noop_lb_overflow;
47271+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
47272+ atomic_long_unchecked_t mesq_noop_amo_nacked;
47273+ atomic_long_unchecked_t mesq_noop_put_nacked;
47274+ atomic_long_unchecked_t mesq_noop_page_overflow;
47275
47276 };
47277
47278@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
47279 tghop_invalidate, mcsop_last};
47280
47281 struct mcs_op_statistic {
47282- atomic_long_t count;
47283- atomic_long_t total;
47284+ atomic_long_unchecked_t count;
47285+ atomic_long_unchecked_t total;
47286 unsigned long max;
47287 };
47288
47289@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
47290
47291 #define STAT(id) do { \
47292 if (gru_options & OPT_STATS) \
47293- atomic_long_inc(&gru_stats.id); \
47294+ atomic_long_inc_unchecked(&gru_stats.id); \
47295 } while (0)
47296
47297 #ifdef CONFIG_SGI_GRU_DEBUG
47298diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
47299index c862cd4..0d176fe 100644
47300--- a/drivers/misc/sgi-xp/xp.h
47301+++ b/drivers/misc/sgi-xp/xp.h
47302@@ -288,7 +288,7 @@ struct xpc_interface {
47303 xpc_notify_func, void *);
47304 void (*received) (short, int, void *);
47305 enum xp_retval (*partid_to_nasids) (short, void *);
47306-};
47307+} __no_const;
47308
47309 extern struct xpc_interface xpc_interface;
47310
47311diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
47312index 01be66d..e3a0c7e 100644
47313--- a/drivers/misc/sgi-xp/xp_main.c
47314+++ b/drivers/misc/sgi-xp/xp_main.c
47315@@ -78,13 +78,13 @@ xpc_notloaded(void)
47316 }
47317
47318 struct xpc_interface xpc_interface = {
47319- (void (*)(int))xpc_notloaded,
47320- (void (*)(int))xpc_notloaded,
47321- (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47322- (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47323+ .connect = (void (*)(int))xpc_notloaded,
47324+ .disconnect = (void (*)(int))xpc_notloaded,
47325+ .send = (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
47326+ .send_notify = (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
47327 void *))xpc_notloaded,
47328- (void (*)(short, int, void *))xpc_notloaded,
47329- (enum xp_retval(*)(short, void *))xpc_notloaded
47330+ .received = (void (*)(short, int, void *))xpc_notloaded,
47331+ .partid_to_nasids = (enum xp_retval(*)(short, void *))xpc_notloaded
47332 };
47333 EXPORT_SYMBOL_GPL(xpc_interface);
47334
47335diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
47336index b94d5f7..7f494c5 100644
47337--- a/drivers/misc/sgi-xp/xpc.h
47338+++ b/drivers/misc/sgi-xp/xpc.h
47339@@ -835,6 +835,7 @@ struct xpc_arch_operations {
47340 void (*received_payload) (struct xpc_channel *, void *);
47341 void (*notify_senders_of_disconnect) (struct xpc_channel *);
47342 };
47343+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
47344
47345 /* struct xpc_partition act_state values (for XPC HB) */
47346
47347@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
47348 /* found in xpc_main.c */
47349 extern struct device *xpc_part;
47350 extern struct device *xpc_chan;
47351-extern struct xpc_arch_operations xpc_arch_ops;
47352+extern xpc_arch_operations_no_const xpc_arch_ops;
47353 extern int xpc_disengage_timelimit;
47354 extern int xpc_disengage_timedout;
47355 extern int xpc_activate_IRQ_rcvd;
47356diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
47357index 82dc574..8539ab2 100644
47358--- a/drivers/misc/sgi-xp/xpc_main.c
47359+++ b/drivers/misc/sgi-xp/xpc_main.c
47360@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
47361 .notifier_call = xpc_system_die,
47362 };
47363
47364-struct xpc_arch_operations xpc_arch_ops;
47365+xpc_arch_operations_no_const xpc_arch_ops;
47366
47367 /*
47368 * Timer function to enforce the timelimit on the partition disengage.
47369@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
47370
47371 if (((die_args->trapnr == X86_TRAP_MF) ||
47372 (die_args->trapnr == X86_TRAP_XF)) &&
47373- !user_mode_vm(die_args->regs))
47374+ !user_mode(die_args->regs))
47375 xpc_die_deactivate();
47376
47377 break;
47378diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
47379index ede41f0..744fbd9 100644
47380--- a/drivers/mmc/card/block.c
47381+++ b/drivers/mmc/card/block.c
47382@@ -574,7 +574,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
47383 if (idata->ic.postsleep_min_us)
47384 usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
47385
47386- if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
47387+ if (copy_to_user(ic_ptr->response, cmd.resp, sizeof(cmd.resp))) {
47388 err = -EFAULT;
47389 goto cmd_rel_host;
47390 }
47391diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
47392index f51b5ba..86614a7 100644
47393--- a/drivers/mmc/core/mmc_ops.c
47394+++ b/drivers/mmc/core/mmc_ops.c
47395@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
47396 void *data_buf;
47397 int is_on_stack;
47398
47399- is_on_stack = object_is_on_stack(buf);
47400+ is_on_stack = object_starts_on_stack(buf);
47401 if (is_on_stack) {
47402 /*
47403 * dma onto stack is unsafe/nonportable, but callers to this
47404diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
47405index 08fd956..370487a 100644
47406--- a/drivers/mmc/host/dw_mmc.h
47407+++ b/drivers/mmc/host/dw_mmc.h
47408@@ -262,5 +262,5 @@ struct dw_mci_drv_data {
47409 int (*parse_dt)(struct dw_mci *host);
47410 int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode,
47411 struct dw_mci_tuning_data *tuning_data);
47412-};
47413+} __do_const;
47414 #endif /* _DW_MMC_H_ */
47415diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
47416index e4d4707..28262a3 100644
47417--- a/drivers/mmc/host/mmci.c
47418+++ b/drivers/mmc/host/mmci.c
47419@@ -1612,7 +1612,9 @@ static int mmci_probe(struct amba_device *dev,
47420 mmc->caps |= MMC_CAP_CMD23;
47421
47422 if (variant->busy_detect) {
47423- mmci_ops.card_busy = mmci_card_busy;
47424+ pax_open_kernel();
47425+ *(void **)&mmci_ops.card_busy = mmci_card_busy;
47426+ pax_close_kernel();
47427 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
47428 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
47429 mmc->max_busy_timeout = 0;
47430diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
47431index ccec0e3..199f9ce 100644
47432--- a/drivers/mmc/host/sdhci-esdhc-imx.c
47433+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
47434@@ -1034,9 +1034,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
47435 host->mmc->caps |= MMC_CAP_1_8V_DDR;
47436 }
47437
47438- if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
47439- sdhci_esdhc_ops.platform_execute_tuning =
47440+ if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) {
47441+ pax_open_kernel();
47442+ *(void **)&sdhci_esdhc_ops.platform_execute_tuning =
47443 esdhc_executing_tuning;
47444+ pax_close_kernel();
47445+ }
47446
47447 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
47448 writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
47449diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
47450index 1e47903..7683916 100644
47451--- a/drivers/mmc/host/sdhci-s3c.c
47452+++ b/drivers/mmc/host/sdhci-s3c.c
47453@@ -584,9 +584,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
47454 * we can use overriding functions instead of default.
47455 */
47456 if (sc->no_divider) {
47457- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47458- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47459- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47460+ pax_open_kernel();
47461+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
47462+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
47463+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
47464+ pax_close_kernel();
47465 }
47466
47467 /* It supports additional host capabilities if needed */
47468diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
47469index 423666b..81ff5eb 100644
47470--- a/drivers/mtd/chips/cfi_cmdset_0020.c
47471+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
47472@@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
47473 size_t totlen = 0, thislen;
47474 int ret = 0;
47475 size_t buflen = 0;
47476- static char *buffer;
47477+ char *buffer;
47478
47479 if (!ECCBUF_SIZE) {
47480 /* We should fall back to a general writev implementation.
47481diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
47482index 0b071a3..8ec3d5b 100644
47483--- a/drivers/mtd/nand/denali.c
47484+++ b/drivers/mtd/nand/denali.c
47485@@ -24,6 +24,7 @@
47486 #include <linux/slab.h>
47487 #include <linux/mtd/mtd.h>
47488 #include <linux/module.h>
47489+#include <linux/slab.h>
47490
47491 #include "denali.h"
47492
47493diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47494index 959cb9b..8520fe5 100644
47495--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47496+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
47497@@ -386,7 +386,7 @@ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
47498
47499 /* first try to map the upper buffer directly */
47500 if (virt_addr_valid(this->upper_buf) &&
47501- !object_is_on_stack(this->upper_buf)) {
47502+ !object_starts_on_stack(this->upper_buf)) {
47503 sg_init_one(sgl, this->upper_buf, this->upper_len);
47504 ret = dma_map_sg(this->dev, sgl, 1, dr);
47505 if (ret == 0)
47506diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
47507index 51b9d6a..52af9a7 100644
47508--- a/drivers/mtd/nftlmount.c
47509+++ b/drivers/mtd/nftlmount.c
47510@@ -24,6 +24,7 @@
47511 #include <asm/errno.h>
47512 #include <linux/delay.h>
47513 #include <linux/slab.h>
47514+#include <linux/sched.h>
47515 #include <linux/mtd/mtd.h>
47516 #include <linux/mtd/nand.h>
47517 #include <linux/mtd/nftl.h>
47518diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
47519index cf49c22..971b133 100644
47520--- a/drivers/mtd/sm_ftl.c
47521+++ b/drivers/mtd/sm_ftl.c
47522@@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
47523 #define SM_CIS_VENDOR_OFFSET 0x59
47524 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
47525 {
47526- struct attribute_group *attr_group;
47527+ attribute_group_no_const *attr_group;
47528 struct attribute **attributes;
47529 struct sm_sysfs_attribute *vendor_attribute;
47530 char *vendor;
47531diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
47532index d163e11..f517018 100644
47533--- a/drivers/net/bonding/bond_netlink.c
47534+++ b/drivers/net/bonding/bond_netlink.c
47535@@ -548,7 +548,7 @@ nla_put_failure:
47536 return -EMSGSIZE;
47537 }
47538
47539-struct rtnl_link_ops bond_link_ops __read_mostly = {
47540+struct rtnl_link_ops bond_link_ops = {
47541 .kind = "bond",
47542 .priv_size = sizeof(struct bonding),
47543 .setup = bond_setup,
47544diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
47545index 4168822..f38eeddf 100644
47546--- a/drivers/net/can/Kconfig
47547+++ b/drivers/net/can/Kconfig
47548@@ -98,7 +98,7 @@ config CAN_JANZ_ICAN3
47549
47550 config CAN_FLEXCAN
47551 tristate "Support for Freescale FLEXCAN based chips"
47552- depends on ARM || PPC
47553+ depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
47554 ---help---
47555 Say Y here if you want to support for Freescale FlexCAN.
47556
47557diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
47558index 1d162cc..b546a75 100644
47559--- a/drivers/net/ethernet/8390/ax88796.c
47560+++ b/drivers/net/ethernet/8390/ax88796.c
47561@@ -889,9 +889,11 @@ static int ax_probe(struct platform_device *pdev)
47562 if (ax->plat->reg_offsets)
47563 ei_local->reg_offset = ax->plat->reg_offsets;
47564 else {
47565+ resource_size_t _mem_size = mem_size;
47566+ do_div(_mem_size, 0x18);
47567 ei_local->reg_offset = ax->reg_offsets;
47568 for (ret = 0; ret < 0x18; ret++)
47569- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
47570+ ax->reg_offsets[ret] = _mem_size * ret;
47571 }
47572
47573 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
47574diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
47575index 7330681..7e9e463 100644
47576--- a/drivers/net/ethernet/altera/altera_tse_main.c
47577+++ b/drivers/net/ethernet/altera/altera_tse_main.c
47578@@ -1182,7 +1182,7 @@ static int tse_shutdown(struct net_device *dev)
47579 return 0;
47580 }
47581
47582-static struct net_device_ops altera_tse_netdev_ops = {
47583+static net_device_ops_no_const altera_tse_netdev_ops __read_only = {
47584 .ndo_open = tse_open,
47585 .ndo_stop = tse_shutdown,
47586 .ndo_start_xmit = tse_start_xmit,
47587@@ -1439,11 +1439,13 @@ static int altera_tse_probe(struct platform_device *pdev)
47588 ndev->netdev_ops = &altera_tse_netdev_ops;
47589 altera_tse_set_ethtool_ops(ndev);
47590
47591+ pax_open_kernel();
47592 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
47593
47594 if (priv->hash_filter)
47595 altera_tse_netdev_ops.ndo_set_rx_mode =
47596 tse_set_rx_mode_hashfilter;
47597+ pax_close_kernel();
47598
47599 /* Scatter/gather IO is not supported,
47600 * so it is turned off
47601diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47602index cc25a3a..c8d72d3 100644
47603--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47604+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
47605@@ -1083,14 +1083,14 @@ do { \
47606 * operations, everything works on mask values.
47607 */
47608 #define XMDIO_READ(_pdata, _mmd, _reg) \
47609- ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
47610+ ((_pdata)->hw_if->read_mmd_regs((_pdata), 0, \
47611 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
47612
47613 #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
47614 (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
47615
47616 #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
47617- ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
47618+ ((_pdata)->hw_if->write_mmd_regs((_pdata), 0, \
47619 MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
47620
47621 #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
47622diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47623index 7d6a49b..e6d403b 100644
47624--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47625+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
47626@@ -188,7 +188,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
47627
47628 memcpy(pdata->ets, ets, sizeof(*pdata->ets));
47629
47630- pdata->hw_if.config_dcb_tc(pdata);
47631+ pdata->hw_if->config_dcb_tc(pdata);
47632
47633 return 0;
47634 }
47635@@ -227,7 +227,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
47636
47637 memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
47638
47639- pdata->hw_if.config_dcb_pfc(pdata);
47640+ pdata->hw_if->config_dcb_pfc(pdata);
47641
47642 return 0;
47643 }
47644diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47645index 1c5d62e..8e14d54 100644
47646--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47647+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
47648@@ -236,7 +236,7 @@ err_ring:
47649
47650 static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47651 {
47652- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47653+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47654 struct xgbe_channel *channel;
47655 struct xgbe_ring *ring;
47656 struct xgbe_ring_data *rdata;
47657@@ -277,7 +277,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
47658
47659 static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
47660 {
47661- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47662+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47663 struct xgbe_channel *channel;
47664 struct xgbe_ring *ring;
47665 struct xgbe_ring_desc *rdesc;
47666@@ -506,7 +506,7 @@ err_out:
47667 static void xgbe_realloc_skb(struct xgbe_channel *channel)
47668 {
47669 struct xgbe_prv_data *pdata = channel->pdata;
47670- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47671+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47672 struct xgbe_ring *ring = channel->rx_ring;
47673 struct xgbe_ring_data *rdata;
47674 struct sk_buff *skb = NULL;
47675@@ -550,17 +550,12 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
47676 DBGPR("<--xgbe_realloc_skb\n");
47677 }
47678
47679-void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
47680-{
47681- DBGPR("-->xgbe_init_function_ptrs_desc\n");
47682-
47683- desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
47684- desc_if->free_ring_resources = xgbe_free_ring_resources;
47685- desc_if->map_tx_skb = xgbe_map_tx_skb;
47686- desc_if->realloc_skb = xgbe_realloc_skb;
47687- desc_if->unmap_skb = xgbe_unmap_skb;
47688- desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
47689- desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
47690-
47691- DBGPR("<--xgbe_init_function_ptrs_desc\n");
47692-}
47693+const struct xgbe_desc_if default_xgbe_desc_if = {
47694+ .alloc_ring_resources = xgbe_alloc_ring_resources,
47695+ .free_ring_resources = xgbe_free_ring_resources,
47696+ .map_tx_skb = xgbe_map_tx_skb,
47697+ .realloc_skb = xgbe_realloc_skb,
47698+ .unmap_skb = xgbe_unmap_skb,
47699+ .wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init,
47700+ .wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init,
47701+};
47702diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47703index ea27383..faa8936 100644
47704--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47705+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
47706@@ -2463,7 +2463,7 @@ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
47707
47708 static int xgbe_init(struct xgbe_prv_data *pdata)
47709 {
47710- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47711+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47712 int ret;
47713
47714 DBGPR("-->xgbe_init\n");
47715@@ -2525,101 +2525,96 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
47716 return 0;
47717 }
47718
47719-void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
47720-{
47721- DBGPR("-->xgbe_init_function_ptrs\n");
47722-
47723- hw_if->tx_complete = xgbe_tx_complete;
47724-
47725- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
47726- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
47727- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
47728- hw_if->set_mac_address = xgbe_set_mac_address;
47729-
47730- hw_if->enable_rx_csum = xgbe_enable_rx_csum;
47731- hw_if->disable_rx_csum = xgbe_disable_rx_csum;
47732-
47733- hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
47734- hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
47735- hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
47736- hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
47737- hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
47738-
47739- hw_if->read_mmd_regs = xgbe_read_mmd_regs;
47740- hw_if->write_mmd_regs = xgbe_write_mmd_regs;
47741-
47742- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
47743- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
47744- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
47745-
47746- hw_if->enable_tx = xgbe_enable_tx;
47747- hw_if->disable_tx = xgbe_disable_tx;
47748- hw_if->enable_rx = xgbe_enable_rx;
47749- hw_if->disable_rx = xgbe_disable_rx;
47750-
47751- hw_if->powerup_tx = xgbe_powerup_tx;
47752- hw_if->powerdown_tx = xgbe_powerdown_tx;
47753- hw_if->powerup_rx = xgbe_powerup_rx;
47754- hw_if->powerdown_rx = xgbe_powerdown_rx;
47755-
47756- hw_if->pre_xmit = xgbe_pre_xmit;
47757- hw_if->dev_read = xgbe_dev_read;
47758- hw_if->enable_int = xgbe_enable_int;
47759- hw_if->disable_int = xgbe_disable_int;
47760- hw_if->init = xgbe_init;
47761- hw_if->exit = xgbe_exit;
47762+const struct xgbe_hw_if default_xgbe_hw_if = {
47763+ .tx_complete = xgbe_tx_complete,
47764+
47765+ .set_promiscuous_mode = xgbe_set_promiscuous_mode,
47766+ .set_all_multicast_mode = xgbe_set_all_multicast_mode,
47767+ .add_mac_addresses = xgbe_add_mac_addresses,
47768+ .set_mac_address = xgbe_set_mac_address,
47769+
47770+ .enable_rx_csum = xgbe_enable_rx_csum,
47771+ .disable_rx_csum = xgbe_disable_rx_csum,
47772+
47773+ .enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping,
47774+ .disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping,
47775+ .enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering,
47776+ .disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering,
47777+ .update_vlan_hash_table = xgbe_update_vlan_hash_table,
47778+
47779+ .read_mmd_regs = xgbe_read_mmd_regs,
47780+ .write_mmd_regs = xgbe_write_mmd_regs,
47781+
47782+ .set_gmii_speed = xgbe_set_gmii_speed,
47783+ .set_gmii_2500_speed = xgbe_set_gmii_2500_speed,
47784+ .set_xgmii_speed = xgbe_set_xgmii_speed,
47785+
47786+ .enable_tx = xgbe_enable_tx,
47787+ .disable_tx = xgbe_disable_tx,
47788+ .enable_rx = xgbe_enable_rx,
47789+ .disable_rx = xgbe_disable_rx,
47790+
47791+ .powerup_tx = xgbe_powerup_tx,
47792+ .powerdown_tx = xgbe_powerdown_tx,
47793+ .powerup_rx = xgbe_powerup_rx,
47794+ .powerdown_rx = xgbe_powerdown_rx,
47795+
47796+ .pre_xmit = xgbe_pre_xmit,
47797+ .dev_read = xgbe_dev_read,
47798+ .enable_int = xgbe_enable_int,
47799+ .disable_int = xgbe_disable_int,
47800+ .init = xgbe_init,
47801+ .exit = xgbe_exit,
47802
47803 /* Descriptor related Sequences have to be initialized here */
47804- hw_if->tx_desc_init = xgbe_tx_desc_init;
47805- hw_if->rx_desc_init = xgbe_rx_desc_init;
47806- hw_if->tx_desc_reset = xgbe_tx_desc_reset;
47807- hw_if->rx_desc_reset = xgbe_rx_desc_reset;
47808- hw_if->is_last_desc = xgbe_is_last_desc;
47809- hw_if->is_context_desc = xgbe_is_context_desc;
47810+ .tx_desc_init = xgbe_tx_desc_init,
47811+ .rx_desc_init = xgbe_rx_desc_init,
47812+ .tx_desc_reset = xgbe_tx_desc_reset,
47813+ .rx_desc_reset = xgbe_rx_desc_reset,
47814+ .is_last_desc = xgbe_is_last_desc,
47815+ .is_context_desc = xgbe_is_context_desc,
47816
47817 /* For FLOW ctrl */
47818- hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
47819- hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
47820+ .config_tx_flow_control = xgbe_config_tx_flow_control,
47821+ .config_rx_flow_control = xgbe_config_rx_flow_control,
47822
47823 /* For RX coalescing */
47824- hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
47825- hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
47826- hw_if->usec_to_riwt = xgbe_usec_to_riwt;
47827- hw_if->riwt_to_usec = xgbe_riwt_to_usec;
47828+ .config_rx_coalesce = xgbe_config_rx_coalesce,
47829+ .config_tx_coalesce = xgbe_config_tx_coalesce,
47830+ .usec_to_riwt = xgbe_usec_to_riwt,
47831+ .riwt_to_usec = xgbe_riwt_to_usec,
47832
47833 /* For RX and TX threshold config */
47834- hw_if->config_rx_threshold = xgbe_config_rx_threshold;
47835- hw_if->config_tx_threshold = xgbe_config_tx_threshold;
47836+ .config_rx_threshold = xgbe_config_rx_threshold,
47837+ .config_tx_threshold = xgbe_config_tx_threshold,
47838
47839 /* For RX and TX Store and Forward Mode config */
47840- hw_if->config_rsf_mode = xgbe_config_rsf_mode;
47841- hw_if->config_tsf_mode = xgbe_config_tsf_mode;
47842+ .config_rsf_mode = xgbe_config_rsf_mode,
47843+ .config_tsf_mode = xgbe_config_tsf_mode,
47844
47845 /* For TX DMA Operating on Second Frame config */
47846- hw_if->config_osp_mode = xgbe_config_osp_mode;
47847+ .config_osp_mode = xgbe_config_osp_mode,
47848
47849 /* For RX and TX PBL config */
47850- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
47851- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
47852- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
47853- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
47854- hw_if->config_pblx8 = xgbe_config_pblx8;
47855+ .config_rx_pbl_val = xgbe_config_rx_pbl_val,
47856+ .get_rx_pbl_val = xgbe_get_rx_pbl_val,
47857+ .config_tx_pbl_val = xgbe_config_tx_pbl_val,
47858+ .get_tx_pbl_val = xgbe_get_tx_pbl_val,
47859+ .config_pblx8 = xgbe_config_pblx8,
47860
47861 /* For MMC statistics support */
47862- hw_if->tx_mmc_int = xgbe_tx_mmc_int;
47863- hw_if->rx_mmc_int = xgbe_rx_mmc_int;
47864- hw_if->read_mmc_stats = xgbe_read_mmc_stats;
47865+ .tx_mmc_int = xgbe_tx_mmc_int,
47866+ .rx_mmc_int = xgbe_rx_mmc_int,
47867+ .read_mmc_stats = xgbe_read_mmc_stats,
47868
47869 /* For PTP config */
47870- hw_if->config_tstamp = xgbe_config_tstamp;
47871- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
47872- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
47873- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
47874- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
47875+ .config_tstamp = xgbe_config_tstamp,
47876+ .update_tstamp_addend = xgbe_update_tstamp_addend,
47877+ .set_tstamp_time = xgbe_set_tstamp_time,
47878+ .get_tstamp_time = xgbe_get_tstamp_time,
47879+ .get_tx_tstamp = xgbe_get_tx_tstamp,
47880
47881 /* For Data Center Bridging config */
47882- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
47883- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
47884-
47885- DBGPR("<--xgbe_init_function_ptrs\n");
47886-}
47887+ .config_dcb_tc = xgbe_config_dcb_tc,
47888+ .config_dcb_pfc = xgbe_config_dcb_pfc,
47889+};
47890diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47891index b26d758..b0d1c3b 100644
47892--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47893+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
47894@@ -155,7 +155,7 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
47895
47896 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47897 {
47898- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47899+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47900 struct xgbe_channel *channel;
47901 enum xgbe_int int_id;
47902 unsigned int i;
47903@@ -177,7 +177,7 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
47904
47905 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47906 {
47907- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47908+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47909 struct xgbe_channel *channel;
47910 enum xgbe_int int_id;
47911 unsigned int i;
47912@@ -200,7 +200,7 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
47913 static irqreturn_t xgbe_isr(int irq, void *data)
47914 {
47915 struct xgbe_prv_data *pdata = data;
47916- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47917+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47918 struct xgbe_channel *channel;
47919 unsigned int dma_isr, dma_ch_isr;
47920 unsigned int mac_isr, mac_tssr;
47921@@ -447,7 +447,7 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
47922
47923 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47924 {
47925- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47926+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47927
47928 DBGPR("-->xgbe_init_tx_coalesce\n");
47929
47930@@ -461,7 +461,7 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
47931
47932 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47933 {
47934- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47935+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47936
47937 DBGPR("-->xgbe_init_rx_coalesce\n");
47938
47939@@ -475,7 +475,7 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
47940
47941 static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
47942 {
47943- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47944+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47945 struct xgbe_channel *channel;
47946 struct xgbe_ring *ring;
47947 struct xgbe_ring_data *rdata;
47948@@ -500,7 +500,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
47949
47950 static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
47951 {
47952- struct xgbe_desc_if *desc_if = &pdata->desc_if;
47953+ struct xgbe_desc_if *desc_if = pdata->desc_if;
47954 struct xgbe_channel *channel;
47955 struct xgbe_ring *ring;
47956 struct xgbe_ring_data *rdata;
47957@@ -526,7 +526,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
47958 static void xgbe_adjust_link(struct net_device *netdev)
47959 {
47960 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47961- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47962+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47963 struct phy_device *phydev = pdata->phydev;
47964 int new_state = 0;
47965
47966@@ -634,7 +634,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
47967 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47968 {
47969 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47970- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47971+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47972 unsigned long flags;
47973
47974 DBGPR("-->xgbe_powerdown\n");
47975@@ -672,7 +672,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
47976 int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47977 {
47978 struct xgbe_prv_data *pdata = netdev_priv(netdev);
47979- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47980+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47981 unsigned long flags;
47982
47983 DBGPR("-->xgbe_powerup\n");
47984@@ -709,7 +709,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
47985
47986 static int xgbe_start(struct xgbe_prv_data *pdata)
47987 {
47988- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47989+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47990 struct net_device *netdev = pdata->netdev;
47991
47992 DBGPR("-->xgbe_start\n");
47993@@ -735,7 +735,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
47994
47995 static void xgbe_stop(struct xgbe_prv_data *pdata)
47996 {
47997- struct xgbe_hw_if *hw_if = &pdata->hw_if;
47998+ struct xgbe_hw_if *hw_if = pdata->hw_if;
47999 struct net_device *netdev = pdata->netdev;
48000
48001 DBGPR("-->xgbe_stop\n");
48002@@ -755,7 +755,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
48003
48004 static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
48005 {
48006- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48007+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48008
48009 DBGPR("-->xgbe_restart_dev\n");
48010
48011@@ -952,7 +952,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
48012 return -ERANGE;
48013 }
48014
48015- pdata->hw_if.config_tstamp(pdata, mac_tscr);
48016+ pdata->hw_if->config_tstamp(pdata, mac_tscr);
48017
48018 memcpy(&pdata->tstamp_config, &config, sizeof(config));
48019
48020@@ -1090,8 +1090,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
48021 static int xgbe_open(struct net_device *netdev)
48022 {
48023 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48024- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48025- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48026+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48027+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48028 int ret;
48029
48030 DBGPR("-->xgbe_open\n");
48031@@ -1171,8 +1171,8 @@ err_phy_init:
48032 static int xgbe_close(struct net_device *netdev)
48033 {
48034 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48035- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48036- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48037+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48038+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48039
48040 DBGPR("-->xgbe_close\n");
48041
48042@@ -1206,8 +1206,8 @@ static int xgbe_close(struct net_device *netdev)
48043 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
48044 {
48045 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48046- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48047- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48048+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48049+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48050 struct xgbe_channel *channel;
48051 struct xgbe_ring *ring;
48052 struct xgbe_packet_data *packet;
48053@@ -1276,7 +1276,7 @@ tx_netdev_return:
48054 static void xgbe_set_rx_mode(struct net_device *netdev)
48055 {
48056 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48057- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48058+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48059 unsigned int pr_mode, am_mode;
48060
48061 DBGPR("-->xgbe_set_rx_mode\n");
48062@@ -1295,7 +1295,7 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
48063 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
48064 {
48065 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48066- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48067+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48068 struct sockaddr *saddr = addr;
48069
48070 DBGPR("-->xgbe_set_mac_address\n");
48071@@ -1362,7 +1362,7 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
48072
48073 DBGPR("-->%s\n", __func__);
48074
48075- pdata->hw_if.read_mmc_stats(pdata);
48076+ pdata->hw_if->read_mmc_stats(pdata);
48077
48078 s->rx_packets = pstats->rxframecount_gb;
48079 s->rx_bytes = pstats->rxoctetcount_gb;
48080@@ -1389,7 +1389,7 @@ static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
48081 u16 vid)
48082 {
48083 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48084- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48085+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48086
48087 DBGPR("-->%s\n", __func__);
48088
48089@@ -1405,7 +1405,7 @@ static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
48090 u16 vid)
48091 {
48092 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48093- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48094+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48095
48096 DBGPR("-->%s\n", __func__);
48097
48098@@ -1465,7 +1465,7 @@ static int xgbe_set_features(struct net_device *netdev,
48099 netdev_features_t features)
48100 {
48101 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48102- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48103+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48104 unsigned int rxcsum, rxvlan, rxvlan_filter;
48105
48106 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
48107@@ -1521,7 +1521,7 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
48108 static void xgbe_rx_refresh(struct xgbe_channel *channel)
48109 {
48110 struct xgbe_prv_data *pdata = channel->pdata;
48111- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48112+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48113 struct xgbe_ring *ring = channel->rx_ring;
48114 struct xgbe_ring_data *rdata;
48115
48116@@ -1537,8 +1537,8 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
48117 static int xgbe_tx_poll(struct xgbe_channel *channel)
48118 {
48119 struct xgbe_prv_data *pdata = channel->pdata;
48120- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48121- struct xgbe_desc_if *desc_if = &pdata->desc_if;
48122+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48123+ struct xgbe_desc_if *desc_if = pdata->desc_if;
48124 struct xgbe_ring *ring = channel->tx_ring;
48125 struct xgbe_ring_data *rdata;
48126 struct xgbe_ring_desc *rdesc;
48127@@ -1590,7 +1590,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
48128 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
48129 {
48130 struct xgbe_prv_data *pdata = channel->pdata;
48131- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48132+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48133 struct xgbe_ring *ring = channel->rx_ring;
48134 struct xgbe_ring_data *rdata;
48135 struct xgbe_packet_data *packet;
48136diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48137index 46f6130..f37dde3 100644
48138--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48139+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
48140@@ -203,7 +203,7 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
48141
48142 DBGPR("-->%s\n", __func__);
48143
48144- pdata->hw_if.read_mmc_stats(pdata);
48145+ pdata->hw_if->read_mmc_stats(pdata);
48146 for (i = 0; i < XGBE_STATS_COUNT; i++) {
48147 stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
48148 *data++ = *(u64 *)stat;
48149@@ -378,7 +378,7 @@ static int xgbe_get_coalesce(struct net_device *netdev,
48150 struct ethtool_coalesce *ec)
48151 {
48152 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48153- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48154+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48155 unsigned int riwt;
48156
48157 DBGPR("-->xgbe_get_coalesce\n");
48158@@ -401,7 +401,7 @@ static int xgbe_set_coalesce(struct net_device *netdev,
48159 struct ethtool_coalesce *ec)
48160 {
48161 struct xgbe_prv_data *pdata = netdev_priv(netdev);
48162- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48163+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48164 unsigned int rx_frames, rx_riwt, rx_usecs;
48165 unsigned int tx_frames, tx_usecs;
48166
48167diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48168index bdf9cfa..340aea1 100644
48169--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48170+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
48171@@ -210,12 +210,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
48172 DBGPR("<--xgbe_default_config\n");
48173 }
48174
48175-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
48176-{
48177- xgbe_init_function_ptrs_dev(&pdata->hw_if);
48178- xgbe_init_function_ptrs_desc(&pdata->desc_if);
48179-}
48180-
48181 static int xgbe_probe(struct platform_device *pdev)
48182 {
48183 struct xgbe_prv_data *pdata;
48184@@ -328,9 +322,8 @@ static int xgbe_probe(struct platform_device *pdev)
48185 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
48186
48187 /* Set all the function pointers */
48188- xgbe_init_all_fptrs(pdata);
48189- hw_if = &pdata->hw_if;
48190- desc_if = &pdata->desc_if;
48191+ hw_if = pdata->hw_if = &default_xgbe_hw_if;
48192+ desc_if = pdata->desc_if = &default_xgbe_desc_if;
48193
48194 /* Issue software reset to device */
48195 hw_if->exit(pdata);
48196diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48197index 6d2221e..47d1325 100644
48198--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48199+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
48200@@ -127,7 +127,7 @@
48201 static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
48202 {
48203 struct xgbe_prv_data *pdata = mii->priv;
48204- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48205+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48206 int mmd_data;
48207
48208 DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
48209@@ -144,7 +144,7 @@ static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
48210 u16 mmd_val)
48211 {
48212 struct xgbe_prv_data *pdata = mii->priv;
48213- struct xgbe_hw_if *hw_if = &pdata->hw_if;
48214+ struct xgbe_hw_if *hw_if = pdata->hw_if;
48215 int mmd_data = mmd_val;
48216
48217 DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
48218diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48219index 37e64cf..c3b61cf 100644
48220--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48221+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
48222@@ -130,7 +130,7 @@ static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
48223 tstamp_cc);
48224 u64 nsec;
48225
48226- nsec = pdata->hw_if.get_tstamp_time(pdata);
48227+ nsec = pdata->hw_if->get_tstamp_time(pdata);
48228
48229 return nsec;
48230 }
48231@@ -159,7 +159,7 @@ static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
48232
48233 spin_lock_irqsave(&pdata->tstamp_lock, flags);
48234
48235- pdata->hw_if.update_tstamp_addend(pdata, addend);
48236+ pdata->hw_if->update_tstamp_addend(pdata, addend);
48237
48238 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
48239
48240diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
48241index e9fe6e6..875fbaf 100644
48242--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
48243+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
48244@@ -585,8 +585,8 @@ struct xgbe_prv_data {
48245
48246 int irq_number;
48247
48248- struct xgbe_hw_if hw_if;
48249- struct xgbe_desc_if desc_if;
48250+ const struct xgbe_hw_if *hw_if;
48251+ const struct xgbe_desc_if *desc_if;
48252
48253 /* AXI DMA settings */
48254 unsigned int axdomain;
48255@@ -699,6 +699,9 @@ struct xgbe_prv_data {
48256 #endif
48257 };
48258
48259+extern const struct xgbe_hw_if default_xgbe_hw_if;
48260+extern const struct xgbe_desc_if default_xgbe_desc_if;
48261+
48262 /* Function prototypes*/
48263
48264 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
48265diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48266index 571427c..e9fe9e7 100644
48267--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48268+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
48269@@ -1058,7 +1058,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
48270 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
48271 {
48272 /* RX_MODE controlling object */
48273- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
48274+ bnx2x_init_rx_mode_obj(bp);
48275
48276 /* multicast configuration controlling object */
48277 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
48278diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48279index b193604..8873bfd 100644
48280--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48281+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
48282@@ -2329,15 +2329,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
48283 return rc;
48284 }
48285
48286-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48287- struct bnx2x_rx_mode_obj *o)
48288+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
48289 {
48290 if (CHIP_IS_E1x(bp)) {
48291- o->wait_comp = bnx2x_empty_rx_mode_wait;
48292- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
48293+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
48294+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
48295 } else {
48296- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
48297- o->config_rx_mode = bnx2x_set_rx_mode_e2;
48298+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
48299+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
48300 }
48301 }
48302
48303diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48304index 718ecd2..2183b2f 100644
48305--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48306+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
48307@@ -1340,8 +1340,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
48308
48309 /********************* RX MODE ****************/
48310
48311-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
48312- struct bnx2x_rx_mode_obj *o);
48313+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
48314
48315 /**
48316 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
48317diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
48318index 31c9f82..e65e986 100644
48319--- a/drivers/net/ethernet/broadcom/tg3.h
48320+++ b/drivers/net/ethernet/broadcom/tg3.h
48321@@ -150,6 +150,7 @@
48322 #define CHIPREV_ID_5750_A0 0x4000
48323 #define CHIPREV_ID_5750_A1 0x4001
48324 #define CHIPREV_ID_5750_A3 0x4003
48325+#define CHIPREV_ID_5750_C1 0x4201
48326 #define CHIPREV_ID_5750_C2 0x4202
48327 #define CHIPREV_ID_5752_A0_HW 0x5000
48328 #define CHIPREV_ID_5752_A0 0x6000
48329diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
48330index 13f9636..228040f 100644
48331--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
48332+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
48333@@ -1690,10 +1690,10 @@ bna_cb_ioceth_reset(void *arg)
48334 }
48335
48336 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
48337- bna_cb_ioceth_enable,
48338- bna_cb_ioceth_disable,
48339- bna_cb_ioceth_hbfail,
48340- bna_cb_ioceth_reset
48341+ .enable_cbfn = bna_cb_ioceth_enable,
48342+ .disable_cbfn = bna_cb_ioceth_disable,
48343+ .hbfail_cbfn = bna_cb_ioceth_hbfail,
48344+ .reset_cbfn = bna_cb_ioceth_reset
48345 };
48346
48347 static void bna_attr_init(struct bna_ioceth *ioceth)
48348diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
48349index ffc92a4..40edc77 100644
48350--- a/drivers/net/ethernet/brocade/bna/bnad.c
48351+++ b/drivers/net/ethernet/brocade/bna/bnad.c
48352@@ -552,6 +552,7 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
48353
48354 len = (vec == nvecs) ?
48355 last_fraglen : unmap->vector.len;
48356+ skb->truesize += unmap->vector.len;
48357 totlen += len;
48358
48359 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
48360@@ -563,7 +564,6 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
48361
48362 skb->len += totlen;
48363 skb->data_len += totlen;
48364- skb->truesize += totlen;
48365 }
48366
48367 static inline void
48368diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48369index 8cffcdf..aadf043 100644
48370--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48371+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
48372@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
48373 */
48374 struct l2t_skb_cb {
48375 arp_failure_handler_func arp_failure_handler;
48376-};
48377+} __no_const;
48378
48379 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
48380
48381diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48382index 9f5f3c3..86d21a6 100644
48383--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48384+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
48385@@ -2359,7 +2359,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
48386
48387 int i;
48388 struct adapter *ap = netdev2adap(dev);
48389- static const unsigned int *reg_ranges;
48390+ const unsigned int *reg_ranges;
48391 int arr_size = 0, buf_size = 0;
48392
48393 if (is_t4(ap->params.chip)) {
48394diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
48395index cf8b6ff..274271e 100644
48396--- a/drivers/net/ethernet/dec/tulip/de4x5.c
48397+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
48398@@ -5387,7 +5387,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48399 for (i=0; i<ETH_ALEN; i++) {
48400 tmp.addr[i] = dev->dev_addr[i];
48401 }
48402- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48403+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
48404 break;
48405
48406 case DE4X5_SET_HWADDR: /* Set the hardware address */
48407@@ -5427,7 +5427,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
48408 spin_lock_irqsave(&lp->lock, flags);
48409 memcpy(&statbuf, &lp->pktStats, ioc->len);
48410 spin_unlock_irqrestore(&lp->lock, flags);
48411- if (copy_to_user(ioc->data, &statbuf, ioc->len))
48412+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
48413 return -EFAULT;
48414 break;
48415 }
48416diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
48417index 93ff8ef..39c64dd 100644
48418--- a/drivers/net/ethernet/emulex/benet/be_main.c
48419+++ b/drivers/net/ethernet/emulex/benet/be_main.c
48420@@ -533,7 +533,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
48421
48422 if (wrapped)
48423 newacc += 65536;
48424- ACCESS_ONCE(*acc) = newacc;
48425+ ACCESS_ONCE_RW(*acc) = newacc;
48426 }
48427
48428 static void populate_erx_stats(struct be_adapter *adapter,
48429@@ -4286,6 +4286,9 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
48430 if (nla_type(attr) != IFLA_BRIDGE_MODE)
48431 continue;
48432
48433+ if (nla_len(attr) < sizeof(mode))
48434+ return -EINVAL;
48435+
48436 mode = nla_get_u16(attr);
48437 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
48438 return -EINVAL;
48439diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
48440index c77fa4a..7fd42fc 100644
48441--- a/drivers/net/ethernet/faraday/ftgmac100.c
48442+++ b/drivers/net/ethernet/faraday/ftgmac100.c
48443@@ -30,6 +30,8 @@
48444 #include <linux/netdevice.h>
48445 #include <linux/phy.h>
48446 #include <linux/platform_device.h>
48447+#include <linux/interrupt.h>
48448+#include <linux/irqreturn.h>
48449 #include <net/ip.h>
48450
48451 #include "ftgmac100.h"
48452diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
48453index 4ff1adc..0ea6bf4 100644
48454--- a/drivers/net/ethernet/faraday/ftmac100.c
48455+++ b/drivers/net/ethernet/faraday/ftmac100.c
48456@@ -31,6 +31,8 @@
48457 #include <linux/module.h>
48458 #include <linux/netdevice.h>
48459 #include <linux/platform_device.h>
48460+#include <linux/interrupt.h>
48461+#include <linux/irqreturn.h>
48462
48463 #include "ftmac100.h"
48464
48465diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48466index 537b621..07f87ce 100644
48467--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48468+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
48469@@ -401,7 +401,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf)
48470 wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32);
48471
48472 /* Update the base adjustement value. */
48473- ACCESS_ONCE(pf->ptp_base_adj) = incval;
48474+ ACCESS_ONCE_RW(pf->ptp_base_adj) = incval;
48475 smp_mb(); /* Force the above update. */
48476 }
48477
48478diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
48479index e82821f..c7dd0af 100644
48480--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
48481+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
48482@@ -7789,6 +7789,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
48483 if (nla_type(attr) != IFLA_BRIDGE_MODE)
48484 continue;
48485
48486+ if (nla_len(attr) < sizeof(mode))
48487+ return -EINVAL;
48488+
48489 mode = nla_get_u16(attr);
48490 if (mode == BRIDGE_MODE_VEPA) {
48491 reg = 0;
48492diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48493index 5fd4b52..87aa34b 100644
48494--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48495+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
48496@@ -794,7 +794,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
48497 }
48498
48499 /* update the base incval used to calculate frequency adjustment */
48500- ACCESS_ONCE(adapter->base_incval) = incval;
48501+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
48502 smp_mb();
48503
48504 /* need lock to prevent incorrect read while modifying cyclecounter */
48505diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
48506index c14d4d8..66da603 100644
48507--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
48508+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
48509@@ -1259,6 +1259,9 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
48510 struct ixgbe_hw *hw = &adapter->hw;
48511 u32 regval;
48512
48513+ if (vf >= adapter->num_vfs)
48514+ return -EINVAL;
48515+
48516 adapter->vfinfo[vf].spoofchk_enabled = setting;
48517
48518 regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
48519diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48520index 2bbd01f..e8baa64 100644
48521--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
48522+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
48523@@ -3457,7 +3457,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48524 struct __vxge_hw_fifo *fifo;
48525 struct vxge_hw_fifo_config *config;
48526 u32 txdl_size, txdl_per_memblock;
48527- struct vxge_hw_mempool_cbs fifo_mp_callback;
48528+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
48529+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
48530+ };
48531+
48532 struct __vxge_hw_virtualpath *vpath;
48533
48534 if ((vp == NULL) || (attr == NULL)) {
48535@@ -3540,8 +3543,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
48536 goto exit;
48537 }
48538
48539- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
48540-
48541 fifo->mempool =
48542 __vxge_hw_mempool_create(vpath->hldev,
48543 fifo->config->memblock_size,
48544diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48545index 3172cdf..d01ab34 100644
48546--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48547+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
48548@@ -2190,7 +2190,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
48549 max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
48550 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
48551 ahw->nic_mode = QLCNIC_DEFAULT_MODE;
48552- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48553+ pax_open_kernel();
48554+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
48555+ pax_close_kernel();
48556 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48557 max_sds_rings = QLCNIC_MAX_SDS_RINGS;
48558 max_tx_rings = QLCNIC_MAX_TX_RINGS;
48559diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48560index be7d7a6..a8983f8 100644
48561--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48562+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
48563@@ -207,17 +207,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
48564 case QLCNIC_NON_PRIV_FUNC:
48565 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
48566 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48567- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48568+ pax_open_kernel();
48569+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
48570+ pax_close_kernel();
48571 break;
48572 case QLCNIC_PRIV_FUNC:
48573 ahw->op_mode = QLCNIC_PRIV_FUNC;
48574 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
48575- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48576+ pax_open_kernel();
48577+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
48578+ pax_close_kernel();
48579 break;
48580 case QLCNIC_MGMT_FUNC:
48581 ahw->op_mode = QLCNIC_MGMT_FUNC;
48582 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
48583- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48584+ pax_open_kernel();
48585+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
48586+ pax_close_kernel();
48587 break;
48588 default:
48589 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
48590diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48591index c9f57fb..208bdc1 100644
48592--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48593+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
48594@@ -1285,7 +1285,7 @@ flash_temp:
48595 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
48596 {
48597 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
48598- static const struct qlcnic_dump_operations *fw_dump_ops;
48599+ const struct qlcnic_dump_operations *fw_dump_ops;
48600 struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
48601 u32 entry_offset, dump, no_entries, buf_offset = 0;
48602 int i, k, ops_cnt, ops_index, dump_size = 0;
48603diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
48604index 0921302..927f761 100644
48605--- a/drivers/net/ethernet/realtek/r8169.c
48606+++ b/drivers/net/ethernet/realtek/r8169.c
48607@@ -744,22 +744,22 @@ struct rtl8169_private {
48608 struct mdio_ops {
48609 void (*write)(struct rtl8169_private *, int, int);
48610 int (*read)(struct rtl8169_private *, int);
48611- } mdio_ops;
48612+ } __no_const mdio_ops;
48613
48614 struct pll_power_ops {
48615 void (*down)(struct rtl8169_private *);
48616 void (*up)(struct rtl8169_private *);
48617- } pll_power_ops;
48618+ } __no_const pll_power_ops;
48619
48620 struct jumbo_ops {
48621 void (*enable)(struct rtl8169_private *);
48622 void (*disable)(struct rtl8169_private *);
48623- } jumbo_ops;
48624+ } __no_const jumbo_ops;
48625
48626 struct csi_ops {
48627 void (*write)(struct rtl8169_private *, int, int);
48628 u32 (*read)(struct rtl8169_private *, int);
48629- } csi_ops;
48630+ } __no_const csi_ops;
48631
48632 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
48633 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
48634diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
48635index 6b861e3..204ac86 100644
48636--- a/drivers/net/ethernet/sfc/ptp.c
48637+++ b/drivers/net/ethernet/sfc/ptp.c
48638@@ -822,7 +822,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
48639 ptp->start.dma_addr);
48640
48641 /* Clear flag that signals MC ready */
48642- ACCESS_ONCE(*start) = 0;
48643+ ACCESS_ONCE_RW(*start) = 0;
48644 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
48645 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
48646 EFX_BUG_ON_PARANOID(rc);
48647diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48648index 08c483b..2c4a553 100644
48649--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48650+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
48651@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
48652
48653 writel(value, ioaddr + MMC_CNTRL);
48654
48655- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48656- MMC_CNTRL, value);
48657+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
48658+// MMC_CNTRL, value);
48659 }
48660
48661 /* To mask all all interrupts.*/
48662diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
48663index d5e07de..e3bf20a 100644
48664--- a/drivers/net/hyperv/hyperv_net.h
48665+++ b/drivers/net/hyperv/hyperv_net.h
48666@@ -171,7 +171,7 @@ struct rndis_device {
48667 enum rndis_device_state state;
48668 bool link_state;
48669 bool link_change;
48670- atomic_t new_req_id;
48671+ atomic_unchecked_t new_req_id;
48672
48673 spinlock_t request_lock;
48674 struct list_head req_list;
48675diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
48676index 2b86f0b..ecc996f 100644
48677--- a/drivers/net/hyperv/rndis_filter.c
48678+++ b/drivers/net/hyperv/rndis_filter.c
48679@@ -102,7 +102,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
48680 * template
48681 */
48682 set = &rndis_msg->msg.set_req;
48683- set->req_id = atomic_inc_return(&dev->new_req_id);
48684+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48685
48686 /* Add to the request list */
48687 spin_lock_irqsave(&dev->request_lock, flags);
48688@@ -911,7 +911,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
48689
48690 /* Setup the rndis set */
48691 halt = &request->request_msg.msg.halt_req;
48692- halt->req_id = atomic_inc_return(&dev->new_req_id);
48693+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
48694
48695 /* Ignore return since this msg is optional. */
48696 rndis_filter_send_request(dev, request);
48697diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
48698index 6cbc56a..5f7e6c8 100644
48699--- a/drivers/net/ieee802154/fakehard.c
48700+++ b/drivers/net/ieee802154/fakehard.c
48701@@ -365,7 +365,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
48702 phy->transmit_power = 0xbf;
48703
48704 dev->netdev_ops = &fake_ops;
48705- dev->ml_priv = &fake_mlme;
48706+ dev->ml_priv = (void *)&fake_mlme;
48707
48708 priv = netdev_priv(dev);
48709 priv->phy = phy;
48710diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
48711index 5f17ad0..e0463c8 100644
48712--- a/drivers/net/macvlan.c
48713+++ b/drivers/net/macvlan.c
48714@@ -264,7 +264,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
48715 free_nskb:
48716 kfree_skb(nskb);
48717 err:
48718- atomic_long_inc(&skb->dev->rx_dropped);
48719+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
48720 }
48721
48722 /* called under rcu_read_lock() from netif_receive_skb */
48723@@ -1150,13 +1150,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
48724 int macvlan_link_register(struct rtnl_link_ops *ops)
48725 {
48726 /* common fields */
48727- ops->priv_size = sizeof(struct macvlan_dev);
48728- ops->validate = macvlan_validate;
48729- ops->maxtype = IFLA_MACVLAN_MAX;
48730- ops->policy = macvlan_policy;
48731- ops->changelink = macvlan_changelink;
48732- ops->get_size = macvlan_get_size;
48733- ops->fill_info = macvlan_fill_info;
48734+ pax_open_kernel();
48735+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
48736+ *(void **)&ops->validate = macvlan_validate;
48737+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
48738+ *(const void **)&ops->policy = macvlan_policy;
48739+ *(void **)&ops->changelink = macvlan_changelink;
48740+ *(void **)&ops->get_size = macvlan_get_size;
48741+ *(void **)&ops->fill_info = macvlan_fill_info;
48742+ pax_close_kernel();
48743
48744 return rtnl_link_register(ops);
48745 };
48746@@ -1236,7 +1238,7 @@ static int macvlan_device_event(struct notifier_block *unused,
48747 return NOTIFY_DONE;
48748 }
48749
48750-static struct notifier_block macvlan_notifier_block __read_mostly = {
48751+static struct notifier_block macvlan_notifier_block = {
48752 .notifier_call = macvlan_device_event,
48753 };
48754
48755diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
48756index 07c942b..2d8b073 100644
48757--- a/drivers/net/macvtap.c
48758+++ b/drivers/net/macvtap.c
48759@@ -1023,7 +1023,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
48760 }
48761
48762 ret = 0;
48763- if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48764+ if (copy_to_user(ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
48765 put_user(q->flags, &ifr->ifr_flags))
48766 ret = -EFAULT;
48767 macvtap_put_vlan(vlan);
48768@@ -1193,7 +1193,7 @@ static int macvtap_device_event(struct notifier_block *unused,
48769 return NOTIFY_DONE;
48770 }
48771
48772-static struct notifier_block macvtap_notifier_block __read_mostly = {
48773+static struct notifier_block macvtap_notifier_block = {
48774 .notifier_call = macvtap_device_event,
48775 };
48776
48777diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
48778index 17ecdd6..79ad848 100644
48779--- a/drivers/net/ppp/ppp_generic.c
48780+++ b/drivers/net/ppp/ppp_generic.c
48781@@ -1020,7 +1020,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48782 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
48783 struct ppp_stats stats;
48784 struct ppp_comp_stats cstats;
48785- char *vers;
48786
48787 switch (cmd) {
48788 case SIOCGPPPSTATS:
48789@@ -1042,8 +1041,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48790 break;
48791
48792 case SIOCGPPPVER:
48793- vers = PPP_VERSION;
48794- if (copy_to_user(addr, vers, strlen(vers) + 1))
48795+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
48796 break;
48797 err = 0;
48798 break;
48799diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
48800index 079f7ad..b2a2bfa7 100644
48801--- a/drivers/net/slip/slhc.c
48802+++ b/drivers/net/slip/slhc.c
48803@@ -487,7 +487,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
48804 register struct tcphdr *thp;
48805 register struct iphdr *ip;
48806 register struct cstate *cs;
48807- int len, hdrlen;
48808+ long len, hdrlen;
48809 unsigned char *cp = icp;
48810
48811 /* We've got a compressed packet; read the change byte */
48812diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
48813index 1f76c2ea..9681171 100644
48814--- a/drivers/net/team/team.c
48815+++ b/drivers/net/team/team.c
48816@@ -2862,7 +2862,7 @@ static int team_device_event(struct notifier_block *unused,
48817 return NOTIFY_DONE;
48818 }
48819
48820-static struct notifier_block team_notifier_block __read_mostly = {
48821+static struct notifier_block team_notifier_block = {
48822 .notifier_call = team_device_event,
48823 };
48824
48825diff --git a/drivers/net/tun.c b/drivers/net/tun.c
48826index d965e8a..f119e64 100644
48827--- a/drivers/net/tun.c
48828+++ b/drivers/net/tun.c
48829@@ -1861,7 +1861,7 @@ unlock:
48830 }
48831
48832 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48833- unsigned long arg, int ifreq_len)
48834+ unsigned long arg, size_t ifreq_len)
48835 {
48836 struct tun_file *tfile = file->private_data;
48837 struct tun_struct *tun;
48838@@ -1874,6 +1874,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
48839 unsigned int ifindex;
48840 int ret;
48841
48842+ if (ifreq_len > sizeof ifr)
48843+ return -EFAULT;
48844+
48845 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
48846 if (copy_from_user(&ifr, argp, ifreq_len))
48847 return -EFAULT;
48848diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
48849index babda7d..e40c90a 100644
48850--- a/drivers/net/usb/hso.c
48851+++ b/drivers/net/usb/hso.c
48852@@ -71,7 +71,7 @@
48853 #include <asm/byteorder.h>
48854 #include <linux/serial_core.h>
48855 #include <linux/serial.h>
48856-
48857+#include <asm/local.h>
48858
48859 #define MOD_AUTHOR "Option Wireless"
48860 #define MOD_DESCRIPTION "USB High Speed Option driver"
48861@@ -1178,7 +1178,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
48862 struct urb *urb;
48863
48864 urb = serial->rx_urb[0];
48865- if (serial->port.count > 0) {
48866+ if (atomic_read(&serial->port.count) > 0) {
48867 count = put_rxbuf_data(urb, serial);
48868 if (count == -1)
48869 return;
48870@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
48871 DUMP1(urb->transfer_buffer, urb->actual_length);
48872
48873 /* Anyone listening? */
48874- if (serial->port.count == 0)
48875+ if (atomic_read(&serial->port.count) == 0)
48876 return;
48877
48878 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
48879@@ -1278,8 +1278,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48880 tty_port_tty_set(&serial->port, tty);
48881
48882 /* check for port already opened, if not set the termios */
48883- serial->port.count++;
48884- if (serial->port.count == 1) {
48885+ if (atomic_inc_return(&serial->port.count) == 1) {
48886 serial->rx_state = RX_IDLE;
48887 /* Force default termio settings */
48888 _hso_serial_set_termios(tty, NULL);
48889@@ -1289,7 +1288,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
48890 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
48891 if (result) {
48892 hso_stop_serial_device(serial->parent);
48893- serial->port.count--;
48894+ atomic_dec(&serial->port.count);
48895 kref_put(&serial->parent->ref, hso_serial_ref_free);
48896 }
48897 } else {
48898@@ -1326,10 +1325,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
48899
48900 /* reset the rts and dtr */
48901 /* do the actual close */
48902- serial->port.count--;
48903+ atomic_dec(&serial->port.count);
48904
48905- if (serial->port.count <= 0) {
48906- serial->port.count = 0;
48907+ if (atomic_read(&serial->port.count) <= 0) {
48908+ atomic_set(&serial->port.count, 0);
48909 tty_port_tty_set(&serial->port, NULL);
48910 if (!usb_gone)
48911 hso_stop_serial_device(serial->parent);
48912@@ -1404,7 +1403,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
48913
48914 /* the actual setup */
48915 spin_lock_irqsave(&serial->serial_lock, flags);
48916- if (serial->port.count)
48917+ if (atomic_read(&serial->port.count))
48918 _hso_serial_set_termios(tty, old);
48919 else
48920 tty->termios = *old;
48921@@ -1873,7 +1872,7 @@ static void intr_callback(struct urb *urb)
48922 D1("Pending read interrupt on port %d\n", i);
48923 spin_lock(&serial->serial_lock);
48924 if (serial->rx_state == RX_IDLE &&
48925- serial->port.count > 0) {
48926+ atomic_read(&serial->port.count) > 0) {
48927 /* Setup and send a ctrl req read on
48928 * port i */
48929 if (!serial->rx_urb_filled[0]) {
48930@@ -3047,7 +3046,7 @@ static int hso_resume(struct usb_interface *iface)
48931 /* Start all serial ports */
48932 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
48933 if (serial_table[i] && (serial_table[i]->interface == iface)) {
48934- if (dev2ser(serial_table[i])->port.count) {
48935+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
48936 result =
48937 hso_start_serial_device(serial_table[i], GFP_NOIO);
48938 hso_kick_transmit(dev2ser(serial_table[i]));
48939diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
48940index 604ef21..d1f49a1 100644
48941--- a/drivers/net/usb/r8152.c
48942+++ b/drivers/net/usb/r8152.c
48943@@ -575,7 +575,7 @@ struct r8152 {
48944 void (*up)(struct r8152 *);
48945 void (*down)(struct r8152 *);
48946 void (*unload)(struct r8152 *);
48947- } rtl_ops;
48948+ } __no_const rtl_ops;
48949
48950 int intr_interval;
48951 u32 saved_wolopts;
48952diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
48953index a2515887..6d13233 100644
48954--- a/drivers/net/usb/sierra_net.c
48955+++ b/drivers/net/usb/sierra_net.c
48956@@ -51,7 +51,7 @@ static const char driver_name[] = "sierra_net";
48957 /* atomic counter partially included in MAC address to make sure 2 devices
48958 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
48959 */
48960-static atomic_t iface_counter = ATOMIC_INIT(0);
48961+static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
48962
48963 /*
48964 * SYNC Timer Delay definition used to set the expiry time
48965@@ -697,7 +697,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
48966 dev->net->netdev_ops = &sierra_net_device_ops;
48967
48968 /* change MAC addr to include, ifacenum, and to be unique */
48969- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
48970+ dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
48971 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
48972
48973 /* we will have to manufacture ethernet headers, prepare template */
48974diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
48975index 59caa06..de191b3 100644
48976--- a/drivers/net/virtio_net.c
48977+++ b/drivers/net/virtio_net.c
48978@@ -48,7 +48,7 @@ module_param(gso, bool, 0444);
48979 #define RECEIVE_AVG_WEIGHT 64
48980
48981 /* Minimum alignment for mergeable packet buffers. */
48982-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
48983+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256UL)
48984
48985 #define VIRTNET_DRIVER_VERSION "1.0.0"
48986
48987diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
48988index 81a8a29..ae60a58 100644
48989--- a/drivers/net/vxlan.c
48990+++ b/drivers/net/vxlan.c
48991@@ -2762,7 +2762,7 @@ nla_put_failure:
48992 return -EMSGSIZE;
48993 }
48994
48995-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
48996+static struct rtnl_link_ops vxlan_link_ops = {
48997 .kind = "vxlan",
48998 .maxtype = IFLA_VXLAN_MAX,
48999 .policy = vxlan_policy,
49000@@ -2809,7 +2809,7 @@ static int vxlan_lowerdev_event(struct notifier_block *unused,
49001 return NOTIFY_DONE;
49002 }
49003
49004-static struct notifier_block vxlan_notifier_block __read_mostly = {
49005+static struct notifier_block vxlan_notifier_block = {
49006 .notifier_call = vxlan_lowerdev_event,
49007 };
49008
49009diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
49010index 5920c99..ff2e4a5 100644
49011--- a/drivers/net/wan/lmc/lmc_media.c
49012+++ b/drivers/net/wan/lmc/lmc_media.c
49013@@ -95,62 +95,63 @@ static inline void write_av9110_bit (lmc_softc_t *, int);
49014 static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32);
49015
49016 lmc_media_t lmc_ds3_media = {
49017- lmc_ds3_init, /* special media init stuff */
49018- lmc_ds3_default, /* reset to default state */
49019- lmc_ds3_set_status, /* reset status to state provided */
49020- lmc_dummy_set_1, /* set clock source */
49021- lmc_dummy_set2_1, /* set line speed */
49022- lmc_ds3_set_100ft, /* set cable length */
49023- lmc_ds3_set_scram, /* set scrambler */
49024- lmc_ds3_get_link_status, /* get link status */
49025- lmc_dummy_set_1, /* set link status */
49026- lmc_ds3_set_crc_length, /* set CRC length */
49027- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49028- lmc_ds3_watchdog
49029+ .init = lmc_ds3_init, /* special media init stuff */
49030+ .defaults = lmc_ds3_default, /* reset to default state */
49031+ .set_status = lmc_ds3_set_status, /* reset status to state provided */
49032+ .set_clock_source = lmc_dummy_set_1, /* set clock source */
49033+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49034+ .set_cable_length = lmc_ds3_set_100ft, /* set cable length */
49035+ .set_scrambler = lmc_ds3_set_scram, /* set scrambler */
49036+ .get_link_status = lmc_ds3_get_link_status, /* get link status */
49037+ .set_link_status = lmc_dummy_set_1, /* set link status */
49038+ .set_crc_length = lmc_ds3_set_crc_length, /* set CRC length */
49039+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49040+ .watchdog = lmc_ds3_watchdog
49041 };
49042
49043 lmc_media_t lmc_hssi_media = {
49044- lmc_hssi_init, /* special media init stuff */
49045- lmc_hssi_default, /* reset to default state */
49046- lmc_hssi_set_status, /* reset status to state provided */
49047- lmc_hssi_set_clock, /* set clock source */
49048- lmc_dummy_set2_1, /* set line speed */
49049- lmc_dummy_set_1, /* set cable length */
49050- lmc_dummy_set_1, /* set scrambler */
49051- lmc_hssi_get_link_status, /* get link status */
49052- lmc_hssi_set_link_status, /* set link status */
49053- lmc_hssi_set_crc_length, /* set CRC length */
49054- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49055- lmc_hssi_watchdog
49056+ .init = lmc_hssi_init, /* special media init stuff */
49057+ .defaults = lmc_hssi_default, /* reset to default state */
49058+ .set_status = lmc_hssi_set_status, /* reset status to state provided */
49059+ .set_clock_source = lmc_hssi_set_clock, /* set clock source */
49060+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49061+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49062+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49063+ .get_link_status = lmc_hssi_get_link_status, /* get link status */
49064+ .set_link_status = lmc_hssi_set_link_status, /* set link status */
49065+ .set_crc_length = lmc_hssi_set_crc_length, /* set CRC length */
49066+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49067+ .watchdog = lmc_hssi_watchdog
49068 };
49069
49070-lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
49071- lmc_ssi_default, /* reset to default state */
49072- lmc_ssi_set_status, /* reset status to state provided */
49073- lmc_ssi_set_clock, /* set clock source */
49074- lmc_ssi_set_speed, /* set line speed */
49075- lmc_dummy_set_1, /* set cable length */
49076- lmc_dummy_set_1, /* set scrambler */
49077- lmc_ssi_get_link_status, /* get link status */
49078- lmc_ssi_set_link_status, /* set link status */
49079- lmc_ssi_set_crc_length, /* set CRC length */
49080- lmc_dummy_set_1, /* set T1 or E1 circuit type */
49081- lmc_ssi_watchdog
49082+lmc_media_t lmc_ssi_media = {
49083+ .init = lmc_ssi_init, /* special media init stuff */
49084+ .defaults = lmc_ssi_default, /* reset to default state */
49085+ .set_status = lmc_ssi_set_status, /* reset status to state provided */
49086+ .set_clock_source = lmc_ssi_set_clock, /* set clock source */
49087+ .set_speed = lmc_ssi_set_speed, /* set line speed */
49088+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49089+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49090+ .get_link_status = lmc_ssi_get_link_status, /* get link status */
49091+ .set_link_status = lmc_ssi_set_link_status, /* set link status */
49092+ .set_crc_length = lmc_ssi_set_crc_length, /* set CRC length */
49093+ .set_circuit_type = lmc_dummy_set_1, /* set T1 or E1 circuit type */
49094+ .watchdog = lmc_ssi_watchdog
49095 };
49096
49097 lmc_media_t lmc_t1_media = {
49098- lmc_t1_init, /* special media init stuff */
49099- lmc_t1_default, /* reset to default state */
49100- lmc_t1_set_status, /* reset status to state provided */
49101- lmc_t1_set_clock, /* set clock source */
49102- lmc_dummy_set2_1, /* set line speed */
49103- lmc_dummy_set_1, /* set cable length */
49104- lmc_dummy_set_1, /* set scrambler */
49105- lmc_t1_get_link_status, /* get link status */
49106- lmc_dummy_set_1, /* set link status */
49107- lmc_t1_set_crc_length, /* set CRC length */
49108- lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49109- lmc_t1_watchdog
49110+ .init = lmc_t1_init, /* special media init stuff */
49111+ .defaults = lmc_t1_default, /* reset to default state */
49112+ .set_status = lmc_t1_set_status, /* reset status to state provided */
49113+ .set_clock_source = lmc_t1_set_clock, /* set clock source */
49114+ .set_speed = lmc_dummy_set2_1, /* set line speed */
49115+ .set_cable_length = lmc_dummy_set_1, /* set cable length */
49116+ .set_scrambler = lmc_dummy_set_1, /* set scrambler */
49117+ .get_link_status = lmc_t1_get_link_status, /* get link status */
49118+ .set_link_status = lmc_dummy_set_1, /* set link status */
49119+ .set_crc_length = lmc_t1_set_crc_length, /* set CRC length */
49120+ .set_circuit_type = lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
49121+ .watchdog = lmc_t1_watchdog
49122 };
49123
49124 static void
49125diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
49126index feacc3b..5bac0de 100644
49127--- a/drivers/net/wan/z85230.c
49128+++ b/drivers/net/wan/z85230.c
49129@@ -485,9 +485,9 @@ static void z8530_status(struct z8530_channel *chan)
49130
49131 struct z8530_irqhandler z8530_sync =
49132 {
49133- z8530_rx,
49134- z8530_tx,
49135- z8530_status
49136+ .rx = z8530_rx,
49137+ .tx = z8530_tx,
49138+ .status = z8530_status
49139 };
49140
49141 EXPORT_SYMBOL(z8530_sync);
49142@@ -605,15 +605,15 @@ static void z8530_dma_status(struct z8530_channel *chan)
49143 }
49144
49145 static struct z8530_irqhandler z8530_dma_sync = {
49146- z8530_dma_rx,
49147- z8530_dma_tx,
49148- z8530_dma_status
49149+ .rx = z8530_dma_rx,
49150+ .tx = z8530_dma_tx,
49151+ .status = z8530_dma_status
49152 };
49153
49154 static struct z8530_irqhandler z8530_txdma_sync = {
49155- z8530_rx,
49156- z8530_dma_tx,
49157- z8530_dma_status
49158+ .rx = z8530_rx,
49159+ .tx = z8530_dma_tx,
49160+ .status = z8530_dma_status
49161 };
49162
49163 /**
49164@@ -680,9 +680,9 @@ static void z8530_status_clear(struct z8530_channel *chan)
49165
49166 struct z8530_irqhandler z8530_nop=
49167 {
49168- z8530_rx_clear,
49169- z8530_tx_clear,
49170- z8530_status_clear
49171+ .rx = z8530_rx_clear,
49172+ .tx = z8530_tx_clear,
49173+ .status = z8530_status_clear
49174 };
49175
49176
49177diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
49178index 0b60295..b8bfa5b 100644
49179--- a/drivers/net/wimax/i2400m/rx.c
49180+++ b/drivers/net/wimax/i2400m/rx.c
49181@@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
49182 if (i2400m->rx_roq == NULL)
49183 goto error_roq_alloc;
49184
49185- rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
49186+ rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
49187 GFP_KERNEL);
49188 if (rd == NULL) {
49189 result = -ENOMEM;
49190diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
49191index e71a2ce..2268d61 100644
49192--- a/drivers/net/wireless/airo.c
49193+++ b/drivers/net/wireless/airo.c
49194@@ -7846,7 +7846,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
49195 struct airo_info *ai = dev->ml_priv;
49196 int ridcode;
49197 int enabled;
49198- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49199+ int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
49200 unsigned char *iobuf;
49201
49202 /* Only super-user can write RIDs */
49203diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
49204index da92bfa..5a9001a 100644
49205--- a/drivers/net/wireless/at76c50x-usb.c
49206+++ b/drivers/net/wireless/at76c50x-usb.c
49207@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
49208 }
49209
49210 /* Convert timeout from the DFU status to jiffies */
49211-static inline unsigned long at76_get_timeout(struct dfu_status *s)
49212+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
49213 {
49214 return msecs_to_jiffies((s->poll_timeout[2] << 16)
49215 | (s->poll_timeout[1] << 8)
49216diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
49217index 5fdc40d..3975205 100644
49218--- a/drivers/net/wireless/ath/ath10k/htc.c
49219+++ b/drivers/net/wireless/ath/ath10k/htc.c
49220@@ -856,7 +856,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
49221 /* registered target arrival callback from the HIF layer */
49222 int ath10k_htc_init(struct ath10k *ar)
49223 {
49224- struct ath10k_hif_cb htc_callbacks;
49225+ static struct ath10k_hif_cb htc_callbacks = {
49226+ .rx_completion = ath10k_htc_rx_completion_handler,
49227+ .tx_completion = ath10k_htc_tx_completion_handler,
49228+ };
49229 struct ath10k_htc_ep *ep = NULL;
49230 struct ath10k_htc *htc = &ar->htc;
49231
49232@@ -866,8 +869,6 @@ int ath10k_htc_init(struct ath10k *ar)
49233 ath10k_htc_reset_endpoint_states(htc);
49234
49235 /* setup HIF layer callbacks */
49236- htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
49237- htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
49238 htc->ar = ar;
49239
49240 /* Get HIF default pipe for HTC message exchange */
49241diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
49242index 4716d33..a688310 100644
49243--- a/drivers/net/wireless/ath/ath10k/htc.h
49244+++ b/drivers/net/wireless/ath/ath10k/htc.h
49245@@ -271,13 +271,13 @@ enum ath10k_htc_ep_id {
49246
49247 struct ath10k_htc_ops {
49248 void (*target_send_suspend_complete)(struct ath10k *ar);
49249-};
49250+} __no_const;
49251
49252 struct ath10k_htc_ep_ops {
49253 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
49254 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
49255 void (*ep_tx_credits)(struct ath10k *);
49256-};
49257+} __no_const;
49258
49259 /* service connection information */
49260 struct ath10k_htc_svc_conn_req {
49261diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49262index 59af9f9..5f3564f 100644
49263--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49264+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
49265@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49266 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
49267 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
49268
49269- ACCESS_ONCE(ads->ds_link) = i->link;
49270- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
49271+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
49272+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
49273
49274 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
49275 ctl6 = SM(i->keytype, AR_EncrType);
49276@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49277
49278 if ((i->is_first || i->is_last) &&
49279 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
49280- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
49281+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
49282 | set11nTries(i->rates, 1)
49283 | set11nTries(i->rates, 2)
49284 | set11nTries(i->rates, 3)
49285 | (i->dur_update ? AR_DurUpdateEna : 0)
49286 | SM(0, AR_BurstDur);
49287
49288- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
49289+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
49290 | set11nRate(i->rates, 1)
49291 | set11nRate(i->rates, 2)
49292 | set11nRate(i->rates, 3);
49293 } else {
49294- ACCESS_ONCE(ads->ds_ctl2) = 0;
49295- ACCESS_ONCE(ads->ds_ctl3) = 0;
49296+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
49297+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
49298 }
49299
49300 if (!i->is_first) {
49301- ACCESS_ONCE(ads->ds_ctl0) = 0;
49302- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
49303- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
49304+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
49305+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
49306+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
49307 return;
49308 }
49309
49310@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49311 break;
49312 }
49313
49314- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
49315+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
49316 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49317 | SM(i->txpower, AR_XmitPower0)
49318 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49319@@ -289,27 +289,27 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49320 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
49321 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
49322
49323- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
49324- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
49325+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
49326+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
49327
49328 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
49329 return;
49330
49331- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49332+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
49333 | set11nPktDurRTSCTS(i->rates, 1);
49334
49335- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49336+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
49337 | set11nPktDurRTSCTS(i->rates, 3);
49338
49339- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49340+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
49341 | set11nRateFlags(i->rates, 1)
49342 | set11nRateFlags(i->rates, 2)
49343 | set11nRateFlags(i->rates, 3)
49344 | SM(i->rtscts_rate, AR_RTSCTSRate);
49345
49346- ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1);
49347- ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2);
49348- ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3);
49349+ ACCESS_ONCE_RW(ads->ds_ctl9) = SM(i->txpower, AR_XmitPower1);
49350+ ACCESS_ONCE_RW(ads->ds_ctl10) = SM(i->txpower, AR_XmitPower2);
49351+ ACCESS_ONCE_RW(ads->ds_ctl11) = SM(i->txpower, AR_XmitPower3);
49352 }
49353
49354 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
49355diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49356index 71e38e8..5ac96ca 100644
49357--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49358+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
49359@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49360 (i->qcu << AR_TxQcuNum_S) | desc_len;
49361
49362 checksum += val;
49363- ACCESS_ONCE(ads->info) = val;
49364+ ACCESS_ONCE_RW(ads->info) = val;
49365
49366 checksum += i->link;
49367- ACCESS_ONCE(ads->link) = i->link;
49368+ ACCESS_ONCE_RW(ads->link) = i->link;
49369
49370 checksum += i->buf_addr[0];
49371- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
49372+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
49373 checksum += i->buf_addr[1];
49374- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
49375+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
49376 checksum += i->buf_addr[2];
49377- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
49378+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
49379 checksum += i->buf_addr[3];
49380- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
49381+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
49382
49383 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
49384- ACCESS_ONCE(ads->ctl3) = val;
49385+ ACCESS_ONCE_RW(ads->ctl3) = val;
49386 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
49387- ACCESS_ONCE(ads->ctl5) = val;
49388+ ACCESS_ONCE_RW(ads->ctl5) = val;
49389 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
49390- ACCESS_ONCE(ads->ctl7) = val;
49391+ ACCESS_ONCE_RW(ads->ctl7) = val;
49392 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
49393- ACCESS_ONCE(ads->ctl9) = val;
49394+ ACCESS_ONCE_RW(ads->ctl9) = val;
49395
49396 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
49397- ACCESS_ONCE(ads->ctl10) = checksum;
49398+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
49399
49400 if (i->is_first || i->is_last) {
49401- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
49402+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
49403 | set11nTries(i->rates, 1)
49404 | set11nTries(i->rates, 2)
49405 | set11nTries(i->rates, 3)
49406 | (i->dur_update ? AR_DurUpdateEna : 0)
49407 | SM(0, AR_BurstDur);
49408
49409- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
49410+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
49411 | set11nRate(i->rates, 1)
49412 | set11nRate(i->rates, 2)
49413 | set11nRate(i->rates, 3);
49414 } else {
49415- ACCESS_ONCE(ads->ctl13) = 0;
49416- ACCESS_ONCE(ads->ctl14) = 0;
49417+ ACCESS_ONCE_RW(ads->ctl13) = 0;
49418+ ACCESS_ONCE_RW(ads->ctl14) = 0;
49419 }
49420
49421 ads->ctl20 = 0;
49422@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49423
49424 ctl17 = SM(i->keytype, AR_EncrType);
49425 if (!i->is_first) {
49426- ACCESS_ONCE(ads->ctl11) = 0;
49427- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49428- ACCESS_ONCE(ads->ctl15) = 0;
49429- ACCESS_ONCE(ads->ctl16) = 0;
49430- ACCESS_ONCE(ads->ctl17) = ctl17;
49431- ACCESS_ONCE(ads->ctl18) = 0;
49432- ACCESS_ONCE(ads->ctl19) = 0;
49433+ ACCESS_ONCE_RW(ads->ctl11) = 0;
49434+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
49435+ ACCESS_ONCE_RW(ads->ctl15) = 0;
49436+ ACCESS_ONCE_RW(ads->ctl16) = 0;
49437+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49438+ ACCESS_ONCE_RW(ads->ctl18) = 0;
49439+ ACCESS_ONCE_RW(ads->ctl19) = 0;
49440 return;
49441 }
49442
49443- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49444+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
49445 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
49446 | SM(i->txpower, AR_XmitPower0)
49447 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
49448@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
49449 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
49450 ctl12 |= SM(val, AR_PAPRDChainMask);
49451
49452- ACCESS_ONCE(ads->ctl12) = ctl12;
49453- ACCESS_ONCE(ads->ctl17) = ctl17;
49454+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
49455+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
49456
49457- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49458+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
49459 | set11nPktDurRTSCTS(i->rates, 1);
49460
49461- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49462+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
49463 | set11nPktDurRTSCTS(i->rates, 3);
49464
49465- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
49466+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
49467 | set11nRateFlags(i->rates, 1)
49468 | set11nRateFlags(i->rates, 2)
49469 | set11nRateFlags(i->rates, 3)
49470 | SM(i->rtscts_rate, AR_RTSCTSRate);
49471
49472- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
49473+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
49474
49475- ACCESS_ONCE(ads->ctl20) = SM(i->txpower, AR_XmitPower1);
49476- ACCESS_ONCE(ads->ctl21) = SM(i->txpower, AR_XmitPower2);
49477- ACCESS_ONCE(ads->ctl22) = SM(i->txpower, AR_XmitPower3);
49478+ ACCESS_ONCE_RW(ads->ctl20) = SM(i->txpower, AR_XmitPower1);
49479+ ACCESS_ONCE_RW(ads->ctl21) = SM(i->txpower, AR_XmitPower2);
49480+ ACCESS_ONCE_RW(ads->ctl22) = SM(i->txpower, AR_XmitPower3);
49481 }
49482
49483 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
49484diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
49485index 51b4ebe..d1929dd 100644
49486--- a/drivers/net/wireless/ath/ath9k/hw.h
49487+++ b/drivers/net/wireless/ath/ath9k/hw.h
49488@@ -629,7 +629,7 @@ struct ath_hw_private_ops {
49489
49490 /* ANI */
49491 void (*ani_cache_ini_regs)(struct ath_hw *ah);
49492-};
49493+} __no_const;
49494
49495 /**
49496 * struct ath_spec_scan - parameters for Atheros spectral scan
49497@@ -706,7 +706,7 @@ struct ath_hw_ops {
49498 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
49499 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
49500 #endif
49501-};
49502+} __no_const;
49503
49504 struct ath_nf_limits {
49505 s16 max;
49506diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
49507index 4b148bb..ac738fa 100644
49508--- a/drivers/net/wireless/ath/ath9k/main.c
49509+++ b/drivers/net/wireless/ath/ath9k/main.c
49510@@ -2592,16 +2592,18 @@ void ath9k_fill_chanctx_ops(void)
49511 if (!ath9k_use_chanctx)
49512 return;
49513
49514- ath9k_ops.hw_scan = ath9k_hw_scan;
49515- ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49516- ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49517- ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49518- ath9k_ops.add_chanctx = ath9k_add_chanctx;
49519- ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49520- ath9k_ops.change_chanctx = ath9k_change_chanctx;
49521- ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49522- ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49523- ath9k_ops.mgd_prepare_tx = ath9k_chanctx_force_active;
49524+ pax_open_kernel();
49525+ *(void **)&ath9k_ops.hw_scan = ath9k_hw_scan;
49526+ *(void **)&ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan;
49527+ *(void **)&ath9k_ops.remain_on_channel = ath9k_remain_on_channel;
49528+ *(void **)&ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel;
49529+ *(void **)&ath9k_ops.add_chanctx = ath9k_add_chanctx;
49530+ *(void **)&ath9k_ops.remove_chanctx = ath9k_remove_chanctx;
49531+ *(void **)&ath9k_ops.change_chanctx = ath9k_change_chanctx;
49532+ *(void **)&ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx;
49533+ *(void **)&ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx;
49534+ *(void **)&ath9k_ops.mgd_prepare_tx = ath9k_chanctx_force_active;
49535+ pax_close_kernel();
49536 }
49537
49538 struct ieee80211_ops ath9k_ops = {
49539diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
49540index 92190da..f3a4c4c 100644
49541--- a/drivers/net/wireless/b43/phy_lp.c
49542+++ b/drivers/net/wireless/b43/phy_lp.c
49543@@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
49544 {
49545 struct ssb_bus *bus = dev->dev->sdev->bus;
49546
49547- static const struct b206x_channel *chandata = NULL;
49548+ const struct b206x_channel *chandata = NULL;
49549 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
49550 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
49551 u16 old_comm15, scale;
49552diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
49553index dc1d20c..f7a4f06 100644
49554--- a/drivers/net/wireless/iwlegacy/3945-mac.c
49555+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
49556@@ -3633,7 +3633,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
49557 */
49558 if (il3945_mod_params.disable_hw_scan) {
49559 D_INFO("Disabling hw_scan\n");
49560- il3945_mac_ops.hw_scan = NULL;
49561+ pax_open_kernel();
49562+ *(void **)&il3945_mac_ops.hw_scan = NULL;
49563+ pax_close_kernel();
49564 }
49565
49566 D_INFO("*** LOAD DRIVER ***\n");
49567diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49568index 0ffb6ff..c0b7f0e 100644
49569--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49570+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
49571@@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
49572 {
49573 struct iwl_priv *priv = file->private_data;
49574 char buf[64];
49575- int buf_size;
49576+ size_t buf_size;
49577 u32 offset, len;
49578
49579 memset(buf, 0, sizeof(buf));
49580@@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
49581 struct iwl_priv *priv = file->private_data;
49582
49583 char buf[8];
49584- int buf_size;
49585+ size_t buf_size;
49586 u32 reset_flag;
49587
49588 memset(buf, 0, sizeof(buf));
49589@@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
49590 {
49591 struct iwl_priv *priv = file->private_data;
49592 char buf[8];
49593- int buf_size;
49594+ size_t buf_size;
49595 int ht40;
49596
49597 memset(buf, 0, sizeof(buf));
49598@@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
49599 {
49600 struct iwl_priv *priv = file->private_data;
49601 char buf[8];
49602- int buf_size;
49603+ size_t buf_size;
49604 int value;
49605
49606 memset(buf, 0, sizeof(buf));
49607@@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
49608 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
49609 DEBUGFS_READ_FILE_OPS(current_sleep_command);
49610
49611-static const char *fmt_value = " %-30s %10u\n";
49612-static const char *fmt_hex = " %-30s 0x%02X\n";
49613-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
49614-static const char *fmt_header =
49615+static const char fmt_value[] = " %-30s %10u\n";
49616+static const char fmt_hex[] = " %-30s 0x%02X\n";
49617+static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
49618+static const char fmt_header[] =
49619 "%-32s current cumulative delta max\n";
49620
49621 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
49622@@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
49623 {
49624 struct iwl_priv *priv = file->private_data;
49625 char buf[8];
49626- int buf_size;
49627+ size_t buf_size;
49628 int clear;
49629
49630 memset(buf, 0, sizeof(buf));
49631@@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
49632 {
49633 struct iwl_priv *priv = file->private_data;
49634 char buf[8];
49635- int buf_size;
49636+ size_t buf_size;
49637 int trace;
49638
49639 memset(buf, 0, sizeof(buf));
49640@@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
49641 {
49642 struct iwl_priv *priv = file->private_data;
49643 char buf[8];
49644- int buf_size;
49645+ size_t buf_size;
49646 int missed;
49647
49648 memset(buf, 0, sizeof(buf));
49649@@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
49650
49651 struct iwl_priv *priv = file->private_data;
49652 char buf[8];
49653- int buf_size;
49654+ size_t buf_size;
49655 int plcp;
49656
49657 memset(buf, 0, sizeof(buf));
49658@@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
49659
49660 struct iwl_priv *priv = file->private_data;
49661 char buf[8];
49662- int buf_size;
49663+ size_t buf_size;
49664 int flush;
49665
49666 memset(buf, 0, sizeof(buf));
49667@@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
49668
49669 struct iwl_priv *priv = file->private_data;
49670 char buf[8];
49671- int buf_size;
49672+ size_t buf_size;
49673 int rts;
49674
49675 if (!priv->cfg->ht_params)
49676@@ -2204,7 +2204,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
49677 {
49678 struct iwl_priv *priv = file->private_data;
49679 char buf[8];
49680- int buf_size;
49681+ size_t buf_size;
49682
49683 memset(buf, 0, sizeof(buf));
49684 buf_size = min(count, sizeof(buf) - 1);
49685@@ -2238,7 +2238,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
49686 struct iwl_priv *priv = file->private_data;
49687 u32 event_log_flag;
49688 char buf[8];
49689- int buf_size;
49690+ size_t buf_size;
49691
49692 /* check that the interface is up */
49693 if (!iwl_is_ready(priv))
49694@@ -2292,7 +2292,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
49695 struct iwl_priv *priv = file->private_data;
49696 char buf[8];
49697 u32 calib_disabled;
49698- int buf_size;
49699+ size_t buf_size;
49700
49701 memset(buf, 0, sizeof(buf));
49702 buf_size = min(count, sizeof(buf) - 1);
49703diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
49704index bb36d67..a43451e 100644
49705--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
49706+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
49707@@ -1686,7 +1686,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
49708 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
49709
49710 char buf[8];
49711- int buf_size;
49712+ size_t buf_size;
49713 u32 reset_flag;
49714
49715 memset(buf, 0, sizeof(buf));
49716@@ -1707,7 +1707,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
49717 {
49718 struct iwl_trans *trans = file->private_data;
49719 char buf[8];
49720- int buf_size;
49721+ size_t buf_size;
49722 int csr;
49723
49724 memset(buf, 0, sizeof(buf));
49725diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
49726index 6b48c865..19646a7 100644
49727--- a/drivers/net/wireless/mac80211_hwsim.c
49728+++ b/drivers/net/wireless/mac80211_hwsim.c
49729@@ -2577,20 +2577,20 @@ static int __init init_mac80211_hwsim(void)
49730 if (channels < 1)
49731 return -EINVAL;
49732
49733- mac80211_hwsim_mchan_ops = mac80211_hwsim_ops;
49734- mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49735- mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49736- mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49737- mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49738- mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49739- mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49740- mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49741- mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49742- mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49743- mac80211_hwsim_mchan_ops.assign_vif_chanctx =
49744- mac80211_hwsim_assign_vif_chanctx;
49745- mac80211_hwsim_mchan_ops.unassign_vif_chanctx =
49746- mac80211_hwsim_unassign_vif_chanctx;
49747+ pax_open_kernel();
49748+ memcpy((void *)&mac80211_hwsim_mchan_ops, &mac80211_hwsim_ops, sizeof mac80211_hwsim_mchan_ops);
49749+ *(void **)&mac80211_hwsim_mchan_ops.hw_scan = mac80211_hwsim_hw_scan;
49750+ *(void **)&mac80211_hwsim_mchan_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
49751+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_start = NULL;
49752+ *(void **)&mac80211_hwsim_mchan_ops.sw_scan_complete = NULL;
49753+ *(void **)&mac80211_hwsim_mchan_ops.remain_on_channel = mac80211_hwsim_roc;
49754+ *(void **)&mac80211_hwsim_mchan_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
49755+ *(void **)&mac80211_hwsim_mchan_ops.add_chanctx = mac80211_hwsim_add_chanctx;
49756+ *(void **)&mac80211_hwsim_mchan_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
49757+ *(void **)&mac80211_hwsim_mchan_ops.change_chanctx = mac80211_hwsim_change_chanctx;
49758+ *(void **)&mac80211_hwsim_mchan_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
49759+ *(void **)&mac80211_hwsim_mchan_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
49760+ pax_close_kernel();
49761
49762 spin_lock_init(&hwsim_radio_lock);
49763 INIT_LIST_HEAD(&hwsim_radios);
49764diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
49765index d2a9a08..0cb175d 100644
49766--- a/drivers/net/wireless/rndis_wlan.c
49767+++ b/drivers/net/wireless/rndis_wlan.c
49768@@ -1236,7 +1236,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
49769
49770 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
49771
49772- if (rts_threshold < 0 || rts_threshold > 2347)
49773+ if (rts_threshold > 2347)
49774 rts_threshold = 2347;
49775
49776 tmp = cpu_to_le32(rts_threshold);
49777diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
49778index d13f25c..2573994 100644
49779--- a/drivers/net/wireless/rt2x00/rt2x00.h
49780+++ b/drivers/net/wireless/rt2x00/rt2x00.h
49781@@ -375,7 +375,7 @@ struct rt2x00_intf {
49782 * for hardware which doesn't support hardware
49783 * sequence counting.
49784 */
49785- atomic_t seqno;
49786+ atomic_unchecked_t seqno;
49787 };
49788
49789 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
49790diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
49791index 66ff364..3ce34f7 100644
49792--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
49793+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
49794@@ -224,9 +224,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
49795 * sequence counter given by mac80211.
49796 */
49797 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
49798- seqno = atomic_add_return(0x10, &intf->seqno);
49799+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
49800 else
49801- seqno = atomic_read(&intf->seqno);
49802+ seqno = atomic_read_unchecked(&intf->seqno);
49803
49804 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
49805 hdr->seq_ctrl |= cpu_to_le16(seqno);
49806diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
49807index b661f896..ddf7d2b 100644
49808--- a/drivers/net/wireless/ti/wl1251/sdio.c
49809+++ b/drivers/net/wireless/ti/wl1251/sdio.c
49810@@ -282,13 +282,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
49811
49812 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
49813
49814- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49815- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49816+ pax_open_kernel();
49817+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
49818+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
49819+ pax_close_kernel();
49820
49821 wl1251_info("using dedicated interrupt line");
49822 } else {
49823- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49824- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49825+ pax_open_kernel();
49826+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
49827+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
49828+ pax_close_kernel();
49829
49830 wl1251_info("using SDIO interrupt");
49831 }
49832diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
49833index 0bccf12..3d95068 100644
49834--- a/drivers/net/wireless/ti/wl12xx/main.c
49835+++ b/drivers/net/wireless/ti/wl12xx/main.c
49836@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49837 sizeof(wl->conf.mem));
49838
49839 /* read data preparation is only needed by wl127x */
49840- wl->ops->prepare_read = wl127x_prepare_read;
49841+ pax_open_kernel();
49842+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49843+ pax_close_kernel();
49844
49845 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49846 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49847@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
49848 sizeof(wl->conf.mem));
49849
49850 /* read data preparation is only needed by wl127x */
49851- wl->ops->prepare_read = wl127x_prepare_read;
49852+ pax_open_kernel();
49853+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
49854+ pax_close_kernel();
49855
49856 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
49857 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
49858diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
49859index 7af1936..128bb35 100644
49860--- a/drivers/net/wireless/ti/wl18xx/main.c
49861+++ b/drivers/net/wireless/ti/wl18xx/main.c
49862@@ -1916,8 +1916,10 @@ static int wl18xx_setup(struct wl1271 *wl)
49863 }
49864
49865 if (!checksum_param) {
49866- wl18xx_ops.set_rx_csum = NULL;
49867- wl18xx_ops.init_vif = NULL;
49868+ pax_open_kernel();
49869+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
49870+ *(void **)&wl18xx_ops.init_vif = NULL;
49871+ pax_close_kernel();
49872 }
49873
49874 /* Enable 11a Band only if we have 5G antennas */
49875diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
49876index a912dc0..a8225ba 100644
49877--- a/drivers/net/wireless/zd1211rw/zd_usb.c
49878+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
49879@@ -385,7 +385,7 @@ static inline void handle_regs_int(struct urb *urb)
49880 {
49881 struct zd_usb *usb = urb->context;
49882 struct zd_usb_interrupt *intr = &usb->intr;
49883- int len;
49884+ unsigned int len;
49885 u16 int_num;
49886
49887 ZD_ASSERT(in_interrupt());
49888diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
49889index ca82f54..3767771 100644
49890--- a/drivers/net/xen-netfront.c
49891+++ b/drivers/net/xen-netfront.c
49892@@ -496,9 +496,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
49893 len = skb_frag_size(frag);
49894 offset = frag->page_offset;
49895
49896- /* Data must not cross a page boundary. */
49897- BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
49898-
49899 /* Skip unused frames from start of page */
49900 page += offset >> PAGE_SHIFT;
49901 offset &= ~PAGE_MASK;
49902@@ -506,8 +503,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
49903 while (len > 0) {
49904 unsigned long bytes;
49905
49906- BUG_ON(offset >= PAGE_SIZE);
49907-
49908 bytes = PAGE_SIZE - offset;
49909 if (bytes > len)
49910 bytes = len;
49911diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
49912index 683671a..4519fc2 100644
49913--- a/drivers/nfc/nfcwilink.c
49914+++ b/drivers/nfc/nfcwilink.c
49915@@ -497,7 +497,7 @@ static struct nci_ops nfcwilink_ops = {
49916
49917 static int nfcwilink_probe(struct platform_device *pdev)
49918 {
49919- static struct nfcwilink *drv;
49920+ struct nfcwilink *drv;
49921 int rc;
49922 __u32 protocols;
49923
49924diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
49925index d93b2b6..ae50401 100644
49926--- a/drivers/oprofile/buffer_sync.c
49927+++ b/drivers/oprofile/buffer_sync.c
49928@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
49929 if (cookie == NO_COOKIE)
49930 offset = pc;
49931 if (cookie == INVALID_COOKIE) {
49932- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49933+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49934 offset = pc;
49935 }
49936 if (cookie != last_cookie) {
49937@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
49938 /* add userspace sample */
49939
49940 if (!mm) {
49941- atomic_inc(&oprofile_stats.sample_lost_no_mm);
49942+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
49943 return 0;
49944 }
49945
49946 cookie = lookup_dcookie(mm, s->eip, &offset);
49947
49948 if (cookie == INVALID_COOKIE) {
49949- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
49950+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
49951 return 0;
49952 }
49953
49954@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
49955 /* ignore backtraces if failed to add a sample */
49956 if (state == sb_bt_start) {
49957 state = sb_bt_ignore;
49958- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
49959+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
49960 }
49961 }
49962 release_mm(mm);
49963diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
49964index c0cc4e7..44d4e54 100644
49965--- a/drivers/oprofile/event_buffer.c
49966+++ b/drivers/oprofile/event_buffer.c
49967@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
49968 }
49969
49970 if (buffer_pos == buffer_size) {
49971- atomic_inc(&oprofile_stats.event_lost_overflow);
49972+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
49973 return;
49974 }
49975
49976diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
49977index ed2c3ec..deda85a 100644
49978--- a/drivers/oprofile/oprof.c
49979+++ b/drivers/oprofile/oprof.c
49980@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
49981 if (oprofile_ops.switch_events())
49982 return;
49983
49984- atomic_inc(&oprofile_stats.multiplex_counter);
49985+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
49986 start_switch_worker();
49987 }
49988
49989diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
49990index ee2cfce..7f8f699 100644
49991--- a/drivers/oprofile/oprofile_files.c
49992+++ b/drivers/oprofile/oprofile_files.c
49993@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
49994
49995 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
49996
49997-static ssize_t timeout_read(struct file *file, char __user *buf,
49998+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
49999 size_t count, loff_t *offset)
50000 {
50001 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
50002diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
50003index 59659ce..6c860a0 100644
50004--- a/drivers/oprofile/oprofile_stats.c
50005+++ b/drivers/oprofile/oprofile_stats.c
50006@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
50007 cpu_buf->sample_invalid_eip = 0;
50008 }
50009
50010- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
50011- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
50012- atomic_set(&oprofile_stats.event_lost_overflow, 0);
50013- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
50014- atomic_set(&oprofile_stats.multiplex_counter, 0);
50015+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
50016+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
50017+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
50018+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
50019+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
50020 }
50021
50022
50023diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
50024index 1fc622b..8c48fc3 100644
50025--- a/drivers/oprofile/oprofile_stats.h
50026+++ b/drivers/oprofile/oprofile_stats.h
50027@@ -13,11 +13,11 @@
50028 #include <linux/atomic.h>
50029
50030 struct oprofile_stat_struct {
50031- atomic_t sample_lost_no_mm;
50032- atomic_t sample_lost_no_mapping;
50033- atomic_t bt_lost_no_mapping;
50034- atomic_t event_lost_overflow;
50035- atomic_t multiplex_counter;
50036+ atomic_unchecked_t sample_lost_no_mm;
50037+ atomic_unchecked_t sample_lost_no_mapping;
50038+ atomic_unchecked_t bt_lost_no_mapping;
50039+ atomic_unchecked_t event_lost_overflow;
50040+ atomic_unchecked_t multiplex_counter;
50041 };
50042
50043 extern struct oprofile_stat_struct oprofile_stats;
50044diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
50045index 3f49345..c750d0b 100644
50046--- a/drivers/oprofile/oprofilefs.c
50047+++ b/drivers/oprofile/oprofilefs.c
50048@@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
50049
50050 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
50051 {
50052- atomic_t *val = file->private_data;
50053- return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
50054+ atomic_unchecked_t *val = file->private_data;
50055+ return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
50056 }
50057
50058
50059@@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
50060
50061
50062 int oprofilefs_create_ro_atomic(struct dentry *root,
50063- char const *name, atomic_t *val)
50064+ char const *name, atomic_unchecked_t *val)
50065 {
50066 return __oprofilefs_create_file(root, name,
50067 &atomic_ro_fops, 0444, val);
50068diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
50069index 61be1d9..dec05d7 100644
50070--- a/drivers/oprofile/timer_int.c
50071+++ b/drivers/oprofile/timer_int.c
50072@@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
50073 return NOTIFY_OK;
50074 }
50075
50076-static struct notifier_block __refdata oprofile_cpu_notifier = {
50077+static struct notifier_block oprofile_cpu_notifier = {
50078 .notifier_call = oprofile_cpu_notify,
50079 };
50080
50081diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
50082index 3b47080..6cd05dd 100644
50083--- a/drivers/parport/procfs.c
50084+++ b/drivers/parport/procfs.c
50085@@ -64,7 +64,7 @@ static int do_active_device(struct ctl_table *table, int write,
50086
50087 *ppos += len;
50088
50089- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
50090+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
50091 }
50092
50093 #ifdef CONFIG_PARPORT_1284
50094@@ -106,7 +106,7 @@ static int do_autoprobe(struct ctl_table *table, int write,
50095
50096 *ppos += len;
50097
50098- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
50099+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
50100 }
50101 #endif /* IEEE1284.3 support. */
50102
50103diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
50104index 8dcccff..35d701d 100644
50105--- a/drivers/pci/hotplug/acpiphp_ibm.c
50106+++ b/drivers/pci/hotplug/acpiphp_ibm.c
50107@@ -452,7 +452,9 @@ static int __init ibm_acpiphp_init(void)
50108 goto init_cleanup;
50109 }
50110
50111- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50112+ pax_open_kernel();
50113+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
50114+ pax_close_kernel();
50115 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
50116
50117 return retval;
50118diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
50119index 04fcd78..39e83f1 100644
50120--- a/drivers/pci/hotplug/cpcihp_generic.c
50121+++ b/drivers/pci/hotplug/cpcihp_generic.c
50122@@ -73,7 +73,6 @@ static u16 port;
50123 static unsigned int enum_bit;
50124 static u8 enum_mask;
50125
50126-static struct cpci_hp_controller_ops generic_hpc_ops;
50127 static struct cpci_hp_controller generic_hpc;
50128
50129 static int __init validate_parameters(void)
50130@@ -139,6 +138,10 @@ static int query_enum(void)
50131 return ((value & enum_mask) == enum_mask);
50132 }
50133
50134+static struct cpci_hp_controller_ops generic_hpc_ops = {
50135+ .query_enum = query_enum,
50136+};
50137+
50138 static int __init cpcihp_generic_init(void)
50139 {
50140 int status;
50141@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
50142 pci_dev_put(dev);
50143
50144 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
50145- generic_hpc_ops.query_enum = query_enum;
50146 generic_hpc.ops = &generic_hpc_ops;
50147
50148 status = cpci_hp_register_controller(&generic_hpc);
50149diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
50150index 6757b3e..d3bad62 100644
50151--- a/drivers/pci/hotplug/cpcihp_zt5550.c
50152+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
50153@@ -59,7 +59,6 @@
50154 /* local variables */
50155 static bool debug;
50156 static bool poll;
50157-static struct cpci_hp_controller_ops zt5550_hpc_ops;
50158 static struct cpci_hp_controller zt5550_hpc;
50159
50160 /* Primary cPCI bus bridge device */
50161@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
50162 return 0;
50163 }
50164
50165+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
50166+ .query_enum = zt5550_hc_query_enum,
50167+};
50168+
50169 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
50170 {
50171 int status;
50172@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
50173 dbg("returned from zt5550_hc_config");
50174
50175 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
50176- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
50177 zt5550_hpc.ops = &zt5550_hpc_ops;
50178 if(!poll) {
50179 zt5550_hpc.irq = hc_dev->irq;
50180 zt5550_hpc.irq_flags = IRQF_SHARED;
50181 zt5550_hpc.dev_id = hc_dev;
50182
50183- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50184- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50185- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50186+ pax_open_kernel();
50187+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
50188+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
50189+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
50190+ pax_open_kernel();
50191 } else {
50192 info("using ENUM# polling mode");
50193 }
50194diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
50195index 0968a9b..5a00edf 100644
50196--- a/drivers/pci/hotplug/cpqphp_nvram.c
50197+++ b/drivers/pci/hotplug/cpqphp_nvram.c
50198@@ -427,9 +427,13 @@ static u32 store_HRT (void __iomem *rom_start)
50199
50200 void compaq_nvram_init (void __iomem *rom_start)
50201 {
50202+
50203+#ifndef CONFIG_PAX_KERNEXEC
50204 if (rom_start) {
50205 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
50206 }
50207+#endif
50208+
50209 dbg("int15 entry = %p\n", compaq_int15_entry_point);
50210
50211 /* initialize our int15 lock */
50212diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
50213index 56d8486..f26113f 100644
50214--- a/drivers/pci/hotplug/pci_hotplug_core.c
50215+++ b/drivers/pci/hotplug/pci_hotplug_core.c
50216@@ -436,8 +436,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
50217 return -EINVAL;
50218 }
50219
50220- slot->ops->owner = owner;
50221- slot->ops->mod_name = mod_name;
50222+ pax_open_kernel();
50223+ *(struct module **)&slot->ops->owner = owner;
50224+ *(const char **)&slot->ops->mod_name = mod_name;
50225+ pax_close_kernel();
50226
50227 mutex_lock(&pci_hp_mutex);
50228 /*
50229diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
50230index 07aa722..84514b4 100644
50231--- a/drivers/pci/hotplug/pciehp_core.c
50232+++ b/drivers/pci/hotplug/pciehp_core.c
50233@@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
50234 struct slot *slot = ctrl->slot;
50235 struct hotplug_slot *hotplug = NULL;
50236 struct hotplug_slot_info *info = NULL;
50237- struct hotplug_slot_ops *ops = NULL;
50238+ hotplug_slot_ops_no_const *ops = NULL;
50239 char name[SLOT_NAME_SIZE];
50240 int retval = -ENOMEM;
50241
50242diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
50243index 6807edd..086a7dc 100644
50244--- a/drivers/pci/msi.c
50245+++ b/drivers/pci/msi.c
50246@@ -507,8 +507,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
50247 {
50248 struct attribute **msi_attrs;
50249 struct attribute *msi_attr;
50250- struct device_attribute *msi_dev_attr;
50251- struct attribute_group *msi_irq_group;
50252+ device_attribute_no_const *msi_dev_attr;
50253+ attribute_group_no_const *msi_irq_group;
50254 const struct attribute_group **msi_irq_groups;
50255 struct msi_desc *entry;
50256 int ret = -ENOMEM;
50257@@ -568,7 +568,7 @@ error_attrs:
50258 count = 0;
50259 msi_attr = msi_attrs[count];
50260 while (msi_attr) {
50261- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
50262+ msi_dev_attr = container_of(msi_attr, device_attribute_no_const, attr);
50263 kfree(msi_attr->name);
50264 kfree(msi_dev_attr);
50265 ++count;
50266diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
50267index 6d04771..4126004 100644
50268--- a/drivers/pci/pci-sysfs.c
50269+++ b/drivers/pci/pci-sysfs.c
50270@@ -1134,7 +1134,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
50271 {
50272 /* allocate attribute structure, piggyback attribute name */
50273 int name_len = write_combine ? 13 : 10;
50274- struct bin_attribute *res_attr;
50275+ bin_attribute_no_const *res_attr;
50276 int retval;
50277
50278 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
50279@@ -1311,7 +1311,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
50280 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
50281 {
50282 int retval;
50283- struct bin_attribute *attr;
50284+ bin_attribute_no_const *attr;
50285
50286 /* If the device has VPD, try to expose it in sysfs. */
50287 if (dev->vpd) {
50288@@ -1358,7 +1358,7 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
50289 {
50290 int retval;
50291 int rom_size = 0;
50292- struct bin_attribute *attr;
50293+ bin_attribute_no_const *attr;
50294
50295 if (!sysfs_initialized)
50296 return -EACCES;
50297diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
50298index 0601890..dc15007 100644
50299--- a/drivers/pci/pci.h
50300+++ b/drivers/pci/pci.h
50301@@ -91,7 +91,7 @@ struct pci_vpd_ops {
50302 struct pci_vpd {
50303 unsigned int len;
50304 const struct pci_vpd_ops *ops;
50305- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
50306+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
50307 };
50308
50309 int pci_vpd_pci22_init(struct pci_dev *dev);
50310diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
50311index e1e7026..d28dd33 100644
50312--- a/drivers/pci/pcie/aspm.c
50313+++ b/drivers/pci/pcie/aspm.c
50314@@ -27,9 +27,9 @@
50315 #define MODULE_PARAM_PREFIX "pcie_aspm."
50316
50317 /* Note: those are not register definitions */
50318-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
50319-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
50320-#define ASPM_STATE_L1 (4) /* L1 state */
50321+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
50322+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
50323+#define ASPM_STATE_L1 (4U) /* L1 state */
50324 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
50325 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
50326
50327diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
50328index 9cce960..7c530f4 100644
50329--- a/drivers/pci/probe.c
50330+++ b/drivers/pci/probe.c
50331@@ -176,7 +176,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
50332 struct pci_bus_region region, inverted_region;
50333 bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
50334
50335- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
50336+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
50337
50338 /* No printks while decoding is disabled! */
50339 if (!dev->mmio_always_on) {
50340diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
50341index 3f155e7..0f4b1f0 100644
50342--- a/drivers/pci/proc.c
50343+++ b/drivers/pci/proc.c
50344@@ -434,7 +434,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
50345 static int __init pci_proc_init(void)
50346 {
50347 struct pci_dev *dev = NULL;
50348+
50349+#ifdef CONFIG_GRKERNSEC_PROC_ADD
50350+#ifdef CONFIG_GRKERNSEC_PROC_USER
50351+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
50352+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
50353+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
50354+#endif
50355+#else
50356 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
50357+#endif
50358 proc_create("devices", 0, proc_bus_pci_dir,
50359 &proc_bus_pci_dev_operations);
50360 proc_initialized = 1;
50361diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
50362index d866db8..c827d1f 100644
50363--- a/drivers/platform/chrome/chromeos_laptop.c
50364+++ b/drivers/platform/chrome/chromeos_laptop.c
50365@@ -479,7 +479,7 @@ static struct chromeos_laptop cr48 = {
50366 .callback = chromeos_laptop_dmi_matched, \
50367 .driver_data = (void *)&board_
50368
50369-static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
50370+static struct dmi_system_id chromeos_laptop_dmi_table[] __initconst = {
50371 {
50372 .ident = "Samsung Series 5 550",
50373 .matches = {
50374diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
50375index c5af23b..3d62d5e 100644
50376--- a/drivers/platform/x86/alienware-wmi.c
50377+++ b/drivers/platform/x86/alienware-wmi.c
50378@@ -150,7 +150,7 @@ struct wmax_led_args {
50379 } __packed;
50380
50381 static struct platform_device *platform_device;
50382-static struct device_attribute *zone_dev_attrs;
50383+static device_attribute_no_const *zone_dev_attrs;
50384 static struct attribute **zone_attrs;
50385 static struct platform_zone *zone_data;
50386
50387@@ -161,7 +161,7 @@ static struct platform_driver platform_driver = {
50388 }
50389 };
50390
50391-static struct attribute_group zone_attribute_group = {
50392+static attribute_group_no_const zone_attribute_group = {
50393 .name = "rgb_zones",
50394 };
50395
50396diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
50397index 21fc932..ee9394a 100644
50398--- a/drivers/platform/x86/asus-wmi.c
50399+++ b/drivers/platform/x86/asus-wmi.c
50400@@ -1590,6 +1590,10 @@ static int show_dsts(struct seq_file *m, void *data)
50401 int err;
50402 u32 retval = -1;
50403
50404+#ifdef CONFIG_GRKERNSEC_KMEM
50405+ return -EPERM;
50406+#endif
50407+
50408 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
50409
50410 if (err < 0)
50411@@ -1606,6 +1610,10 @@ static int show_devs(struct seq_file *m, void *data)
50412 int err;
50413 u32 retval = -1;
50414
50415+#ifdef CONFIG_GRKERNSEC_KMEM
50416+ return -EPERM;
50417+#endif
50418+
50419 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
50420 &retval);
50421
50422@@ -1630,6 +1638,10 @@ static int show_call(struct seq_file *m, void *data)
50423 union acpi_object *obj;
50424 acpi_status status;
50425
50426+#ifdef CONFIG_GRKERNSEC_KMEM
50427+ return -EPERM;
50428+#endif
50429+
50430 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
50431 1, asus->debug.method_id,
50432 &input, &output);
50433diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
50434index 62f8030..c7f2a45 100644
50435--- a/drivers/platform/x86/msi-laptop.c
50436+++ b/drivers/platform/x86/msi-laptop.c
50437@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
50438
50439 if (!quirks->ec_read_only) {
50440 /* allow userland write sysfs file */
50441- dev_attr_bluetooth.store = store_bluetooth;
50442- dev_attr_wlan.store = store_wlan;
50443- dev_attr_threeg.store = store_threeg;
50444- dev_attr_bluetooth.attr.mode |= S_IWUSR;
50445- dev_attr_wlan.attr.mode |= S_IWUSR;
50446- dev_attr_threeg.attr.mode |= S_IWUSR;
50447+ pax_open_kernel();
50448+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
50449+ *(void **)&dev_attr_wlan.store = store_wlan;
50450+ *(void **)&dev_attr_threeg.store = store_threeg;
50451+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
50452+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
50453+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
50454+ pax_close_kernel();
50455 }
50456
50457 /* disable hardware control by fn key */
50458diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
50459index 70222f2..8c8ce66 100644
50460--- a/drivers/platform/x86/msi-wmi.c
50461+++ b/drivers/platform/x86/msi-wmi.c
50462@@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
50463 static void msi_wmi_notify(u32 value, void *context)
50464 {
50465 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
50466- static struct key_entry *key;
50467+ struct key_entry *key;
50468 union acpi_object *obj;
50469 acpi_status status;
50470
50471diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
50472index 26ad9ff..7c52909 100644
50473--- a/drivers/platform/x86/sony-laptop.c
50474+++ b/drivers/platform/x86/sony-laptop.c
50475@@ -2527,7 +2527,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
50476 }
50477
50478 /* High speed charging function */
50479-static struct device_attribute *hsc_handle;
50480+static device_attribute_no_const *hsc_handle;
50481
50482 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
50483 struct device_attribute *attr,
50484@@ -2601,7 +2601,7 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
50485 }
50486
50487 /* low battery function */
50488-static struct device_attribute *lowbatt_handle;
50489+static device_attribute_no_const *lowbatt_handle;
50490
50491 static ssize_t sony_nc_lowbatt_store(struct device *dev,
50492 struct device_attribute *attr,
50493@@ -2667,7 +2667,7 @@ static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
50494 }
50495
50496 /* fan speed function */
50497-static struct device_attribute *fan_handle, *hsf_handle;
50498+static device_attribute_no_const *fan_handle, *hsf_handle;
50499
50500 static ssize_t sony_nc_hsfan_store(struct device *dev,
50501 struct device_attribute *attr,
50502@@ -2774,7 +2774,7 @@ static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
50503 }
50504
50505 /* USB charge function */
50506-static struct device_attribute *uc_handle;
50507+static device_attribute_no_const *uc_handle;
50508
50509 static ssize_t sony_nc_usb_charge_store(struct device *dev,
50510 struct device_attribute *attr,
50511@@ -2848,7 +2848,7 @@ static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
50512 }
50513
50514 /* Panel ID function */
50515-static struct device_attribute *panel_handle;
50516+static device_attribute_no_const *panel_handle;
50517
50518 static ssize_t sony_nc_panelid_show(struct device *dev,
50519 struct device_attribute *attr, char *buffer)
50520@@ -2895,7 +2895,7 @@ static void sony_nc_panelid_cleanup(struct platform_device *pd)
50521 }
50522
50523 /* smart connect function */
50524-static struct device_attribute *sc_handle;
50525+static device_attribute_no_const *sc_handle;
50526
50527 static ssize_t sony_nc_smart_conn_store(struct device *dev,
50528 struct device_attribute *attr,
50529diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
50530index 3bbc6eb..7760460 100644
50531--- a/drivers/platform/x86/thinkpad_acpi.c
50532+++ b/drivers/platform/x86/thinkpad_acpi.c
50533@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
50534 return 0;
50535 }
50536
50537-void static hotkey_mask_warn_incomplete_mask(void)
50538+static void hotkey_mask_warn_incomplete_mask(void)
50539 {
50540 /* log only what the user can fix... */
50541 const u32 wantedmask = hotkey_driver_mask &
50542@@ -2438,10 +2438,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
50543 && !tp_features.bright_unkfw)
50544 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
50545 }
50546+}
50547
50548 #undef TPACPI_COMPARE_KEY
50549 #undef TPACPI_MAY_SEND_KEY
50550-}
50551
50552 /*
50553 * Polling driver
50554diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
50555index 438d4c7..ca8a2fb 100644
50556--- a/drivers/pnp/pnpbios/bioscalls.c
50557+++ b/drivers/pnp/pnpbios/bioscalls.c
50558@@ -59,7 +59,7 @@ do { \
50559 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
50560 } while(0)
50561
50562-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
50563+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
50564 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
50565
50566 /*
50567@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50568
50569 cpu = get_cpu();
50570 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
50571+
50572+ pax_open_kernel();
50573 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
50574+ pax_close_kernel();
50575
50576 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
50577 spin_lock_irqsave(&pnp_bios_lock, flags);
50578@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
50579 :"memory");
50580 spin_unlock_irqrestore(&pnp_bios_lock, flags);
50581
50582+ pax_open_kernel();
50583 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
50584+ pax_close_kernel();
50585+
50586 put_cpu();
50587
50588 /* If we get here and this is set then the PnP BIOS faulted on us. */
50589@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
50590 return status;
50591 }
50592
50593-void pnpbios_calls_init(union pnp_bios_install_struct *header)
50594+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
50595 {
50596 int i;
50597
50598@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50599 pnp_bios_callpoint.offset = header->fields.pm16offset;
50600 pnp_bios_callpoint.segment = PNP_CS16;
50601
50602+ pax_open_kernel();
50603+
50604 for_each_possible_cpu(i) {
50605 struct desc_struct *gdt = get_cpu_gdt_table(i);
50606 if (!gdt)
50607@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
50608 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
50609 (unsigned long)__va(header->fields.pm16dseg));
50610 }
50611+
50612+ pax_close_kernel();
50613 }
50614diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
50615index 0c52e2a..3421ab7 100644
50616--- a/drivers/power/pda_power.c
50617+++ b/drivers/power/pda_power.c
50618@@ -37,7 +37,11 @@ static int polling;
50619
50620 #if IS_ENABLED(CONFIG_USB_PHY)
50621 static struct usb_phy *transceiver;
50622-static struct notifier_block otg_nb;
50623+static int otg_handle_notification(struct notifier_block *nb,
50624+ unsigned long event, void *unused);
50625+static struct notifier_block otg_nb = {
50626+ .notifier_call = otg_handle_notification
50627+};
50628 #endif
50629
50630 static struct regulator *ac_draw;
50631@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
50632
50633 #if IS_ENABLED(CONFIG_USB_PHY)
50634 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
50635- otg_nb.notifier_call = otg_handle_notification;
50636 ret = usb_register_notifier(transceiver, &otg_nb);
50637 if (ret) {
50638 dev_err(dev, "failure to register otg notifier\n");
50639diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
50640index cc439fd..8fa30df 100644
50641--- a/drivers/power/power_supply.h
50642+++ b/drivers/power/power_supply.h
50643@@ -16,12 +16,12 @@ struct power_supply;
50644
50645 #ifdef CONFIG_SYSFS
50646
50647-extern void power_supply_init_attrs(struct device_type *dev_type);
50648+extern void power_supply_init_attrs(void);
50649 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
50650
50651 #else
50652
50653-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
50654+static inline void power_supply_init_attrs(void) {}
50655 #define power_supply_uevent NULL
50656
50657 #endif /* CONFIG_SYSFS */
50658diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
50659index 078afd6..fbac9da 100644
50660--- a/drivers/power/power_supply_core.c
50661+++ b/drivers/power/power_supply_core.c
50662@@ -28,7 +28,10 @@ EXPORT_SYMBOL_GPL(power_supply_class);
50663 ATOMIC_NOTIFIER_HEAD(power_supply_notifier);
50664 EXPORT_SYMBOL_GPL(power_supply_notifier);
50665
50666-static struct device_type power_supply_dev_type;
50667+extern const struct attribute_group *power_supply_attr_groups[];
50668+static struct device_type power_supply_dev_type = {
50669+ .groups = power_supply_attr_groups,
50670+};
50671
50672 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
50673 struct power_supply *supply)
50674@@ -640,7 +643,7 @@ static int __init power_supply_class_init(void)
50675 return PTR_ERR(power_supply_class);
50676
50677 power_supply_class->dev_uevent = power_supply_uevent;
50678- power_supply_init_attrs(&power_supply_dev_type);
50679+ power_supply_init_attrs();
50680
50681 return 0;
50682 }
50683diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
50684index 750a202..99c8f4b 100644
50685--- a/drivers/power/power_supply_sysfs.c
50686+++ b/drivers/power/power_supply_sysfs.c
50687@@ -234,17 +234,15 @@ static struct attribute_group power_supply_attr_group = {
50688 .is_visible = power_supply_attr_is_visible,
50689 };
50690
50691-static const struct attribute_group *power_supply_attr_groups[] = {
50692+const struct attribute_group *power_supply_attr_groups[] = {
50693 &power_supply_attr_group,
50694 NULL,
50695 };
50696
50697-void power_supply_init_attrs(struct device_type *dev_type)
50698+void power_supply_init_attrs(void)
50699 {
50700 int i;
50701
50702- dev_type->groups = power_supply_attr_groups;
50703-
50704 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
50705 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
50706 }
50707diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
50708index 84419af..268ede8 100644
50709--- a/drivers/powercap/powercap_sys.c
50710+++ b/drivers/powercap/powercap_sys.c
50711@@ -154,8 +154,77 @@ struct powercap_constraint_attr {
50712 struct device_attribute name_attr;
50713 };
50714
50715+static ssize_t show_constraint_name(struct device *dev,
50716+ struct device_attribute *dev_attr,
50717+ char *buf);
50718+
50719 static struct powercap_constraint_attr
50720- constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
50721+ constraint_attrs[MAX_CONSTRAINTS_PER_ZONE] = {
50722+ [0 ... MAX_CONSTRAINTS_PER_ZONE - 1] = {
50723+ .power_limit_attr = {
50724+ .attr = {
50725+ .name = NULL,
50726+ .mode = S_IWUSR | S_IRUGO
50727+ },
50728+ .show = show_constraint_power_limit_uw,
50729+ .store = store_constraint_power_limit_uw
50730+ },
50731+
50732+ .time_window_attr = {
50733+ .attr = {
50734+ .name = NULL,
50735+ .mode = S_IWUSR | S_IRUGO
50736+ },
50737+ .show = show_constraint_time_window_us,
50738+ .store = store_constraint_time_window_us
50739+ },
50740+
50741+ .max_power_attr = {
50742+ .attr = {
50743+ .name = NULL,
50744+ .mode = S_IRUGO
50745+ },
50746+ .show = show_constraint_max_power_uw,
50747+ .store = NULL
50748+ },
50749+
50750+ .min_power_attr = {
50751+ .attr = {
50752+ .name = NULL,
50753+ .mode = S_IRUGO
50754+ },
50755+ .show = show_constraint_min_power_uw,
50756+ .store = NULL
50757+ },
50758+
50759+ .max_time_window_attr = {
50760+ .attr = {
50761+ .name = NULL,
50762+ .mode = S_IRUGO
50763+ },
50764+ .show = show_constraint_max_time_window_us,
50765+ .store = NULL
50766+ },
50767+
50768+ .min_time_window_attr = {
50769+ .attr = {
50770+ .name = NULL,
50771+ .mode = S_IRUGO
50772+ },
50773+ .show = show_constraint_min_time_window_us,
50774+ .store = NULL
50775+ },
50776+
50777+ .name_attr = {
50778+ .attr = {
50779+ .name = NULL,
50780+ .mode = S_IRUGO
50781+ },
50782+ .show = show_constraint_name,
50783+ .store = NULL
50784+ }
50785+ }
50786+};
50787
50788 /* A list of powercap control_types */
50789 static LIST_HEAD(powercap_cntrl_list);
50790@@ -193,23 +262,16 @@ static ssize_t show_constraint_name(struct device *dev,
50791 }
50792
50793 static int create_constraint_attribute(int id, const char *name,
50794- int mode,
50795- struct device_attribute *dev_attr,
50796- ssize_t (*show)(struct device *,
50797- struct device_attribute *, char *),
50798- ssize_t (*store)(struct device *,
50799- struct device_attribute *,
50800- const char *, size_t)
50801- )
50802+ struct device_attribute *dev_attr)
50803 {
50804+ name = kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name);
50805
50806- dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
50807- id, name);
50808- if (!dev_attr->attr.name)
50809+ if (!name)
50810 return -ENOMEM;
50811- dev_attr->attr.mode = mode;
50812- dev_attr->show = show;
50813- dev_attr->store = store;
50814+
50815+ pax_open_kernel();
50816+ *(const char **)&dev_attr->attr.name = name;
50817+ pax_close_kernel();
50818
50819 return 0;
50820 }
50821@@ -236,49 +298,31 @@ static int seed_constraint_attributes(void)
50822
50823 for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
50824 ret = create_constraint_attribute(i, "power_limit_uw",
50825- S_IWUSR | S_IRUGO,
50826- &constraint_attrs[i].power_limit_attr,
50827- show_constraint_power_limit_uw,
50828- store_constraint_power_limit_uw);
50829+ &constraint_attrs[i].power_limit_attr);
50830 if (ret)
50831 goto err_alloc;
50832 ret = create_constraint_attribute(i, "time_window_us",
50833- S_IWUSR | S_IRUGO,
50834- &constraint_attrs[i].time_window_attr,
50835- show_constraint_time_window_us,
50836- store_constraint_time_window_us);
50837+ &constraint_attrs[i].time_window_attr);
50838 if (ret)
50839 goto err_alloc;
50840- ret = create_constraint_attribute(i, "name", S_IRUGO,
50841- &constraint_attrs[i].name_attr,
50842- show_constraint_name,
50843- NULL);
50844+ ret = create_constraint_attribute(i, "name",
50845+ &constraint_attrs[i].name_attr);
50846 if (ret)
50847 goto err_alloc;
50848- ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
50849- &constraint_attrs[i].max_power_attr,
50850- show_constraint_max_power_uw,
50851- NULL);
50852+ ret = create_constraint_attribute(i, "max_power_uw",
50853+ &constraint_attrs[i].max_power_attr);
50854 if (ret)
50855 goto err_alloc;
50856- ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
50857- &constraint_attrs[i].min_power_attr,
50858- show_constraint_min_power_uw,
50859- NULL);
50860+ ret = create_constraint_attribute(i, "min_power_uw",
50861+ &constraint_attrs[i].min_power_attr);
50862 if (ret)
50863 goto err_alloc;
50864 ret = create_constraint_attribute(i, "max_time_window_us",
50865- S_IRUGO,
50866- &constraint_attrs[i].max_time_window_attr,
50867- show_constraint_max_time_window_us,
50868- NULL);
50869+ &constraint_attrs[i].max_time_window_attr);
50870 if (ret)
50871 goto err_alloc;
50872 ret = create_constraint_attribute(i, "min_time_window_us",
50873- S_IRUGO,
50874- &constraint_attrs[i].min_time_window_attr,
50875- show_constraint_min_time_window_us,
50876- NULL);
50877+ &constraint_attrs[i].min_time_window_attr);
50878 if (ret)
50879 goto err_alloc;
50880
50881@@ -378,10 +422,12 @@ static void create_power_zone_common_attributes(
50882 power_zone->zone_dev_attrs[count++] =
50883 &dev_attr_max_energy_range_uj.attr;
50884 if (power_zone->ops->get_energy_uj) {
50885+ pax_open_kernel();
50886 if (power_zone->ops->reset_energy_uj)
50887- dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50888+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO;
50889 else
50890- dev_attr_energy_uj.attr.mode = S_IRUGO;
50891+ *(umode_t *)&dev_attr_energy_uj.attr.mode = S_IRUGO;
50892+ pax_close_kernel();
50893 power_zone->zone_dev_attrs[count++] =
50894 &dev_attr_energy_uj.attr;
50895 }
50896diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
50897index 9c5d414..c7900ce 100644
50898--- a/drivers/ptp/ptp_private.h
50899+++ b/drivers/ptp/ptp_private.h
50900@@ -51,7 +51,7 @@ struct ptp_clock {
50901 struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
50902 wait_queue_head_t tsev_wq;
50903 int defunct; /* tells readers to go away when clock is being removed */
50904- struct device_attribute *pin_dev_attr;
50905+ device_attribute_no_const *pin_dev_attr;
50906 struct attribute **pin_attr;
50907 struct attribute_group pin_attr_group;
50908 };
50909diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
50910index 302e626..12579af 100644
50911--- a/drivers/ptp/ptp_sysfs.c
50912+++ b/drivers/ptp/ptp_sysfs.c
50913@@ -280,7 +280,7 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
50914 goto no_pin_attr;
50915
50916 for (i = 0; i < n_pins; i++) {
50917- struct device_attribute *da = &ptp->pin_dev_attr[i];
50918+ device_attribute_no_const *da = &ptp->pin_dev_attr[i];
50919 sysfs_attr_init(&da->attr);
50920 da->attr.name = info->pin_config[i].name;
50921 da->attr.mode = 0644;
50922diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
50923index a3c3785..c901e3a 100644
50924--- a/drivers/regulator/core.c
50925+++ b/drivers/regulator/core.c
50926@@ -3481,7 +3481,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50927 {
50928 const struct regulation_constraints *constraints = NULL;
50929 const struct regulator_init_data *init_data;
50930- static atomic_t regulator_no = ATOMIC_INIT(0);
50931+ static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
50932 struct regulator_dev *rdev;
50933 struct device *dev;
50934 int ret, i;
50935@@ -3551,7 +3551,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
50936 rdev->dev.of_node = of_node_get(config->of_node);
50937 rdev->dev.parent = dev;
50938 dev_set_name(&rdev->dev, "regulator.%d",
50939- atomic_inc_return(&regulator_no) - 1);
50940+ atomic_inc_return_unchecked(&regulator_no) - 1);
50941 ret = device_register(&rdev->dev);
50942 if (ret != 0) {
50943 put_device(&rdev->dev);
50944diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
50945index 2fc4111..6aa88ca 100644
50946--- a/drivers/regulator/max8660.c
50947+++ b/drivers/regulator/max8660.c
50948@@ -424,8 +424,10 @@ static int max8660_probe(struct i2c_client *client,
50949 max8660->shadow_regs[MAX8660_OVER1] = 5;
50950 } else {
50951 /* Otherwise devices can be toggled via software */
50952- max8660_dcdc_ops.enable = max8660_dcdc_enable;
50953- max8660_dcdc_ops.disable = max8660_dcdc_disable;
50954+ pax_open_kernel();
50955+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
50956+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
50957+ pax_close_kernel();
50958 }
50959
50960 /*
50961diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
50962index dbedf17..18ff6b7 100644
50963--- a/drivers/regulator/max8973-regulator.c
50964+++ b/drivers/regulator/max8973-regulator.c
50965@@ -403,9 +403,11 @@ static int max8973_probe(struct i2c_client *client,
50966 if (!pdata || !pdata->enable_ext_control) {
50967 max->desc.enable_reg = MAX8973_VOUT;
50968 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
50969- max->ops.enable = regulator_enable_regmap;
50970- max->ops.disable = regulator_disable_regmap;
50971- max->ops.is_enabled = regulator_is_enabled_regmap;
50972+ pax_open_kernel();
50973+ *(void **)&max->ops.enable = regulator_enable_regmap;
50974+ *(void **)&max->ops.disable = regulator_disable_regmap;
50975+ *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
50976+ pax_close_kernel();
50977 }
50978
50979 if (pdata) {
50980diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
50981index f374fa5..26f0683 100644
50982--- a/drivers/regulator/mc13892-regulator.c
50983+++ b/drivers/regulator/mc13892-regulator.c
50984@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
50985 }
50986 mc13xxx_unlock(mc13892);
50987
50988- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
50989+ pax_open_kernel();
50990+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
50991 = mc13892_vcam_set_mode;
50992- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
50993+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
50994 = mc13892_vcam_get_mode;
50995+ pax_close_kernel();
50996
50997 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
50998 ARRAY_SIZE(mc13892_regulators));
50999diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
51000index 5b2e761..c8c8a4a 100644
51001--- a/drivers/rtc/rtc-cmos.c
51002+++ b/drivers/rtc/rtc-cmos.c
51003@@ -789,7 +789,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
51004 hpet_rtc_timer_init();
51005
51006 /* export at least the first block of NVRAM */
51007- nvram.size = address_space - NVRAM_OFFSET;
51008+ pax_open_kernel();
51009+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
51010+ pax_close_kernel();
51011 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
51012 if (retval < 0) {
51013 dev_dbg(dev, "can't create nvram file? %d\n", retval);
51014diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
51015index d049393..bb20be0 100644
51016--- a/drivers/rtc/rtc-dev.c
51017+++ b/drivers/rtc/rtc-dev.c
51018@@ -16,6 +16,7 @@
51019 #include <linux/module.h>
51020 #include <linux/rtc.h>
51021 #include <linux/sched.h>
51022+#include <linux/grsecurity.h>
51023 #include "rtc-core.h"
51024
51025 static dev_t rtc_devt;
51026@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
51027 if (copy_from_user(&tm, uarg, sizeof(tm)))
51028 return -EFAULT;
51029
51030+ gr_log_timechange();
51031+
51032 return rtc_set_time(rtc, &tm);
51033
51034 case RTC_PIE_ON:
51035diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
51036index f03d5ba..8325bf6 100644
51037--- a/drivers/rtc/rtc-ds1307.c
51038+++ b/drivers/rtc/rtc-ds1307.c
51039@@ -107,7 +107,7 @@ struct ds1307 {
51040 u8 offset; /* register's offset */
51041 u8 regs[11];
51042 u16 nvram_offset;
51043- struct bin_attribute *nvram;
51044+ bin_attribute_no_const *nvram;
51045 enum ds_type type;
51046 unsigned long flags;
51047 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
51048diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
51049index 11880c1..b823aa4 100644
51050--- a/drivers/rtc/rtc-m48t59.c
51051+++ b/drivers/rtc/rtc-m48t59.c
51052@@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
51053 if (IS_ERR(m48t59->rtc))
51054 return PTR_ERR(m48t59->rtc);
51055
51056- m48t59_nvram_attr.size = pdata->offset;
51057+ pax_open_kernel();
51058+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
51059+ pax_close_kernel();
51060
51061 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
51062 if (ret)
51063diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
51064index e693af6..2e525b6 100644
51065--- a/drivers/scsi/bfa/bfa_fcpim.h
51066+++ b/drivers/scsi/bfa/bfa_fcpim.h
51067@@ -36,7 +36,7 @@ struct bfa_iotag_s {
51068
51069 struct bfa_itn_s {
51070 bfa_isr_func_t isr;
51071-};
51072+} __no_const;
51073
51074 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
51075 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
51076diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
51077index 0f19455..ef7adb5 100644
51078--- a/drivers/scsi/bfa/bfa_fcs.c
51079+++ b/drivers/scsi/bfa/bfa_fcs.c
51080@@ -38,10 +38,21 @@ struct bfa_fcs_mod_s {
51081 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
51082
51083 static struct bfa_fcs_mod_s fcs_modules[] = {
51084- { bfa_fcs_port_attach, NULL, NULL },
51085- { bfa_fcs_uf_attach, NULL, NULL },
51086- { bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
51087- bfa_fcs_fabric_modexit },
51088+ {
51089+ .attach = bfa_fcs_port_attach,
51090+ .modinit = NULL,
51091+ .modexit = NULL
51092+ },
51093+ {
51094+ .attach = bfa_fcs_uf_attach,
51095+ .modinit = NULL,
51096+ .modexit = NULL
51097+ },
51098+ {
51099+ .attach = bfa_fcs_fabric_attach,
51100+ .modinit = bfa_fcs_fabric_modinit,
51101+ .modexit = bfa_fcs_fabric_modexit
51102+ },
51103 };
51104
51105 /*
51106diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
51107index ff75ef8..2dfe00a 100644
51108--- a/drivers/scsi/bfa/bfa_fcs_lport.c
51109+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
51110@@ -89,15 +89,26 @@ static struct {
51111 void (*offline) (struct bfa_fcs_lport_s *port);
51112 } __port_action[] = {
51113 {
51114- bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
51115- bfa_fcs_lport_unknown_offline}, {
51116- bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
51117- bfa_fcs_lport_fab_offline}, {
51118- bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
51119- bfa_fcs_lport_n2n_offline}, {
51120- bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online,
51121- bfa_fcs_lport_loop_offline},
51122- };
51123+ .init = bfa_fcs_lport_unknown_init,
51124+ .online = bfa_fcs_lport_unknown_online,
51125+ .offline = bfa_fcs_lport_unknown_offline
51126+ },
51127+ {
51128+ .init = bfa_fcs_lport_fab_init,
51129+ .online = bfa_fcs_lport_fab_online,
51130+ .offline = bfa_fcs_lport_fab_offline
51131+ },
51132+ {
51133+ .init = bfa_fcs_lport_n2n_init,
51134+ .online = bfa_fcs_lport_n2n_online,
51135+ .offline = bfa_fcs_lport_n2n_offline
51136+ },
51137+ {
51138+ .init = bfa_fcs_lport_loop_init,
51139+ .online = bfa_fcs_lport_loop_online,
51140+ .offline = bfa_fcs_lport_loop_offline
51141+ },
51142+};
51143
51144 /*
51145 * fcs_port_sm FCS logical port state machine
51146diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
51147index a38aafa0..fe8f03b 100644
51148--- a/drivers/scsi/bfa/bfa_ioc.h
51149+++ b/drivers/scsi/bfa/bfa_ioc.h
51150@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
51151 bfa_ioc_disable_cbfn_t disable_cbfn;
51152 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
51153 bfa_ioc_reset_cbfn_t reset_cbfn;
51154-};
51155+} __no_const;
51156
51157 /*
51158 * IOC event notification mechanism.
51159@@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
51160 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
51161 enum bfi_ioc_state fwstate);
51162 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
51163-};
51164+} __no_const;
51165
51166 /*
51167 * Queue element to wait for room in request queue. FIFO order is
51168diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
51169index a14c784..6de6790 100644
51170--- a/drivers/scsi/bfa/bfa_modules.h
51171+++ b/drivers/scsi/bfa/bfa_modules.h
51172@@ -78,12 +78,12 @@ enum {
51173 \
51174 extern struct bfa_module_s hal_mod_ ## __mod; \
51175 struct bfa_module_s hal_mod_ ## __mod = { \
51176- bfa_ ## __mod ## _meminfo, \
51177- bfa_ ## __mod ## _attach, \
51178- bfa_ ## __mod ## _detach, \
51179- bfa_ ## __mod ## _start, \
51180- bfa_ ## __mod ## _stop, \
51181- bfa_ ## __mod ## _iocdisable, \
51182+ .meminfo = bfa_ ## __mod ## _meminfo, \
51183+ .attach = bfa_ ## __mod ## _attach, \
51184+ .detach = bfa_ ## __mod ## _detach, \
51185+ .start = bfa_ ## __mod ## _start, \
51186+ .stop = bfa_ ## __mod ## _stop, \
51187+ .iocdisable = bfa_ ## __mod ## _iocdisable, \
51188 }
51189
51190 #define BFA_CACHELINE_SZ (256)
51191diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
51192index 045c4e1..13de803 100644
51193--- a/drivers/scsi/fcoe/fcoe_sysfs.c
51194+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
51195@@ -33,8 +33,8 @@
51196 */
51197 #include "libfcoe.h"
51198
51199-static atomic_t ctlr_num;
51200-static atomic_t fcf_num;
51201+static atomic_unchecked_t ctlr_num;
51202+static atomic_unchecked_t fcf_num;
51203
51204 /*
51205 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
51206@@ -685,7 +685,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
51207 if (!ctlr)
51208 goto out;
51209
51210- ctlr->id = atomic_inc_return(&ctlr_num) - 1;
51211+ ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
51212 ctlr->f = f;
51213 ctlr->mode = FIP_CONN_TYPE_FABRIC;
51214 INIT_LIST_HEAD(&ctlr->fcfs);
51215@@ -902,7 +902,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
51216 fcf->dev.parent = &ctlr->dev;
51217 fcf->dev.bus = &fcoe_bus_type;
51218 fcf->dev.type = &fcoe_fcf_device_type;
51219- fcf->id = atomic_inc_return(&fcf_num) - 1;
51220+ fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
51221 fcf->state = FCOE_FCF_STATE_UNKNOWN;
51222
51223 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
51224@@ -938,8 +938,8 @@ int __init fcoe_sysfs_setup(void)
51225 {
51226 int error;
51227
51228- atomic_set(&ctlr_num, 0);
51229- atomic_set(&fcf_num, 0);
51230+ atomic_set_unchecked(&ctlr_num, 0);
51231+ atomic_set_unchecked(&fcf_num, 0);
51232
51233 error = bus_register(&fcoe_bus_type);
51234 if (error)
51235diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
51236index 6de80e3..a11e0ac 100644
51237--- a/drivers/scsi/hosts.c
51238+++ b/drivers/scsi/hosts.c
51239@@ -42,7 +42,7 @@
51240 #include "scsi_logging.h"
51241
51242
51243-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51244+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
51245
51246
51247 static void scsi_host_cls_release(struct device *dev)
51248@@ -392,7 +392,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
51249 * subtract one because we increment first then return, but we need to
51250 * know what the next host number was before increment
51251 */
51252- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
51253+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
51254 shost->dma_channel = 0xff;
51255
51256 /* These three are default values which can be overridden */
51257diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
51258index 6b35d0d..2880305 100644
51259--- a/drivers/scsi/hpsa.c
51260+++ b/drivers/scsi/hpsa.c
51261@@ -701,10 +701,10 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
51262 unsigned long flags;
51263
51264 if (h->transMethod & CFGTBL_Trans_io_accel1)
51265- return h->access.command_completed(h, q);
51266+ return h->access->command_completed(h, q);
51267
51268 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
51269- return h->access.command_completed(h, q);
51270+ return h->access->command_completed(h, q);
51271
51272 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
51273 a = rq->head[rq->current_entry];
51274@@ -5454,7 +5454,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
51275 while (!list_empty(&h->reqQ)) {
51276 c = list_entry(h->reqQ.next, struct CommandList, list);
51277 /* can't do anything if fifo is full */
51278- if ((h->access.fifo_full(h))) {
51279+ if ((h->access->fifo_full(h))) {
51280 h->fifo_recently_full = 1;
51281 dev_warn(&h->pdev->dev, "fifo full\n");
51282 break;
51283@@ -5476,7 +5476,7 @@ static void start_io(struct ctlr_info *h, unsigned long *flags)
51284
51285 /* Tell the controller execute command */
51286 spin_unlock_irqrestore(&h->lock, *flags);
51287- h->access.submit_command(h, c);
51288+ h->access->submit_command(h, c);
51289 spin_lock_irqsave(&h->lock, *flags);
51290 }
51291 }
51292@@ -5492,17 +5492,17 @@ static void lock_and_start_io(struct ctlr_info *h)
51293
51294 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
51295 {
51296- return h->access.command_completed(h, q);
51297+ return h->access->command_completed(h, q);
51298 }
51299
51300 static inline bool interrupt_pending(struct ctlr_info *h)
51301 {
51302- return h->access.intr_pending(h);
51303+ return h->access->intr_pending(h);
51304 }
51305
51306 static inline long interrupt_not_for_us(struct ctlr_info *h)
51307 {
51308- return (h->access.intr_pending(h) == 0) ||
51309+ return (h->access->intr_pending(h) == 0) ||
51310 (h->interrupts_enabled == 0);
51311 }
51312
51313@@ -6458,7 +6458,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
51314 if (prod_index < 0)
51315 return -ENODEV;
51316 h->product_name = products[prod_index].product_name;
51317- h->access = *(products[prod_index].access);
51318+ h->access = products[prod_index].access;
51319
51320 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
51321 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
51322@@ -6780,7 +6780,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
51323 unsigned long flags;
51324 u32 lockup_detected;
51325
51326- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51327+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51328 spin_lock_irqsave(&h->lock, flags);
51329 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
51330 if (!lockup_detected) {
51331@@ -7027,7 +7027,7 @@ reinit_after_soft_reset:
51332 }
51333
51334 /* make sure the board interrupts are off */
51335- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51336+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51337
51338 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
51339 goto clean2;
51340@@ -7062,7 +7062,7 @@ reinit_after_soft_reset:
51341 * fake ones to scoop up any residual completions.
51342 */
51343 spin_lock_irqsave(&h->lock, flags);
51344- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51345+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51346 spin_unlock_irqrestore(&h->lock, flags);
51347 free_irqs(h);
51348 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
51349@@ -7081,9 +7081,9 @@ reinit_after_soft_reset:
51350 dev_info(&h->pdev->dev, "Board READY.\n");
51351 dev_info(&h->pdev->dev,
51352 "Waiting for stale completions to drain.\n");
51353- h->access.set_intr_mask(h, HPSA_INTR_ON);
51354+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51355 msleep(10000);
51356- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51357+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51358
51359 rc = controller_reset_failed(h->cfgtable);
51360 if (rc)
51361@@ -7109,7 +7109,7 @@ reinit_after_soft_reset:
51362 h->drv_req_rescan = 0;
51363
51364 /* Turn the interrupts on so we can service requests */
51365- h->access.set_intr_mask(h, HPSA_INTR_ON);
51366+ h->access->set_intr_mask(h, HPSA_INTR_ON);
51367
51368 hpsa_hba_inquiry(h);
51369 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
51370@@ -7174,7 +7174,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
51371 * To write all data in the battery backed cache to disks
51372 */
51373 hpsa_flush_cache(h);
51374- h->access.set_intr_mask(h, HPSA_INTR_OFF);
51375+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
51376 hpsa_free_irqs_and_disable_msix(h);
51377 }
51378
51379@@ -7292,7 +7292,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51380 CFGTBL_Trans_enable_directed_msix |
51381 (trans_support & (CFGTBL_Trans_io_accel1 |
51382 CFGTBL_Trans_io_accel2));
51383- struct access_method access = SA5_performant_access;
51384+ struct access_method *access = &SA5_performant_access;
51385
51386 /* This is a bit complicated. There are 8 registers on
51387 * the controller which we write to to tell it 8 different
51388@@ -7334,7 +7334,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51389 * perform the superfluous readl() after each command submission.
51390 */
51391 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
51392- access = SA5_performant_access_no_read;
51393+ access = &SA5_performant_access_no_read;
51394
51395 /* Controller spec: zero out this buffer. */
51396 for (i = 0; i < h->nreply_queues; i++)
51397@@ -7364,12 +7364,12 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
51398 * enable outbound interrupt coalescing in accelerator mode;
51399 */
51400 if (trans_support & CFGTBL_Trans_io_accel1) {
51401- access = SA5_ioaccel_mode1_access;
51402+ access = &SA5_ioaccel_mode1_access;
51403 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51404 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51405 } else {
51406 if (trans_support & CFGTBL_Trans_io_accel2) {
51407- access = SA5_ioaccel_mode2_access;
51408+ access = &SA5_ioaccel_mode2_access;
51409 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
51410 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
51411 }
51412diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
51413index 24472ce..8782caf 100644
51414--- a/drivers/scsi/hpsa.h
51415+++ b/drivers/scsi/hpsa.h
51416@@ -127,7 +127,7 @@ struct ctlr_info {
51417 unsigned int msix_vector;
51418 unsigned int msi_vector;
51419 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
51420- struct access_method access;
51421+ struct access_method *access;
51422 char hba_mode_enabled;
51423
51424 /* queue and queue Info */
51425@@ -536,43 +536,43 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
51426 }
51427
51428 static struct access_method SA5_access = {
51429- SA5_submit_command,
51430- SA5_intr_mask,
51431- SA5_fifo_full,
51432- SA5_intr_pending,
51433- SA5_completed,
51434+ .submit_command = SA5_submit_command,
51435+ .set_intr_mask = SA5_intr_mask,
51436+ .fifo_full = SA5_fifo_full,
51437+ .intr_pending = SA5_intr_pending,
51438+ .command_completed = SA5_completed,
51439 };
51440
51441 static struct access_method SA5_ioaccel_mode1_access = {
51442- SA5_submit_command,
51443- SA5_performant_intr_mask,
51444- SA5_fifo_full,
51445- SA5_ioaccel_mode1_intr_pending,
51446- SA5_ioaccel_mode1_completed,
51447+ .submit_command = SA5_submit_command,
51448+ .set_intr_mask = SA5_performant_intr_mask,
51449+ .fifo_full = SA5_fifo_full,
51450+ .intr_pending = SA5_ioaccel_mode1_intr_pending,
51451+ .command_completed = SA5_ioaccel_mode1_completed,
51452 };
51453
51454 static struct access_method SA5_ioaccel_mode2_access = {
51455- SA5_submit_command_ioaccel2,
51456- SA5_performant_intr_mask,
51457- SA5_fifo_full,
51458- SA5_performant_intr_pending,
51459- SA5_performant_completed,
51460+ .submit_command = SA5_submit_command_ioaccel2,
51461+ .set_intr_mask = SA5_performant_intr_mask,
51462+ .fifo_full = SA5_fifo_full,
51463+ .intr_pending = SA5_performant_intr_pending,
51464+ .command_completed = SA5_performant_completed,
51465 };
51466
51467 static struct access_method SA5_performant_access = {
51468- SA5_submit_command,
51469- SA5_performant_intr_mask,
51470- SA5_fifo_full,
51471- SA5_performant_intr_pending,
51472- SA5_performant_completed,
51473+ .submit_command = SA5_submit_command,
51474+ .set_intr_mask = SA5_performant_intr_mask,
51475+ .fifo_full = SA5_fifo_full,
51476+ .intr_pending = SA5_performant_intr_pending,
51477+ .command_completed = SA5_performant_completed,
51478 };
51479
51480 static struct access_method SA5_performant_access_no_read = {
51481- SA5_submit_command_no_read,
51482- SA5_performant_intr_mask,
51483- SA5_fifo_full,
51484- SA5_performant_intr_pending,
51485- SA5_performant_completed,
51486+ .submit_command = SA5_submit_command_no_read,
51487+ .set_intr_mask = SA5_performant_intr_mask,
51488+ .fifo_full = SA5_fifo_full,
51489+ .intr_pending = SA5_performant_intr_pending,
51490+ .command_completed = SA5_performant_completed,
51491 };
51492
51493 struct board_type {
51494diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
51495index 1b3a094..068e683 100644
51496--- a/drivers/scsi/libfc/fc_exch.c
51497+++ b/drivers/scsi/libfc/fc_exch.c
51498@@ -101,12 +101,12 @@ struct fc_exch_mgr {
51499 u16 pool_max_index;
51500
51501 struct {
51502- atomic_t no_free_exch;
51503- atomic_t no_free_exch_xid;
51504- atomic_t xid_not_found;
51505- atomic_t xid_busy;
51506- atomic_t seq_not_found;
51507- atomic_t non_bls_resp;
51508+ atomic_unchecked_t no_free_exch;
51509+ atomic_unchecked_t no_free_exch_xid;
51510+ atomic_unchecked_t xid_not_found;
51511+ atomic_unchecked_t xid_busy;
51512+ atomic_unchecked_t seq_not_found;
51513+ atomic_unchecked_t non_bls_resp;
51514 } stats;
51515 };
51516
51517@@ -811,7 +811,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
51518 /* allocate memory for exchange */
51519 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
51520 if (!ep) {
51521- atomic_inc(&mp->stats.no_free_exch);
51522+ atomic_inc_unchecked(&mp->stats.no_free_exch);
51523 goto out;
51524 }
51525 memset(ep, 0, sizeof(*ep));
51526@@ -874,7 +874,7 @@ out:
51527 return ep;
51528 err:
51529 spin_unlock_bh(&pool->lock);
51530- atomic_inc(&mp->stats.no_free_exch_xid);
51531+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
51532 mempool_free(ep, mp->ep_pool);
51533 return NULL;
51534 }
51535@@ -1023,7 +1023,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51536 xid = ntohs(fh->fh_ox_id); /* we originated exch */
51537 ep = fc_exch_find(mp, xid);
51538 if (!ep) {
51539- atomic_inc(&mp->stats.xid_not_found);
51540+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51541 reject = FC_RJT_OX_ID;
51542 goto out;
51543 }
51544@@ -1053,7 +1053,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51545 ep = fc_exch_find(mp, xid);
51546 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
51547 if (ep) {
51548- atomic_inc(&mp->stats.xid_busy);
51549+ atomic_inc_unchecked(&mp->stats.xid_busy);
51550 reject = FC_RJT_RX_ID;
51551 goto rel;
51552 }
51553@@ -1064,7 +1064,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51554 }
51555 xid = ep->xid; /* get our XID */
51556 } else if (!ep) {
51557- atomic_inc(&mp->stats.xid_not_found);
51558+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51559 reject = FC_RJT_RX_ID; /* XID not found */
51560 goto out;
51561 }
51562@@ -1082,7 +1082,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
51563 } else {
51564 sp = &ep->seq;
51565 if (sp->id != fh->fh_seq_id) {
51566- atomic_inc(&mp->stats.seq_not_found);
51567+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51568 if (f_ctl & FC_FC_END_SEQ) {
51569 /*
51570 * Update sequence_id based on incoming last
51571@@ -1533,22 +1533,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51572
51573 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
51574 if (!ep) {
51575- atomic_inc(&mp->stats.xid_not_found);
51576+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51577 goto out;
51578 }
51579 if (ep->esb_stat & ESB_ST_COMPLETE) {
51580- atomic_inc(&mp->stats.xid_not_found);
51581+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51582 goto rel;
51583 }
51584 if (ep->rxid == FC_XID_UNKNOWN)
51585 ep->rxid = ntohs(fh->fh_rx_id);
51586 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
51587- atomic_inc(&mp->stats.xid_not_found);
51588+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51589 goto rel;
51590 }
51591 if (ep->did != ntoh24(fh->fh_s_id) &&
51592 ep->did != FC_FID_FLOGI) {
51593- atomic_inc(&mp->stats.xid_not_found);
51594+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51595 goto rel;
51596 }
51597 sof = fr_sof(fp);
51598@@ -1557,7 +1557,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51599 sp->ssb_stat |= SSB_ST_RESP;
51600 sp->id = fh->fh_seq_id;
51601 } else if (sp->id != fh->fh_seq_id) {
51602- atomic_inc(&mp->stats.seq_not_found);
51603+ atomic_inc_unchecked(&mp->stats.seq_not_found);
51604 goto rel;
51605 }
51606
51607@@ -1619,9 +1619,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
51608 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
51609
51610 if (!sp)
51611- atomic_inc(&mp->stats.xid_not_found);
51612+ atomic_inc_unchecked(&mp->stats.xid_not_found);
51613 else
51614- atomic_inc(&mp->stats.non_bls_resp);
51615+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
51616
51617 fc_frame_free(fp);
51618 }
51619@@ -2261,13 +2261,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
51620
51621 list_for_each_entry(ema, &lport->ema_list, ema_list) {
51622 mp = ema->mp;
51623- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
51624+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
51625 st->fc_no_free_exch_xid +=
51626- atomic_read(&mp->stats.no_free_exch_xid);
51627- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
51628- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
51629- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
51630- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
51631+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
51632+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
51633+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
51634+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
51635+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
51636 }
51637 }
51638 EXPORT_SYMBOL(fc_exch_update_stats);
51639diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
51640index 766098a..1c6c971 100644
51641--- a/drivers/scsi/libsas/sas_ata.c
51642+++ b/drivers/scsi/libsas/sas_ata.c
51643@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
51644 .postreset = ata_std_postreset,
51645 .error_handler = ata_std_error_handler,
51646 .post_internal_cmd = sas_ata_post_internal,
51647- .qc_defer = ata_std_qc_defer,
51648+ .qc_defer = ata_std_qc_defer,
51649 .qc_prep = ata_noop_qc_prep,
51650 .qc_issue = sas_ata_qc_issue,
51651 .qc_fill_rtf = sas_ata_qc_fill_rtf,
51652diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
51653index 434e903..5a4a79b 100644
51654--- a/drivers/scsi/lpfc/lpfc.h
51655+++ b/drivers/scsi/lpfc/lpfc.h
51656@@ -430,7 +430,7 @@ struct lpfc_vport {
51657 struct dentry *debug_nodelist;
51658 struct dentry *vport_debugfs_root;
51659 struct lpfc_debugfs_trc *disc_trc;
51660- atomic_t disc_trc_cnt;
51661+ atomic_unchecked_t disc_trc_cnt;
51662 #endif
51663 uint8_t stat_data_enabled;
51664 uint8_t stat_data_blocked;
51665@@ -880,8 +880,8 @@ struct lpfc_hba {
51666 struct timer_list fabric_block_timer;
51667 unsigned long bit_flags;
51668 #define FABRIC_COMANDS_BLOCKED 0
51669- atomic_t num_rsrc_err;
51670- atomic_t num_cmd_success;
51671+ atomic_unchecked_t num_rsrc_err;
51672+ atomic_unchecked_t num_cmd_success;
51673 unsigned long last_rsrc_error_time;
51674 unsigned long last_ramp_down_time;
51675 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
51676@@ -916,7 +916,7 @@ struct lpfc_hba {
51677
51678 struct dentry *debug_slow_ring_trc;
51679 struct lpfc_debugfs_trc *slow_ring_trc;
51680- atomic_t slow_ring_trc_cnt;
51681+ atomic_unchecked_t slow_ring_trc_cnt;
51682 /* iDiag debugfs sub-directory */
51683 struct dentry *idiag_root;
51684 struct dentry *idiag_pci_cfg;
51685diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
51686index b0aedce..89c6ca6 100644
51687--- a/drivers/scsi/lpfc/lpfc_debugfs.c
51688+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
51689@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
51690
51691 #include <linux/debugfs.h>
51692
51693-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51694+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
51695 static unsigned long lpfc_debugfs_start_time = 0L;
51696
51697 /* iDiag */
51698@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
51699 lpfc_debugfs_enable = 0;
51700
51701 len = 0;
51702- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
51703+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
51704 (lpfc_debugfs_max_disc_trc - 1);
51705 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
51706 dtp = vport->disc_trc + i;
51707@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
51708 lpfc_debugfs_enable = 0;
51709
51710 len = 0;
51711- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
51712+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
51713 (lpfc_debugfs_max_slow_ring_trc - 1);
51714 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
51715 dtp = phba->slow_ring_trc + i;
51716@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
51717 !vport || !vport->disc_trc)
51718 return;
51719
51720- index = atomic_inc_return(&vport->disc_trc_cnt) &
51721+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
51722 (lpfc_debugfs_max_disc_trc - 1);
51723 dtp = vport->disc_trc + index;
51724 dtp->fmt = fmt;
51725 dtp->data1 = data1;
51726 dtp->data2 = data2;
51727 dtp->data3 = data3;
51728- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51729+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51730 dtp->jif = jiffies;
51731 #endif
51732 return;
51733@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
51734 !phba || !phba->slow_ring_trc)
51735 return;
51736
51737- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
51738+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
51739 (lpfc_debugfs_max_slow_ring_trc - 1);
51740 dtp = phba->slow_ring_trc + index;
51741 dtp->fmt = fmt;
51742 dtp->data1 = data1;
51743 dtp->data2 = data2;
51744 dtp->data3 = data3;
51745- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
51746+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
51747 dtp->jif = jiffies;
51748 #endif
51749 return;
51750@@ -4268,7 +4268,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51751 "slow_ring buffer\n");
51752 goto debug_failed;
51753 }
51754- atomic_set(&phba->slow_ring_trc_cnt, 0);
51755+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
51756 memset(phba->slow_ring_trc, 0,
51757 (sizeof(struct lpfc_debugfs_trc) *
51758 lpfc_debugfs_max_slow_ring_trc));
51759@@ -4314,7 +4314,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
51760 "buffer\n");
51761 goto debug_failed;
51762 }
51763- atomic_set(&vport->disc_trc_cnt, 0);
51764+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
51765
51766 snprintf(name, sizeof(name), "discovery_trace");
51767 vport->debug_disc_trc =
51768diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
51769index a5769a9..718ecc7 100644
51770--- a/drivers/scsi/lpfc/lpfc_init.c
51771+++ b/drivers/scsi/lpfc/lpfc_init.c
51772@@ -11299,8 +11299,10 @@ lpfc_init(void)
51773 "misc_register returned with status %d", error);
51774
51775 if (lpfc_enable_npiv) {
51776- lpfc_transport_functions.vport_create = lpfc_vport_create;
51777- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51778+ pax_open_kernel();
51779+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
51780+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
51781+ pax_close_kernel();
51782 }
51783 lpfc_transport_template =
51784 fc_attach_transport(&lpfc_transport_functions);
51785diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
51786index 7862c55..5aa65df 100644
51787--- a/drivers/scsi/lpfc/lpfc_scsi.c
51788+++ b/drivers/scsi/lpfc/lpfc_scsi.c
51789@@ -382,7 +382,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
51790 uint32_t evt_posted;
51791
51792 spin_lock_irqsave(&phba->hbalock, flags);
51793- atomic_inc(&phba->num_rsrc_err);
51794+ atomic_inc_unchecked(&phba->num_rsrc_err);
51795 phba->last_rsrc_error_time = jiffies;
51796
51797 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
51798@@ -423,8 +423,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51799 unsigned long num_rsrc_err, num_cmd_success;
51800 int i;
51801
51802- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
51803- num_cmd_success = atomic_read(&phba->num_cmd_success);
51804+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
51805+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
51806
51807 /*
51808 * The error and success command counters are global per
51809@@ -452,8 +452,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
51810 }
51811 }
51812 lpfc_destroy_vport_work_array(phba, vports);
51813- atomic_set(&phba->num_rsrc_err, 0);
51814- atomic_set(&phba->num_cmd_success, 0);
51815+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
51816+ atomic_set_unchecked(&phba->num_cmd_success, 0);
51817 }
51818
51819 /**
51820diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51821index dd46101..ca80eb9 100644
51822--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51823+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
51824@@ -1559,7 +1559,7 @@ _scsih_get_resync(struct device *dev)
51825 {
51826 struct scsi_device *sdev = to_scsi_device(dev);
51827 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51828- static struct _raid_device *raid_device;
51829+ struct _raid_device *raid_device;
51830 unsigned long flags;
51831 Mpi2RaidVolPage0_t vol_pg0;
51832 Mpi2ConfigReply_t mpi_reply;
51833@@ -1611,7 +1611,7 @@ _scsih_get_state(struct device *dev)
51834 {
51835 struct scsi_device *sdev = to_scsi_device(dev);
51836 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
51837- static struct _raid_device *raid_device;
51838+ struct _raid_device *raid_device;
51839 unsigned long flags;
51840 Mpi2RaidVolPage0_t vol_pg0;
51841 Mpi2ConfigReply_t mpi_reply;
51842@@ -6648,7 +6648,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
51843 Mpi2EventDataIrOperationStatus_t *event_data =
51844 (Mpi2EventDataIrOperationStatus_t *)
51845 fw_event->event_data;
51846- static struct _raid_device *raid_device;
51847+ struct _raid_device *raid_device;
51848 unsigned long flags;
51849 u16 handle;
51850
51851@@ -7119,7 +7119,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
51852 u64 sas_address;
51853 struct _sas_device *sas_device;
51854 struct _sas_node *expander_device;
51855- static struct _raid_device *raid_device;
51856+ struct _raid_device *raid_device;
51857 u8 retry_count;
51858 unsigned long flags;
51859
51860diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
51861index 6f3275d..fa5e6b6 100644
51862--- a/drivers/scsi/pmcraid.c
51863+++ b/drivers/scsi/pmcraid.c
51864@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
51865 res->scsi_dev = scsi_dev;
51866 scsi_dev->hostdata = res;
51867 res->change_detected = 0;
51868- atomic_set(&res->read_failures, 0);
51869- atomic_set(&res->write_failures, 0);
51870+ atomic_set_unchecked(&res->read_failures, 0);
51871+ atomic_set_unchecked(&res->write_failures, 0);
51872 rc = 0;
51873 }
51874 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
51875@@ -2687,9 +2687,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
51876
51877 /* If this was a SCSI read/write command keep count of errors */
51878 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
51879- atomic_inc(&res->read_failures);
51880+ atomic_inc_unchecked(&res->read_failures);
51881 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
51882- atomic_inc(&res->write_failures);
51883+ atomic_inc_unchecked(&res->write_failures);
51884
51885 if (!RES_IS_GSCSI(res->cfg_entry) &&
51886 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
51887@@ -3545,7 +3545,7 @@ static int pmcraid_queuecommand_lck(
51888 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51889 * hrrq_id assigned here in queuecommand
51890 */
51891- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51892+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51893 pinstance->num_hrrq;
51894 cmd->cmd_done = pmcraid_io_done;
51895
51896@@ -3857,7 +3857,7 @@ static long pmcraid_ioctl_passthrough(
51897 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
51898 * hrrq_id assigned here in queuecommand
51899 */
51900- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
51901+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
51902 pinstance->num_hrrq;
51903
51904 if (request_size) {
51905@@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
51906
51907 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
51908 /* add resources only after host is added into system */
51909- if (!atomic_read(&pinstance->expose_resources))
51910+ if (!atomic_read_unchecked(&pinstance->expose_resources))
51911 return;
51912
51913 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
51914@@ -5322,8 +5322,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
51915 init_waitqueue_head(&pinstance->reset_wait_q);
51916
51917 atomic_set(&pinstance->outstanding_cmds, 0);
51918- atomic_set(&pinstance->last_message_id, 0);
51919- atomic_set(&pinstance->expose_resources, 0);
51920+ atomic_set_unchecked(&pinstance->last_message_id, 0);
51921+ atomic_set_unchecked(&pinstance->expose_resources, 0);
51922
51923 INIT_LIST_HEAD(&pinstance->free_res_q);
51924 INIT_LIST_HEAD(&pinstance->used_res_q);
51925@@ -6036,7 +6036,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
51926 /* Schedule worker thread to handle CCN and take care of adding and
51927 * removing devices to OS
51928 */
51929- atomic_set(&pinstance->expose_resources, 1);
51930+ atomic_set_unchecked(&pinstance->expose_resources, 1);
51931 schedule_work(&pinstance->worker_q);
51932 return rc;
51933
51934diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
51935index e1d150f..6c6df44 100644
51936--- a/drivers/scsi/pmcraid.h
51937+++ b/drivers/scsi/pmcraid.h
51938@@ -748,7 +748,7 @@ struct pmcraid_instance {
51939 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
51940
51941 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
51942- atomic_t last_message_id;
51943+ atomic_unchecked_t last_message_id;
51944
51945 /* configuration table */
51946 struct pmcraid_config_table *cfg_table;
51947@@ -777,7 +777,7 @@ struct pmcraid_instance {
51948 atomic_t outstanding_cmds;
51949
51950 /* should add/delete resources to mid-layer now ?*/
51951- atomic_t expose_resources;
51952+ atomic_unchecked_t expose_resources;
51953
51954
51955
51956@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
51957 struct pmcraid_config_table_entry_ext cfg_entry_ext;
51958 };
51959 struct scsi_device *scsi_dev; /* Link scsi_device structure */
51960- atomic_t read_failures; /* count of failed READ commands */
51961- atomic_t write_failures; /* count of failed WRITE commands */
51962+ atomic_unchecked_t read_failures; /* count of failed READ commands */
51963+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
51964
51965 /* To indicate add/delete/modify during CCN */
51966 u8 change_detected;
51967diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
51968index 16fe519..3b1ec82 100644
51969--- a/drivers/scsi/qla2xxx/qla_attr.c
51970+++ b/drivers/scsi/qla2xxx/qla_attr.c
51971@@ -2188,7 +2188,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
51972 return 0;
51973 }
51974
51975-struct fc_function_template qla2xxx_transport_functions = {
51976+fc_function_template_no_const qla2xxx_transport_functions = {
51977
51978 .show_host_node_name = 1,
51979 .show_host_port_name = 1,
51980@@ -2236,7 +2236,7 @@ struct fc_function_template qla2xxx_transport_functions = {
51981 .bsg_timeout = qla24xx_bsg_timeout,
51982 };
51983
51984-struct fc_function_template qla2xxx_transport_vport_functions = {
51985+fc_function_template_no_const qla2xxx_transport_vport_functions = {
51986
51987 .show_host_node_name = 1,
51988 .show_host_port_name = 1,
51989diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
51990index d646540..5b13554 100644
51991--- a/drivers/scsi/qla2xxx/qla_gbl.h
51992+++ b/drivers/scsi/qla2xxx/qla_gbl.h
51993@@ -569,8 +569,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
51994 struct device_attribute;
51995 extern struct device_attribute *qla2x00_host_attrs[];
51996 struct fc_function_template;
51997-extern struct fc_function_template qla2xxx_transport_functions;
51998-extern struct fc_function_template qla2xxx_transport_vport_functions;
51999+extern fc_function_template_no_const qla2xxx_transport_functions;
52000+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
52001 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
52002 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);
52003 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
52004diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
52005index 8252c0e..613adad 100644
52006--- a/drivers/scsi/qla2xxx/qla_os.c
52007+++ b/drivers/scsi/qla2xxx/qla_os.c
52008@@ -1493,8 +1493,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
52009 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
52010 /* Ok, a 64bit DMA mask is applicable. */
52011 ha->flags.enable_64bit_addressing = 1;
52012- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52013- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52014+ pax_open_kernel();
52015+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
52016+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
52017+ pax_close_kernel();
52018 return;
52019 }
52020 }
52021diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
52022index 8f6d0fb..1b21097 100644
52023--- a/drivers/scsi/qla4xxx/ql4_def.h
52024+++ b/drivers/scsi/qla4xxx/ql4_def.h
52025@@ -305,7 +305,7 @@ struct ddb_entry {
52026 * (4000 only) */
52027 atomic_t relogin_timer; /* Max Time to wait for
52028 * relogin to complete */
52029- atomic_t relogin_retry_count; /* Num of times relogin has been
52030+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
52031 * retried */
52032 uint32_t default_time2wait; /* Default Min time between
52033 * relogins (+aens) */
52034diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
52035index 199fcf7..3c3a918 100644
52036--- a/drivers/scsi/qla4xxx/ql4_os.c
52037+++ b/drivers/scsi/qla4xxx/ql4_os.c
52038@@ -4496,12 +4496,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
52039 */
52040 if (!iscsi_is_session_online(cls_sess)) {
52041 /* Reset retry relogin timer */
52042- atomic_inc(&ddb_entry->relogin_retry_count);
52043+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
52044 DEBUG2(ql4_printk(KERN_INFO, ha,
52045 "%s: index[%d] relogin timed out-retrying"
52046 " relogin (%d), retry (%d)\n", __func__,
52047 ddb_entry->fw_ddb_index,
52048- atomic_read(&ddb_entry->relogin_retry_count),
52049+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
52050 ddb_entry->default_time2wait + 4));
52051 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
52052 atomic_set(&ddb_entry->retry_relogin_timer,
52053@@ -6609,7 +6609,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
52054
52055 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
52056 atomic_set(&ddb_entry->relogin_timer, 0);
52057- atomic_set(&ddb_entry->relogin_retry_count, 0);
52058+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
52059 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
52060 ddb_entry->default_relogin_timeout =
52061 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
52062diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
52063index d81f3cc..0093e5b 100644
52064--- a/drivers/scsi/scsi.c
52065+++ b/drivers/scsi/scsi.c
52066@@ -645,7 +645,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
52067 struct Scsi_Host *host = cmd->device->host;
52068 int rtn = 0;
52069
52070- atomic_inc(&cmd->device->iorequest_cnt);
52071+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52072
52073 /* check if the device is still usable */
52074 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
52075diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
52076index 7cb8c73..14561b5 100644
52077--- a/drivers/scsi/scsi_lib.c
52078+++ b/drivers/scsi/scsi_lib.c
52079@@ -1581,7 +1581,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
52080 shost = sdev->host;
52081 scsi_init_cmd_errh(cmd);
52082 cmd->result = DID_NO_CONNECT << 16;
52083- atomic_inc(&cmd->device->iorequest_cnt);
52084+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
52085
52086 /*
52087 * SCSI request completion path will do scsi_device_unbusy(),
52088@@ -1604,9 +1604,9 @@ static void scsi_softirq_done(struct request *rq)
52089
52090 INIT_LIST_HEAD(&cmd->eh_entry);
52091
52092- atomic_inc(&cmd->device->iodone_cnt);
52093+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
52094 if (cmd->result)
52095- atomic_inc(&cmd->device->ioerr_cnt);
52096+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
52097
52098 disposition = scsi_decide_disposition(cmd);
52099 if (disposition != SUCCESS &&
52100diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
52101index 8b4105a..1f58363 100644
52102--- a/drivers/scsi/scsi_sysfs.c
52103+++ b/drivers/scsi/scsi_sysfs.c
52104@@ -805,7 +805,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
52105 char *buf) \
52106 { \
52107 struct scsi_device *sdev = to_scsi_device(dev); \
52108- unsigned long long count = atomic_read(&sdev->field); \
52109+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
52110 return snprintf(buf, 20, "0x%llx\n", count); \
52111 } \
52112 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
52113diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
52114index 5d6f348..18778a6b 100644
52115--- a/drivers/scsi/scsi_transport_fc.c
52116+++ b/drivers/scsi/scsi_transport_fc.c
52117@@ -501,7 +501,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
52118 * Netlink Infrastructure
52119 */
52120
52121-static atomic_t fc_event_seq;
52122+static atomic_unchecked_t fc_event_seq;
52123
52124 /**
52125 * fc_get_event_number - Obtain the next sequential FC event number
52126@@ -514,7 +514,7 @@ static atomic_t fc_event_seq;
52127 u32
52128 fc_get_event_number(void)
52129 {
52130- return atomic_add_return(1, &fc_event_seq);
52131+ return atomic_add_return_unchecked(1, &fc_event_seq);
52132 }
52133 EXPORT_SYMBOL(fc_get_event_number);
52134
52135@@ -658,7 +658,7 @@ static __init int fc_transport_init(void)
52136 {
52137 int error;
52138
52139- atomic_set(&fc_event_seq, 0);
52140+ atomic_set_unchecked(&fc_event_seq, 0);
52141
52142 error = transport_class_register(&fc_host_class);
52143 if (error)
52144@@ -848,7 +848,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
52145 char *cp;
52146
52147 *val = simple_strtoul(buf, &cp, 0);
52148- if ((*cp && (*cp != '\n')) || (*val < 0))
52149+ if (*cp && (*cp != '\n'))
52150 return -EINVAL;
52151 /*
52152 * Check for overflow; dev_loss_tmo is u32
52153diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
52154index 67d43e3..8cee73c 100644
52155--- a/drivers/scsi/scsi_transport_iscsi.c
52156+++ b/drivers/scsi/scsi_transport_iscsi.c
52157@@ -79,7 +79,7 @@ struct iscsi_internal {
52158 struct transport_container session_cont;
52159 };
52160
52161-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
52162+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
52163 static struct workqueue_struct *iscsi_eh_timer_workq;
52164
52165 static DEFINE_IDA(iscsi_sess_ida);
52166@@ -2071,7 +2071,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
52167 int err;
52168
52169 ihost = shost->shost_data;
52170- session->sid = atomic_add_return(1, &iscsi_session_nr);
52171+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
52172
52173 if (target_id == ISCSI_MAX_TARGET) {
52174 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
52175@@ -4515,7 +4515,7 @@ static __init int iscsi_transport_init(void)
52176 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
52177 ISCSI_TRANSPORT_VERSION);
52178
52179- atomic_set(&iscsi_session_nr, 0);
52180+ atomic_set_unchecked(&iscsi_session_nr, 0);
52181
52182 err = class_register(&iscsi_transport_class);
52183 if (err)
52184diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
52185index ae45bd9..c32a586 100644
52186--- a/drivers/scsi/scsi_transport_srp.c
52187+++ b/drivers/scsi/scsi_transport_srp.c
52188@@ -35,7 +35,7 @@
52189 #include "scsi_priv.h"
52190
52191 struct srp_host_attrs {
52192- atomic_t next_port_id;
52193+ atomic_unchecked_t next_port_id;
52194 };
52195 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
52196
52197@@ -100,7 +100,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
52198 struct Scsi_Host *shost = dev_to_shost(dev);
52199 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
52200
52201- atomic_set(&srp_host->next_port_id, 0);
52202+ atomic_set_unchecked(&srp_host->next_port_id, 0);
52203 return 0;
52204 }
52205
52206@@ -734,7 +734,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
52207 rport_fast_io_fail_timedout);
52208 INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout);
52209
52210- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
52211+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
52212 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
52213
52214 transport_setup_device(&rport->dev);
52215diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
52216index 2c2041c..9d94085 100644
52217--- a/drivers/scsi/sd.c
52218+++ b/drivers/scsi/sd.c
52219@@ -3002,7 +3002,7 @@ static int sd_probe(struct device *dev)
52220 sdkp->disk = gd;
52221 sdkp->index = index;
52222 atomic_set(&sdkp->openers, 0);
52223- atomic_set(&sdkp->device->ioerr_cnt, 0);
52224+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
52225
52226 if (!sdp->request_queue->rq_timeout) {
52227 if (sdp->type != TYPE_MOD)
52228diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
52229index 01cf888..59e0475 100644
52230--- a/drivers/scsi/sg.c
52231+++ b/drivers/scsi/sg.c
52232@@ -1138,7 +1138,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
52233 sdp->disk->disk_name,
52234 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
52235 NULL,
52236- (char *)arg);
52237+ (char __user *)arg);
52238 case BLKTRACESTART:
52239 return blk_trace_startstop(sdp->device->request_queue, 1);
52240 case BLKTRACESTOP:
52241diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
52242index 11a5043..e36f04c 100644
52243--- a/drivers/soc/tegra/fuse/fuse-tegra.c
52244+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
52245@@ -70,7 +70,7 @@ static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
52246 return i;
52247 }
52248
52249-static struct bin_attribute fuse_bin_attr = {
52250+static bin_attribute_no_const fuse_bin_attr = {
52251 .attr = { .name = "fuse", .mode = S_IRUGO, },
52252 .read = fuse_read,
52253 };
52254diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
52255index 2bf2dfa..b4d9008 100644
52256--- a/drivers/spi/spi.c
52257+++ b/drivers/spi/spi.c
52258@@ -2210,7 +2210,7 @@ int spi_bus_unlock(struct spi_master *master)
52259 EXPORT_SYMBOL_GPL(spi_bus_unlock);
52260
52261 /* portable code must never pass more than 32 bytes */
52262-#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
52263+#define SPI_BUFSIZ max(32UL, SMP_CACHE_BYTES)
52264
52265 static u8 *buf;
52266
52267diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
52268index b41429f..2de5373 100644
52269--- a/drivers/staging/android/timed_output.c
52270+++ b/drivers/staging/android/timed_output.c
52271@@ -25,7 +25,7 @@
52272 #include "timed_output.h"
52273
52274 static struct class *timed_output_class;
52275-static atomic_t device_count;
52276+static atomic_unchecked_t device_count;
52277
52278 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
52279 char *buf)
52280@@ -65,7 +65,7 @@ static int create_timed_output_class(void)
52281 timed_output_class = class_create(THIS_MODULE, "timed_output");
52282 if (IS_ERR(timed_output_class))
52283 return PTR_ERR(timed_output_class);
52284- atomic_set(&device_count, 0);
52285+ atomic_set_unchecked(&device_count, 0);
52286 timed_output_class->dev_groups = timed_output_groups;
52287 }
52288
52289@@ -83,7 +83,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
52290 if (ret < 0)
52291 return ret;
52292
52293- tdev->index = atomic_inc_return(&device_count);
52294+ tdev->index = atomic_inc_return_unchecked(&device_count);
52295 tdev->dev = device_create(timed_output_class, NULL,
52296 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
52297 if (IS_ERR(tdev->dev))
52298diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
52299index 001348c..cfaac8a 100644
52300--- a/drivers/staging/gdm724x/gdm_tty.c
52301+++ b/drivers/staging/gdm724x/gdm_tty.c
52302@@ -44,7 +44,7 @@
52303 #define gdm_tty_send_control(n, r, v, d, l) (\
52304 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
52305
52306-#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
52307+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
52308
52309 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
52310 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
52311diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
52312index 6b22106..6c6e641 100644
52313--- a/drivers/staging/imx-drm/imx-drm-core.c
52314+++ b/drivers/staging/imx-drm/imx-drm-core.c
52315@@ -355,7 +355,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc,
52316 if (imxdrm->pipes >= MAX_CRTC)
52317 return -EINVAL;
52318
52319- if (imxdrm->drm->open_count)
52320+ if (local_read(&imxdrm->drm->open_count))
52321 return -EBUSY;
52322
52323 imx_drm_crtc = kzalloc(sizeof(*imx_drm_crtc), GFP_KERNEL);
52324diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
52325index 503b2d7..c918745 100644
52326--- a/drivers/staging/line6/driver.c
52327+++ b/drivers/staging/line6/driver.c
52328@@ -463,7 +463,7 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
52329 {
52330 struct usb_device *usbdev = line6->usbdev;
52331 int ret;
52332- unsigned char len;
52333+ unsigned char *plen;
52334
52335 /* query the serial number: */
52336 ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
52337@@ -476,27 +476,34 @@ int line6_read_data(struct usb_line6 *line6, int address, void *data,
52338 return ret;
52339 }
52340
52341+ plen = kmalloc(1, GFP_KERNEL);
52342+ if (plen == NULL)
52343+ return -ENOMEM;
52344+
52345 /* Wait for data length. We'll get 0xff until length arrives. */
52346 do {
52347 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
52348 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
52349 USB_DIR_IN,
52350- 0x0012, 0x0000, &len, 1,
52351+ 0x0012, 0x0000, plen, 1,
52352 LINE6_TIMEOUT * HZ);
52353 if (ret < 0) {
52354 dev_err(line6->ifcdev,
52355 "receive length failed (error %d)\n", ret);
52356+ kfree(plen);
52357 return ret;
52358 }
52359- } while (len == 0xff);
52360+ } while (*plen == 0xff);
52361
52362- if (len != datalen) {
52363+ if (*plen != datalen) {
52364 /* should be equal or something went wrong */
52365 dev_err(line6->ifcdev,
52366 "length mismatch (expected %d, got %d)\n",
52367- (int)datalen, (int)len);
52368+ (int)datalen, (int)*plen);
52369+ kfree(plen);
52370 return -EINVAL;
52371 }
52372+ kfree(plen);
52373
52374 /* receive the result: */
52375 ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
52376diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
52377index bcce919..f30fcf9 100644
52378--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
52379+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
52380@@ -488,13 +488,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
52381 return 0;
52382 }
52383
52384-sfw_test_client_ops_t brw_test_client;
52385-void brw_init_test_client(void)
52386-{
52387- brw_test_client.tso_init = brw_client_init;
52388- brw_test_client.tso_fini = brw_client_fini;
52389- brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
52390- brw_test_client.tso_done_rpc = brw_client_done_rpc;
52391+sfw_test_client_ops_t brw_test_client = {
52392+ .tso_init = brw_client_init,
52393+ .tso_fini = brw_client_fini,
52394+ .tso_prep_rpc = brw_client_prep_rpc,
52395+ .tso_done_rpc = brw_client_done_rpc,
52396 };
52397
52398 srpc_service_t brw_test_service;
52399diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
52400index 7e83dff..1f9a545 100644
52401--- a/drivers/staging/lustre/lnet/selftest/framework.c
52402+++ b/drivers/staging/lustre/lnet/selftest/framework.c
52403@@ -1633,12 +1633,10 @@ static srpc_service_t sfw_services[] =
52404
52405 extern sfw_test_client_ops_t ping_test_client;
52406 extern srpc_service_t ping_test_service;
52407-extern void ping_init_test_client(void);
52408 extern void ping_init_test_service(void);
52409
52410 extern sfw_test_client_ops_t brw_test_client;
52411 extern srpc_service_t brw_test_service;
52412-extern void brw_init_test_client(void);
52413 extern void brw_init_test_service(void);
52414
52415
52416@@ -1682,12 +1680,10 @@ sfw_startup (void)
52417 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
52418 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
52419
52420- brw_init_test_client();
52421 brw_init_test_service();
52422 rc = sfw_register_test(&brw_test_service, &brw_test_client);
52423 LASSERT (rc == 0);
52424
52425- ping_init_test_client();
52426 ping_init_test_service();
52427 rc = sfw_register_test(&ping_test_service, &ping_test_client);
52428 LASSERT (rc == 0);
52429diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
52430index 750cac4..e4d751f 100644
52431--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
52432+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
52433@@ -211,14 +211,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
52434 return 0;
52435 }
52436
52437-sfw_test_client_ops_t ping_test_client;
52438-void ping_init_test_client(void)
52439-{
52440- ping_test_client.tso_init = ping_client_init;
52441- ping_test_client.tso_fini = ping_client_fini;
52442- ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
52443- ping_test_client.tso_done_rpc = ping_client_done_rpc;
52444-}
52445+sfw_test_client_ops_t ping_test_client = {
52446+ .tso_init = ping_client_init,
52447+ .tso_fini = ping_client_fini,
52448+ .tso_prep_rpc = ping_client_prep_rpc,
52449+ .tso_done_rpc = ping_client_done_rpc,
52450+};
52451
52452 srpc_service_t ping_test_service;
52453 void ping_init_test_service(void)
52454diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52455index 30b1812f..9e5bd0b 100644
52456--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
52457+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
52458@@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
52459 ldlm_completion_callback lcs_completion;
52460 ldlm_blocking_callback lcs_blocking;
52461 ldlm_glimpse_callback lcs_glimpse;
52462-};
52463+} __no_const;
52464
52465 /* ldlm_lockd.c */
52466 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
52467diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
52468index 489bdd3..65058081 100644
52469--- a/drivers/staging/lustre/lustre/include/obd.h
52470+++ b/drivers/staging/lustre/lustre/include/obd.h
52471@@ -1438,7 +1438,7 @@ struct md_ops {
52472 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
52473 * wrapper function in include/linux/obd_class.h.
52474 */
52475-};
52476+} __no_const;
52477
52478 struct lsm_operations {
52479 void (*lsm_free)(struct lov_stripe_md *);
52480diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52481index b798daa..b28ca8f 100644
52482--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52483+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
52484@@ -258,7 +258,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
52485 int added = (mode == LCK_NL);
52486 int overlaps = 0;
52487 int splitted = 0;
52488- const struct ldlm_callback_suite null_cbs = { NULL };
52489+ const struct ldlm_callback_suite null_cbs = { };
52490
52491 CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
52492 *flags, new->l_policy_data.l_flock.owner,
52493diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52494index 13a9266..3439390 100644
52495--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52496+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
52497@@ -235,7 +235,7 @@ int proc_console_max_delay_cs(struct ctl_table *table, int write,
52498 void __user *buffer, size_t *lenp, loff_t *ppos)
52499 {
52500 int rc, max_delay_cs;
52501- struct ctl_table dummy = *table;
52502+ ctl_table_no_const dummy = *table;
52503 long d;
52504
52505 dummy.data = &max_delay_cs;
52506@@ -267,7 +267,7 @@ int proc_console_min_delay_cs(struct ctl_table *table, int write,
52507 void __user *buffer, size_t *lenp, loff_t *ppos)
52508 {
52509 int rc, min_delay_cs;
52510- struct ctl_table dummy = *table;
52511+ ctl_table_no_const dummy = *table;
52512 long d;
52513
52514 dummy.data = &min_delay_cs;
52515@@ -299,7 +299,7 @@ int proc_console_backoff(struct ctl_table *table, int write,
52516 void __user *buffer, size_t *lenp, loff_t *ppos)
52517 {
52518 int rc, backoff;
52519- struct ctl_table dummy = *table;
52520+ ctl_table_no_const dummy = *table;
52521
52522 dummy.data = &backoff;
52523 dummy.proc_handler = &proc_dointvec;
52524diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
52525index 3396858..c0bd996 100644
52526--- a/drivers/staging/lustre/lustre/libcfs/module.c
52527+++ b/drivers/staging/lustre/lustre/libcfs/module.c
52528@@ -314,11 +314,11 @@ out:
52529
52530
52531 struct cfs_psdev_ops libcfs_psdev_ops = {
52532- libcfs_psdev_open,
52533- libcfs_psdev_release,
52534- NULL,
52535- NULL,
52536- libcfs_ioctl
52537+ .p_open = libcfs_psdev_open,
52538+ .p_close = libcfs_psdev_release,
52539+ .p_read = NULL,
52540+ .p_write = NULL,
52541+ .p_ioctl = libcfs_ioctl
52542 };
52543
52544 extern int insert_proc(void);
52545diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
52546index efa2faf..03a9836 100644
52547--- a/drivers/staging/lustre/lustre/llite/dir.c
52548+++ b/drivers/staging/lustre/lustre/llite/dir.c
52549@@ -659,7 +659,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
52550 int mode;
52551 int err;
52552
52553- mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
52554+ mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current_umask()) | S_IFDIR;
52555 op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
52556 strlen(filename), mode, LUSTRE_OPC_MKDIR,
52557 lump);
52558diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
52559index a0f4868..139f1fb 100644
52560--- a/drivers/staging/octeon/ethernet-rx.c
52561+++ b/drivers/staging/octeon/ethernet-rx.c
52562@@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52563 /* Increment RX stats for virtual ports */
52564 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
52565 #ifdef CONFIG_64BIT
52566- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
52567- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
52568+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
52569+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
52570 #else
52571- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
52572- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
52573+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
52574+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
52575 #endif
52576 }
52577 netif_receive_skb(skb);
52578@@ -432,9 +432,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
52579 dev->name);
52580 */
52581 #ifdef CONFIG_64BIT
52582- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
52583+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52584 #else
52585- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
52586+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
52587 #endif
52588 dev_kfree_skb_irq(skb);
52589 }
52590diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
52591index 2aa7235..ba3c205 100644
52592--- a/drivers/staging/octeon/ethernet.c
52593+++ b/drivers/staging/octeon/ethernet.c
52594@@ -247,11 +247,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
52595 * since the RX tasklet also increments it.
52596 */
52597 #ifdef CONFIG_64BIT
52598- atomic64_add(rx_status.dropped_packets,
52599- (atomic64_t *)&priv->stats.rx_dropped);
52600+ atomic64_add_unchecked(rx_status.dropped_packets,
52601+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
52602 #else
52603- atomic_add(rx_status.dropped_packets,
52604- (atomic_t *)&priv->stats.rx_dropped);
52605+ atomic_add_unchecked(rx_status.dropped_packets,
52606+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
52607 #endif
52608 }
52609
52610diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
52611index 56d5c50..a14f4db 100644
52612--- a/drivers/staging/rtl8188eu/include/hal_intf.h
52613+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
52614@@ -234,7 +234,7 @@ struct hal_ops {
52615
52616 void (*hal_notch_filter)(struct adapter *adapter, bool enable);
52617 void (*hal_reset_security_engine)(struct adapter *adapter);
52618-};
52619+} __no_const;
52620
52621 enum rt_eeprom_type {
52622 EEPROM_93C46,
52623diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
52624index dc23395..cf7e9b1 100644
52625--- a/drivers/staging/rtl8712/rtl871x_io.h
52626+++ b/drivers/staging/rtl8712/rtl871x_io.h
52627@@ -108,7 +108,7 @@ struct _io_ops {
52628 u8 *pmem);
52629 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
52630 u8 *pmem);
52631-};
52632+} __no_const;
52633
52634 struct io_req {
52635 struct list_head list;
52636diff --git a/drivers/staging/unisys/visorchipset/visorchipset.h b/drivers/staging/unisys/visorchipset/visorchipset.h
52637index 2bf2e2f..84421c9 100644
52638--- a/drivers/staging/unisys/visorchipset/visorchipset.h
52639+++ b/drivers/staging/unisys/visorchipset/visorchipset.h
52640@@ -228,7 +228,7 @@ typedef struct {
52641 void (*device_resume)(ulong busNo, ulong devNo);
52642 int (*get_channel_info)(uuid_le typeGuid, ulong *minSize,
52643 ulong *maxSize);
52644-} VISORCHIPSET_BUSDEV_NOTIFIERS;
52645+} __no_const VISORCHIPSET_BUSDEV_NOTIFIERS;
52646
52647 /* These functions live inside visorchipset, and will be called to indicate
52648 * responses to specific events (by code outside of visorchipset).
52649@@ -243,7 +243,7 @@ typedef struct {
52650 void (*device_destroy)(ulong busNo, ulong devNo, int response);
52651 void (*device_pause)(ulong busNo, ulong devNo, int response);
52652 void (*device_resume)(ulong busNo, ulong devNo, int response);
52653-} VISORCHIPSET_BUSDEV_RESPONDERS;
52654+} __no_const VISORCHIPSET_BUSDEV_RESPONDERS;
52655
52656 /** Register functions (in the bus driver) to get called by visorchipset
52657 * whenever a bus or device appears for which this service partition is
52658diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
52659index 164136b..7244df5 100644
52660--- a/drivers/staging/vt6655/hostap.c
52661+++ b/drivers/staging/vt6655/hostap.c
52662@@ -68,14 +68,13 @@ static int msglevel = MSG_LEVEL_INFO;
52663 *
52664 */
52665
52666+static net_device_ops_no_const apdev_netdev_ops;
52667+
52668 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
52669 {
52670 PSDevice apdev_priv;
52671 struct net_device *dev = pDevice->dev;
52672 int ret;
52673- const struct net_device_ops apdev_netdev_ops = {
52674- .ndo_start_xmit = pDevice->tx_80211,
52675- };
52676
52677 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
52678
52679@@ -87,6 +86,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
52680 *apdev_priv = *pDevice;
52681 eth_hw_addr_inherit(pDevice->apdev, dev);
52682
52683+ /* only half broken now */
52684+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
52685 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
52686
52687 pDevice->apdev->type = ARPHRD_IEEE80211;
52688diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
52689index e7e9372..161f530 100644
52690--- a/drivers/target/sbp/sbp_target.c
52691+++ b/drivers/target/sbp/sbp_target.c
52692@@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
52693
52694 #define SESSION_MAINTENANCE_INTERVAL HZ
52695
52696-static atomic_t login_id = ATOMIC_INIT(0);
52697+static atomic_unchecked_t login_id = ATOMIC_INIT(0);
52698
52699 static void session_maintenance_work(struct work_struct *);
52700 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
52701@@ -444,7 +444,7 @@ static void sbp_management_request_login(
52702 login->lun = se_lun;
52703 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
52704 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
52705- login->login_id = atomic_inc_return(&login_id);
52706+ login->login_id = atomic_inc_return_unchecked(&login_id);
52707
52708 login->tgt_agt = sbp_target_agent_register(login);
52709 if (IS_ERR(login->tgt_agt)) {
52710diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
52711index 15a1c13..6c9b96b 100644
52712--- a/drivers/target/target_core_device.c
52713+++ b/drivers/target/target_core_device.c
52714@@ -1526,7 +1526,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
52715 spin_lock_init(&dev->se_tmr_lock);
52716 spin_lock_init(&dev->qf_cmd_lock);
52717 sema_init(&dev->caw_sem, 1);
52718- atomic_set(&dev->dev_ordered_id, 0);
52719+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
52720 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
52721 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
52722 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
52723diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
52724index a9c77b5..024a07d 100644
52725--- a/drivers/target/target_core_transport.c
52726+++ b/drivers/target/target_core_transport.c
52727@@ -1165,7 +1165,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
52728 * Used to determine when ORDERED commands should go from
52729 * Dormant to Active status.
52730 */
52731- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
52732+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
52733 smp_mb__after_atomic();
52734 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
52735 cmd->se_ordered_id, cmd->sam_task_attr,
52736diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
52737index 4b2b999..cad9fa5 100644
52738--- a/drivers/thermal/of-thermal.c
52739+++ b/drivers/thermal/of-thermal.c
52740@@ -30,6 +30,7 @@
52741 #include <linux/err.h>
52742 #include <linux/export.h>
52743 #include <linux/string.h>
52744+#include <linux/mm.h>
52745
52746 #include "thermal_core.h"
52747
52748@@ -341,8 +342,10 @@ thermal_zone_of_add_sensor(struct device_node *zone,
52749 tz->get_trend = get_trend;
52750 tz->sensor_data = data;
52751
52752- tzd->ops->get_temp = of_thermal_get_temp;
52753- tzd->ops->get_trend = of_thermal_get_trend;
52754+ pax_open_kernel();
52755+ *(void **)&tzd->ops->get_temp = of_thermal_get_temp;
52756+ *(void **)&tzd->ops->get_trend = of_thermal_get_trend;
52757+ pax_close_kernel();
52758 mutex_unlock(&tzd->lock);
52759
52760 return tzd;
52761@@ -461,8 +464,10 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
52762 return;
52763
52764 mutex_lock(&tzd->lock);
52765- tzd->ops->get_temp = NULL;
52766- tzd->ops->get_trend = NULL;
52767+ pax_open_kernel();
52768+ *(void **)&tzd->ops->get_temp = NULL;
52769+ *(void **)&tzd->ops->get_trend = NULL;
52770+ pax_close_kernel();
52771
52772 tz->get_temp = NULL;
52773 tz->get_trend = NULL;
52774diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
52775index fd66f57..48e6376 100644
52776--- a/drivers/tty/cyclades.c
52777+++ b/drivers/tty/cyclades.c
52778@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
52779 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
52780 info->port.count);
52781 #endif
52782- info->port.count++;
52783+ atomic_inc(&info->port.count);
52784 #ifdef CY_DEBUG_COUNT
52785 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
52786- current->pid, info->port.count);
52787+ current->pid, atomic_read(&info->port.count));
52788 #endif
52789
52790 /*
52791@@ -3974,7 +3974,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
52792 for (j = 0; j < cy_card[i].nports; j++) {
52793 info = &cy_card[i].ports[j];
52794
52795- if (info->port.count) {
52796+ if (atomic_read(&info->port.count)) {
52797 /* XXX is the ldisc num worth this? */
52798 struct tty_struct *tty;
52799 struct tty_ldisc *ld;
52800diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
52801index 4fcec1d..5a036f7 100644
52802--- a/drivers/tty/hvc/hvc_console.c
52803+++ b/drivers/tty/hvc/hvc_console.c
52804@@ -342,7 +342,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
52805
52806 spin_lock_irqsave(&hp->port.lock, flags);
52807 /* Check and then increment for fast path open. */
52808- if (hp->port.count++ > 0) {
52809+ if (atomic_inc_return(&hp->port.count) > 1) {
52810 spin_unlock_irqrestore(&hp->port.lock, flags);
52811 hvc_kick();
52812 return 0;
52813@@ -397,7 +397,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52814
52815 spin_lock_irqsave(&hp->port.lock, flags);
52816
52817- if (--hp->port.count == 0) {
52818+ if (atomic_dec_return(&hp->port.count) == 0) {
52819 spin_unlock_irqrestore(&hp->port.lock, flags);
52820 /* We are done with the tty pointer now. */
52821 tty_port_tty_set(&hp->port, NULL);
52822@@ -419,9 +419,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
52823 */
52824 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
52825 } else {
52826- if (hp->port.count < 0)
52827+ if (atomic_read(&hp->port.count) < 0)
52828 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
52829- hp->vtermno, hp->port.count);
52830+ hp->vtermno, atomic_read(&hp->port.count));
52831 spin_unlock_irqrestore(&hp->port.lock, flags);
52832 }
52833 }
52834@@ -451,12 +451,12 @@ static void hvc_hangup(struct tty_struct *tty)
52835 * open->hangup case this can be called after the final close so prevent
52836 * that from happening for now.
52837 */
52838- if (hp->port.count <= 0) {
52839+ if (atomic_read(&hp->port.count) <= 0) {
52840 spin_unlock_irqrestore(&hp->port.lock, flags);
52841 return;
52842 }
52843
52844- hp->port.count = 0;
52845+ atomic_set(&hp->port.count, 0);
52846 spin_unlock_irqrestore(&hp->port.lock, flags);
52847 tty_port_tty_set(&hp->port, NULL);
52848
52849@@ -504,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
52850 return -EPIPE;
52851
52852 /* FIXME what's this (unprotected) check for? */
52853- if (hp->port.count <= 0)
52854+ if (atomic_read(&hp->port.count) <= 0)
52855 return -EIO;
52856
52857 spin_lock_irqsave(&hp->lock, flags);
52858diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
52859index 81e939e..95ead10 100644
52860--- a/drivers/tty/hvc/hvcs.c
52861+++ b/drivers/tty/hvc/hvcs.c
52862@@ -83,6 +83,7 @@
52863 #include <asm/hvcserver.h>
52864 #include <asm/uaccess.h>
52865 #include <asm/vio.h>
52866+#include <asm/local.h>
52867
52868 /*
52869 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
52870@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
52871
52872 spin_lock_irqsave(&hvcsd->lock, flags);
52873
52874- if (hvcsd->port.count > 0) {
52875+ if (atomic_read(&hvcsd->port.count) > 0) {
52876 spin_unlock_irqrestore(&hvcsd->lock, flags);
52877 printk(KERN_INFO "HVCS: vterm state unchanged. "
52878 "The hvcs device node is still in use.\n");
52879@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
52880 }
52881 }
52882
52883- hvcsd->port.count = 0;
52884+ atomic_set(&hvcsd->port.count, 0);
52885 hvcsd->port.tty = tty;
52886 tty->driver_data = hvcsd;
52887
52888@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
52889 unsigned long flags;
52890
52891 spin_lock_irqsave(&hvcsd->lock, flags);
52892- hvcsd->port.count++;
52893+ atomic_inc(&hvcsd->port.count);
52894 hvcsd->todo_mask |= HVCS_SCHED_READ;
52895 spin_unlock_irqrestore(&hvcsd->lock, flags);
52896
52897@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52898 hvcsd = tty->driver_data;
52899
52900 spin_lock_irqsave(&hvcsd->lock, flags);
52901- if (--hvcsd->port.count == 0) {
52902+ if (atomic_dec_and_test(&hvcsd->port.count)) {
52903
52904 vio_disable_interrupts(hvcsd->vdev);
52905
52906@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
52907
52908 free_irq(irq, hvcsd);
52909 return;
52910- } else if (hvcsd->port.count < 0) {
52911+ } else if (atomic_read(&hvcsd->port.count) < 0) {
52912 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
52913 " is missmanaged.\n",
52914- hvcsd->vdev->unit_address, hvcsd->port.count);
52915+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
52916 }
52917
52918 spin_unlock_irqrestore(&hvcsd->lock, flags);
52919@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52920
52921 spin_lock_irqsave(&hvcsd->lock, flags);
52922 /* Preserve this so that we know how many kref refs to put */
52923- temp_open_count = hvcsd->port.count;
52924+ temp_open_count = atomic_read(&hvcsd->port.count);
52925
52926 /*
52927 * Don't kref put inside the spinlock because the destruction
52928@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
52929 tty->driver_data = NULL;
52930 hvcsd->port.tty = NULL;
52931
52932- hvcsd->port.count = 0;
52933+ atomic_set(&hvcsd->port.count, 0);
52934
52935 /* This will drop any buffered data on the floor which is OK in a hangup
52936 * scenario. */
52937@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
52938 * the middle of a write operation? This is a crummy place to do this
52939 * but we want to keep it all in the spinlock.
52940 */
52941- if (hvcsd->port.count <= 0) {
52942+ if (atomic_read(&hvcsd->port.count) <= 0) {
52943 spin_unlock_irqrestore(&hvcsd->lock, flags);
52944 return -ENODEV;
52945 }
52946@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
52947 {
52948 struct hvcs_struct *hvcsd = tty->driver_data;
52949
52950- if (!hvcsd || hvcsd->port.count <= 0)
52951+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
52952 return 0;
52953
52954 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
52955diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
52956index 4190199..06d5bfa 100644
52957--- a/drivers/tty/hvc/hvsi.c
52958+++ b/drivers/tty/hvc/hvsi.c
52959@@ -85,7 +85,7 @@ struct hvsi_struct {
52960 int n_outbuf;
52961 uint32_t vtermno;
52962 uint32_t virq;
52963- atomic_t seqno; /* HVSI packet sequence number */
52964+ atomic_unchecked_t seqno; /* HVSI packet sequence number */
52965 uint16_t mctrl;
52966 uint8_t state; /* HVSI protocol state */
52967 uint8_t flags;
52968@@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
52969
52970 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
52971 packet.hdr.len = sizeof(struct hvsi_query_response);
52972- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52973+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52974 packet.verb = VSV_SEND_VERSION_NUMBER;
52975 packet.u.version = HVSI_VERSION;
52976 packet.query_seqno = query_seqno+1;
52977@@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
52978
52979 packet.hdr.type = VS_QUERY_PACKET_HEADER;
52980 packet.hdr.len = sizeof(struct hvsi_query);
52981- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52982+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52983 packet.verb = verb;
52984
52985 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
52986@@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
52987 int wrote;
52988
52989 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
52990- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
52991+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
52992 packet.hdr.len = sizeof(struct hvsi_control);
52993 packet.verb = VSV_SET_MODEM_CTL;
52994 packet.mask = HVSI_TSDTR;
52995@@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
52996 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
52997
52998 packet.hdr.type = VS_DATA_PACKET_HEADER;
52999- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53000+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53001 packet.hdr.len = count + sizeof(struct hvsi_header);
53002 memcpy(&packet.data, buf, count);
53003
53004@@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
53005 struct hvsi_control packet __ALIGNED__;
53006
53007 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
53008- packet.hdr.seqno = atomic_inc_return(&hp->seqno);
53009+ packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
53010 packet.hdr.len = 6;
53011 packet.verb = VSV_CLOSE_PROTOCOL;
53012
53013@@ -725,7 +725,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
53014
53015 tty_port_tty_set(&hp->port, tty);
53016 spin_lock_irqsave(&hp->lock, flags);
53017- hp->port.count++;
53018+ atomic_inc(&hp->port.count);
53019 atomic_set(&hp->seqno, 0);
53020 h_vio_signal(hp->vtermno, VIO_IRQ_ENABLE);
53021 spin_unlock_irqrestore(&hp->lock, flags);
53022@@ -782,7 +782,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53023
53024 spin_lock_irqsave(&hp->lock, flags);
53025
53026- if (--hp->port.count == 0) {
53027+ if (atomic_dec_return(&hp->port.count) == 0) {
53028 tty_port_tty_set(&hp->port, NULL);
53029 hp->inbuf_end = hp->inbuf; /* discard remaining partial packets */
53030
53031@@ -815,9 +815,9 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
53032
53033 spin_lock_irqsave(&hp->lock, flags);
53034 }
53035- } else if (hp->port.count < 0)
53036+ } else if (atomic_read(&hp->port.count) < 0)
53037 printk(KERN_ERR "hvsi_close %lu: oops, count is %d\n",
53038- hp - hvsi_ports, hp->port.count);
53039+ hp - hvsi_ports, atomic_read(&hp->port.count));
53040
53041 spin_unlock_irqrestore(&hp->lock, flags);
53042 }
53043@@ -832,7 +832,7 @@ static void hvsi_hangup(struct tty_struct *tty)
53044 tty_port_tty_set(&hp->port, NULL);
53045
53046 spin_lock_irqsave(&hp->lock, flags);
53047- hp->port.count = 0;
53048+ atomic_set(&hp->port.count, 0);
53049 hp->n_outbuf = 0;
53050 spin_unlock_irqrestore(&hp->lock, flags);
53051 }
53052diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
53053index 7ae6c29..05c6dba 100644
53054--- a/drivers/tty/hvc/hvsi_lib.c
53055+++ b/drivers/tty/hvc/hvsi_lib.c
53056@@ -8,7 +8,7 @@
53057
53058 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
53059 {
53060- packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
53061+ packet->seqno = cpu_to_be16(atomic_inc_return_unchecked(&pv->seqno));
53062
53063 /* Assumes that always succeeds, works in practice */
53064 return pv->put_chars(pv->termno, (char *)packet, packet->len);
53065@@ -20,7 +20,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
53066
53067 /* Reset state */
53068 pv->established = 0;
53069- atomic_set(&pv->seqno, 0);
53070+ atomic_set_unchecked(&pv->seqno, 0);
53071
53072 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
53073
53074diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
53075index 345cebb..d5a1e9e 100644
53076--- a/drivers/tty/ipwireless/tty.c
53077+++ b/drivers/tty/ipwireless/tty.c
53078@@ -28,6 +28,7 @@
53079 #include <linux/tty_driver.h>
53080 #include <linux/tty_flip.h>
53081 #include <linux/uaccess.h>
53082+#include <asm/local.h>
53083
53084 #include "tty.h"
53085 #include "network.h"
53086@@ -93,10 +94,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53087 return -ENODEV;
53088
53089 mutex_lock(&tty->ipw_tty_mutex);
53090- if (tty->port.count == 0)
53091+ if (atomic_read(&tty->port.count) == 0)
53092 tty->tx_bytes_queued = 0;
53093
53094- tty->port.count++;
53095+ atomic_inc(&tty->port.count);
53096
53097 tty->port.tty = linux_tty;
53098 linux_tty->driver_data = tty;
53099@@ -112,9 +113,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
53100
53101 static void do_ipw_close(struct ipw_tty *tty)
53102 {
53103- tty->port.count--;
53104-
53105- if (tty->port.count == 0) {
53106+ if (atomic_dec_return(&tty->port.count) == 0) {
53107 struct tty_struct *linux_tty = tty->port.tty;
53108
53109 if (linux_tty != NULL) {
53110@@ -135,7 +134,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
53111 return;
53112
53113 mutex_lock(&tty->ipw_tty_mutex);
53114- if (tty->port.count == 0) {
53115+ if (atomic_read(&tty->port.count) == 0) {
53116 mutex_unlock(&tty->ipw_tty_mutex);
53117 return;
53118 }
53119@@ -158,7 +157,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
53120
53121 mutex_lock(&tty->ipw_tty_mutex);
53122
53123- if (!tty->port.count) {
53124+ if (!atomic_read(&tty->port.count)) {
53125 mutex_unlock(&tty->ipw_tty_mutex);
53126 return;
53127 }
53128@@ -197,7 +196,7 @@ static int ipw_write(struct tty_struct *linux_tty,
53129 return -ENODEV;
53130
53131 mutex_lock(&tty->ipw_tty_mutex);
53132- if (!tty->port.count) {
53133+ if (!atomic_read(&tty->port.count)) {
53134 mutex_unlock(&tty->ipw_tty_mutex);
53135 return -EINVAL;
53136 }
53137@@ -237,7 +236,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
53138 if (!tty)
53139 return -ENODEV;
53140
53141- if (!tty->port.count)
53142+ if (!atomic_read(&tty->port.count))
53143 return -EINVAL;
53144
53145 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
53146@@ -279,7 +278,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
53147 if (!tty)
53148 return 0;
53149
53150- if (!tty->port.count)
53151+ if (!atomic_read(&tty->port.count))
53152 return 0;
53153
53154 return tty->tx_bytes_queued;
53155@@ -360,7 +359,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
53156 if (!tty)
53157 return -ENODEV;
53158
53159- if (!tty->port.count)
53160+ if (!atomic_read(&tty->port.count))
53161 return -EINVAL;
53162
53163 return get_control_lines(tty);
53164@@ -376,7 +375,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
53165 if (!tty)
53166 return -ENODEV;
53167
53168- if (!tty->port.count)
53169+ if (!atomic_read(&tty->port.count))
53170 return -EINVAL;
53171
53172 return set_control_lines(tty, set, clear);
53173@@ -390,7 +389,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
53174 if (!tty)
53175 return -ENODEV;
53176
53177- if (!tty->port.count)
53178+ if (!atomic_read(&tty->port.count))
53179 return -EINVAL;
53180
53181 /* FIXME: Exactly how is the tty object locked here .. */
53182@@ -546,7 +545,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
53183 * are gone */
53184 mutex_lock(&ttyj->ipw_tty_mutex);
53185 }
53186- while (ttyj->port.count)
53187+ while (atomic_read(&ttyj->port.count))
53188 do_ipw_close(ttyj);
53189 ipwireless_disassociate_network_ttys(network,
53190 ttyj->channel_idx);
53191diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
53192index 1deaca4..c8582d4 100644
53193--- a/drivers/tty/moxa.c
53194+++ b/drivers/tty/moxa.c
53195@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
53196 }
53197
53198 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
53199- ch->port.count++;
53200+ atomic_inc(&ch->port.count);
53201 tty->driver_data = ch;
53202 tty_port_tty_set(&ch->port, tty);
53203 mutex_lock(&ch->port.mutex);
53204diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
53205index c434376..114ce13 100644
53206--- a/drivers/tty/n_gsm.c
53207+++ b/drivers/tty/n_gsm.c
53208@@ -1644,7 +1644,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
53209 spin_lock_init(&dlci->lock);
53210 mutex_init(&dlci->mutex);
53211 dlci->fifo = &dlci->_fifo;
53212- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
53213+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
53214 kfree(dlci);
53215 return NULL;
53216 }
53217@@ -2958,7 +2958,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
53218 struct gsm_dlci *dlci = tty->driver_data;
53219 struct tty_port *port = &dlci->port;
53220
53221- port->count++;
53222+ atomic_inc(&port->count);
53223 tty_port_tty_set(port, tty);
53224
53225 dlci->modem_rx = 0;
53226diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
53227index f44f1ba..a8d5915 100644
53228--- a/drivers/tty/n_tty.c
53229+++ b/drivers/tty/n_tty.c
53230@@ -115,7 +115,7 @@ struct n_tty_data {
53231 int minimum_to_wake;
53232
53233 /* consumer-published */
53234- size_t read_tail;
53235+ size_t read_tail __intentional_overflow(-1);
53236 size_t line_start;
53237
53238 /* protected by output lock */
53239@@ -2517,6 +2517,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
53240 {
53241 *ops = tty_ldisc_N_TTY;
53242 ops->owner = NULL;
53243- ops->refcount = ops->flags = 0;
53244+ atomic_set(&ops->refcount, 0);
53245+ ops->flags = 0;
53246 }
53247 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
53248diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
53249index 9bbdb1d..dc514ee 100644
53250--- a/drivers/tty/pty.c
53251+++ b/drivers/tty/pty.c
53252@@ -789,8 +789,10 @@ static void __init unix98_pty_init(void)
53253 panic("Couldn't register Unix98 pts driver");
53254
53255 /* Now create the /dev/ptmx special device */
53256+ pax_open_kernel();
53257 tty_default_fops(&ptmx_fops);
53258- ptmx_fops.open = ptmx_open;
53259+ *(void **)&ptmx_fops.open = ptmx_open;
53260+ pax_close_kernel();
53261
53262 cdev_init(&ptmx_cdev, &ptmx_fops);
53263 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
53264diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
53265index 383c4c7..d408e21 100644
53266--- a/drivers/tty/rocket.c
53267+++ b/drivers/tty/rocket.c
53268@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53269 tty->driver_data = info;
53270 tty_port_tty_set(port, tty);
53271
53272- if (port->count++ == 0) {
53273+ if (atomic_inc_return(&port->count) == 1) {
53274 atomic_inc(&rp_num_ports_open);
53275
53276 #ifdef ROCKET_DEBUG_OPEN
53277@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
53278 #endif
53279 }
53280 #ifdef ROCKET_DEBUG_OPEN
53281- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
53282+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
53283 #endif
53284
53285 /*
53286@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
53287 spin_unlock_irqrestore(&info->port.lock, flags);
53288 return;
53289 }
53290- if (info->port.count)
53291+ if (atomic_read(&info->port.count))
53292 atomic_dec(&rp_num_ports_open);
53293 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
53294 spin_unlock_irqrestore(&info->port.lock, flags);
53295diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
53296index aa28209..e08fb85 100644
53297--- a/drivers/tty/serial/ioc4_serial.c
53298+++ b/drivers/tty/serial/ioc4_serial.c
53299@@ -437,7 +437,7 @@ struct ioc4_soft {
53300 } is_intr_info[MAX_IOC4_INTR_ENTS];
53301
53302 /* Number of entries active in the above array */
53303- atomic_t is_num_intrs;
53304+ atomic_unchecked_t is_num_intrs;
53305 } is_intr_type[IOC4_NUM_INTR_TYPES];
53306
53307 /* is_ir_lock must be held while
53308@@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
53309 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
53310 || (type == IOC4_OTHER_INTR_TYPE)));
53311
53312- i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
53313+ i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
53314 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
53315
53316 /* Save off the lower level interrupt handler */
53317@@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
53318
53319 soft = arg;
53320 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
53321- num_intrs = (int)atomic_read(
53322+ num_intrs = (int)atomic_read_unchecked(
53323 &soft->is_intr_type[intr_type].is_num_intrs);
53324
53325 this_mir = this_ir = pending_intrs(soft, intr_type);
53326diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
53327index 6ec7501..265bcbf 100644
53328--- a/drivers/tty/serial/kgdb_nmi.c
53329+++ b/drivers/tty/serial/kgdb_nmi.c
53330@@ -51,7 +51,9 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
53331 * I/O utilities that messages sent to the console will automatically
53332 * be displayed on the dbg_io.
53333 */
53334- dbg_io_ops->is_console = true;
53335+ pax_open_kernel();
53336+ *(int *)&dbg_io_ops->is_console = true;
53337+ pax_close_kernel();
53338
53339 return 0;
53340 }
53341diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
53342index a260cde..6b2b5ce 100644
53343--- a/drivers/tty/serial/kgdboc.c
53344+++ b/drivers/tty/serial/kgdboc.c
53345@@ -24,8 +24,9 @@
53346 #define MAX_CONFIG_LEN 40
53347
53348 static struct kgdb_io kgdboc_io_ops;
53349+static struct kgdb_io kgdboc_io_ops_console;
53350
53351-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
53352+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
53353 static int configured = -1;
53354
53355 static char config[MAX_CONFIG_LEN];
53356@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
53357 kgdboc_unregister_kbd();
53358 if (configured == 1)
53359 kgdb_unregister_io_module(&kgdboc_io_ops);
53360+ else if (configured == 2)
53361+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
53362 }
53363
53364 static int configure_kgdboc(void)
53365@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
53366 int err;
53367 char *cptr = config;
53368 struct console *cons;
53369+ int is_console = 0;
53370
53371 err = kgdboc_option_setup(config);
53372 if (err || !strlen(config) || isspace(config[0]))
53373 goto noconfig;
53374
53375 err = -ENODEV;
53376- kgdboc_io_ops.is_console = 0;
53377 kgdb_tty_driver = NULL;
53378
53379 kgdboc_use_kms = 0;
53380@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
53381 int idx;
53382 if (cons->device && cons->device(cons, &idx) == p &&
53383 idx == tty_line) {
53384- kgdboc_io_ops.is_console = 1;
53385+ is_console = 1;
53386 break;
53387 }
53388 cons = cons->next;
53389@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
53390 kgdb_tty_line = tty_line;
53391
53392 do_register:
53393- err = kgdb_register_io_module(&kgdboc_io_ops);
53394+ if (is_console) {
53395+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
53396+ configured = 2;
53397+ } else {
53398+ err = kgdb_register_io_module(&kgdboc_io_ops);
53399+ configured = 1;
53400+ }
53401 if (err)
53402 goto noconfig;
53403
53404@@ -205,8 +214,6 @@ do_register:
53405 if (err)
53406 goto nmi_con_failed;
53407
53408- configured = 1;
53409-
53410 return 0;
53411
53412 nmi_con_failed:
53413@@ -223,7 +230,7 @@ noconfig:
53414 static int __init init_kgdboc(void)
53415 {
53416 /* Already configured? */
53417- if (configured == 1)
53418+ if (configured >= 1)
53419 return 0;
53420
53421 return configure_kgdboc();
53422@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
53423 if (config[len - 1] == '\n')
53424 config[len - 1] = '\0';
53425
53426- if (configured == 1)
53427+ if (configured >= 1)
53428 cleanup_kgdboc();
53429
53430 /* Go and configure with the new params. */
53431@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
53432 .post_exception = kgdboc_post_exp_handler,
53433 };
53434
53435+static struct kgdb_io kgdboc_io_ops_console = {
53436+ .name = "kgdboc",
53437+ .read_char = kgdboc_get_char,
53438+ .write_char = kgdboc_put_char,
53439+ .pre_exception = kgdboc_pre_exp_handler,
53440+ .post_exception = kgdboc_post_exp_handler,
53441+ .is_console = 1
53442+};
53443+
53444 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
53445 /* This is only available if kgdboc is a built in for early debugging */
53446 static int __init kgdboc_early_init(char *opt)
53447diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
53448index 077570a..12550a9 100644
53449--- a/drivers/tty/serial/msm_serial.c
53450+++ b/drivers/tty/serial/msm_serial.c
53451@@ -981,7 +981,7 @@ static struct uart_driver msm_uart_driver = {
53452 .cons = MSM_CONSOLE,
53453 };
53454
53455-static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
53456+static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
53457
53458 static const struct of_device_id msm_uartdm_table[] = {
53459 { .compatible = "qcom,msm-uartdm-v1.1", .data = (void *)UARTDM_1P1 },
53460@@ -1000,7 +1000,7 @@ static int msm_serial_probe(struct platform_device *pdev)
53461 int irq;
53462
53463 if (pdev->id == -1)
53464- pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
53465+ pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
53466
53467 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
53468 return -ENXIO;
53469diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
53470index c78f43a..22b1dab 100644
53471--- a/drivers/tty/serial/samsung.c
53472+++ b/drivers/tty/serial/samsung.c
53473@@ -478,11 +478,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
53474 }
53475 }
53476
53477+static int s3c64xx_serial_startup(struct uart_port *port);
53478 static int s3c24xx_serial_startup(struct uart_port *port)
53479 {
53480 struct s3c24xx_uart_port *ourport = to_ourport(port);
53481 int ret;
53482
53483+ /* Startup sequence is different for s3c64xx and higher SoC's */
53484+ if (s3c24xx_serial_has_interrupt_mask(port))
53485+ return s3c64xx_serial_startup(port);
53486+
53487 dbg("s3c24xx_serial_startup: port=%p (%08llx,%p)\n",
53488 port, (unsigned long long)port->mapbase, port->membase);
53489
53490@@ -1155,10 +1160,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
53491 /* setup info for port */
53492 port->dev = &platdev->dev;
53493
53494- /* Startup sequence is different for s3c64xx and higher SoC's */
53495- if (s3c24xx_serial_has_interrupt_mask(port))
53496- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
53497-
53498 port->uartclk = 1;
53499
53500 if (cfg->uart_flags & UPF_CONS_FLOW) {
53501diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
53502index 0f03988..8a8038d 100644
53503--- a/drivers/tty/serial/serial_core.c
53504+++ b/drivers/tty/serial/serial_core.c
53505@@ -1343,7 +1343,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
53506
53507 pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
53508
53509- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
53510+ if (!atomic_read(&port->count) || tty_port_close_start(port, tty, filp) == 0)
53511 return;
53512
53513 /*
53514@@ -1470,7 +1470,7 @@ static void uart_hangup(struct tty_struct *tty)
53515 uart_flush_buffer(tty);
53516 uart_shutdown(tty, state);
53517 spin_lock_irqsave(&port->lock, flags);
53518- port->count = 0;
53519+ atomic_set(&port->count, 0);
53520 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
53521 spin_unlock_irqrestore(&port->lock, flags);
53522 tty_port_tty_set(port, NULL);
53523@@ -1568,7 +1568,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53524 goto end;
53525 }
53526
53527- port->count++;
53528+ atomic_inc(&port->count);
53529 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
53530 retval = -ENXIO;
53531 goto err_dec_count;
53532@@ -1600,7 +1600,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
53533 end:
53534 return retval;
53535 err_dec_count:
53536- port->count--;
53537+ atomic_inc(&port->count);
53538 mutex_unlock(&port->mutex);
53539 goto end;
53540 }
53541diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
53542index b799170..87dafd5 100644
53543--- a/drivers/tty/synclink.c
53544+++ b/drivers/tty/synclink.c
53545@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53546
53547 if (debug_level >= DEBUG_LEVEL_INFO)
53548 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
53549- __FILE__,__LINE__, info->device_name, info->port.count);
53550+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53551
53552 if (tty_port_close_start(&info->port, tty, filp) == 0)
53553 goto cleanup;
53554@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
53555 cleanup:
53556 if (debug_level >= DEBUG_LEVEL_INFO)
53557 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
53558- tty->driver->name, info->port.count);
53559+ tty->driver->name, atomic_read(&info->port.count));
53560
53561 } /* end of mgsl_close() */
53562
53563@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
53564
53565 mgsl_flush_buffer(tty);
53566 shutdown(info);
53567-
53568- info->port.count = 0;
53569+
53570+ atomic_set(&info->port.count, 0);
53571 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53572 info->port.tty = NULL;
53573
53574@@ -3296,10 +3296,10 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53575
53576 if (debug_level >= DEBUG_LEVEL_INFO)
53577 printk("%s(%d):block_til_ready before block on %s count=%d\n",
53578- __FILE__,__LINE__, tty->driver->name, port->count );
53579+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53580
53581 spin_lock_irqsave(&info->irq_spinlock, flags);
53582- port->count--;
53583+ atomic_dec(&port->count);
53584 spin_unlock_irqrestore(&info->irq_spinlock, flags);
53585 port->blocked_open++;
53586
53587@@ -3327,7 +3327,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53588
53589 if (debug_level >= DEBUG_LEVEL_INFO)
53590 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
53591- __FILE__,__LINE__, tty->driver->name, port->count );
53592+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53593
53594 tty_unlock(tty);
53595 schedule();
53596@@ -3339,12 +3339,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
53597
53598 /* FIXME: Racy on hangup during close wait */
53599 if (!tty_hung_up_p(filp))
53600- port->count++;
53601+ atomic_inc(&port->count);
53602 port->blocked_open--;
53603
53604 if (debug_level >= DEBUG_LEVEL_INFO)
53605 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
53606- __FILE__,__LINE__, tty->driver->name, port->count );
53607+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53608
53609 if (!retval)
53610 port->flags |= ASYNC_NORMAL_ACTIVE;
53611@@ -3396,7 +3396,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53612
53613 if (debug_level >= DEBUG_LEVEL_INFO)
53614 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
53615- __FILE__,__LINE__,tty->driver->name, info->port.count);
53616+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53617
53618 /* If port is closing, signal caller to try again */
53619 if (info->port.flags & ASYNC_CLOSING){
53620@@ -3415,10 +3415,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
53621 spin_unlock_irqrestore(&info->netlock, flags);
53622 goto cleanup;
53623 }
53624- info->port.count++;
53625+ atomic_inc(&info->port.count);
53626 spin_unlock_irqrestore(&info->netlock, flags);
53627
53628- if (info->port.count == 1) {
53629+ if (atomic_read(&info->port.count) == 1) {
53630 /* 1st open on this device, init hardware */
53631 retval = startup(info);
53632 if (retval < 0)
53633@@ -3442,8 +3442,8 @@ cleanup:
53634 if (retval) {
53635 if (tty->count == 1)
53636 info->port.tty = NULL; /* tty layer will release tty struct */
53637- if(info->port.count)
53638- info->port.count--;
53639+ if (atomic_read(&info->port.count))
53640+ atomic_dec(&info->port.count);
53641 }
53642
53643 return retval;
53644@@ -7661,7 +7661,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53645 unsigned short new_crctype;
53646
53647 /* return error if TTY interface open */
53648- if (info->port.count)
53649+ if (atomic_read(&info->port.count))
53650 return -EBUSY;
53651
53652 switch (encoding)
53653@@ -7756,7 +7756,7 @@ static int hdlcdev_open(struct net_device *dev)
53654
53655 /* arbitrate between network and tty opens */
53656 spin_lock_irqsave(&info->netlock, flags);
53657- if (info->port.count != 0 || info->netcount != 0) {
53658+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53659 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53660 spin_unlock_irqrestore(&info->netlock, flags);
53661 return -EBUSY;
53662@@ -7842,7 +7842,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53663 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53664
53665 /* return error if TTY interface open */
53666- if (info->port.count)
53667+ if (atomic_read(&info->port.count))
53668 return -EBUSY;
53669
53670 if (cmd != SIOCWANDEV)
53671diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
53672index 0e8c39b..e0cb171 100644
53673--- a/drivers/tty/synclink_gt.c
53674+++ b/drivers/tty/synclink_gt.c
53675@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53676 tty->driver_data = info;
53677 info->port.tty = tty;
53678
53679- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
53680+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
53681
53682 /* If port is closing, signal caller to try again */
53683 if (info->port.flags & ASYNC_CLOSING){
53684@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53685 mutex_unlock(&info->port.mutex);
53686 goto cleanup;
53687 }
53688- info->port.count++;
53689+ atomic_inc(&info->port.count);
53690 spin_unlock_irqrestore(&info->netlock, flags);
53691
53692- if (info->port.count == 1) {
53693+ if (atomic_read(&info->port.count) == 1) {
53694 /* 1st open on this device, init hardware */
53695 retval = startup(info);
53696 if (retval < 0) {
53697@@ -715,8 +715,8 @@ cleanup:
53698 if (retval) {
53699 if (tty->count == 1)
53700 info->port.tty = NULL; /* tty layer will release tty struct */
53701- if(info->port.count)
53702- info->port.count--;
53703+ if(atomic_read(&info->port.count))
53704+ atomic_dec(&info->port.count);
53705 }
53706
53707 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
53708@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53709
53710 if (sanity_check(info, tty->name, "close"))
53711 return;
53712- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
53713+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
53714
53715 if (tty_port_close_start(&info->port, tty, filp) == 0)
53716 goto cleanup;
53717@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53718 tty_port_close_end(&info->port, tty);
53719 info->port.tty = NULL;
53720 cleanup:
53721- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
53722+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
53723 }
53724
53725 static void hangup(struct tty_struct *tty)
53726@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
53727 shutdown(info);
53728
53729 spin_lock_irqsave(&info->port.lock, flags);
53730- info->port.count = 0;
53731+ atomic_set(&info->port.count, 0);
53732 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53733 info->port.tty = NULL;
53734 spin_unlock_irqrestore(&info->port.lock, flags);
53735@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53736 unsigned short new_crctype;
53737
53738 /* return error if TTY interface open */
53739- if (info->port.count)
53740+ if (atomic_read(&info->port.count))
53741 return -EBUSY;
53742
53743 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
53744@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
53745
53746 /* arbitrate between network and tty opens */
53747 spin_lock_irqsave(&info->netlock, flags);
53748- if (info->port.count != 0 || info->netcount != 0) {
53749+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53750 DBGINFO(("%s hdlc_open busy\n", dev->name));
53751 spin_unlock_irqrestore(&info->netlock, flags);
53752 return -EBUSY;
53753@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53754 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
53755
53756 /* return error if TTY interface open */
53757- if (info->port.count)
53758+ if (atomic_read(&info->port.count))
53759 return -EBUSY;
53760
53761 if (cmd != SIOCWANDEV)
53762@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
53763 if (port == NULL)
53764 continue;
53765 spin_lock(&port->lock);
53766- if ((port->port.count || port->netcount) &&
53767+ if ((atomic_read(&port->port.count) || port->netcount) &&
53768 port->pending_bh && !port->bh_running &&
53769 !port->bh_requested) {
53770 DBGISR(("%s bh queued\n", port->device_name));
53771@@ -3299,7 +3299,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53772 add_wait_queue(&port->open_wait, &wait);
53773
53774 spin_lock_irqsave(&info->lock, flags);
53775- port->count--;
53776+ atomic_dec(&port->count);
53777 spin_unlock_irqrestore(&info->lock, flags);
53778 port->blocked_open++;
53779
53780@@ -3335,7 +3335,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53781 remove_wait_queue(&port->open_wait, &wait);
53782
53783 if (!tty_hung_up_p(filp))
53784- port->count++;
53785+ atomic_inc(&port->count);
53786 port->blocked_open--;
53787
53788 if (!retval)
53789diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
53790index c3f9091..abe4601 100644
53791--- a/drivers/tty/synclinkmp.c
53792+++ b/drivers/tty/synclinkmp.c
53793@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
53794
53795 if (debug_level >= DEBUG_LEVEL_INFO)
53796 printk("%s(%d):%s open(), old ref count = %d\n",
53797- __FILE__,__LINE__,tty->driver->name, info->port.count);
53798+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
53799
53800 /* If port is closing, signal caller to try again */
53801 if (info->port.flags & ASYNC_CLOSING){
53802@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
53803 spin_unlock_irqrestore(&info->netlock, flags);
53804 goto cleanup;
53805 }
53806- info->port.count++;
53807+ atomic_inc(&info->port.count);
53808 spin_unlock_irqrestore(&info->netlock, flags);
53809
53810- if (info->port.count == 1) {
53811+ if (atomic_read(&info->port.count) == 1) {
53812 /* 1st open on this device, init hardware */
53813 retval = startup(info);
53814 if (retval < 0)
53815@@ -796,8 +796,8 @@ cleanup:
53816 if (retval) {
53817 if (tty->count == 1)
53818 info->port.tty = NULL; /* tty layer will release tty struct */
53819- if(info->port.count)
53820- info->port.count--;
53821+ if(atomic_read(&info->port.count))
53822+ atomic_dec(&info->port.count);
53823 }
53824
53825 return retval;
53826@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53827
53828 if (debug_level >= DEBUG_LEVEL_INFO)
53829 printk("%s(%d):%s close() entry, count=%d\n",
53830- __FILE__,__LINE__, info->device_name, info->port.count);
53831+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
53832
53833 if (tty_port_close_start(&info->port, tty, filp) == 0)
53834 goto cleanup;
53835@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
53836 cleanup:
53837 if (debug_level >= DEBUG_LEVEL_INFO)
53838 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
53839- tty->driver->name, info->port.count);
53840+ tty->driver->name, atomic_read(&info->port.count));
53841 }
53842
53843 /* Called by tty_hangup() when a hangup is signaled.
53844@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
53845 shutdown(info);
53846
53847 spin_lock_irqsave(&info->port.lock, flags);
53848- info->port.count = 0;
53849+ atomic_set(&info->port.count, 0);
53850 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
53851 info->port.tty = NULL;
53852 spin_unlock_irqrestore(&info->port.lock, flags);
53853@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
53854 unsigned short new_crctype;
53855
53856 /* return error if TTY interface open */
53857- if (info->port.count)
53858+ if (atomic_read(&info->port.count))
53859 return -EBUSY;
53860
53861 switch (encoding)
53862@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
53863
53864 /* arbitrate between network and tty opens */
53865 spin_lock_irqsave(&info->netlock, flags);
53866- if (info->port.count != 0 || info->netcount != 0) {
53867+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
53868 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
53869 spin_unlock_irqrestore(&info->netlock, flags);
53870 return -EBUSY;
53871@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
53872 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
53873
53874 /* return error if TTY interface open */
53875- if (info->port.count)
53876+ if (atomic_read(&info->port.count))
53877 return -EBUSY;
53878
53879 if (cmd != SIOCWANDEV)
53880@@ -2621,7 +2621,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
53881 * do not request bottom half processing if the
53882 * device is not open in a normal mode.
53883 */
53884- if ( port && (port->port.count || port->netcount) &&
53885+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
53886 port->pending_bh && !port->bh_running &&
53887 !port->bh_requested ) {
53888 if ( debug_level >= DEBUG_LEVEL_ISR )
53889@@ -3318,10 +3318,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53890
53891 if (debug_level >= DEBUG_LEVEL_INFO)
53892 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
53893- __FILE__,__LINE__, tty->driver->name, port->count );
53894+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53895
53896 spin_lock_irqsave(&info->lock, flags);
53897- port->count--;
53898+ atomic_dec(&port->count);
53899 spin_unlock_irqrestore(&info->lock, flags);
53900 port->blocked_open++;
53901
53902@@ -3349,7 +3349,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53903
53904 if (debug_level >= DEBUG_LEVEL_INFO)
53905 printk("%s(%d):%s block_til_ready() count=%d\n",
53906- __FILE__,__LINE__, tty->driver->name, port->count );
53907+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53908
53909 tty_unlock(tty);
53910 schedule();
53911@@ -3359,12 +3359,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
53912 set_current_state(TASK_RUNNING);
53913 remove_wait_queue(&port->open_wait, &wait);
53914 if (!tty_hung_up_p(filp))
53915- port->count++;
53916+ atomic_inc(&port->count);
53917 port->blocked_open--;
53918
53919 if (debug_level >= DEBUG_LEVEL_INFO)
53920 printk("%s(%d):%s block_til_ready() after, count=%d\n",
53921- __FILE__,__LINE__, tty->driver->name, port->count );
53922+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
53923
53924 if (!retval)
53925 port->flags |= ASYNC_NORMAL_ACTIVE;
53926diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
53927index 42bad18..447d7a2 100644
53928--- a/drivers/tty/sysrq.c
53929+++ b/drivers/tty/sysrq.c
53930@@ -1084,7 +1084,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
53931 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
53932 size_t count, loff_t *ppos)
53933 {
53934- if (count) {
53935+ if (count && capable(CAP_SYS_ADMIN)) {
53936 char c;
53937
53938 if (get_user(c, buf))
53939diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
53940index 848c17a..e930437 100644
53941--- a/drivers/tty/tty_io.c
53942+++ b/drivers/tty/tty_io.c
53943@@ -3469,7 +3469,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
53944
53945 void tty_default_fops(struct file_operations *fops)
53946 {
53947- *fops = tty_fops;
53948+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
53949 }
53950
53951 /*
53952diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
53953index 2d822aa..a566234 100644
53954--- a/drivers/tty/tty_ldisc.c
53955+++ b/drivers/tty/tty_ldisc.c
53956@@ -71,7 +71,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
53957 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53958 tty_ldiscs[disc] = new_ldisc;
53959 new_ldisc->num = disc;
53960- new_ldisc->refcount = 0;
53961+ atomic_set(&new_ldisc->refcount, 0);
53962 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53963
53964 return ret;
53965@@ -99,7 +99,7 @@ int tty_unregister_ldisc(int disc)
53966 return -EINVAL;
53967
53968 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53969- if (tty_ldiscs[disc]->refcount)
53970+ if (atomic_read(&tty_ldiscs[disc]->refcount))
53971 ret = -EBUSY;
53972 else
53973 tty_ldiscs[disc] = NULL;
53974@@ -120,7 +120,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
53975 if (ldops) {
53976 ret = ERR_PTR(-EAGAIN);
53977 if (try_module_get(ldops->owner)) {
53978- ldops->refcount++;
53979+ atomic_inc(&ldops->refcount);
53980 ret = ldops;
53981 }
53982 }
53983@@ -133,7 +133,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
53984 unsigned long flags;
53985
53986 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
53987- ldops->refcount--;
53988+ atomic_dec(&ldops->refcount);
53989 module_put(ldops->owner);
53990 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
53991 }
53992diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
53993index 1b93357..ea9f82c 100644
53994--- a/drivers/tty/tty_port.c
53995+++ b/drivers/tty/tty_port.c
53996@@ -237,7 +237,7 @@ void tty_port_hangup(struct tty_port *port)
53997 unsigned long flags;
53998
53999 spin_lock_irqsave(&port->lock, flags);
54000- port->count = 0;
54001+ atomic_set(&port->count, 0);
54002 port->flags &= ~ASYNC_NORMAL_ACTIVE;
54003 tty = port->tty;
54004 if (tty)
54005@@ -399,7 +399,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54006
54007 /* The port lock protects the port counts */
54008 spin_lock_irqsave(&port->lock, flags);
54009- port->count--;
54010+ atomic_dec(&port->count);
54011 port->blocked_open++;
54012 spin_unlock_irqrestore(&port->lock, flags);
54013
54014@@ -441,7 +441,7 @@ int tty_port_block_til_ready(struct tty_port *port,
54015 we must not mess that up further */
54016 spin_lock_irqsave(&port->lock, flags);
54017 if (!tty_hung_up_p(filp))
54018- port->count++;
54019+ atomic_inc(&port->count);
54020 port->blocked_open--;
54021 if (retval == 0)
54022 port->flags |= ASYNC_NORMAL_ACTIVE;
54023@@ -479,19 +479,19 @@ int tty_port_close_start(struct tty_port *port,
54024 return 0;
54025 }
54026
54027- if (tty->count == 1 && port->count != 1) {
54028+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
54029 printk(KERN_WARNING
54030 "tty_port_close_start: tty->count = 1 port count = %d.\n",
54031- port->count);
54032- port->count = 1;
54033+ atomic_read(&port->count));
54034+ atomic_set(&port->count, 1);
54035 }
54036- if (--port->count < 0) {
54037+ if (atomic_dec_return(&port->count) < 0) {
54038 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
54039- port->count);
54040- port->count = 0;
54041+ atomic_read(&port->count));
54042+ atomic_set(&port->count, 0);
54043 }
54044
54045- if (port->count) {
54046+ if (atomic_read(&port->count)) {
54047 spin_unlock_irqrestore(&port->lock, flags);
54048 return 0;
54049 }
54050@@ -592,7 +592,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
54051 struct file *filp)
54052 {
54053 spin_lock_irq(&port->lock);
54054- ++port->count;
54055+ atomic_inc(&port->count);
54056 spin_unlock_irq(&port->lock);
54057 tty_port_tty_set(port, tty);
54058
54059diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
54060index d0e3a44..5f8b754 100644
54061--- a/drivers/tty/vt/keyboard.c
54062+++ b/drivers/tty/vt/keyboard.c
54063@@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
54064 kbd->kbdmode == VC_OFF) &&
54065 value != KVAL(K_SAK))
54066 return; /* SAK is allowed even in raw mode */
54067+
54068+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54069+ {
54070+ void *func = fn_handler[value];
54071+ if (func == fn_show_state || func == fn_show_ptregs ||
54072+ func == fn_show_mem)
54073+ return;
54074+ }
54075+#endif
54076+
54077 fn_handler[value](vc);
54078 }
54079
54080@@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54081 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
54082 return -EFAULT;
54083
54084- if (!capable(CAP_SYS_TTY_CONFIG))
54085- perm = 0;
54086-
54087 switch (cmd) {
54088 case KDGKBENT:
54089 /* Ensure another thread doesn't free it under us */
54090@@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
54091 spin_unlock_irqrestore(&kbd_event_lock, flags);
54092 return put_user(val, &user_kbe->kb_value);
54093 case KDSKBENT:
54094+ if (!capable(CAP_SYS_TTY_CONFIG))
54095+ perm = 0;
54096+
54097 if (!perm)
54098 return -EPERM;
54099 if (!i && v == K_NOSUCHMAP) {
54100@@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54101 int i, j, k;
54102 int ret;
54103
54104- if (!capable(CAP_SYS_TTY_CONFIG))
54105- perm = 0;
54106-
54107 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
54108 if (!kbs) {
54109 ret = -ENOMEM;
54110@@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
54111 kfree(kbs);
54112 return ((p && *p) ? -EOVERFLOW : 0);
54113 case KDSKBSENT:
54114+ if (!capable(CAP_SYS_TTY_CONFIG))
54115+ perm = 0;
54116+
54117 if (!perm) {
54118 ret = -EPERM;
54119 goto reterr;
54120diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
54121index a673e5b..36e5d32 100644
54122--- a/drivers/uio/uio.c
54123+++ b/drivers/uio/uio.c
54124@@ -25,6 +25,7 @@
54125 #include <linux/kobject.h>
54126 #include <linux/cdev.h>
54127 #include <linux/uio_driver.h>
54128+#include <asm/local.h>
54129
54130 #define UIO_MAX_DEVICES (1U << MINORBITS)
54131
54132@@ -32,7 +33,7 @@ struct uio_device {
54133 struct module *owner;
54134 struct device *dev;
54135 int minor;
54136- atomic_t event;
54137+ atomic_unchecked_t event;
54138 struct fasync_struct *async_queue;
54139 wait_queue_head_t wait;
54140 struct uio_info *info;
54141@@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
54142 struct device_attribute *attr, char *buf)
54143 {
54144 struct uio_device *idev = dev_get_drvdata(dev);
54145- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
54146+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
54147 }
54148 static DEVICE_ATTR_RO(event);
54149
54150@@ -405,7 +406,7 @@ void uio_event_notify(struct uio_info *info)
54151 {
54152 struct uio_device *idev = info->uio_dev;
54153
54154- atomic_inc(&idev->event);
54155+ atomic_inc_unchecked(&idev->event);
54156 wake_up_interruptible(&idev->wait);
54157 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
54158 }
54159@@ -458,7 +459,7 @@ static int uio_open(struct inode *inode, struct file *filep)
54160 }
54161
54162 listener->dev = idev;
54163- listener->event_count = atomic_read(&idev->event);
54164+ listener->event_count = atomic_read_unchecked(&idev->event);
54165 filep->private_data = listener;
54166
54167 if (idev->info->open) {
54168@@ -509,7 +510,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
54169 return -EIO;
54170
54171 poll_wait(filep, &idev->wait, wait);
54172- if (listener->event_count != atomic_read(&idev->event))
54173+ if (listener->event_count != atomic_read_unchecked(&idev->event))
54174 return POLLIN | POLLRDNORM;
54175 return 0;
54176 }
54177@@ -534,7 +535,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
54178 do {
54179 set_current_state(TASK_INTERRUPTIBLE);
54180
54181- event_count = atomic_read(&idev->event);
54182+ event_count = atomic_read_unchecked(&idev->event);
54183 if (event_count != listener->event_count) {
54184 if (copy_to_user(buf, &event_count, count))
54185 retval = -EFAULT;
54186@@ -591,9 +592,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
54187 static int uio_find_mem_index(struct vm_area_struct *vma)
54188 {
54189 struct uio_device *idev = vma->vm_private_data;
54190+ unsigned long size;
54191
54192 if (vma->vm_pgoff < MAX_UIO_MAPS) {
54193- if (idev->info->mem[vma->vm_pgoff].size == 0)
54194+ size = idev->info->mem[vma->vm_pgoff].size;
54195+ if (size == 0)
54196+ return -1;
54197+ if (vma->vm_end - vma->vm_start > size)
54198 return -1;
54199 return (int)vma->vm_pgoff;
54200 }
54201@@ -825,7 +830,7 @@ int __uio_register_device(struct module *owner,
54202 idev->owner = owner;
54203 idev->info = info;
54204 init_waitqueue_head(&idev->wait);
54205- atomic_set(&idev->event, 0);
54206+ atomic_set_unchecked(&idev->event, 0);
54207
54208 ret = uio_get_minor(idev);
54209 if (ret)
54210diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
54211index 813d4d3..a71934f 100644
54212--- a/drivers/usb/atm/cxacru.c
54213+++ b/drivers/usb/atm/cxacru.c
54214@@ -472,7 +472,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
54215 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
54216 if (ret < 2)
54217 return -EINVAL;
54218- if (index < 0 || index > 0x7f)
54219+ if (index > 0x7f)
54220 return -EINVAL;
54221 pos += tmp;
54222
54223diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
54224index dada014..1d0d517 100644
54225--- a/drivers/usb/atm/usbatm.c
54226+++ b/drivers/usb/atm/usbatm.c
54227@@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54228 if (printk_ratelimit())
54229 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
54230 __func__, vpi, vci);
54231- atomic_inc(&vcc->stats->rx_err);
54232+ atomic_inc_unchecked(&vcc->stats->rx_err);
54233 return;
54234 }
54235
54236@@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54237 if (length > ATM_MAX_AAL5_PDU) {
54238 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
54239 __func__, length, vcc);
54240- atomic_inc(&vcc->stats->rx_err);
54241+ atomic_inc_unchecked(&vcc->stats->rx_err);
54242 goto out;
54243 }
54244
54245@@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54246 if (sarb->len < pdu_length) {
54247 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
54248 __func__, pdu_length, sarb->len, vcc);
54249- atomic_inc(&vcc->stats->rx_err);
54250+ atomic_inc_unchecked(&vcc->stats->rx_err);
54251 goto out;
54252 }
54253
54254 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
54255 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
54256 __func__, vcc);
54257- atomic_inc(&vcc->stats->rx_err);
54258+ atomic_inc_unchecked(&vcc->stats->rx_err);
54259 goto out;
54260 }
54261
54262@@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54263 if (printk_ratelimit())
54264 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
54265 __func__, length);
54266- atomic_inc(&vcc->stats->rx_drop);
54267+ atomic_inc_unchecked(&vcc->stats->rx_drop);
54268 goto out;
54269 }
54270
54271@@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
54272
54273 vcc->push(vcc, skb);
54274
54275- atomic_inc(&vcc->stats->rx);
54276+ atomic_inc_unchecked(&vcc->stats->rx);
54277 out:
54278 skb_trim(sarb, 0);
54279 }
54280@@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
54281 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
54282
54283 usbatm_pop(vcc, skb);
54284- atomic_inc(&vcc->stats->tx);
54285+ atomic_inc_unchecked(&vcc->stats->tx);
54286
54287 skb = skb_dequeue(&instance->sndqueue);
54288 }
54289@@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t *pos, char *page
54290 if (!left--)
54291 return sprintf(page,
54292 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
54293- atomic_read(&atm_dev->stats.aal5.tx),
54294- atomic_read(&atm_dev->stats.aal5.tx_err),
54295- atomic_read(&atm_dev->stats.aal5.rx),
54296- atomic_read(&atm_dev->stats.aal5.rx_err),
54297- atomic_read(&atm_dev->stats.aal5.rx_drop));
54298+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
54299+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
54300+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
54301+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
54302+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
54303
54304 if (!left--) {
54305 if (instance->disconnected)
54306diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
54307index 2a3bbdf..91d72cf 100644
54308--- a/drivers/usb/core/devices.c
54309+++ b/drivers/usb/core/devices.c
54310@@ -126,7 +126,7 @@ static const char format_endpt[] =
54311 * time it gets called.
54312 */
54313 static struct device_connect_event {
54314- atomic_t count;
54315+ atomic_unchecked_t count;
54316 wait_queue_head_t wait;
54317 } device_event = {
54318 .count = ATOMIC_INIT(1),
54319@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
54320
54321 void usbfs_conn_disc_event(void)
54322 {
54323- atomic_add(2, &device_event.count);
54324+ atomic_add_unchecked(2, &device_event.count);
54325 wake_up(&device_event.wait);
54326 }
54327
54328@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
54329
54330 poll_wait(file, &device_event.wait, wait);
54331
54332- event_count = atomic_read(&device_event.count);
54333+ event_count = atomic_read_unchecked(&device_event.count);
54334 if (file->f_version != event_count) {
54335 file->f_version = event_count;
54336 return POLLIN | POLLRDNORM;
54337diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
54338index 0b59731..46ee7d1 100644
54339--- a/drivers/usb/core/devio.c
54340+++ b/drivers/usb/core/devio.c
54341@@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54342 struct usb_dev_state *ps = file->private_data;
54343 struct usb_device *dev = ps->dev;
54344 ssize_t ret = 0;
54345- unsigned len;
54346+ size_t len;
54347 loff_t pos;
54348 int i;
54349
54350@@ -229,22 +229,22 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
54351 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
54352 struct usb_config_descriptor *config =
54353 (struct usb_config_descriptor *)dev->rawdescriptors[i];
54354- unsigned int length = le16_to_cpu(config->wTotalLength);
54355+ size_t length = le16_to_cpu(config->wTotalLength);
54356
54357 if (*ppos < pos + length) {
54358
54359 /* The descriptor may claim to be longer than it
54360 * really is. Here is the actual allocated length. */
54361- unsigned alloclen =
54362+ size_t alloclen =
54363 le16_to_cpu(dev->config[i].desc.wTotalLength);
54364
54365- len = length - (*ppos - pos);
54366+ len = length + pos - *ppos;
54367 if (len > nbytes)
54368 len = nbytes;
54369
54370 /* Simply don't write (skip over) unallocated parts */
54371 if (alloclen > (*ppos - pos)) {
54372- alloclen -= (*ppos - pos);
54373+ alloclen = alloclen + pos - *ppos;
54374 if (copy_to_user(buf,
54375 dev->rawdescriptors[i] + (*ppos - pos),
54376 min(len, alloclen))) {
54377diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
54378index 258e6fe..9ea48d7 100644
54379--- a/drivers/usb/core/hcd.c
54380+++ b/drivers/usb/core/hcd.c
54381@@ -1550,7 +1550,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54382 */
54383 usb_get_urb(urb);
54384 atomic_inc(&urb->use_count);
54385- atomic_inc(&urb->dev->urbnum);
54386+ atomic_inc_unchecked(&urb->dev->urbnum);
54387 usbmon_urb_submit(&hcd->self, urb);
54388
54389 /* NOTE requirements on root-hub callers (usbfs and the hub
54390@@ -1577,7 +1577,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
54391 urb->hcpriv = NULL;
54392 INIT_LIST_HEAD(&urb->urb_list);
54393 atomic_dec(&urb->use_count);
54394- atomic_dec(&urb->dev->urbnum);
54395+ atomic_dec_unchecked(&urb->dev->urbnum);
54396 if (atomic_read(&urb->reject))
54397 wake_up(&usb_kill_urb_queue);
54398 usb_put_urb(urb);
54399diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
54400index 674c262..71fdd90 100644
54401--- a/drivers/usb/core/hub.c
54402+++ b/drivers/usb/core/hub.c
54403@@ -27,6 +27,7 @@
54404 #include <linux/freezer.h>
54405 #include <linux/random.h>
54406 #include <linux/pm_qos.h>
54407+#include <linux/grsecurity.h>
54408
54409 #include <asm/uaccess.h>
54410 #include <asm/byteorder.h>
54411@@ -4665,6 +4666,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
54412 goto done;
54413 return;
54414 }
54415+
54416+ if (gr_handle_new_usb())
54417+ goto done;
54418+
54419 if (hub_is_superspeed(hub->hdev))
54420 unit_load = 150;
54421 else
54422diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
54423index 0c8a7fc..c45b40a 100644
54424--- a/drivers/usb/core/message.c
54425+++ b/drivers/usb/core/message.c
54426@@ -128,7 +128,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
54427 * Return: If successful, the number of bytes transferred. Otherwise, a negative
54428 * error number.
54429 */
54430-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54431+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
54432 __u8 requesttype, __u16 value, __u16 index, void *data,
54433 __u16 size, int timeout)
54434 {
54435@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
54436 * If successful, 0. Otherwise a negative error number. The number of actual
54437 * bytes transferred will be stored in the @actual_length parameter.
54438 */
54439-int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54440+int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
54441 void *data, int len, int *actual_length, int timeout)
54442 {
54443 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
54444@@ -220,7 +220,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
54445 * bytes transferred will be stored in the @actual_length parameter.
54446 *
54447 */
54448-int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54449+int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
54450 void *data, int len, int *actual_length, int timeout)
54451 {
54452 struct urb *urb;
54453diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
54454index 1236c60..d47a51c 100644
54455--- a/drivers/usb/core/sysfs.c
54456+++ b/drivers/usb/core/sysfs.c
54457@@ -244,7 +244,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
54458 struct usb_device *udev;
54459
54460 udev = to_usb_device(dev);
54461- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
54462+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
54463 }
54464 static DEVICE_ATTR_RO(urbnum);
54465
54466diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
54467index 2dd2362..1135437 100644
54468--- a/drivers/usb/core/usb.c
54469+++ b/drivers/usb/core/usb.c
54470@@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
54471 set_dev_node(&dev->dev, dev_to_node(bus->controller));
54472 dev->state = USB_STATE_ATTACHED;
54473 dev->lpm_disable_count = 1;
54474- atomic_set(&dev->urbnum, 0);
54475+ atomic_set_unchecked(&dev->urbnum, 0);
54476
54477 INIT_LIST_HEAD(&dev->ep0.urb_list);
54478 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
54479diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
54480index 8cfc319..4868255 100644
54481--- a/drivers/usb/early/ehci-dbgp.c
54482+++ b/drivers/usb/early/ehci-dbgp.c
54483@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
54484
54485 #ifdef CONFIG_KGDB
54486 static struct kgdb_io kgdbdbgp_io_ops;
54487-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
54488+static struct kgdb_io kgdbdbgp_io_ops_console;
54489+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
54490 #else
54491 #define dbgp_kgdb_mode (0)
54492 #endif
54493@@ -1043,6 +1044,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
54494 .write_char = kgdbdbgp_write_char,
54495 };
54496
54497+static struct kgdb_io kgdbdbgp_io_ops_console = {
54498+ .name = "kgdbdbgp",
54499+ .read_char = kgdbdbgp_read_char,
54500+ .write_char = kgdbdbgp_write_char,
54501+ .is_console = 1
54502+};
54503+
54504 static int kgdbdbgp_wait_time;
54505
54506 static int __init kgdbdbgp_parse_config(char *str)
54507@@ -1058,8 +1066,10 @@ static int __init kgdbdbgp_parse_config(char *str)
54508 ptr++;
54509 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
54510 }
54511- kgdb_register_io_module(&kgdbdbgp_io_ops);
54512- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
54513+ if (early_dbgp_console.index != -1)
54514+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
54515+ else
54516+ kgdb_register_io_module(&kgdbdbgp_io_ops);
54517
54518 return 0;
54519 }
54520diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
54521index 2b4c82d..06a8ee6 100644
54522--- a/drivers/usb/gadget/function/f_uac1.c
54523+++ b/drivers/usb/gadget/function/f_uac1.c
54524@@ -13,6 +13,7 @@
54525 #include <linux/kernel.h>
54526 #include <linux/device.h>
54527 #include <linux/atomic.h>
54528+#include <linux/module.h>
54529
54530 #include "u_uac1.h"
54531
54532diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
54533index ad0aca8..8ff84865 100644
54534--- a/drivers/usb/gadget/function/u_serial.c
54535+++ b/drivers/usb/gadget/function/u_serial.c
54536@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54537 spin_lock_irq(&port->port_lock);
54538
54539 /* already open? Great. */
54540- if (port->port.count) {
54541+ if (atomic_read(&port->port.count)) {
54542 status = 0;
54543- port->port.count++;
54544+ atomic_inc(&port->port.count);
54545
54546 /* currently opening/closing? wait ... */
54547 } else if (port->openclose) {
54548@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
54549 tty->driver_data = port;
54550 port->port.tty = tty;
54551
54552- port->port.count = 1;
54553+ atomic_set(&port->port.count, 1);
54554 port->openclose = false;
54555
54556 /* if connected, start the I/O stream */
54557@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54558
54559 spin_lock_irq(&port->port_lock);
54560
54561- if (port->port.count != 1) {
54562- if (port->port.count == 0)
54563+ if (atomic_read(&port->port.count) != 1) {
54564+ if (atomic_read(&port->port.count) == 0)
54565 WARN_ON(1);
54566 else
54567- --port->port.count;
54568+ atomic_dec(&port->port.count);
54569 goto exit;
54570 }
54571
54572@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
54573 * and sleep if necessary
54574 */
54575 port->openclose = true;
54576- port->port.count = 0;
54577+ atomic_set(&port->port.count, 0);
54578
54579 gser = port->port_usb;
54580 if (gser && gser->disconnect)
54581@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
54582 int cond;
54583
54584 spin_lock_irq(&port->port_lock);
54585- cond = (port->port.count == 0) && !port->openclose;
54586+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
54587 spin_unlock_irq(&port->port_lock);
54588 return cond;
54589 }
54590@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
54591 /* if it's already open, start I/O ... and notify the serial
54592 * protocol about open/close status (connect/disconnect).
54593 */
54594- if (port->port.count) {
54595+ if (atomic_read(&port->port.count)) {
54596 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
54597 gs_start_io(port);
54598 if (gser->connect)
54599@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
54600
54601 port->port_usb = NULL;
54602 gser->ioport = NULL;
54603- if (port->port.count > 0 || port->openclose) {
54604+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
54605 wake_up_interruptible(&port->drain_wait);
54606 if (port->port.tty)
54607 tty_hangup(port->port.tty);
54608@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
54609
54610 /* finally, free any unused/unusable I/O buffers */
54611 spin_lock_irqsave(&port->port_lock, flags);
54612- if (port->port.count == 0 && !port->openclose)
54613+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
54614 gs_buf_free(&port->port_write_buf);
54615 gs_free_requests(gser->out, &port->read_pool, NULL);
54616 gs_free_requests(gser->out, &port->read_queue, NULL);
54617diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c
54618index 7a55fea..cc0ed4f 100644
54619--- a/drivers/usb/gadget/function/u_uac1.c
54620+++ b/drivers/usb/gadget/function/u_uac1.c
54621@@ -16,6 +16,7 @@
54622 #include <linux/ctype.h>
54623 #include <linux/random.h>
54624 #include <linux/syscalls.h>
54625+#include <linux/module.h>
54626
54627 #include "u_uac1.h"
54628
54629diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
54630index 6130b75..3b60008 100644
54631--- a/drivers/usb/host/ehci-hub.c
54632+++ b/drivers/usb/host/ehci-hub.c
54633@@ -771,7 +771,7 @@ static struct urb *request_single_step_set_feature_urb(
54634 urb->transfer_flags = URB_DIR_IN;
54635 usb_get_urb(urb);
54636 atomic_inc(&urb->use_count);
54637- atomic_inc(&urb->dev->urbnum);
54638+ atomic_inc_unchecked(&urb->dev->urbnum);
54639 urb->setup_dma = dma_map_single(
54640 hcd->self.controller,
54641 urb->setup_packet,
54642@@ -838,7 +838,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
54643 urb->status = -EINPROGRESS;
54644 usb_get_urb(urb);
54645 atomic_inc(&urb->use_count);
54646- atomic_inc(&urb->dev->urbnum);
54647+ atomic_inc_unchecked(&urb->dev->urbnum);
54648 retval = submit_single_step_set_feature(hcd, urb, 0);
54649 if (!retval && !wait_for_completion_timeout(&done,
54650 msecs_to_jiffies(2000))) {
54651diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
54652index d0d8fad..668ef7b 100644
54653--- a/drivers/usb/host/hwa-hc.c
54654+++ b/drivers/usb/host/hwa-hc.c
54655@@ -337,7 +337,10 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54656 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
54657 struct wahc *wa = &hwahc->wa;
54658 struct device *dev = &wa->usb_iface->dev;
54659- u8 mas_le[UWB_NUM_MAS/8];
54660+ u8 *mas_le = kmalloc(UWB_NUM_MAS/8, GFP_KERNEL);
54661+
54662+ if (mas_le == NULL)
54663+ return -ENOMEM;
54664
54665 /* Set the stream index */
54666 result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
54667@@ -356,10 +359,12 @@ static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
54668 WUSB_REQ_SET_WUSB_MAS,
54669 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
54670 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
54671- mas_le, 32, USB_CTRL_SET_TIMEOUT);
54672+ mas_le, UWB_NUM_MAS/8, USB_CTRL_SET_TIMEOUT);
54673 if (result < 0)
54674 dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
54675 out:
54676+ kfree(mas_le);
54677+
54678 return result;
54679 }
54680
54681diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
54682index b3d245e..99549ed 100644
54683--- a/drivers/usb/misc/appledisplay.c
54684+++ b/drivers/usb/misc/appledisplay.c
54685@@ -84,7 +84,7 @@ struct appledisplay {
54686 struct mutex sysfslock; /* concurrent read and write */
54687 };
54688
54689-static atomic_t count_displays = ATOMIC_INIT(0);
54690+static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
54691 static struct workqueue_struct *wq;
54692
54693 static void appledisplay_complete(struct urb *urb)
54694@@ -288,7 +288,7 @@ static int appledisplay_probe(struct usb_interface *iface,
54695
54696 /* Register backlight device */
54697 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
54698- atomic_inc_return(&count_displays) - 1);
54699+ atomic_inc_return_unchecked(&count_displays) - 1);
54700 memset(&props, 0, sizeof(struct backlight_properties));
54701 props.type = BACKLIGHT_RAW;
54702 props.max_brightness = 0xff;
54703diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
54704index 8d7fc48..01c4986 100644
54705--- a/drivers/usb/serial/console.c
54706+++ b/drivers/usb/serial/console.c
54707@@ -123,7 +123,7 @@ static int usb_console_setup(struct console *co, char *options)
54708
54709 info->port = port;
54710
54711- ++port->port.count;
54712+ atomic_inc(&port->port.count);
54713 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
54714 if (serial->type->set_termios) {
54715 /*
54716@@ -167,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
54717 }
54718 /* Now that any required fake tty operations are completed restore
54719 * the tty port count */
54720- --port->port.count;
54721+ atomic_dec(&port->port.count);
54722 /* The console is special in terms of closing the device so
54723 * indicate this port is now acting as a system console. */
54724 port->port.console = 1;
54725@@ -180,7 +180,7 @@ static int usb_console_setup(struct console *co, char *options)
54726 free_tty:
54727 kfree(tty);
54728 reset_open_count:
54729- port->port.count = 0;
54730+ atomic_set(&port->port.count, 0);
54731 usb_autopm_put_interface(serial->interface);
54732 error_get_interface:
54733 usb_serial_put(serial);
54734@@ -191,7 +191,7 @@ static int usb_console_setup(struct console *co, char *options)
54735 static void usb_console_write(struct console *co,
54736 const char *buf, unsigned count)
54737 {
54738- static struct usbcons_info *info = &usbcons_info;
54739+ struct usbcons_info *info = &usbcons_info;
54740 struct usb_serial_port *port = info->port;
54741 struct usb_serial *serial;
54742 int retval = -ENODEV;
54743diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
54744index 307e339..6aa97cb 100644
54745--- a/drivers/usb/storage/usb.h
54746+++ b/drivers/usb/storage/usb.h
54747@@ -63,7 +63,7 @@ struct us_unusual_dev {
54748 __u8 useProtocol;
54749 __u8 useTransport;
54750 int (*initFunction)(struct us_data *);
54751-};
54752+} __do_const;
54753
54754
54755 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
54756diff --git a/drivers/usb/usbip/vhci.h b/drivers/usb/usbip/vhci.h
54757index a863a98..d272795 100644
54758--- a/drivers/usb/usbip/vhci.h
54759+++ b/drivers/usb/usbip/vhci.h
54760@@ -83,7 +83,7 @@ struct vhci_hcd {
54761 unsigned resuming:1;
54762 unsigned long re_timeout;
54763
54764- atomic_t seqnum;
54765+ atomic_unchecked_t seqnum;
54766
54767 /*
54768 * NOTE:
54769diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
54770index c02374b..32d47a9 100644
54771--- a/drivers/usb/usbip/vhci_hcd.c
54772+++ b/drivers/usb/usbip/vhci_hcd.c
54773@@ -439,7 +439,7 @@ static void vhci_tx_urb(struct urb *urb)
54774
54775 spin_lock(&vdev->priv_lock);
54776
54777- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
54778+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54779 if (priv->seqnum == 0xffff)
54780 dev_info(&urb->dev->dev, "seqnum max\n");
54781
54782@@ -686,7 +686,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
54783 return -ENOMEM;
54784 }
54785
54786- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
54787+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
54788 if (unlink->seqnum == 0xffff)
54789 pr_info("seqnum max\n");
54790
54791@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
54792 vdev->rhport = rhport;
54793 }
54794
54795- atomic_set(&vhci->seqnum, 0);
54796+ atomic_set_unchecked(&vhci->seqnum, 0);
54797 spin_lock_init(&vhci->lock);
54798
54799 hcd->power_budget = 0; /* no limit */
54800diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
54801index 00e4a54..d676f85 100644
54802--- a/drivers/usb/usbip/vhci_rx.c
54803+++ b/drivers/usb/usbip/vhci_rx.c
54804@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
54805 if (!urb) {
54806 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
54807 pr_info("max seqnum %d\n",
54808- atomic_read(&the_controller->seqnum));
54809+ atomic_read_unchecked(&the_controller->seqnum));
54810 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
54811 return;
54812 }
54813diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
54814index f2a8d29..7bc3fe7 100644
54815--- a/drivers/usb/wusbcore/wa-hc.h
54816+++ b/drivers/usb/wusbcore/wa-hc.h
54817@@ -240,7 +240,7 @@ struct wahc {
54818 spinlock_t xfer_list_lock;
54819 struct work_struct xfer_enqueue_work;
54820 struct work_struct xfer_error_work;
54821- atomic_t xfer_id_count;
54822+ atomic_unchecked_t xfer_id_count;
54823
54824 kernel_ulong_t quirks;
54825 };
54826@@ -305,7 +305,7 @@ static inline void wa_init(struct wahc *wa)
54827 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
54828 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
54829 wa->dto_in_use = 0;
54830- atomic_set(&wa->xfer_id_count, 1);
54831+ atomic_set_unchecked(&wa->xfer_id_count, 1);
54832 /* init the buf in URBs */
54833 for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
54834 usb_init_urb(&(wa->buf_in_urbs[index]));
54835diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
54836index e279015..c2d0dae 100644
54837--- a/drivers/usb/wusbcore/wa-xfer.c
54838+++ b/drivers/usb/wusbcore/wa-xfer.c
54839@@ -314,7 +314,7 @@ static void wa_xfer_completion(struct wa_xfer *xfer)
54840 */
54841 static void wa_xfer_id_init(struct wa_xfer *xfer)
54842 {
54843- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
54844+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
54845 }
54846
54847 /* Return the xfer's ID. */
54848diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
54849index f018d8d..ccab63f 100644
54850--- a/drivers/vfio/vfio.c
54851+++ b/drivers/vfio/vfio.c
54852@@ -481,7 +481,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
54853 return 0;
54854
54855 /* TODO Prevent device auto probing */
54856- WARN("Device %s added to live group %d!\n", dev_name(dev),
54857+ WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
54858 iommu_group_id(group->iommu_group));
54859
54860 return 0;
54861diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
54862index 5174eba..451e6bc 100644
54863--- a/drivers/vhost/vringh.c
54864+++ b/drivers/vhost/vringh.c
54865@@ -530,17 +530,17 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
54866 /* Userspace access helpers: in this case, addresses are really userspace. */
54867 static inline int getu16_user(u16 *val, const u16 *p)
54868 {
54869- return get_user(*val, (__force u16 __user *)p);
54870+ return get_user(*val, (u16 __force_user *)p);
54871 }
54872
54873 static inline int putu16_user(u16 *p, u16 val)
54874 {
54875- return put_user(val, (__force u16 __user *)p);
54876+ return put_user(val, (u16 __force_user *)p);
54877 }
54878
54879 static inline int copydesc_user(void *dst, const void *src, size_t len)
54880 {
54881- return copy_from_user(dst, (__force void __user *)src, len) ?
54882+ return copy_from_user(dst, (void __force_user *)src, len) ?
54883 -EFAULT : 0;
54884 }
54885
54886@@ -548,19 +548,19 @@ static inline int putused_user(struct vring_used_elem *dst,
54887 const struct vring_used_elem *src,
54888 unsigned int num)
54889 {
54890- return copy_to_user((__force void __user *)dst, src,
54891+ return copy_to_user((void __force_user *)dst, src,
54892 sizeof(*dst) * num) ? -EFAULT : 0;
54893 }
54894
54895 static inline int xfer_from_user(void *src, void *dst, size_t len)
54896 {
54897- return copy_from_user(dst, (__force void __user *)src, len) ?
54898+ return copy_from_user(dst, (void __force_user *)src, len) ?
54899 -EFAULT : 0;
54900 }
54901
54902 static inline int xfer_to_user(void *dst, void *src, size_t len)
54903 {
54904- return copy_to_user((__force void __user *)dst, src, len) ?
54905+ return copy_to_user((void __force_user *)dst, src, len) ?
54906 -EFAULT : 0;
54907 }
54908
54909@@ -596,9 +596,9 @@ int vringh_init_user(struct vringh *vrh, u32 features,
54910 vrh->last_used_idx = 0;
54911 vrh->vring.num = num;
54912 /* vring expects kernel addresses, but only used via accessors. */
54913- vrh->vring.desc = (__force struct vring_desc *)desc;
54914- vrh->vring.avail = (__force struct vring_avail *)avail;
54915- vrh->vring.used = (__force struct vring_used *)used;
54916+ vrh->vring.desc = (__force_kernel struct vring_desc *)desc;
54917+ vrh->vring.avail = (__force_kernel struct vring_avail *)avail;
54918+ vrh->vring.used = (__force_kernel struct vring_used *)used;
54919 return 0;
54920 }
54921 EXPORT_SYMBOL(vringh_init_user);
54922@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
54923
54924 static inline int putu16_kern(u16 *p, u16 val)
54925 {
54926- ACCESS_ONCE(*p) = val;
54927+ ACCESS_ONCE_RW(*p) = val;
54928 return 0;
54929 }
54930
54931diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
54932index 84a110a..96312c3 100644
54933--- a/drivers/video/backlight/kb3886_bl.c
54934+++ b/drivers/video/backlight/kb3886_bl.c
54935@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
54936 static unsigned long kb3886bl_flags;
54937 #define KB3886BL_SUSPENDED 0x01
54938
54939-static struct dmi_system_id kb3886bl_device_table[] __initdata = {
54940+static const struct dmi_system_id kb3886bl_device_table[] __initconst = {
54941 {
54942 .ident = "Sahara Touch-iT",
54943 .matches = {
54944diff --git a/drivers/video/fbdev/arcfb.c b/drivers/video/fbdev/arcfb.c
54945index 1b0b233..6f34c2c 100644
54946--- a/drivers/video/fbdev/arcfb.c
54947+++ b/drivers/video/fbdev/arcfb.c
54948@@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
54949 return -ENOSPC;
54950
54951 err = 0;
54952- if ((count + p) > fbmemlength) {
54953+ if (count > (fbmemlength - p)) {
54954 count = fbmemlength - p;
54955 err = -ENOSPC;
54956 }
54957diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
54958index ff60701..814b973 100644
54959--- a/drivers/video/fbdev/aty/aty128fb.c
54960+++ b/drivers/video/fbdev/aty/aty128fb.c
54961@@ -149,7 +149,7 @@ enum {
54962 };
54963
54964 /* Must match above enum */
54965-static char * const r128_family[] = {
54966+static const char * const r128_family[] = {
54967 "AGP",
54968 "PCI",
54969 "PRO AGP",
54970diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
54971index 37ec09b..98f8862 100644
54972--- a/drivers/video/fbdev/aty/atyfb_base.c
54973+++ b/drivers/video/fbdev/aty/atyfb_base.c
54974@@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
54975 par->accel_flags = var->accel_flags; /* hack */
54976
54977 if (var->accel_flags) {
54978- info->fbops->fb_sync = atyfb_sync;
54979+ pax_open_kernel();
54980+ *(void **)&info->fbops->fb_sync = atyfb_sync;
54981+ pax_close_kernel();
54982 info->flags &= ~FBINFO_HWACCEL_DISABLED;
54983 } else {
54984- info->fbops->fb_sync = NULL;
54985+ pax_open_kernel();
54986+ *(void **)&info->fbops->fb_sync = NULL;
54987+ pax_close_kernel();
54988 info->flags |= FBINFO_HWACCEL_DISABLED;
54989 }
54990
54991diff --git a/drivers/video/fbdev/aty/mach64_cursor.c b/drivers/video/fbdev/aty/mach64_cursor.c
54992index 2fa0317..4983f2a 100644
54993--- a/drivers/video/fbdev/aty/mach64_cursor.c
54994+++ b/drivers/video/fbdev/aty/mach64_cursor.c
54995@@ -8,6 +8,7 @@
54996 #include "../core/fb_draw.h"
54997
54998 #include <asm/io.h>
54999+#include <asm/pgtable.h>
55000
55001 #ifdef __sparc__
55002 #include <asm/fbio.h>
55003@@ -218,7 +219,9 @@ int aty_init_cursor(struct fb_info *info)
55004 info->sprite.buf_align = 16; /* and 64 lines tall. */
55005 info->sprite.flags = FB_PIXMAP_IO;
55006
55007- info->fbops->fb_cursor = atyfb_cursor;
55008+ pax_open_kernel();
55009+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
55010+ pax_close_kernel();
55011
55012 return 0;
55013 }
55014diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
55015index 900aa4e..6d49418 100644
55016--- a/drivers/video/fbdev/core/fb_defio.c
55017+++ b/drivers/video/fbdev/core/fb_defio.c
55018@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
55019
55020 BUG_ON(!fbdefio);
55021 mutex_init(&fbdefio->lock);
55022- info->fbops->fb_mmap = fb_deferred_io_mmap;
55023+ pax_open_kernel();
55024+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
55025+ pax_close_kernel();
55026 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
55027 INIT_LIST_HEAD(&fbdefio->pagelist);
55028 if (fbdefio->delay == 0) /* set a default of 1 s */
55029@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
55030 page->mapping = NULL;
55031 }
55032
55033- info->fbops->fb_mmap = NULL;
55034+ *(void **)&info->fbops->fb_mmap = NULL;
55035 mutex_destroy(&fbdefio->lock);
55036 }
55037 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
55038diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
55039index b5e85f6..290f8c7 100644
55040--- a/drivers/video/fbdev/core/fbmem.c
55041+++ b/drivers/video/fbdev/core/fbmem.c
55042@@ -1301,7 +1301,7 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
55043 __u32 data;
55044 int err;
55045
55046- err = copy_to_user(&fix32->id, &fix->id, sizeof(fix32->id));
55047+ err = copy_to_user(fix32->id, &fix->id, sizeof(fix32->id));
55048
55049 data = (__u32) (unsigned long) fix->smem_start;
55050 err |= put_user(data, &fix32->smem_start);
55051diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
55052index 4254336..282567e 100644
55053--- a/drivers/video/fbdev/hyperv_fb.c
55054+++ b/drivers/video/fbdev/hyperv_fb.c
55055@@ -240,7 +240,7 @@ static uint screen_fb_size;
55056 static inline int synthvid_send(struct hv_device *hdev,
55057 struct synthvid_msg *msg)
55058 {
55059- static atomic64_t request_id = ATOMIC64_INIT(0);
55060+ static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
55061 int ret;
55062
55063 msg->pipe_hdr.type = PIPE_MSG_DATA;
55064@@ -248,7 +248,7 @@ static inline int synthvid_send(struct hv_device *hdev,
55065
55066 ret = vmbus_sendpacket(hdev->channel, msg,
55067 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
55068- atomic64_inc_return(&request_id),
55069+ atomic64_inc_return_unchecked(&request_id),
55070 VM_PKT_DATA_INBAND, 0);
55071
55072 if (ret)
55073diff --git a/drivers/video/fbdev/i810/i810_accel.c b/drivers/video/fbdev/i810/i810_accel.c
55074index 7672d2e..b56437f 100644
55075--- a/drivers/video/fbdev/i810/i810_accel.c
55076+++ b/drivers/video/fbdev/i810/i810_accel.c
55077@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
55078 }
55079 }
55080 printk("ringbuffer lockup!!!\n");
55081+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
55082 i810_report_error(mmio);
55083 par->dev_flags |= LOCKUP;
55084 info->pixmap.scan_align = 1;
55085diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55086index a01147f..5d896f8 100644
55087--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55088+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
55089@@ -1088,14 +1088,20 @@ static void MGAG100_restore(struct matrox_fb_info *minfo)
55090
55091 #ifdef CONFIG_FB_MATROX_MYSTIQUE
55092 struct matrox_switch matrox_mystique = {
55093- MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
55094+ .preinit = MGA1064_preinit,
55095+ .reset = MGA1064_reset,
55096+ .init = MGA1064_init,
55097+ .restore = MGA1064_restore,
55098 };
55099 EXPORT_SYMBOL(matrox_mystique);
55100 #endif
55101
55102 #ifdef CONFIG_FB_MATROX_G
55103 struct matrox_switch matrox_G100 = {
55104- MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
55105+ .preinit = MGAG100_preinit,
55106+ .reset = MGAG100_reset,
55107+ .init = MGAG100_init,
55108+ .restore = MGAG100_restore,
55109 };
55110 EXPORT_SYMBOL(matrox_G100);
55111 #endif
55112diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55113index 195ad7c..09743fc 100644
55114--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55115+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
55116@@ -738,7 +738,10 @@ static int Ti3026_preinit(struct matrox_fb_info *minfo)
55117 }
55118
55119 struct matrox_switch matrox_millennium = {
55120- Ti3026_preinit, Ti3026_reset, Ti3026_init, Ti3026_restore
55121+ .preinit = Ti3026_preinit,
55122+ .reset = Ti3026_reset,
55123+ .init = Ti3026_init,
55124+ .restore = Ti3026_restore
55125 };
55126 EXPORT_SYMBOL(matrox_millennium);
55127 #endif
55128diff --git a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55129index fe92eed..106e085 100644
55130--- a/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55131+++ b/drivers/video/fbdev/mb862xx/mb862xxfb_accel.c
55132@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
55133 struct mb862xxfb_par *par = info->par;
55134
55135 if (info->var.bits_per_pixel == 32) {
55136- info->fbops->fb_fillrect = cfb_fillrect;
55137- info->fbops->fb_copyarea = cfb_copyarea;
55138- info->fbops->fb_imageblit = cfb_imageblit;
55139+ pax_open_kernel();
55140+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55141+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55142+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55143+ pax_close_kernel();
55144 } else {
55145 outreg(disp, GC_L0EM, 3);
55146- info->fbops->fb_fillrect = mb86290fb_fillrect;
55147- info->fbops->fb_copyarea = mb86290fb_copyarea;
55148- info->fbops->fb_imageblit = mb86290fb_imageblit;
55149+ pax_open_kernel();
55150+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
55151+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
55152+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
55153+ pax_close_kernel();
55154 }
55155 outreg(draw, GDC_REG_DRAW_BASE, 0);
55156 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
55157diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
55158index def0412..fed6529 100644
55159--- a/drivers/video/fbdev/nvidia/nvidia.c
55160+++ b/drivers/video/fbdev/nvidia/nvidia.c
55161@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
55162 info->fix.line_length = (info->var.xres_virtual *
55163 info->var.bits_per_pixel) >> 3;
55164 if (info->var.accel_flags) {
55165- info->fbops->fb_imageblit = nvidiafb_imageblit;
55166- info->fbops->fb_fillrect = nvidiafb_fillrect;
55167- info->fbops->fb_copyarea = nvidiafb_copyarea;
55168- info->fbops->fb_sync = nvidiafb_sync;
55169+ pax_open_kernel();
55170+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
55171+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
55172+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
55173+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
55174+ pax_close_kernel();
55175 info->pixmap.scan_align = 4;
55176 info->flags &= ~FBINFO_HWACCEL_DISABLED;
55177 info->flags |= FBINFO_READS_FAST;
55178 NVResetGraphics(info);
55179 } else {
55180- info->fbops->fb_imageblit = cfb_imageblit;
55181- info->fbops->fb_fillrect = cfb_fillrect;
55182- info->fbops->fb_copyarea = cfb_copyarea;
55183- info->fbops->fb_sync = NULL;
55184+ pax_open_kernel();
55185+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
55186+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
55187+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
55188+ *(void **)&info->fbops->fb_sync = NULL;
55189+ pax_close_kernel();
55190 info->pixmap.scan_align = 1;
55191 info->flags |= FBINFO_HWACCEL_DISABLED;
55192 info->flags &= ~FBINFO_READS_FAST;
55193@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
55194 info->pixmap.size = 8 * 1024;
55195 info->pixmap.flags = FB_PIXMAP_SYSTEM;
55196
55197- if (!hwcur)
55198- info->fbops->fb_cursor = NULL;
55199+ if (!hwcur) {
55200+ pax_open_kernel();
55201+ *(void **)&info->fbops->fb_cursor = NULL;
55202+ pax_close_kernel();
55203+ }
55204
55205 info->var.accel_flags = (!noaccel);
55206
55207diff --git a/drivers/video/fbdev/omap2/dss/display.c b/drivers/video/fbdev/omap2/dss/display.c
55208index 2412a0d..294215b 100644
55209--- a/drivers/video/fbdev/omap2/dss/display.c
55210+++ b/drivers/video/fbdev/omap2/dss/display.c
55211@@ -161,12 +161,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
55212 if (dssdev->name == NULL)
55213 dssdev->name = dssdev->alias;
55214
55215+ pax_open_kernel();
55216 if (drv && drv->get_resolution == NULL)
55217- drv->get_resolution = omapdss_default_get_resolution;
55218+ *(void **)&drv->get_resolution = omapdss_default_get_resolution;
55219 if (drv && drv->get_recommended_bpp == NULL)
55220- drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55221+ *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
55222 if (drv && drv->get_timings == NULL)
55223- drv->get_timings = omapdss_default_get_timings;
55224+ *(void **)&drv->get_timings = omapdss_default_get_timings;
55225+ pax_close_kernel();
55226
55227 mutex_lock(&panel_list_mutex);
55228 list_add_tail(&dssdev->panel_list, &panel_list);
55229diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
55230index 83433cb..71e9b98 100644
55231--- a/drivers/video/fbdev/s1d13xxxfb.c
55232+++ b/drivers/video/fbdev/s1d13xxxfb.c
55233@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
55234
55235 switch(prod_id) {
55236 case S1D13506_PROD_ID: /* activate acceleration */
55237- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55238- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55239+ pax_open_kernel();
55240+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
55241+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
55242+ pax_close_kernel();
55243 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
55244 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
55245 break;
55246diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55247index 2bcc84a..29dd1ea 100644
55248--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
55249+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
55250@@ -439,9 +439,9 @@ static unsigned long lcdc_sys_read_data(void *handle)
55251 }
55252
55253 static struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
55254- lcdc_sys_write_index,
55255- lcdc_sys_write_data,
55256- lcdc_sys_read_data,
55257+ .write_index = lcdc_sys_write_index,
55258+ .write_data = lcdc_sys_write_data,
55259+ .read_data = lcdc_sys_read_data,
55260 };
55261
55262 static int sh_mobile_lcdc_sginit(struct fb_info *info,
55263diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c
55264index d513ed6..90b0de9 100644
55265--- a/drivers/video/fbdev/smscufx.c
55266+++ b/drivers/video/fbdev/smscufx.c
55267@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
55268 fb_deferred_io_cleanup(info);
55269 kfree(info->fbdefio);
55270 info->fbdefio = NULL;
55271- info->fbops->fb_mmap = ufx_ops_mmap;
55272+ pax_open_kernel();
55273+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
55274+ pax_close_kernel();
55275 }
55276
55277 pr_debug("released /dev/fb%d user=%d count=%d",
55278diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
55279index 77b890e..458e666 100644
55280--- a/drivers/video/fbdev/udlfb.c
55281+++ b/drivers/video/fbdev/udlfb.c
55282@@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
55283 dlfb_urb_completion(urb);
55284
55285 error:
55286- atomic_add(bytes_sent, &dev->bytes_sent);
55287- atomic_add(bytes_identical, &dev->bytes_identical);
55288- atomic_add(width*height*2, &dev->bytes_rendered);
55289+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55290+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55291+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
55292 end_cycles = get_cycles();
55293- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55294+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55295 >> 10)), /* Kcycles */
55296 &dev->cpu_kcycles_used);
55297
55298@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
55299 dlfb_urb_completion(urb);
55300
55301 error:
55302- atomic_add(bytes_sent, &dev->bytes_sent);
55303- atomic_add(bytes_identical, &dev->bytes_identical);
55304- atomic_add(bytes_rendered, &dev->bytes_rendered);
55305+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
55306+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
55307+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
55308 end_cycles = get_cycles();
55309- atomic_add(((unsigned int) ((end_cycles - start_cycles)
55310+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
55311 >> 10)), /* Kcycles */
55312 &dev->cpu_kcycles_used);
55313 }
55314@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
55315 fb_deferred_io_cleanup(info);
55316 kfree(info->fbdefio);
55317 info->fbdefio = NULL;
55318- info->fbops->fb_mmap = dlfb_ops_mmap;
55319+ pax_open_kernel();
55320+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
55321+ pax_close_kernel();
55322 }
55323
55324 pr_warn("released /dev/fb%d user=%d count=%d\n",
55325@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
55326 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55327 struct dlfb_data *dev = fb_info->par;
55328 return snprintf(buf, PAGE_SIZE, "%u\n",
55329- atomic_read(&dev->bytes_rendered));
55330+ atomic_read_unchecked(&dev->bytes_rendered));
55331 }
55332
55333 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55334@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
55335 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55336 struct dlfb_data *dev = fb_info->par;
55337 return snprintf(buf, PAGE_SIZE, "%u\n",
55338- atomic_read(&dev->bytes_identical));
55339+ atomic_read_unchecked(&dev->bytes_identical));
55340 }
55341
55342 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55343@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
55344 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55345 struct dlfb_data *dev = fb_info->par;
55346 return snprintf(buf, PAGE_SIZE, "%u\n",
55347- atomic_read(&dev->bytes_sent));
55348+ atomic_read_unchecked(&dev->bytes_sent));
55349 }
55350
55351 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55352@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
55353 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55354 struct dlfb_data *dev = fb_info->par;
55355 return snprintf(buf, PAGE_SIZE, "%u\n",
55356- atomic_read(&dev->cpu_kcycles_used));
55357+ atomic_read_unchecked(&dev->cpu_kcycles_used));
55358 }
55359
55360 static ssize_t edid_show(
55361@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
55362 struct fb_info *fb_info = dev_get_drvdata(fbdev);
55363 struct dlfb_data *dev = fb_info->par;
55364
55365- atomic_set(&dev->bytes_rendered, 0);
55366- atomic_set(&dev->bytes_identical, 0);
55367- atomic_set(&dev->bytes_sent, 0);
55368- atomic_set(&dev->cpu_kcycles_used, 0);
55369+ atomic_set_unchecked(&dev->bytes_rendered, 0);
55370+ atomic_set_unchecked(&dev->bytes_identical, 0);
55371+ atomic_set_unchecked(&dev->bytes_sent, 0);
55372+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
55373
55374 return count;
55375 }
55376diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
55377index 509d452..7c9d2de 100644
55378--- a/drivers/video/fbdev/uvesafb.c
55379+++ b/drivers/video/fbdev/uvesafb.c
55380@@ -19,6 +19,7 @@
55381 #include <linux/io.h>
55382 #include <linux/mutex.h>
55383 #include <linux/slab.h>
55384+#include <linux/moduleloader.h>
55385 #include <video/edid.h>
55386 #include <video/uvesafb.h>
55387 #ifdef CONFIG_X86
55388@@ -565,10 +566,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
55389 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
55390 par->pmi_setpal = par->ypan = 0;
55391 } else {
55392+
55393+#ifdef CONFIG_PAX_KERNEXEC
55394+#ifdef CONFIG_MODULES
55395+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
55396+#endif
55397+ if (!par->pmi_code) {
55398+ par->pmi_setpal = par->ypan = 0;
55399+ return 0;
55400+ }
55401+#endif
55402+
55403 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
55404 + task->t.regs.edi);
55405+
55406+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55407+ pax_open_kernel();
55408+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
55409+ pax_close_kernel();
55410+
55411+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
55412+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
55413+#else
55414 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
55415 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
55416+#endif
55417+
55418 printk(KERN_INFO "uvesafb: protected mode interface info at "
55419 "%04x:%04x\n",
55420 (u16)task->t.regs.es, (u16)task->t.regs.edi);
55421@@ -813,13 +836,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
55422 par->ypan = ypan;
55423
55424 if (par->pmi_setpal || par->ypan) {
55425+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
55426 if (__supported_pte_mask & _PAGE_NX) {
55427 par->pmi_setpal = par->ypan = 0;
55428 printk(KERN_WARNING "uvesafb: NX protection is active, "
55429 "better not use the PMI.\n");
55430- } else {
55431+ } else
55432+#endif
55433 uvesafb_vbe_getpmi(task, par);
55434- }
55435 }
55436 #else
55437 /* The protected mode interface is not available on non-x86. */
55438@@ -1453,8 +1477,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55439 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
55440
55441 /* Disable blanking if the user requested so. */
55442- if (!blank)
55443- info->fbops->fb_blank = NULL;
55444+ if (!blank) {
55445+ pax_open_kernel();
55446+ *(void **)&info->fbops->fb_blank = NULL;
55447+ pax_close_kernel();
55448+ }
55449
55450 /*
55451 * Find out how much IO memory is required for the mode with
55452@@ -1525,8 +1552,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
55453 info->flags = FBINFO_FLAG_DEFAULT |
55454 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
55455
55456- if (!par->ypan)
55457- info->fbops->fb_pan_display = NULL;
55458+ if (!par->ypan) {
55459+ pax_open_kernel();
55460+ *(void **)&info->fbops->fb_pan_display = NULL;
55461+ pax_close_kernel();
55462+ }
55463 }
55464
55465 static void uvesafb_init_mtrr(struct fb_info *info)
55466@@ -1787,6 +1817,11 @@ out_mode:
55467 out:
55468 kfree(par->vbe_modes);
55469
55470+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55471+ if (par->pmi_code)
55472+ module_free_exec(NULL, par->pmi_code);
55473+#endif
55474+
55475 framebuffer_release(info);
55476 return err;
55477 }
55478@@ -1811,6 +1846,11 @@ static int uvesafb_remove(struct platform_device *dev)
55479 kfree(par->vbe_state_orig);
55480 kfree(par->vbe_state_saved);
55481
55482+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55483+ if (par->pmi_code)
55484+ module_free_exec(NULL, par->pmi_code);
55485+#endif
55486+
55487 framebuffer_release(info);
55488 }
55489 return 0;
55490diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
55491index 6170e7f..dd63031 100644
55492--- a/drivers/video/fbdev/vesafb.c
55493+++ b/drivers/video/fbdev/vesafb.c
55494@@ -9,6 +9,7 @@
55495 */
55496
55497 #include <linux/module.h>
55498+#include <linux/moduleloader.h>
55499 #include <linux/kernel.h>
55500 #include <linux/errno.h>
55501 #include <linux/string.h>
55502@@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
55503 static int vram_total; /* Set total amount of memory */
55504 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
55505 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
55506-static void (*pmi_start)(void) __read_mostly;
55507-static void (*pmi_pal) (void) __read_mostly;
55508+static void (*pmi_start)(void) __read_only;
55509+static void (*pmi_pal) (void) __read_only;
55510 static int depth __read_mostly;
55511 static int vga_compat __read_mostly;
55512 /* --------------------------------------------------------------------- */
55513@@ -233,6 +234,7 @@ static int vesafb_probe(struct platform_device *dev)
55514 unsigned int size_remap;
55515 unsigned int size_total;
55516 char *option = NULL;
55517+ void *pmi_code = NULL;
55518
55519 /* ignore error return of fb_get_options */
55520 fb_get_options("vesafb", &option);
55521@@ -279,10 +281,6 @@ static int vesafb_probe(struct platform_device *dev)
55522 size_remap = size_total;
55523 vesafb_fix.smem_len = size_remap;
55524
55525-#ifndef __i386__
55526- screen_info.vesapm_seg = 0;
55527-#endif
55528-
55529 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
55530 printk(KERN_WARNING
55531 "vesafb: cannot reserve video memory at 0x%lx\n",
55532@@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
55533 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
55534 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
55535
55536+#ifdef __i386__
55537+
55538+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55539+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
55540+ if (!pmi_code)
55541+#elif !defined(CONFIG_PAX_KERNEXEC)
55542+ if (0)
55543+#endif
55544+
55545+#endif
55546+ screen_info.vesapm_seg = 0;
55547+
55548 if (screen_info.vesapm_seg) {
55549- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
55550- screen_info.vesapm_seg,screen_info.vesapm_off);
55551+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
55552+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
55553 }
55554
55555 if (screen_info.vesapm_seg < 0xc000)
55556@@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
55557
55558 if (ypan || pmi_setpal) {
55559 unsigned short *pmi_base;
55560+
55561 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
55562- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
55563- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
55564+
55565+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55566+ pax_open_kernel();
55567+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
55568+#else
55569+ pmi_code = pmi_base;
55570+#endif
55571+
55572+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
55573+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
55574+
55575+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55576+ pmi_start = ktva_ktla(pmi_start);
55577+ pmi_pal = ktva_ktla(pmi_pal);
55578+ pax_close_kernel();
55579+#endif
55580+
55581 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
55582 if (pmi_base[3]) {
55583 printk(KERN_INFO "vesafb: pmi: ports = ");
55584@@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
55585 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
55586 (ypan ? FBINFO_HWACCEL_YPAN : 0);
55587
55588- if (!ypan)
55589- info->fbops->fb_pan_display = NULL;
55590+ if (!ypan) {
55591+ pax_open_kernel();
55592+ *(void **)&info->fbops->fb_pan_display = NULL;
55593+ pax_close_kernel();
55594+ }
55595
55596 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
55597 err = -ENOMEM;
55598@@ -492,6 +521,11 @@ static int vesafb_probe(struct platform_device *dev)
55599 fb_info(info, "%s frame buffer device\n", info->fix.id);
55600 return 0;
55601 err:
55602+
55603+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
55604+ module_free_exec(NULL, pmi_code);
55605+#endif
55606+
55607 if (info->screen_base)
55608 iounmap(info->screen_base);
55609 framebuffer_release(info);
55610diff --git a/drivers/video/fbdev/via/via_clock.h b/drivers/video/fbdev/via/via_clock.h
55611index 88714ae..16c2e11 100644
55612--- a/drivers/video/fbdev/via/via_clock.h
55613+++ b/drivers/video/fbdev/via/via_clock.h
55614@@ -56,7 +56,7 @@ struct via_clock {
55615
55616 void (*set_engine_pll_state)(u8 state);
55617 void (*set_engine_pll)(struct via_pll_config config);
55618-};
55619+} __no_const;
55620
55621
55622 static inline u32 get_pll_internal_frequency(u32 ref_freq,
55623diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
55624index 3c14e43..2630570 100644
55625--- a/drivers/video/logo/logo_linux_clut224.ppm
55626+++ b/drivers/video/logo/logo_linux_clut224.ppm
55627@@ -2,1603 +2,1123 @@ P3
55628 # Standard 224-color Linux logo
55629 80 80
55630 255
55631- 0 0 0 0 0 0 0 0 0 0 0 0
55632- 0 0 0 0 0 0 0 0 0 0 0 0
55633- 0 0 0 0 0 0 0 0 0 0 0 0
55634- 0 0 0 0 0 0 0 0 0 0 0 0
55635- 0 0 0 0 0 0 0 0 0 0 0 0
55636- 0 0 0 0 0 0 0 0 0 0 0 0
55637- 0 0 0 0 0 0 0 0 0 0 0 0
55638- 0 0 0 0 0 0 0 0 0 0 0 0
55639- 0 0 0 0 0 0 0 0 0 0 0 0
55640- 6 6 6 6 6 6 10 10 10 10 10 10
55641- 10 10 10 6 6 6 6 6 6 6 6 6
55642- 0 0 0 0 0 0 0 0 0 0 0 0
55643- 0 0 0 0 0 0 0 0 0 0 0 0
55644- 0 0 0 0 0 0 0 0 0 0 0 0
55645- 0 0 0 0 0 0 0 0 0 0 0 0
55646- 0 0 0 0 0 0 0 0 0 0 0 0
55647- 0 0 0 0 0 0 0 0 0 0 0 0
55648- 0 0 0 0 0 0 0 0 0 0 0 0
55649- 0 0 0 0 0 0 0 0 0 0 0 0
55650- 0 0 0 0 0 0 0 0 0 0 0 0
55651- 0 0 0 0 0 0 0 0 0 0 0 0
55652- 0 0 0 0 0 0 0 0 0 0 0 0
55653- 0 0 0 0 0 0 0 0 0 0 0 0
55654- 0 0 0 0 0 0 0 0 0 0 0 0
55655- 0 0 0 0 0 0 0 0 0 0 0 0
55656- 0 0 0 0 0 0 0 0 0 0 0 0
55657- 0 0 0 0 0 0 0 0 0 0 0 0
55658- 0 0 0 0 0 0 0 0 0 0 0 0
55659- 0 0 0 6 6 6 10 10 10 14 14 14
55660- 22 22 22 26 26 26 30 30 30 34 34 34
55661- 30 30 30 30 30 30 26 26 26 18 18 18
55662- 14 14 14 10 10 10 6 6 6 0 0 0
55663- 0 0 0 0 0 0 0 0 0 0 0 0
55664- 0 0 0 0 0 0 0 0 0 0 0 0
55665- 0 0 0 0 0 0 0 0 0 0 0 0
55666- 0 0 0 0 0 0 0 0 0 0 0 0
55667- 0 0 0 0 0 0 0 0 0 0 0 0
55668- 0 0 0 0 0 0 0 0 0 0 0 0
55669- 0 0 0 0 0 0 0 0 0 0 0 0
55670- 0 0 0 0 0 0 0 0 0 0 0 0
55671- 0 0 0 0 0 0 0 0 0 0 0 0
55672- 0 0 0 0 0 1 0 0 1 0 0 0
55673- 0 0 0 0 0 0 0 0 0 0 0 0
55674- 0 0 0 0 0 0 0 0 0 0 0 0
55675- 0 0 0 0 0 0 0 0 0 0 0 0
55676- 0 0 0 0 0 0 0 0 0 0 0 0
55677- 0 0 0 0 0 0 0 0 0 0 0 0
55678- 0 0 0 0 0 0 0 0 0 0 0 0
55679- 6 6 6 14 14 14 26 26 26 42 42 42
55680- 54 54 54 66 66 66 78 78 78 78 78 78
55681- 78 78 78 74 74 74 66 66 66 54 54 54
55682- 42 42 42 26 26 26 18 18 18 10 10 10
55683- 6 6 6 0 0 0 0 0 0 0 0 0
55684- 0 0 0 0 0 0 0 0 0 0 0 0
55685- 0 0 0 0 0 0 0 0 0 0 0 0
55686- 0 0 0 0 0 0 0 0 0 0 0 0
55687- 0 0 0 0 0 0 0 0 0 0 0 0
55688- 0 0 0 0 0 0 0 0 0 0 0 0
55689- 0 0 0 0 0 0 0 0 0 0 0 0
55690- 0 0 0 0 0 0 0 0 0 0 0 0
55691- 0 0 0 0 0 0 0 0 0 0 0 0
55692- 0 0 1 0 0 0 0 0 0 0 0 0
55693- 0 0 0 0 0 0 0 0 0 0 0 0
55694- 0 0 0 0 0 0 0 0 0 0 0 0
55695- 0 0 0 0 0 0 0 0 0 0 0 0
55696- 0 0 0 0 0 0 0 0 0 0 0 0
55697- 0 0 0 0 0 0 0 0 0 0 0 0
55698- 0 0 0 0 0 0 0 0 0 10 10 10
55699- 22 22 22 42 42 42 66 66 66 86 86 86
55700- 66 66 66 38 38 38 38 38 38 22 22 22
55701- 26 26 26 34 34 34 54 54 54 66 66 66
55702- 86 86 86 70 70 70 46 46 46 26 26 26
55703- 14 14 14 6 6 6 0 0 0 0 0 0
55704- 0 0 0 0 0 0 0 0 0 0 0 0
55705- 0 0 0 0 0 0 0 0 0 0 0 0
55706- 0 0 0 0 0 0 0 0 0 0 0 0
55707- 0 0 0 0 0 0 0 0 0 0 0 0
55708- 0 0 0 0 0 0 0 0 0 0 0 0
55709- 0 0 0 0 0 0 0 0 0 0 0 0
55710- 0 0 0 0 0 0 0 0 0 0 0 0
55711- 0 0 0 0 0 0 0 0 0 0 0 0
55712- 0 0 1 0 0 1 0 0 1 0 0 0
55713- 0 0 0 0 0 0 0 0 0 0 0 0
55714- 0 0 0 0 0 0 0 0 0 0 0 0
55715- 0 0 0 0 0 0 0 0 0 0 0 0
55716- 0 0 0 0 0 0 0 0 0 0 0 0
55717- 0 0 0 0 0 0 0 0 0 0 0 0
55718- 0 0 0 0 0 0 10 10 10 26 26 26
55719- 50 50 50 82 82 82 58 58 58 6 6 6
55720- 2 2 6 2 2 6 2 2 6 2 2 6
55721- 2 2 6 2 2 6 2 2 6 2 2 6
55722- 6 6 6 54 54 54 86 86 86 66 66 66
55723- 38 38 38 18 18 18 6 6 6 0 0 0
55724- 0 0 0 0 0 0 0 0 0 0 0 0
55725- 0 0 0 0 0 0 0 0 0 0 0 0
55726- 0 0 0 0 0 0 0 0 0 0 0 0
55727- 0 0 0 0 0 0 0 0 0 0 0 0
55728- 0 0 0 0 0 0 0 0 0 0 0 0
55729- 0 0 0 0 0 0 0 0 0 0 0 0
55730- 0 0 0 0 0 0 0 0 0 0 0 0
55731- 0 0 0 0 0 0 0 0 0 0 0 0
55732- 0 0 0 0 0 0 0 0 0 0 0 0
55733- 0 0 0 0 0 0 0 0 0 0 0 0
55734- 0 0 0 0 0 0 0 0 0 0 0 0
55735- 0 0 0 0 0 0 0 0 0 0 0 0
55736- 0 0 0 0 0 0 0 0 0 0 0 0
55737- 0 0 0 0 0 0 0 0 0 0 0 0
55738- 0 0 0 6 6 6 22 22 22 50 50 50
55739- 78 78 78 34 34 34 2 2 6 2 2 6
55740- 2 2 6 2 2 6 2 2 6 2 2 6
55741- 2 2 6 2 2 6 2 2 6 2 2 6
55742- 2 2 6 2 2 6 6 6 6 70 70 70
55743- 78 78 78 46 46 46 22 22 22 6 6 6
55744- 0 0 0 0 0 0 0 0 0 0 0 0
55745- 0 0 0 0 0 0 0 0 0 0 0 0
55746- 0 0 0 0 0 0 0 0 0 0 0 0
55747- 0 0 0 0 0 0 0 0 0 0 0 0
55748- 0 0 0 0 0 0 0 0 0 0 0 0
55749- 0 0 0 0 0 0 0 0 0 0 0 0
55750- 0 0 0 0 0 0 0 0 0 0 0 0
55751- 0 0 0 0 0 0 0 0 0 0 0 0
55752- 0 0 1 0 0 1 0 0 1 0 0 0
55753- 0 0 0 0 0 0 0 0 0 0 0 0
55754- 0 0 0 0 0 0 0 0 0 0 0 0
55755- 0 0 0 0 0 0 0 0 0 0 0 0
55756- 0 0 0 0 0 0 0 0 0 0 0 0
55757- 0 0 0 0 0 0 0 0 0 0 0 0
55758- 6 6 6 18 18 18 42 42 42 82 82 82
55759- 26 26 26 2 2 6 2 2 6 2 2 6
55760- 2 2 6 2 2 6 2 2 6 2 2 6
55761- 2 2 6 2 2 6 2 2 6 14 14 14
55762- 46 46 46 34 34 34 6 6 6 2 2 6
55763- 42 42 42 78 78 78 42 42 42 18 18 18
55764- 6 6 6 0 0 0 0 0 0 0 0 0
55765- 0 0 0 0 0 0 0 0 0 0 0 0
55766- 0 0 0 0 0 0 0 0 0 0 0 0
55767- 0 0 0 0 0 0 0 0 0 0 0 0
55768- 0 0 0 0 0 0 0 0 0 0 0 0
55769- 0 0 0 0 0 0 0 0 0 0 0 0
55770- 0 0 0 0 0 0 0 0 0 0 0 0
55771- 0 0 0 0 0 0 0 0 0 0 0 0
55772- 0 0 1 0 0 0 0 0 1 0 0 0
55773- 0 0 0 0 0 0 0 0 0 0 0 0
55774- 0 0 0 0 0 0 0 0 0 0 0 0
55775- 0 0 0 0 0 0 0 0 0 0 0 0
55776- 0 0 0 0 0 0 0 0 0 0 0 0
55777- 0 0 0 0 0 0 0 0 0 0 0 0
55778- 10 10 10 30 30 30 66 66 66 58 58 58
55779- 2 2 6 2 2 6 2 2 6 2 2 6
55780- 2 2 6 2 2 6 2 2 6 2 2 6
55781- 2 2 6 2 2 6 2 2 6 26 26 26
55782- 86 86 86 101 101 101 46 46 46 10 10 10
55783- 2 2 6 58 58 58 70 70 70 34 34 34
55784- 10 10 10 0 0 0 0 0 0 0 0 0
55785- 0 0 0 0 0 0 0 0 0 0 0 0
55786- 0 0 0 0 0 0 0 0 0 0 0 0
55787- 0 0 0 0 0 0 0 0 0 0 0 0
55788- 0 0 0 0 0 0 0 0 0 0 0 0
55789- 0 0 0 0 0 0 0 0 0 0 0 0
55790- 0 0 0 0 0 0 0 0 0 0 0 0
55791- 0 0 0 0 0 0 0 0 0 0 0 0
55792- 0 0 1 0 0 1 0 0 1 0 0 0
55793- 0 0 0 0 0 0 0 0 0 0 0 0
55794- 0 0 0 0 0 0 0 0 0 0 0 0
55795- 0 0 0 0 0 0 0 0 0 0 0 0
55796- 0 0 0 0 0 0 0 0 0 0 0 0
55797- 0 0 0 0 0 0 0 0 0 0 0 0
55798- 14 14 14 42 42 42 86 86 86 10 10 10
55799- 2 2 6 2 2 6 2 2 6 2 2 6
55800- 2 2 6 2 2 6 2 2 6 2 2 6
55801- 2 2 6 2 2 6 2 2 6 30 30 30
55802- 94 94 94 94 94 94 58 58 58 26 26 26
55803- 2 2 6 6 6 6 78 78 78 54 54 54
55804- 22 22 22 6 6 6 0 0 0 0 0 0
55805- 0 0 0 0 0 0 0 0 0 0 0 0
55806- 0 0 0 0 0 0 0 0 0 0 0 0
55807- 0 0 0 0 0 0 0 0 0 0 0 0
55808- 0 0 0 0 0 0 0 0 0 0 0 0
55809- 0 0 0 0 0 0 0 0 0 0 0 0
55810- 0 0 0 0 0 0 0 0 0 0 0 0
55811- 0 0 0 0 0 0 0 0 0 0 0 0
55812- 0 0 0 0 0 0 0 0 0 0 0 0
55813- 0 0 0 0 0 0 0 0 0 0 0 0
55814- 0 0 0 0 0 0 0 0 0 0 0 0
55815- 0 0 0 0 0 0 0 0 0 0 0 0
55816- 0 0 0 0 0 0 0 0 0 0 0 0
55817- 0 0 0 0 0 0 0 0 0 6 6 6
55818- 22 22 22 62 62 62 62 62 62 2 2 6
55819- 2 2 6 2 2 6 2 2 6 2 2 6
55820- 2 2 6 2 2 6 2 2 6 2 2 6
55821- 2 2 6 2 2 6 2 2 6 26 26 26
55822- 54 54 54 38 38 38 18 18 18 10 10 10
55823- 2 2 6 2 2 6 34 34 34 82 82 82
55824- 38 38 38 14 14 14 0 0 0 0 0 0
55825- 0 0 0 0 0 0 0 0 0 0 0 0
55826- 0 0 0 0 0 0 0 0 0 0 0 0
55827- 0 0 0 0 0 0 0 0 0 0 0 0
55828- 0 0 0 0 0 0 0 0 0 0 0 0
55829- 0 0 0 0 0 0 0 0 0 0 0 0
55830- 0 0 0 0 0 0 0 0 0 0 0 0
55831- 0 0 0 0 0 0 0 0 0 0 0 0
55832- 0 0 0 0 0 1 0 0 1 0 0 0
55833- 0 0 0 0 0 0 0 0 0 0 0 0
55834- 0 0 0 0 0 0 0 0 0 0 0 0
55835- 0 0 0 0 0 0 0 0 0 0 0 0
55836- 0 0 0 0 0 0 0 0 0 0 0 0
55837- 0 0 0 0 0 0 0 0 0 6 6 6
55838- 30 30 30 78 78 78 30 30 30 2 2 6
55839- 2 2 6 2 2 6 2 2 6 2 2 6
55840- 2 2 6 2 2 6 2 2 6 2 2 6
55841- 2 2 6 2 2 6 2 2 6 10 10 10
55842- 10 10 10 2 2 6 2 2 6 2 2 6
55843- 2 2 6 2 2 6 2 2 6 78 78 78
55844- 50 50 50 18 18 18 6 6 6 0 0 0
55845- 0 0 0 0 0 0 0 0 0 0 0 0
55846- 0 0 0 0 0 0 0 0 0 0 0 0
55847- 0 0 0 0 0 0 0 0 0 0 0 0
55848- 0 0 0 0 0 0 0 0 0 0 0 0
55849- 0 0 0 0 0 0 0 0 0 0 0 0
55850- 0 0 0 0 0 0 0 0 0 0 0 0
55851- 0 0 0 0 0 0 0 0 0 0 0 0
55852- 0 0 1 0 0 0 0 0 0 0 0 0
55853- 0 0 0 0 0 0 0 0 0 0 0 0
55854- 0 0 0 0 0 0 0 0 0 0 0 0
55855- 0 0 0 0 0 0 0 0 0 0 0 0
55856- 0 0 0 0 0 0 0 0 0 0 0 0
55857- 0 0 0 0 0 0 0 0 0 10 10 10
55858- 38 38 38 86 86 86 14 14 14 2 2 6
55859- 2 2 6 2 2 6 2 2 6 2 2 6
55860- 2 2 6 2 2 6 2 2 6 2 2 6
55861- 2 2 6 2 2 6 2 2 6 2 2 6
55862- 2 2 6 2 2 6 2 2 6 2 2 6
55863- 2 2 6 2 2 6 2 2 6 54 54 54
55864- 66 66 66 26 26 26 6 6 6 0 0 0
55865- 0 0 0 0 0 0 0 0 0 0 0 0
55866- 0 0 0 0 0 0 0 0 0 0 0 0
55867- 0 0 0 0 0 0 0 0 0 0 0 0
55868- 0 0 0 0 0 0 0 0 0 0 0 0
55869- 0 0 0 0 0 0 0 0 0 0 0 0
55870- 0 0 0 0 0 0 0 0 0 0 0 0
55871- 0 0 0 0 0 0 0 0 0 0 0 0
55872- 0 0 0 0 0 1 0 0 1 0 0 0
55873- 0 0 0 0 0 0 0 0 0 0 0 0
55874- 0 0 0 0 0 0 0 0 0 0 0 0
55875- 0 0 0 0 0 0 0 0 0 0 0 0
55876- 0 0 0 0 0 0 0 0 0 0 0 0
55877- 0 0 0 0 0 0 0 0 0 14 14 14
55878- 42 42 42 82 82 82 2 2 6 2 2 6
55879- 2 2 6 6 6 6 10 10 10 2 2 6
55880- 2 2 6 2 2 6 2 2 6 2 2 6
55881- 2 2 6 2 2 6 2 2 6 6 6 6
55882- 14 14 14 10 10 10 2 2 6 2 2 6
55883- 2 2 6 2 2 6 2 2 6 18 18 18
55884- 82 82 82 34 34 34 10 10 10 0 0 0
55885- 0 0 0 0 0 0 0 0 0 0 0 0
55886- 0 0 0 0 0 0 0 0 0 0 0 0
55887- 0 0 0 0 0 0 0 0 0 0 0 0
55888- 0 0 0 0 0 0 0 0 0 0 0 0
55889- 0 0 0 0 0 0 0 0 0 0 0 0
55890- 0 0 0 0 0 0 0 0 0 0 0 0
55891- 0 0 0 0 0 0 0 0 0 0 0 0
55892- 0 0 1 0 0 0 0 0 0 0 0 0
55893- 0 0 0 0 0 0 0 0 0 0 0 0
55894- 0 0 0 0 0 0 0 0 0 0 0 0
55895- 0 0 0 0 0 0 0 0 0 0 0 0
55896- 0 0 0 0 0 0 0 0 0 0 0 0
55897- 0 0 0 0 0 0 0 0 0 14 14 14
55898- 46 46 46 86 86 86 2 2 6 2 2 6
55899- 6 6 6 6 6 6 22 22 22 34 34 34
55900- 6 6 6 2 2 6 2 2 6 2 2 6
55901- 2 2 6 2 2 6 18 18 18 34 34 34
55902- 10 10 10 50 50 50 22 22 22 2 2 6
55903- 2 2 6 2 2 6 2 2 6 10 10 10
55904- 86 86 86 42 42 42 14 14 14 0 0 0
55905- 0 0 0 0 0 0 0 0 0 0 0 0
55906- 0 0 0 0 0 0 0 0 0 0 0 0
55907- 0 0 0 0 0 0 0 0 0 0 0 0
55908- 0 0 0 0 0 0 0 0 0 0 0 0
55909- 0 0 0 0 0 0 0 0 0 0 0 0
55910- 0 0 0 0 0 0 0 0 0 0 0 0
55911- 0 0 0 0 0 0 0 0 0 0 0 0
55912- 0 0 1 0 0 1 0 0 1 0 0 0
55913- 0 0 0 0 0 0 0 0 0 0 0 0
55914- 0 0 0 0 0 0 0 0 0 0 0 0
55915- 0 0 0 0 0 0 0 0 0 0 0 0
55916- 0 0 0 0 0 0 0 0 0 0 0 0
55917- 0 0 0 0 0 0 0 0 0 14 14 14
55918- 46 46 46 86 86 86 2 2 6 2 2 6
55919- 38 38 38 116 116 116 94 94 94 22 22 22
55920- 22 22 22 2 2 6 2 2 6 2 2 6
55921- 14 14 14 86 86 86 138 138 138 162 162 162
55922-154 154 154 38 38 38 26 26 26 6 6 6
55923- 2 2 6 2 2 6 2 2 6 2 2 6
55924- 86 86 86 46 46 46 14 14 14 0 0 0
55925- 0 0 0 0 0 0 0 0 0 0 0 0
55926- 0 0 0 0 0 0 0 0 0 0 0 0
55927- 0 0 0 0 0 0 0 0 0 0 0 0
55928- 0 0 0 0 0 0 0 0 0 0 0 0
55929- 0 0 0 0 0 0 0 0 0 0 0 0
55930- 0 0 0 0 0 0 0 0 0 0 0 0
55931- 0 0 0 0 0 0 0 0 0 0 0 0
55932- 0 0 0 0 0 0 0 0 0 0 0 0
55933- 0 0 0 0 0 0 0 0 0 0 0 0
55934- 0 0 0 0 0 0 0 0 0 0 0 0
55935- 0 0 0 0 0 0 0 0 0 0 0 0
55936- 0 0 0 0 0 0 0 0 0 0 0 0
55937- 0 0 0 0 0 0 0 0 0 14 14 14
55938- 46 46 46 86 86 86 2 2 6 14 14 14
55939-134 134 134 198 198 198 195 195 195 116 116 116
55940- 10 10 10 2 2 6 2 2 6 6 6 6
55941-101 98 89 187 187 187 210 210 210 218 218 218
55942-214 214 214 134 134 134 14 14 14 6 6 6
55943- 2 2 6 2 2 6 2 2 6 2 2 6
55944- 86 86 86 50 50 50 18 18 18 6 6 6
55945- 0 0 0 0 0 0 0 0 0 0 0 0
55946- 0 0 0 0 0 0 0 0 0 0 0 0
55947- 0 0 0 0 0 0 0 0 0 0 0 0
55948- 0 0 0 0 0 0 0 0 0 0 0 0
55949- 0 0 0 0 0 0 0 0 0 0 0 0
55950- 0 0 0 0 0 0 0 0 0 0 0 0
55951- 0 0 0 0 0 0 0 0 1 0 0 0
55952- 0 0 1 0 0 1 0 0 1 0 0 0
55953- 0 0 0 0 0 0 0 0 0 0 0 0
55954- 0 0 0 0 0 0 0 0 0 0 0 0
55955- 0 0 0 0 0 0 0 0 0 0 0 0
55956- 0 0 0 0 0 0 0 0 0 0 0 0
55957- 0 0 0 0 0 0 0 0 0 14 14 14
55958- 46 46 46 86 86 86 2 2 6 54 54 54
55959-218 218 218 195 195 195 226 226 226 246 246 246
55960- 58 58 58 2 2 6 2 2 6 30 30 30
55961-210 210 210 253 253 253 174 174 174 123 123 123
55962-221 221 221 234 234 234 74 74 74 2 2 6
55963- 2 2 6 2 2 6 2 2 6 2 2 6
55964- 70 70 70 58 58 58 22 22 22 6 6 6
55965- 0 0 0 0 0 0 0 0 0 0 0 0
55966- 0 0 0 0 0 0 0 0 0 0 0 0
55967- 0 0 0 0 0 0 0 0 0 0 0 0
55968- 0 0 0 0 0 0 0 0 0 0 0 0
55969- 0 0 0 0 0 0 0 0 0 0 0 0
55970- 0 0 0 0 0 0 0 0 0 0 0 0
55971- 0 0 0 0 0 0 0 0 0 0 0 0
55972- 0 0 0 0 0 0 0 0 0 0 0 0
55973- 0 0 0 0 0 0 0 0 0 0 0 0
55974- 0 0 0 0 0 0 0 0 0 0 0 0
55975- 0 0 0 0 0 0 0 0 0 0 0 0
55976- 0 0 0 0 0 0 0 0 0 0 0 0
55977- 0 0 0 0 0 0 0 0 0 14 14 14
55978- 46 46 46 82 82 82 2 2 6 106 106 106
55979-170 170 170 26 26 26 86 86 86 226 226 226
55980-123 123 123 10 10 10 14 14 14 46 46 46
55981-231 231 231 190 190 190 6 6 6 70 70 70
55982- 90 90 90 238 238 238 158 158 158 2 2 6
55983- 2 2 6 2 2 6 2 2 6 2 2 6
55984- 70 70 70 58 58 58 22 22 22 6 6 6
55985- 0 0 0 0 0 0 0 0 0 0 0 0
55986- 0 0 0 0 0 0 0 0 0 0 0 0
55987- 0 0 0 0 0 0 0 0 0 0 0 0
55988- 0 0 0 0 0 0 0 0 0 0 0 0
55989- 0 0 0 0 0 0 0 0 0 0 0 0
55990- 0 0 0 0 0 0 0 0 0 0 0 0
55991- 0 0 0 0 0 0 0 0 1 0 0 0
55992- 0 0 1 0 0 1 0 0 1 0 0 0
55993- 0 0 0 0 0 0 0 0 0 0 0 0
55994- 0 0 0 0 0 0 0 0 0 0 0 0
55995- 0 0 0 0 0 0 0 0 0 0 0 0
55996- 0 0 0 0 0 0 0 0 0 0 0 0
55997- 0 0 0 0 0 0 0 0 0 14 14 14
55998- 42 42 42 86 86 86 6 6 6 116 116 116
55999-106 106 106 6 6 6 70 70 70 149 149 149
56000-128 128 128 18 18 18 38 38 38 54 54 54
56001-221 221 221 106 106 106 2 2 6 14 14 14
56002- 46 46 46 190 190 190 198 198 198 2 2 6
56003- 2 2 6 2 2 6 2 2 6 2 2 6
56004- 74 74 74 62 62 62 22 22 22 6 6 6
56005- 0 0 0 0 0 0 0 0 0 0 0 0
56006- 0 0 0 0 0 0 0 0 0 0 0 0
56007- 0 0 0 0 0 0 0 0 0 0 0 0
56008- 0 0 0 0 0 0 0 0 0 0 0 0
56009- 0 0 0 0 0 0 0 0 0 0 0 0
56010- 0 0 0 0 0 0 0 0 0 0 0 0
56011- 0 0 0 0 0 0 0 0 1 0 0 0
56012- 0 0 1 0 0 0 0 0 1 0 0 0
56013- 0 0 0 0 0 0 0 0 0 0 0 0
56014- 0 0 0 0 0 0 0 0 0 0 0 0
56015- 0 0 0 0 0 0 0 0 0 0 0 0
56016- 0 0 0 0 0 0 0 0 0 0 0 0
56017- 0 0 0 0 0 0 0 0 0 14 14 14
56018- 42 42 42 94 94 94 14 14 14 101 101 101
56019-128 128 128 2 2 6 18 18 18 116 116 116
56020-118 98 46 121 92 8 121 92 8 98 78 10
56021-162 162 162 106 106 106 2 2 6 2 2 6
56022- 2 2 6 195 195 195 195 195 195 6 6 6
56023- 2 2 6 2 2 6 2 2 6 2 2 6
56024- 74 74 74 62 62 62 22 22 22 6 6 6
56025- 0 0 0 0 0 0 0 0 0 0 0 0
56026- 0 0 0 0 0 0 0 0 0 0 0 0
56027- 0 0 0 0 0 0 0 0 0 0 0 0
56028- 0 0 0 0 0 0 0 0 0 0 0 0
56029- 0 0 0 0 0 0 0 0 0 0 0 0
56030- 0 0 0 0 0 0 0 0 0 0 0 0
56031- 0 0 0 0 0 0 0 0 1 0 0 1
56032- 0 0 1 0 0 0 0 0 1 0 0 0
56033- 0 0 0 0 0 0 0 0 0 0 0 0
56034- 0 0 0 0 0 0 0 0 0 0 0 0
56035- 0 0 0 0 0 0 0 0 0 0 0 0
56036- 0 0 0 0 0 0 0 0 0 0 0 0
56037- 0 0 0 0 0 0 0 0 0 10 10 10
56038- 38 38 38 90 90 90 14 14 14 58 58 58
56039-210 210 210 26 26 26 54 38 6 154 114 10
56040-226 170 11 236 186 11 225 175 15 184 144 12
56041-215 174 15 175 146 61 37 26 9 2 2 6
56042- 70 70 70 246 246 246 138 138 138 2 2 6
56043- 2 2 6 2 2 6 2 2 6 2 2 6
56044- 70 70 70 66 66 66 26 26 26 6 6 6
56045- 0 0 0 0 0 0 0 0 0 0 0 0
56046- 0 0 0 0 0 0 0 0 0 0 0 0
56047- 0 0 0 0 0 0 0 0 0 0 0 0
56048- 0 0 0 0 0 0 0 0 0 0 0 0
56049- 0 0 0 0 0 0 0 0 0 0 0 0
56050- 0 0 0 0 0 0 0 0 0 0 0 0
56051- 0 0 0 0 0 0 0 0 0 0 0 0
56052- 0 0 0 0 0 0 0 0 0 0 0 0
56053- 0 0 0 0 0 0 0 0 0 0 0 0
56054- 0 0 0 0 0 0 0 0 0 0 0 0
56055- 0 0 0 0 0 0 0 0 0 0 0 0
56056- 0 0 0 0 0 0 0 0 0 0 0 0
56057- 0 0 0 0 0 0 0 0 0 10 10 10
56058- 38 38 38 86 86 86 14 14 14 10 10 10
56059-195 195 195 188 164 115 192 133 9 225 175 15
56060-239 182 13 234 190 10 232 195 16 232 200 30
56061-245 207 45 241 208 19 232 195 16 184 144 12
56062-218 194 134 211 206 186 42 42 42 2 2 6
56063- 2 2 6 2 2 6 2 2 6 2 2 6
56064- 50 50 50 74 74 74 30 30 30 6 6 6
56065- 0 0 0 0 0 0 0 0 0 0 0 0
56066- 0 0 0 0 0 0 0 0 0 0 0 0
56067- 0 0 0 0 0 0 0 0 0 0 0 0
56068- 0 0 0 0 0 0 0 0 0 0 0 0
56069- 0 0 0 0 0 0 0 0 0 0 0 0
56070- 0 0 0 0 0 0 0 0 0 0 0 0
56071- 0 0 0 0 0 0 0 0 0 0 0 0
56072- 0 0 0 0 0 0 0 0 0 0 0 0
56073- 0 0 0 0 0 0 0 0 0 0 0 0
56074- 0 0 0 0 0 0 0 0 0 0 0 0
56075- 0 0 0 0 0 0 0 0 0 0 0 0
56076- 0 0 0 0 0 0 0 0 0 0 0 0
56077- 0 0 0 0 0 0 0 0 0 10 10 10
56078- 34 34 34 86 86 86 14 14 14 2 2 6
56079-121 87 25 192 133 9 219 162 10 239 182 13
56080-236 186 11 232 195 16 241 208 19 244 214 54
56081-246 218 60 246 218 38 246 215 20 241 208 19
56082-241 208 19 226 184 13 121 87 25 2 2 6
56083- 2 2 6 2 2 6 2 2 6 2 2 6
56084- 50 50 50 82 82 82 34 34 34 10 10 10
56085- 0 0 0 0 0 0 0 0 0 0 0 0
56086- 0 0 0 0 0 0 0 0 0 0 0 0
56087- 0 0 0 0 0 0 0 0 0 0 0 0
56088- 0 0 0 0 0 0 0 0 0 0 0 0
56089- 0 0 0 0 0 0 0 0 0 0 0 0
56090- 0 0 0 0 0 0 0 0 0 0 0 0
56091- 0 0 0 0 0 0 0 0 0 0 0 0
56092- 0 0 0 0 0 0 0 0 0 0 0 0
56093- 0 0 0 0 0 0 0 0 0 0 0 0
56094- 0 0 0 0 0 0 0 0 0 0 0 0
56095- 0 0 0 0 0 0 0 0 0 0 0 0
56096- 0 0 0 0 0 0 0 0 0 0 0 0
56097- 0 0 0 0 0 0 0 0 0 10 10 10
56098- 34 34 34 82 82 82 30 30 30 61 42 6
56099-180 123 7 206 145 10 230 174 11 239 182 13
56100-234 190 10 238 202 15 241 208 19 246 218 74
56101-246 218 38 246 215 20 246 215 20 246 215 20
56102-226 184 13 215 174 15 184 144 12 6 6 6
56103- 2 2 6 2 2 6 2 2 6 2 2 6
56104- 26 26 26 94 94 94 42 42 42 14 14 14
56105- 0 0 0 0 0 0 0 0 0 0 0 0
56106- 0 0 0 0 0 0 0 0 0 0 0 0
56107- 0 0 0 0 0 0 0 0 0 0 0 0
56108- 0 0 0 0 0 0 0 0 0 0 0 0
56109- 0 0 0 0 0 0 0 0 0 0 0 0
56110- 0 0 0 0 0 0 0 0 0 0 0 0
56111- 0 0 0 0 0 0 0 0 0 0 0 0
56112- 0 0 0 0 0 0 0 0 0 0 0 0
56113- 0 0 0 0 0 0 0 0 0 0 0 0
56114- 0 0 0 0 0 0 0 0 0 0 0 0
56115- 0 0 0 0 0 0 0 0 0 0 0 0
56116- 0 0 0 0 0 0 0 0 0 0 0 0
56117- 0 0 0 0 0 0 0 0 0 10 10 10
56118- 30 30 30 78 78 78 50 50 50 104 69 6
56119-192 133 9 216 158 10 236 178 12 236 186 11
56120-232 195 16 241 208 19 244 214 54 245 215 43
56121-246 215 20 246 215 20 241 208 19 198 155 10
56122-200 144 11 216 158 10 156 118 10 2 2 6
56123- 2 2 6 2 2 6 2 2 6 2 2 6
56124- 6 6 6 90 90 90 54 54 54 18 18 18
56125- 6 6 6 0 0 0 0 0 0 0 0 0
56126- 0 0 0 0 0 0 0 0 0 0 0 0
56127- 0 0 0 0 0 0 0 0 0 0 0 0
56128- 0 0 0 0 0 0 0 0 0 0 0 0
56129- 0 0 0 0 0 0 0 0 0 0 0 0
56130- 0 0 0 0 0 0 0 0 0 0 0 0
56131- 0 0 0 0 0 0 0 0 0 0 0 0
56132- 0 0 0 0 0 0 0 0 0 0 0 0
56133- 0 0 0 0 0 0 0 0 0 0 0 0
56134- 0 0 0 0 0 0 0 0 0 0 0 0
56135- 0 0 0 0 0 0 0 0 0 0 0 0
56136- 0 0 0 0 0 0 0 0 0 0 0 0
56137- 0 0 0 0 0 0 0 0 0 10 10 10
56138- 30 30 30 78 78 78 46 46 46 22 22 22
56139-137 92 6 210 162 10 239 182 13 238 190 10
56140-238 202 15 241 208 19 246 215 20 246 215 20
56141-241 208 19 203 166 17 185 133 11 210 150 10
56142-216 158 10 210 150 10 102 78 10 2 2 6
56143- 6 6 6 54 54 54 14 14 14 2 2 6
56144- 2 2 6 62 62 62 74 74 74 30 30 30
56145- 10 10 10 0 0 0 0 0 0 0 0 0
56146- 0 0 0 0 0 0 0 0 0 0 0 0
56147- 0 0 0 0 0 0 0 0 0 0 0 0
56148- 0 0 0 0 0 0 0 0 0 0 0 0
56149- 0 0 0 0 0 0 0 0 0 0 0 0
56150- 0 0 0 0 0 0 0 0 0 0 0 0
56151- 0 0 0 0 0 0 0 0 0 0 0 0
56152- 0 0 0 0 0 0 0 0 0 0 0 0
56153- 0 0 0 0 0 0 0 0 0 0 0 0
56154- 0 0 0 0 0 0 0 0 0 0 0 0
56155- 0 0 0 0 0 0 0 0 0 0 0 0
56156- 0 0 0 0 0 0 0 0 0 0 0 0
56157- 0 0 0 0 0 0 0 0 0 10 10 10
56158- 34 34 34 78 78 78 50 50 50 6 6 6
56159- 94 70 30 139 102 15 190 146 13 226 184 13
56160-232 200 30 232 195 16 215 174 15 190 146 13
56161-168 122 10 192 133 9 210 150 10 213 154 11
56162-202 150 34 182 157 106 101 98 89 2 2 6
56163- 2 2 6 78 78 78 116 116 116 58 58 58
56164- 2 2 6 22 22 22 90 90 90 46 46 46
56165- 18 18 18 6 6 6 0 0 0 0 0 0
56166- 0 0 0 0 0 0 0 0 0 0 0 0
56167- 0 0 0 0 0 0 0 0 0 0 0 0
56168- 0 0 0 0 0 0 0 0 0 0 0 0
56169- 0 0 0 0 0 0 0 0 0 0 0 0
56170- 0 0 0 0 0 0 0 0 0 0 0 0
56171- 0 0 0 0 0 0 0 0 0 0 0 0
56172- 0 0 0 0 0 0 0 0 0 0 0 0
56173- 0 0 0 0 0 0 0 0 0 0 0 0
56174- 0 0 0 0 0 0 0 0 0 0 0 0
56175- 0 0 0 0 0 0 0 0 0 0 0 0
56176- 0 0 0 0 0 0 0 0 0 0 0 0
56177- 0 0 0 0 0 0 0 0 0 10 10 10
56178- 38 38 38 86 86 86 50 50 50 6 6 6
56179-128 128 128 174 154 114 156 107 11 168 122 10
56180-198 155 10 184 144 12 197 138 11 200 144 11
56181-206 145 10 206 145 10 197 138 11 188 164 115
56182-195 195 195 198 198 198 174 174 174 14 14 14
56183- 2 2 6 22 22 22 116 116 116 116 116 116
56184- 22 22 22 2 2 6 74 74 74 70 70 70
56185- 30 30 30 10 10 10 0 0 0 0 0 0
56186- 0 0 0 0 0 0 0 0 0 0 0 0
56187- 0 0 0 0 0 0 0 0 0 0 0 0
56188- 0 0 0 0 0 0 0 0 0 0 0 0
56189- 0 0 0 0 0 0 0 0 0 0 0 0
56190- 0 0 0 0 0 0 0 0 0 0 0 0
56191- 0 0 0 0 0 0 0 0 0 0 0 0
56192- 0 0 0 0 0 0 0 0 0 0 0 0
56193- 0 0 0 0 0 0 0 0 0 0 0 0
56194- 0 0 0 0 0 0 0 0 0 0 0 0
56195- 0 0 0 0 0 0 0 0 0 0 0 0
56196- 0 0 0 0 0 0 0 0 0 0 0 0
56197- 0 0 0 0 0 0 6 6 6 18 18 18
56198- 50 50 50 101 101 101 26 26 26 10 10 10
56199-138 138 138 190 190 190 174 154 114 156 107 11
56200-197 138 11 200 144 11 197 138 11 192 133 9
56201-180 123 7 190 142 34 190 178 144 187 187 187
56202-202 202 202 221 221 221 214 214 214 66 66 66
56203- 2 2 6 2 2 6 50 50 50 62 62 62
56204- 6 6 6 2 2 6 10 10 10 90 90 90
56205- 50 50 50 18 18 18 6 6 6 0 0 0
56206- 0 0 0 0 0 0 0 0 0 0 0 0
56207- 0 0 0 0 0 0 0 0 0 0 0 0
56208- 0 0 0 0 0 0 0 0 0 0 0 0
56209- 0 0 0 0 0 0 0 0 0 0 0 0
56210- 0 0 0 0 0 0 0 0 0 0 0 0
56211- 0 0 0 0 0 0 0 0 0 0 0 0
56212- 0 0 0 0 0 0 0 0 0 0 0 0
56213- 0 0 0 0 0 0 0 0 0 0 0 0
56214- 0 0 0 0 0 0 0 0 0 0 0 0
56215- 0 0 0 0 0 0 0 0 0 0 0 0
56216- 0 0 0 0 0 0 0 0 0 0 0 0
56217- 0 0 0 0 0 0 10 10 10 34 34 34
56218- 74 74 74 74 74 74 2 2 6 6 6 6
56219-144 144 144 198 198 198 190 190 190 178 166 146
56220-154 121 60 156 107 11 156 107 11 168 124 44
56221-174 154 114 187 187 187 190 190 190 210 210 210
56222-246 246 246 253 253 253 253 253 253 182 182 182
56223- 6 6 6 2 2 6 2 2 6 2 2 6
56224- 2 2 6 2 2 6 2 2 6 62 62 62
56225- 74 74 74 34 34 34 14 14 14 0 0 0
56226- 0 0 0 0 0 0 0 0 0 0 0 0
56227- 0 0 0 0 0 0 0 0 0 0 0 0
56228- 0 0 0 0 0 0 0 0 0 0 0 0
56229- 0 0 0 0 0 0 0 0 0 0 0 0
56230- 0 0 0 0 0 0 0 0 0 0 0 0
56231- 0 0 0 0 0 0 0 0 0 0 0 0
56232- 0 0 0 0 0 0 0 0 0 0 0 0
56233- 0 0 0 0 0 0 0 0 0 0 0 0
56234- 0 0 0 0 0 0 0 0 0 0 0 0
56235- 0 0 0 0 0 0 0 0 0 0 0 0
56236- 0 0 0 0 0 0 0 0 0 0 0 0
56237- 0 0 0 10 10 10 22 22 22 54 54 54
56238- 94 94 94 18 18 18 2 2 6 46 46 46
56239-234 234 234 221 221 221 190 190 190 190 190 190
56240-190 190 190 187 187 187 187 187 187 190 190 190
56241-190 190 190 195 195 195 214 214 214 242 242 242
56242-253 253 253 253 253 253 253 253 253 253 253 253
56243- 82 82 82 2 2 6 2 2 6 2 2 6
56244- 2 2 6 2 2 6 2 2 6 14 14 14
56245- 86 86 86 54 54 54 22 22 22 6 6 6
56246- 0 0 0 0 0 0 0 0 0 0 0 0
56247- 0 0 0 0 0 0 0 0 0 0 0 0
56248- 0 0 0 0 0 0 0 0 0 0 0 0
56249- 0 0 0 0 0 0 0 0 0 0 0 0
56250- 0 0 0 0 0 0 0 0 0 0 0 0
56251- 0 0 0 0 0 0 0 0 0 0 0 0
56252- 0 0 0 0 0 0 0 0 0 0 0 0
56253- 0 0 0 0 0 0 0 0 0 0 0 0
56254- 0 0 0 0 0 0 0 0 0 0 0 0
56255- 0 0 0 0 0 0 0 0 0 0 0 0
56256- 0 0 0 0 0 0 0 0 0 0 0 0
56257- 6 6 6 18 18 18 46 46 46 90 90 90
56258- 46 46 46 18 18 18 6 6 6 182 182 182
56259-253 253 253 246 246 246 206 206 206 190 190 190
56260-190 190 190 190 190 190 190 190 190 190 190 190
56261-206 206 206 231 231 231 250 250 250 253 253 253
56262-253 253 253 253 253 253 253 253 253 253 253 253
56263-202 202 202 14 14 14 2 2 6 2 2 6
56264- 2 2 6 2 2 6 2 2 6 2 2 6
56265- 42 42 42 86 86 86 42 42 42 18 18 18
56266- 6 6 6 0 0 0 0 0 0 0 0 0
56267- 0 0 0 0 0 0 0 0 0 0 0 0
56268- 0 0 0 0 0 0 0 0 0 0 0 0
56269- 0 0 0 0 0 0 0 0 0 0 0 0
56270- 0 0 0 0 0 0 0 0 0 0 0 0
56271- 0 0 0 0 0 0 0 0 0 0 0 0
56272- 0 0 0 0 0 0 0 0 0 0 0 0
56273- 0 0 0 0 0 0 0 0 0 0 0 0
56274- 0 0 0 0 0 0 0 0 0 0 0 0
56275- 0 0 0 0 0 0 0 0 0 0 0 0
56276- 0 0 0 0 0 0 0 0 0 6 6 6
56277- 14 14 14 38 38 38 74 74 74 66 66 66
56278- 2 2 6 6 6 6 90 90 90 250 250 250
56279-253 253 253 253 253 253 238 238 238 198 198 198
56280-190 190 190 190 190 190 195 195 195 221 221 221
56281-246 246 246 253 253 253 253 253 253 253 253 253
56282-253 253 253 253 253 253 253 253 253 253 253 253
56283-253 253 253 82 82 82 2 2 6 2 2 6
56284- 2 2 6 2 2 6 2 2 6 2 2 6
56285- 2 2 6 78 78 78 70 70 70 34 34 34
56286- 14 14 14 6 6 6 0 0 0 0 0 0
56287- 0 0 0 0 0 0 0 0 0 0 0 0
56288- 0 0 0 0 0 0 0 0 0 0 0 0
56289- 0 0 0 0 0 0 0 0 0 0 0 0
56290- 0 0 0 0 0 0 0 0 0 0 0 0
56291- 0 0 0 0 0 0 0 0 0 0 0 0
56292- 0 0 0 0 0 0 0 0 0 0 0 0
56293- 0 0 0 0 0 0 0 0 0 0 0 0
56294- 0 0 0 0 0 0 0 0 0 0 0 0
56295- 0 0 0 0 0 0 0 0 0 0 0 0
56296- 0 0 0 0 0 0 0 0 0 14 14 14
56297- 34 34 34 66 66 66 78 78 78 6 6 6
56298- 2 2 6 18 18 18 218 218 218 253 253 253
56299-253 253 253 253 253 253 253 253 253 246 246 246
56300-226 226 226 231 231 231 246 246 246 253 253 253
56301-253 253 253 253 253 253 253 253 253 253 253 253
56302-253 253 253 253 253 253 253 253 253 253 253 253
56303-253 253 253 178 178 178 2 2 6 2 2 6
56304- 2 2 6 2 2 6 2 2 6 2 2 6
56305- 2 2 6 18 18 18 90 90 90 62 62 62
56306- 30 30 30 10 10 10 0 0 0 0 0 0
56307- 0 0 0 0 0 0 0 0 0 0 0 0
56308- 0 0 0 0 0 0 0 0 0 0 0 0
56309- 0 0 0 0 0 0 0 0 0 0 0 0
56310- 0 0 0 0 0 0 0 0 0 0 0 0
56311- 0 0 0 0 0 0 0 0 0 0 0 0
56312- 0 0 0 0 0 0 0 0 0 0 0 0
56313- 0 0 0 0 0 0 0 0 0 0 0 0
56314- 0 0 0 0 0 0 0 0 0 0 0 0
56315- 0 0 0 0 0 0 0 0 0 0 0 0
56316- 0 0 0 0 0 0 10 10 10 26 26 26
56317- 58 58 58 90 90 90 18 18 18 2 2 6
56318- 2 2 6 110 110 110 253 253 253 253 253 253
56319-253 253 253 253 253 253 253 253 253 253 253 253
56320-250 250 250 253 253 253 253 253 253 253 253 253
56321-253 253 253 253 253 253 253 253 253 253 253 253
56322-253 253 253 253 253 253 253 253 253 253 253 253
56323-253 253 253 231 231 231 18 18 18 2 2 6
56324- 2 2 6 2 2 6 2 2 6 2 2 6
56325- 2 2 6 2 2 6 18 18 18 94 94 94
56326- 54 54 54 26 26 26 10 10 10 0 0 0
56327- 0 0 0 0 0 0 0 0 0 0 0 0
56328- 0 0 0 0 0 0 0 0 0 0 0 0
56329- 0 0 0 0 0 0 0 0 0 0 0 0
56330- 0 0 0 0 0 0 0 0 0 0 0 0
56331- 0 0 0 0 0 0 0 0 0 0 0 0
56332- 0 0 0 0 0 0 0 0 0 0 0 0
56333- 0 0 0 0 0 0 0 0 0 0 0 0
56334- 0 0 0 0 0 0 0 0 0 0 0 0
56335- 0 0 0 0 0 0 0 0 0 0 0 0
56336- 0 0 0 6 6 6 22 22 22 50 50 50
56337- 90 90 90 26 26 26 2 2 6 2 2 6
56338- 14 14 14 195 195 195 250 250 250 253 253 253
56339-253 253 253 253 253 253 253 253 253 253 253 253
56340-253 253 253 253 253 253 253 253 253 253 253 253
56341-253 253 253 253 253 253 253 253 253 253 253 253
56342-253 253 253 253 253 253 253 253 253 253 253 253
56343-250 250 250 242 242 242 54 54 54 2 2 6
56344- 2 2 6 2 2 6 2 2 6 2 2 6
56345- 2 2 6 2 2 6 2 2 6 38 38 38
56346- 86 86 86 50 50 50 22 22 22 6 6 6
56347- 0 0 0 0 0 0 0 0 0 0 0 0
56348- 0 0 0 0 0 0 0 0 0 0 0 0
56349- 0 0 0 0 0 0 0 0 0 0 0 0
56350- 0 0 0 0 0 0 0 0 0 0 0 0
56351- 0 0 0 0 0 0 0 0 0 0 0 0
56352- 0 0 0 0 0 0 0 0 0 0 0 0
56353- 0 0 0 0 0 0 0 0 0 0 0 0
56354- 0 0 0 0 0 0 0 0 0 0 0 0
56355- 0 0 0 0 0 0 0 0 0 0 0 0
56356- 6 6 6 14 14 14 38 38 38 82 82 82
56357- 34 34 34 2 2 6 2 2 6 2 2 6
56358- 42 42 42 195 195 195 246 246 246 253 253 253
56359-253 253 253 253 253 253 253 253 253 250 250 250
56360-242 242 242 242 242 242 250 250 250 253 253 253
56361-253 253 253 253 253 253 253 253 253 253 253 253
56362-253 253 253 250 250 250 246 246 246 238 238 238
56363-226 226 226 231 231 231 101 101 101 6 6 6
56364- 2 2 6 2 2 6 2 2 6 2 2 6
56365- 2 2 6 2 2 6 2 2 6 2 2 6
56366- 38 38 38 82 82 82 42 42 42 14 14 14
56367- 6 6 6 0 0 0 0 0 0 0 0 0
56368- 0 0 0 0 0 0 0 0 0 0 0 0
56369- 0 0 0 0 0 0 0 0 0 0 0 0
56370- 0 0 0 0 0 0 0 0 0 0 0 0
56371- 0 0 0 0 0 0 0 0 0 0 0 0
56372- 0 0 0 0 0 0 0 0 0 0 0 0
56373- 0 0 0 0 0 0 0 0 0 0 0 0
56374- 0 0 0 0 0 0 0 0 0 0 0 0
56375- 0 0 0 0 0 0 0 0 0 0 0 0
56376- 10 10 10 26 26 26 62 62 62 66 66 66
56377- 2 2 6 2 2 6 2 2 6 6 6 6
56378- 70 70 70 170 170 170 206 206 206 234 234 234
56379-246 246 246 250 250 250 250 250 250 238 238 238
56380-226 226 226 231 231 231 238 238 238 250 250 250
56381-250 250 250 250 250 250 246 246 246 231 231 231
56382-214 214 214 206 206 206 202 202 202 202 202 202
56383-198 198 198 202 202 202 182 182 182 18 18 18
56384- 2 2 6 2 2 6 2 2 6 2 2 6
56385- 2 2 6 2 2 6 2 2 6 2 2 6
56386- 2 2 6 62 62 62 66 66 66 30 30 30
56387- 10 10 10 0 0 0 0 0 0 0 0 0
56388- 0 0 0 0 0 0 0 0 0 0 0 0
56389- 0 0 0 0 0 0 0 0 0 0 0 0
56390- 0 0 0 0 0 0 0 0 0 0 0 0
56391- 0 0 0 0 0 0 0 0 0 0 0 0
56392- 0 0 0 0 0 0 0 0 0 0 0 0
56393- 0 0 0 0 0 0 0 0 0 0 0 0
56394- 0 0 0 0 0 0 0 0 0 0 0 0
56395- 0 0 0 0 0 0 0 0 0 0 0 0
56396- 14 14 14 42 42 42 82 82 82 18 18 18
56397- 2 2 6 2 2 6 2 2 6 10 10 10
56398- 94 94 94 182 182 182 218 218 218 242 242 242
56399-250 250 250 253 253 253 253 253 253 250 250 250
56400-234 234 234 253 253 253 253 253 253 253 253 253
56401-253 253 253 253 253 253 253 253 253 246 246 246
56402-238 238 238 226 226 226 210 210 210 202 202 202
56403-195 195 195 195 195 195 210 210 210 158 158 158
56404- 6 6 6 14 14 14 50 50 50 14 14 14
56405- 2 2 6 2 2 6 2 2 6 2 2 6
56406- 2 2 6 6 6 6 86 86 86 46 46 46
56407- 18 18 18 6 6 6 0 0 0 0 0 0
56408- 0 0 0 0 0 0 0 0 0 0 0 0
56409- 0 0 0 0 0 0 0 0 0 0 0 0
56410- 0 0 0 0 0 0 0 0 0 0 0 0
56411- 0 0 0 0 0 0 0 0 0 0 0 0
56412- 0 0 0 0 0 0 0 0 0 0 0 0
56413- 0 0 0 0 0 0 0 0 0 0 0 0
56414- 0 0 0 0 0 0 0 0 0 0 0 0
56415- 0 0 0 0 0 0 0 0 0 6 6 6
56416- 22 22 22 54 54 54 70 70 70 2 2 6
56417- 2 2 6 10 10 10 2 2 6 22 22 22
56418-166 166 166 231 231 231 250 250 250 253 253 253
56419-253 253 253 253 253 253 253 253 253 250 250 250
56420-242 242 242 253 253 253 253 253 253 253 253 253
56421-253 253 253 253 253 253 253 253 253 253 253 253
56422-253 253 253 253 253 253 253 253 253 246 246 246
56423-231 231 231 206 206 206 198 198 198 226 226 226
56424- 94 94 94 2 2 6 6 6 6 38 38 38
56425- 30 30 30 2 2 6 2 2 6 2 2 6
56426- 2 2 6 2 2 6 62 62 62 66 66 66
56427- 26 26 26 10 10 10 0 0 0 0 0 0
56428- 0 0 0 0 0 0 0 0 0 0 0 0
56429- 0 0 0 0 0 0 0 0 0 0 0 0
56430- 0 0 0 0 0 0 0 0 0 0 0 0
56431- 0 0 0 0 0 0 0 0 0 0 0 0
56432- 0 0 0 0 0 0 0 0 0 0 0 0
56433- 0 0 0 0 0 0 0 0 0 0 0 0
56434- 0 0 0 0 0 0 0 0 0 0 0 0
56435- 0 0 0 0 0 0 0 0 0 10 10 10
56436- 30 30 30 74 74 74 50 50 50 2 2 6
56437- 26 26 26 26 26 26 2 2 6 106 106 106
56438-238 238 238 253 253 253 253 253 253 253 253 253
56439-253 253 253 253 253 253 253 253 253 253 253 253
56440-253 253 253 253 253 253 253 253 253 253 253 253
56441-253 253 253 253 253 253 253 253 253 253 253 253
56442-253 253 253 253 253 253 253 253 253 253 253 253
56443-253 253 253 246 246 246 218 218 218 202 202 202
56444-210 210 210 14 14 14 2 2 6 2 2 6
56445- 30 30 30 22 22 22 2 2 6 2 2 6
56446- 2 2 6 2 2 6 18 18 18 86 86 86
56447- 42 42 42 14 14 14 0 0 0 0 0 0
56448- 0 0 0 0 0 0 0 0 0 0 0 0
56449- 0 0 0 0 0 0 0 0 0 0 0 0
56450- 0 0 0 0 0 0 0 0 0 0 0 0
56451- 0 0 0 0 0 0 0 0 0 0 0 0
56452- 0 0 0 0 0 0 0 0 0 0 0 0
56453- 0 0 0 0 0 0 0 0 0 0 0 0
56454- 0 0 0 0 0 0 0 0 0 0 0 0
56455- 0 0 0 0 0 0 0 0 0 14 14 14
56456- 42 42 42 90 90 90 22 22 22 2 2 6
56457- 42 42 42 2 2 6 18 18 18 218 218 218
56458-253 253 253 253 253 253 253 253 253 253 253 253
56459-253 253 253 253 253 253 253 253 253 253 253 253
56460-253 253 253 253 253 253 253 253 253 253 253 253
56461-253 253 253 253 253 253 253 253 253 253 253 253
56462-253 253 253 253 253 253 253 253 253 253 253 253
56463-253 253 253 253 253 253 250 250 250 221 221 221
56464-218 218 218 101 101 101 2 2 6 14 14 14
56465- 18 18 18 38 38 38 10 10 10 2 2 6
56466- 2 2 6 2 2 6 2 2 6 78 78 78
56467- 58 58 58 22 22 22 6 6 6 0 0 0
56468- 0 0 0 0 0 0 0 0 0 0 0 0
56469- 0 0 0 0 0 0 0 0 0 0 0 0
56470- 0 0 0 0 0 0 0 0 0 0 0 0
56471- 0 0 0 0 0 0 0 0 0 0 0 0
56472- 0 0 0 0 0 0 0 0 0 0 0 0
56473- 0 0 0 0 0 0 0 0 0 0 0 0
56474- 0 0 0 0 0 0 0 0 0 0 0 0
56475- 0 0 0 0 0 0 6 6 6 18 18 18
56476- 54 54 54 82 82 82 2 2 6 26 26 26
56477- 22 22 22 2 2 6 123 123 123 253 253 253
56478-253 253 253 253 253 253 253 253 253 253 253 253
56479-253 253 253 253 253 253 253 253 253 253 253 253
56480-253 253 253 253 253 253 253 253 253 253 253 253
56481-253 253 253 253 253 253 253 253 253 253 253 253
56482-253 253 253 253 253 253 253 253 253 253 253 253
56483-253 253 253 253 253 253 253 253 253 250 250 250
56484-238 238 238 198 198 198 6 6 6 38 38 38
56485- 58 58 58 26 26 26 38 38 38 2 2 6
56486- 2 2 6 2 2 6 2 2 6 46 46 46
56487- 78 78 78 30 30 30 10 10 10 0 0 0
56488- 0 0 0 0 0 0 0 0 0 0 0 0
56489- 0 0 0 0 0 0 0 0 0 0 0 0
56490- 0 0 0 0 0 0 0 0 0 0 0 0
56491- 0 0 0 0 0 0 0 0 0 0 0 0
56492- 0 0 0 0 0 0 0 0 0 0 0 0
56493- 0 0 0 0 0 0 0 0 0 0 0 0
56494- 0 0 0 0 0 0 0 0 0 0 0 0
56495- 0 0 0 0 0 0 10 10 10 30 30 30
56496- 74 74 74 58 58 58 2 2 6 42 42 42
56497- 2 2 6 22 22 22 231 231 231 253 253 253
56498-253 253 253 253 253 253 253 253 253 253 253 253
56499-253 253 253 253 253 253 253 253 253 250 250 250
56500-253 253 253 253 253 253 253 253 253 253 253 253
56501-253 253 253 253 253 253 253 253 253 253 253 253
56502-253 253 253 253 253 253 253 253 253 253 253 253
56503-253 253 253 253 253 253 253 253 253 253 253 253
56504-253 253 253 246 246 246 46 46 46 38 38 38
56505- 42 42 42 14 14 14 38 38 38 14 14 14
56506- 2 2 6 2 2 6 2 2 6 6 6 6
56507- 86 86 86 46 46 46 14 14 14 0 0 0
56508- 0 0 0 0 0 0 0 0 0 0 0 0
56509- 0 0 0 0 0 0 0 0 0 0 0 0
56510- 0 0 0 0 0 0 0 0 0 0 0 0
56511- 0 0 0 0 0 0 0 0 0 0 0 0
56512- 0 0 0 0 0 0 0 0 0 0 0 0
56513- 0 0 0 0 0 0 0 0 0 0 0 0
56514- 0 0 0 0 0 0 0 0 0 0 0 0
56515- 0 0 0 6 6 6 14 14 14 42 42 42
56516- 90 90 90 18 18 18 18 18 18 26 26 26
56517- 2 2 6 116 116 116 253 253 253 253 253 253
56518-253 253 253 253 253 253 253 253 253 253 253 253
56519-253 253 253 253 253 253 250 250 250 238 238 238
56520-253 253 253 253 253 253 253 253 253 253 253 253
56521-253 253 253 253 253 253 253 253 253 253 253 253
56522-253 253 253 253 253 253 253 253 253 253 253 253
56523-253 253 253 253 253 253 253 253 253 253 253 253
56524-253 253 253 253 253 253 94 94 94 6 6 6
56525- 2 2 6 2 2 6 10 10 10 34 34 34
56526- 2 2 6 2 2 6 2 2 6 2 2 6
56527- 74 74 74 58 58 58 22 22 22 6 6 6
56528- 0 0 0 0 0 0 0 0 0 0 0 0
56529- 0 0 0 0 0 0 0 0 0 0 0 0
56530- 0 0 0 0 0 0 0 0 0 0 0 0
56531- 0 0 0 0 0 0 0 0 0 0 0 0
56532- 0 0 0 0 0 0 0 0 0 0 0 0
56533- 0 0 0 0 0 0 0 0 0 0 0 0
56534- 0 0 0 0 0 0 0 0 0 0 0 0
56535- 0 0 0 10 10 10 26 26 26 66 66 66
56536- 82 82 82 2 2 6 38 38 38 6 6 6
56537- 14 14 14 210 210 210 253 253 253 253 253 253
56538-253 253 253 253 253 253 253 253 253 253 253 253
56539-253 253 253 253 253 253 246 246 246 242 242 242
56540-253 253 253 253 253 253 253 253 253 253 253 253
56541-253 253 253 253 253 253 253 253 253 253 253 253
56542-253 253 253 253 253 253 253 253 253 253 253 253
56543-253 253 253 253 253 253 253 253 253 253 253 253
56544-253 253 253 253 253 253 144 144 144 2 2 6
56545- 2 2 6 2 2 6 2 2 6 46 46 46
56546- 2 2 6 2 2 6 2 2 6 2 2 6
56547- 42 42 42 74 74 74 30 30 30 10 10 10
56548- 0 0 0 0 0 0 0 0 0 0 0 0
56549- 0 0 0 0 0 0 0 0 0 0 0 0
56550- 0 0 0 0 0 0 0 0 0 0 0 0
56551- 0 0 0 0 0 0 0 0 0 0 0 0
56552- 0 0 0 0 0 0 0 0 0 0 0 0
56553- 0 0 0 0 0 0 0 0 0 0 0 0
56554- 0 0 0 0 0 0 0 0 0 0 0 0
56555- 6 6 6 14 14 14 42 42 42 90 90 90
56556- 26 26 26 6 6 6 42 42 42 2 2 6
56557- 74 74 74 250 250 250 253 253 253 253 253 253
56558-253 253 253 253 253 253 253 253 253 253 253 253
56559-253 253 253 253 253 253 242 242 242 242 242 242
56560-253 253 253 253 253 253 253 253 253 253 253 253
56561-253 253 253 253 253 253 253 253 253 253 253 253
56562-253 253 253 253 253 253 253 253 253 253 253 253
56563-253 253 253 253 253 253 253 253 253 253 253 253
56564-253 253 253 253 253 253 182 182 182 2 2 6
56565- 2 2 6 2 2 6 2 2 6 46 46 46
56566- 2 2 6 2 2 6 2 2 6 2 2 6
56567- 10 10 10 86 86 86 38 38 38 10 10 10
56568- 0 0 0 0 0 0 0 0 0 0 0 0
56569- 0 0 0 0 0 0 0 0 0 0 0 0
56570- 0 0 0 0 0 0 0 0 0 0 0 0
56571- 0 0 0 0 0 0 0 0 0 0 0 0
56572- 0 0 0 0 0 0 0 0 0 0 0 0
56573- 0 0 0 0 0 0 0 0 0 0 0 0
56574- 0 0 0 0 0 0 0 0 0 0 0 0
56575- 10 10 10 26 26 26 66 66 66 82 82 82
56576- 2 2 6 22 22 22 18 18 18 2 2 6
56577-149 149 149 253 253 253 253 253 253 253 253 253
56578-253 253 253 253 253 253 253 253 253 253 253 253
56579-253 253 253 253 253 253 234 234 234 242 242 242
56580-253 253 253 253 253 253 253 253 253 253 253 253
56581-253 253 253 253 253 253 253 253 253 253 253 253
56582-253 253 253 253 253 253 253 253 253 253 253 253
56583-253 253 253 253 253 253 253 253 253 253 253 253
56584-253 253 253 253 253 253 206 206 206 2 2 6
56585- 2 2 6 2 2 6 2 2 6 38 38 38
56586- 2 2 6 2 2 6 2 2 6 2 2 6
56587- 6 6 6 86 86 86 46 46 46 14 14 14
56588- 0 0 0 0 0 0 0 0 0 0 0 0
56589- 0 0 0 0 0 0 0 0 0 0 0 0
56590- 0 0 0 0 0 0 0 0 0 0 0 0
56591- 0 0 0 0 0 0 0 0 0 0 0 0
56592- 0 0 0 0 0 0 0 0 0 0 0 0
56593- 0 0 0 0 0 0 0 0 0 0 0 0
56594- 0 0 0 0 0 0 0 0 0 6 6 6
56595- 18 18 18 46 46 46 86 86 86 18 18 18
56596- 2 2 6 34 34 34 10 10 10 6 6 6
56597-210 210 210 253 253 253 253 253 253 253 253 253
56598-253 253 253 253 253 253 253 253 253 253 253 253
56599-253 253 253 253 253 253 234 234 234 242 242 242
56600-253 253 253 253 253 253 253 253 253 253 253 253
56601-253 253 253 253 253 253 253 253 253 253 253 253
56602-253 253 253 253 253 253 253 253 253 253 253 253
56603-253 253 253 253 253 253 253 253 253 253 253 253
56604-253 253 253 253 253 253 221 221 221 6 6 6
56605- 2 2 6 2 2 6 6 6 6 30 30 30
56606- 2 2 6 2 2 6 2 2 6 2 2 6
56607- 2 2 6 82 82 82 54 54 54 18 18 18
56608- 6 6 6 0 0 0 0 0 0 0 0 0
56609- 0 0 0 0 0 0 0 0 0 0 0 0
56610- 0 0 0 0 0 0 0 0 0 0 0 0
56611- 0 0 0 0 0 0 0 0 0 0 0 0
56612- 0 0 0 0 0 0 0 0 0 0 0 0
56613- 0 0 0 0 0 0 0 0 0 0 0 0
56614- 0 0 0 0 0 0 0 0 0 10 10 10
56615- 26 26 26 66 66 66 62 62 62 2 2 6
56616- 2 2 6 38 38 38 10 10 10 26 26 26
56617-238 238 238 253 253 253 253 253 253 253 253 253
56618-253 253 253 253 253 253 253 253 253 253 253 253
56619-253 253 253 253 253 253 231 231 231 238 238 238
56620-253 253 253 253 253 253 253 253 253 253 253 253
56621-253 253 253 253 253 253 253 253 253 253 253 253
56622-253 253 253 253 253 253 253 253 253 253 253 253
56623-253 253 253 253 253 253 253 253 253 253 253 253
56624-253 253 253 253 253 253 231 231 231 6 6 6
56625- 2 2 6 2 2 6 10 10 10 30 30 30
56626- 2 2 6 2 2 6 2 2 6 2 2 6
56627- 2 2 6 66 66 66 58 58 58 22 22 22
56628- 6 6 6 0 0 0 0 0 0 0 0 0
56629- 0 0 0 0 0 0 0 0 0 0 0 0
56630- 0 0 0 0 0 0 0 0 0 0 0 0
56631- 0 0 0 0 0 0 0 0 0 0 0 0
56632- 0 0 0 0 0 0 0 0 0 0 0 0
56633- 0 0 0 0 0 0 0 0 0 0 0 0
56634- 0 0 0 0 0 0 0 0 0 10 10 10
56635- 38 38 38 78 78 78 6 6 6 2 2 6
56636- 2 2 6 46 46 46 14 14 14 42 42 42
56637-246 246 246 253 253 253 253 253 253 253 253 253
56638-253 253 253 253 253 253 253 253 253 253 253 253
56639-253 253 253 253 253 253 231 231 231 242 242 242
56640-253 253 253 253 253 253 253 253 253 253 253 253
56641-253 253 253 253 253 253 253 253 253 253 253 253
56642-253 253 253 253 253 253 253 253 253 253 253 253
56643-253 253 253 253 253 253 253 253 253 253 253 253
56644-253 253 253 253 253 253 234 234 234 10 10 10
56645- 2 2 6 2 2 6 22 22 22 14 14 14
56646- 2 2 6 2 2 6 2 2 6 2 2 6
56647- 2 2 6 66 66 66 62 62 62 22 22 22
56648- 6 6 6 0 0 0 0 0 0 0 0 0
56649- 0 0 0 0 0 0 0 0 0 0 0 0
56650- 0 0 0 0 0 0 0 0 0 0 0 0
56651- 0 0 0 0 0 0 0 0 0 0 0 0
56652- 0 0 0 0 0 0 0 0 0 0 0 0
56653- 0 0 0 0 0 0 0 0 0 0 0 0
56654- 0 0 0 0 0 0 6 6 6 18 18 18
56655- 50 50 50 74 74 74 2 2 6 2 2 6
56656- 14 14 14 70 70 70 34 34 34 62 62 62
56657-250 250 250 253 253 253 253 253 253 253 253 253
56658-253 253 253 253 253 253 253 253 253 253 253 253
56659-253 253 253 253 253 253 231 231 231 246 246 246
56660-253 253 253 253 253 253 253 253 253 253 253 253
56661-253 253 253 253 253 253 253 253 253 253 253 253
56662-253 253 253 253 253 253 253 253 253 253 253 253
56663-253 253 253 253 253 253 253 253 253 253 253 253
56664-253 253 253 253 253 253 234 234 234 14 14 14
56665- 2 2 6 2 2 6 30 30 30 2 2 6
56666- 2 2 6 2 2 6 2 2 6 2 2 6
56667- 2 2 6 66 66 66 62 62 62 22 22 22
56668- 6 6 6 0 0 0 0 0 0 0 0 0
56669- 0 0 0 0 0 0 0 0 0 0 0 0
56670- 0 0 0 0 0 0 0 0 0 0 0 0
56671- 0 0 0 0 0 0 0 0 0 0 0 0
56672- 0 0 0 0 0 0 0 0 0 0 0 0
56673- 0 0 0 0 0 0 0 0 0 0 0 0
56674- 0 0 0 0 0 0 6 6 6 18 18 18
56675- 54 54 54 62 62 62 2 2 6 2 2 6
56676- 2 2 6 30 30 30 46 46 46 70 70 70
56677-250 250 250 253 253 253 253 253 253 253 253 253
56678-253 253 253 253 253 253 253 253 253 253 253 253
56679-253 253 253 253 253 253 231 231 231 246 246 246
56680-253 253 253 253 253 253 253 253 253 253 253 253
56681-253 253 253 253 253 253 253 253 253 253 253 253
56682-253 253 253 253 253 253 253 253 253 253 253 253
56683-253 253 253 253 253 253 253 253 253 253 253 253
56684-253 253 253 253 253 253 226 226 226 10 10 10
56685- 2 2 6 6 6 6 30 30 30 2 2 6
56686- 2 2 6 2 2 6 2 2 6 2 2 6
56687- 2 2 6 66 66 66 58 58 58 22 22 22
56688- 6 6 6 0 0 0 0 0 0 0 0 0
56689- 0 0 0 0 0 0 0 0 0 0 0 0
56690- 0 0 0 0 0 0 0 0 0 0 0 0
56691- 0 0 0 0 0 0 0 0 0 0 0 0
56692- 0 0 0 0 0 0 0 0 0 0 0 0
56693- 0 0 0 0 0 0 0 0 0 0 0 0
56694- 0 0 0 0 0 0 6 6 6 22 22 22
56695- 58 58 58 62 62 62 2 2 6 2 2 6
56696- 2 2 6 2 2 6 30 30 30 78 78 78
56697-250 250 250 253 253 253 253 253 253 253 253 253
56698-253 253 253 253 253 253 253 253 253 253 253 253
56699-253 253 253 253 253 253 231 231 231 246 246 246
56700-253 253 253 253 253 253 253 253 253 253 253 253
56701-253 253 253 253 253 253 253 253 253 253 253 253
56702-253 253 253 253 253 253 253 253 253 253 253 253
56703-253 253 253 253 253 253 253 253 253 253 253 253
56704-253 253 253 253 253 253 206 206 206 2 2 6
56705- 22 22 22 34 34 34 18 14 6 22 22 22
56706- 26 26 26 18 18 18 6 6 6 2 2 6
56707- 2 2 6 82 82 82 54 54 54 18 18 18
56708- 6 6 6 0 0 0 0 0 0 0 0 0
56709- 0 0 0 0 0 0 0 0 0 0 0 0
56710- 0 0 0 0 0 0 0 0 0 0 0 0
56711- 0 0 0 0 0 0 0 0 0 0 0 0
56712- 0 0 0 0 0 0 0 0 0 0 0 0
56713- 0 0 0 0 0 0 0 0 0 0 0 0
56714- 0 0 0 0 0 0 6 6 6 26 26 26
56715- 62 62 62 106 106 106 74 54 14 185 133 11
56716-210 162 10 121 92 8 6 6 6 62 62 62
56717-238 238 238 253 253 253 253 253 253 253 253 253
56718-253 253 253 253 253 253 253 253 253 253 253 253
56719-253 253 253 253 253 253 231 231 231 246 246 246
56720-253 253 253 253 253 253 253 253 253 253 253 253
56721-253 253 253 253 253 253 253 253 253 253 253 253
56722-253 253 253 253 253 253 253 253 253 253 253 253
56723-253 253 253 253 253 253 253 253 253 253 253 253
56724-253 253 253 253 253 253 158 158 158 18 18 18
56725- 14 14 14 2 2 6 2 2 6 2 2 6
56726- 6 6 6 18 18 18 66 66 66 38 38 38
56727- 6 6 6 94 94 94 50 50 50 18 18 18
56728- 6 6 6 0 0 0 0 0 0 0 0 0
56729- 0 0 0 0 0 0 0 0 0 0 0 0
56730- 0 0 0 0 0 0 0 0 0 0 0 0
56731- 0 0 0 0 0 0 0 0 0 0 0 0
56732- 0 0 0 0 0 0 0 0 0 0 0 0
56733- 0 0 0 0 0 0 0 0 0 6 6 6
56734- 10 10 10 10 10 10 18 18 18 38 38 38
56735- 78 78 78 142 134 106 216 158 10 242 186 14
56736-246 190 14 246 190 14 156 118 10 10 10 10
56737- 90 90 90 238 238 238 253 253 253 253 253 253
56738-253 253 253 253 253 253 253 253 253 253 253 253
56739-253 253 253 253 253 253 231 231 231 250 250 250
56740-253 253 253 253 253 253 253 253 253 253 253 253
56741-253 253 253 253 253 253 253 253 253 253 253 253
56742-253 253 253 253 253 253 253 253 253 253 253 253
56743-253 253 253 253 253 253 253 253 253 246 230 190
56744-238 204 91 238 204 91 181 142 44 37 26 9
56745- 2 2 6 2 2 6 2 2 6 2 2 6
56746- 2 2 6 2 2 6 38 38 38 46 46 46
56747- 26 26 26 106 106 106 54 54 54 18 18 18
56748- 6 6 6 0 0 0 0 0 0 0 0 0
56749- 0 0 0 0 0 0 0 0 0 0 0 0
56750- 0 0 0 0 0 0 0 0 0 0 0 0
56751- 0 0 0 0 0 0 0 0 0 0 0 0
56752- 0 0 0 0 0 0 0 0 0 0 0 0
56753- 0 0 0 6 6 6 14 14 14 22 22 22
56754- 30 30 30 38 38 38 50 50 50 70 70 70
56755-106 106 106 190 142 34 226 170 11 242 186 14
56756-246 190 14 246 190 14 246 190 14 154 114 10
56757- 6 6 6 74 74 74 226 226 226 253 253 253
56758-253 253 253 253 253 253 253 253 253 253 253 253
56759-253 253 253 253 253 253 231 231 231 250 250 250
56760-253 253 253 253 253 253 253 253 253 253 253 253
56761-253 253 253 253 253 253 253 253 253 253 253 253
56762-253 253 253 253 253 253 253 253 253 253 253 253
56763-253 253 253 253 253 253 253 253 253 228 184 62
56764-241 196 14 241 208 19 232 195 16 38 30 10
56765- 2 2 6 2 2 6 2 2 6 2 2 6
56766- 2 2 6 6 6 6 30 30 30 26 26 26
56767-203 166 17 154 142 90 66 66 66 26 26 26
56768- 6 6 6 0 0 0 0 0 0 0 0 0
56769- 0 0 0 0 0 0 0 0 0 0 0 0
56770- 0 0 0 0 0 0 0 0 0 0 0 0
56771- 0 0 0 0 0 0 0 0 0 0 0 0
56772- 0 0 0 0 0 0 0 0 0 0 0 0
56773- 6 6 6 18 18 18 38 38 38 58 58 58
56774- 78 78 78 86 86 86 101 101 101 123 123 123
56775-175 146 61 210 150 10 234 174 13 246 186 14
56776-246 190 14 246 190 14 246 190 14 238 190 10
56777-102 78 10 2 2 6 46 46 46 198 198 198
56778-253 253 253 253 253 253 253 253 253 253 253 253
56779-253 253 253 253 253 253 234 234 234 242 242 242
56780-253 253 253 253 253 253 253 253 253 253 253 253
56781-253 253 253 253 253 253 253 253 253 253 253 253
56782-253 253 253 253 253 253 253 253 253 253 253 253
56783-253 253 253 253 253 253 253 253 253 224 178 62
56784-242 186 14 241 196 14 210 166 10 22 18 6
56785- 2 2 6 2 2 6 2 2 6 2 2 6
56786- 2 2 6 2 2 6 6 6 6 121 92 8
56787-238 202 15 232 195 16 82 82 82 34 34 34
56788- 10 10 10 0 0 0 0 0 0 0 0 0
56789- 0 0 0 0 0 0 0 0 0 0 0 0
56790- 0 0 0 0 0 0 0 0 0 0 0 0
56791- 0 0 0 0 0 0 0 0 0 0 0 0
56792- 0 0 0 0 0 0 0 0 0 0 0 0
56793- 14 14 14 38 38 38 70 70 70 154 122 46
56794-190 142 34 200 144 11 197 138 11 197 138 11
56795-213 154 11 226 170 11 242 186 14 246 190 14
56796-246 190 14 246 190 14 246 190 14 246 190 14
56797-225 175 15 46 32 6 2 2 6 22 22 22
56798-158 158 158 250 250 250 253 253 253 253 253 253
56799-253 253 253 253 253 253 253 253 253 253 253 253
56800-253 253 253 253 253 253 253 253 253 253 253 253
56801-253 253 253 253 253 253 253 253 253 253 253 253
56802-253 253 253 253 253 253 253 253 253 253 253 253
56803-253 253 253 250 250 250 242 242 242 224 178 62
56804-239 182 13 236 186 11 213 154 11 46 32 6
56805- 2 2 6 2 2 6 2 2 6 2 2 6
56806- 2 2 6 2 2 6 61 42 6 225 175 15
56807-238 190 10 236 186 11 112 100 78 42 42 42
56808- 14 14 14 0 0 0 0 0 0 0 0 0
56809- 0 0 0 0 0 0 0 0 0 0 0 0
56810- 0 0 0 0 0 0 0 0 0 0 0 0
56811- 0 0 0 0 0 0 0 0 0 0 0 0
56812- 0 0 0 0 0 0 0 0 0 6 6 6
56813- 22 22 22 54 54 54 154 122 46 213 154 11
56814-226 170 11 230 174 11 226 170 11 226 170 11
56815-236 178 12 242 186 14 246 190 14 246 190 14
56816-246 190 14 246 190 14 246 190 14 246 190 14
56817-241 196 14 184 144 12 10 10 10 2 2 6
56818- 6 6 6 116 116 116 242 242 242 253 253 253
56819-253 253 253 253 253 253 253 253 253 253 253 253
56820-253 253 253 253 253 253 253 253 253 253 253 253
56821-253 253 253 253 253 253 253 253 253 253 253 253
56822-253 253 253 253 253 253 253 253 253 253 253 253
56823-253 253 253 231 231 231 198 198 198 214 170 54
56824-236 178 12 236 178 12 210 150 10 137 92 6
56825- 18 14 6 2 2 6 2 2 6 2 2 6
56826- 6 6 6 70 47 6 200 144 11 236 178 12
56827-239 182 13 239 182 13 124 112 88 58 58 58
56828- 22 22 22 6 6 6 0 0 0 0 0 0
56829- 0 0 0 0 0 0 0 0 0 0 0 0
56830- 0 0 0 0 0 0 0 0 0 0 0 0
56831- 0 0 0 0 0 0 0 0 0 0 0 0
56832- 0 0 0 0 0 0 0 0 0 10 10 10
56833- 30 30 30 70 70 70 180 133 36 226 170 11
56834-239 182 13 242 186 14 242 186 14 246 186 14
56835-246 190 14 246 190 14 246 190 14 246 190 14
56836-246 190 14 246 190 14 246 190 14 246 190 14
56837-246 190 14 232 195 16 98 70 6 2 2 6
56838- 2 2 6 2 2 6 66 66 66 221 221 221
56839-253 253 253 253 253 253 253 253 253 253 253 253
56840-253 253 253 253 253 253 253 253 253 253 253 253
56841-253 253 253 253 253 253 253 253 253 253 253 253
56842-253 253 253 253 253 253 253 253 253 253 253 253
56843-253 253 253 206 206 206 198 198 198 214 166 58
56844-230 174 11 230 174 11 216 158 10 192 133 9
56845-163 110 8 116 81 8 102 78 10 116 81 8
56846-167 114 7 197 138 11 226 170 11 239 182 13
56847-242 186 14 242 186 14 162 146 94 78 78 78
56848- 34 34 34 14 14 14 6 6 6 0 0 0
56849- 0 0 0 0 0 0 0 0 0 0 0 0
56850- 0 0 0 0 0 0 0 0 0 0 0 0
56851- 0 0 0 0 0 0 0 0 0 0 0 0
56852- 0 0 0 0 0 0 0 0 0 6 6 6
56853- 30 30 30 78 78 78 190 142 34 226 170 11
56854-239 182 13 246 190 14 246 190 14 246 190 14
56855-246 190 14 246 190 14 246 190 14 246 190 14
56856-246 190 14 246 190 14 246 190 14 246 190 14
56857-246 190 14 241 196 14 203 166 17 22 18 6
56858- 2 2 6 2 2 6 2 2 6 38 38 38
56859-218 218 218 253 253 253 253 253 253 253 253 253
56860-253 253 253 253 253 253 253 253 253 253 253 253
56861-253 253 253 253 253 253 253 253 253 253 253 253
56862-253 253 253 253 253 253 253 253 253 253 253 253
56863-250 250 250 206 206 206 198 198 198 202 162 69
56864-226 170 11 236 178 12 224 166 10 210 150 10
56865-200 144 11 197 138 11 192 133 9 197 138 11
56866-210 150 10 226 170 11 242 186 14 246 190 14
56867-246 190 14 246 186 14 225 175 15 124 112 88
56868- 62 62 62 30 30 30 14 14 14 6 6 6
56869- 0 0 0 0 0 0 0 0 0 0 0 0
56870- 0 0 0 0 0 0 0 0 0 0 0 0
56871- 0 0 0 0 0 0 0 0 0 0 0 0
56872- 0 0 0 0 0 0 0 0 0 10 10 10
56873- 30 30 30 78 78 78 174 135 50 224 166 10
56874-239 182 13 246 190 14 246 190 14 246 190 14
56875-246 190 14 246 190 14 246 190 14 246 190 14
56876-246 190 14 246 190 14 246 190 14 246 190 14
56877-246 190 14 246 190 14 241 196 14 139 102 15
56878- 2 2 6 2 2 6 2 2 6 2 2 6
56879- 78 78 78 250 250 250 253 253 253 253 253 253
56880-253 253 253 253 253 253 253 253 253 253 253 253
56881-253 253 253 253 253 253 253 253 253 253 253 253
56882-253 253 253 253 253 253 253 253 253 253 253 253
56883-250 250 250 214 214 214 198 198 198 190 150 46
56884-219 162 10 236 178 12 234 174 13 224 166 10
56885-216 158 10 213 154 11 213 154 11 216 158 10
56886-226 170 11 239 182 13 246 190 14 246 190 14
56887-246 190 14 246 190 14 242 186 14 206 162 42
56888-101 101 101 58 58 58 30 30 30 14 14 14
56889- 6 6 6 0 0 0 0 0 0 0 0 0
56890- 0 0 0 0 0 0 0 0 0 0 0 0
56891- 0 0 0 0 0 0 0 0 0 0 0 0
56892- 0 0 0 0 0 0 0 0 0 10 10 10
56893- 30 30 30 74 74 74 174 135 50 216 158 10
56894-236 178 12 246 190 14 246 190 14 246 190 14
56895-246 190 14 246 190 14 246 190 14 246 190 14
56896-246 190 14 246 190 14 246 190 14 246 190 14
56897-246 190 14 246 190 14 241 196 14 226 184 13
56898- 61 42 6 2 2 6 2 2 6 2 2 6
56899- 22 22 22 238 238 238 253 253 253 253 253 253
56900-253 253 253 253 253 253 253 253 253 253 253 253
56901-253 253 253 253 253 253 253 253 253 253 253 253
56902-253 253 253 253 253 253 253 253 253 253 253 253
56903-253 253 253 226 226 226 187 187 187 180 133 36
56904-216 158 10 236 178 12 239 182 13 236 178 12
56905-230 174 11 226 170 11 226 170 11 230 174 11
56906-236 178 12 242 186 14 246 190 14 246 190 14
56907-246 190 14 246 190 14 246 186 14 239 182 13
56908-206 162 42 106 106 106 66 66 66 34 34 34
56909- 14 14 14 6 6 6 0 0 0 0 0 0
56910- 0 0 0 0 0 0 0 0 0 0 0 0
56911- 0 0 0 0 0 0 0 0 0 0 0 0
56912- 0 0 0 0 0 0 0 0 0 6 6 6
56913- 26 26 26 70 70 70 163 133 67 213 154 11
56914-236 178 12 246 190 14 246 190 14 246 190 14
56915-246 190 14 246 190 14 246 190 14 246 190 14
56916-246 190 14 246 190 14 246 190 14 246 190 14
56917-246 190 14 246 190 14 246 190 14 241 196 14
56918-190 146 13 18 14 6 2 2 6 2 2 6
56919- 46 46 46 246 246 246 253 253 253 253 253 253
56920-253 253 253 253 253 253 253 253 253 253 253 253
56921-253 253 253 253 253 253 253 253 253 253 253 253
56922-253 253 253 253 253 253 253 253 253 253 253 253
56923-253 253 253 221 221 221 86 86 86 156 107 11
56924-216 158 10 236 178 12 242 186 14 246 186 14
56925-242 186 14 239 182 13 239 182 13 242 186 14
56926-242 186 14 246 186 14 246 190 14 246 190 14
56927-246 190 14 246 190 14 246 190 14 246 190 14
56928-242 186 14 225 175 15 142 122 72 66 66 66
56929- 30 30 30 10 10 10 0 0 0 0 0 0
56930- 0 0 0 0 0 0 0 0 0 0 0 0
56931- 0 0 0 0 0 0 0 0 0 0 0 0
56932- 0 0 0 0 0 0 0 0 0 6 6 6
56933- 26 26 26 70 70 70 163 133 67 210 150 10
56934-236 178 12 246 190 14 246 190 14 246 190 14
56935-246 190 14 246 190 14 246 190 14 246 190 14
56936-246 190 14 246 190 14 246 190 14 246 190 14
56937-246 190 14 246 190 14 246 190 14 246 190 14
56938-232 195 16 121 92 8 34 34 34 106 106 106
56939-221 221 221 253 253 253 253 253 253 253 253 253
56940-253 253 253 253 253 253 253 253 253 253 253 253
56941-253 253 253 253 253 253 253 253 253 253 253 253
56942-253 253 253 253 253 253 253 253 253 253 253 253
56943-242 242 242 82 82 82 18 14 6 163 110 8
56944-216 158 10 236 178 12 242 186 14 246 190 14
56945-246 190 14 246 190 14 246 190 14 246 190 14
56946-246 190 14 246 190 14 246 190 14 246 190 14
56947-246 190 14 246 190 14 246 190 14 246 190 14
56948-246 190 14 246 190 14 242 186 14 163 133 67
56949- 46 46 46 18 18 18 6 6 6 0 0 0
56950- 0 0 0 0 0 0 0 0 0 0 0 0
56951- 0 0 0 0 0 0 0 0 0 0 0 0
56952- 0 0 0 0 0 0 0 0 0 10 10 10
56953- 30 30 30 78 78 78 163 133 67 210 150 10
56954-236 178 12 246 186 14 246 190 14 246 190 14
56955-246 190 14 246 190 14 246 190 14 246 190 14
56956-246 190 14 246 190 14 246 190 14 246 190 14
56957-246 190 14 246 190 14 246 190 14 246 190 14
56958-241 196 14 215 174 15 190 178 144 253 253 253
56959-253 253 253 253 253 253 253 253 253 253 253 253
56960-253 253 253 253 253 253 253 253 253 253 253 253
56961-253 253 253 253 253 253 253 253 253 253 253 253
56962-253 253 253 253 253 253 253 253 253 218 218 218
56963- 58 58 58 2 2 6 22 18 6 167 114 7
56964-216 158 10 236 178 12 246 186 14 246 190 14
56965-246 190 14 246 190 14 246 190 14 246 190 14
56966-246 190 14 246 190 14 246 190 14 246 190 14
56967-246 190 14 246 190 14 246 190 14 246 190 14
56968-246 190 14 246 186 14 242 186 14 190 150 46
56969- 54 54 54 22 22 22 6 6 6 0 0 0
56970- 0 0 0 0 0 0 0 0 0 0 0 0
56971- 0 0 0 0 0 0 0 0 0 0 0 0
56972- 0 0 0 0 0 0 0 0 0 14 14 14
56973- 38 38 38 86 86 86 180 133 36 213 154 11
56974-236 178 12 246 186 14 246 190 14 246 190 14
56975-246 190 14 246 190 14 246 190 14 246 190 14
56976-246 190 14 246 190 14 246 190 14 246 190 14
56977-246 190 14 246 190 14 246 190 14 246 190 14
56978-246 190 14 232 195 16 190 146 13 214 214 214
56979-253 253 253 253 253 253 253 253 253 253 253 253
56980-253 253 253 253 253 253 253 253 253 253 253 253
56981-253 253 253 253 253 253 253 253 253 253 253 253
56982-253 253 253 250 250 250 170 170 170 26 26 26
56983- 2 2 6 2 2 6 37 26 9 163 110 8
56984-219 162 10 239 182 13 246 186 14 246 190 14
56985-246 190 14 246 190 14 246 190 14 246 190 14
56986-246 190 14 246 190 14 246 190 14 246 190 14
56987-246 190 14 246 190 14 246 190 14 246 190 14
56988-246 186 14 236 178 12 224 166 10 142 122 72
56989- 46 46 46 18 18 18 6 6 6 0 0 0
56990- 0 0 0 0 0 0 0 0 0 0 0 0
56991- 0 0 0 0 0 0 0 0 0 0 0 0
56992- 0 0 0 0 0 0 6 6 6 18 18 18
56993- 50 50 50 109 106 95 192 133 9 224 166 10
56994-242 186 14 246 190 14 246 190 14 246 190 14
56995-246 190 14 246 190 14 246 190 14 246 190 14
56996-246 190 14 246 190 14 246 190 14 246 190 14
56997-246 190 14 246 190 14 246 190 14 246 190 14
56998-242 186 14 226 184 13 210 162 10 142 110 46
56999-226 226 226 253 253 253 253 253 253 253 253 253
57000-253 253 253 253 253 253 253 253 253 253 253 253
57001-253 253 253 253 253 253 253 253 253 253 253 253
57002-198 198 198 66 66 66 2 2 6 2 2 6
57003- 2 2 6 2 2 6 50 34 6 156 107 11
57004-219 162 10 239 182 13 246 186 14 246 190 14
57005-246 190 14 246 190 14 246 190 14 246 190 14
57006-246 190 14 246 190 14 246 190 14 246 190 14
57007-246 190 14 246 190 14 246 190 14 242 186 14
57008-234 174 13 213 154 11 154 122 46 66 66 66
57009- 30 30 30 10 10 10 0 0 0 0 0 0
57010- 0 0 0 0 0 0 0 0 0 0 0 0
57011- 0 0 0 0 0 0 0 0 0 0 0 0
57012- 0 0 0 0 0 0 6 6 6 22 22 22
57013- 58 58 58 154 121 60 206 145 10 234 174 13
57014-242 186 14 246 186 14 246 190 14 246 190 14
57015-246 190 14 246 190 14 246 190 14 246 190 14
57016-246 190 14 246 190 14 246 190 14 246 190 14
57017-246 190 14 246 190 14 246 190 14 246 190 14
57018-246 186 14 236 178 12 210 162 10 163 110 8
57019- 61 42 6 138 138 138 218 218 218 250 250 250
57020-253 253 253 253 253 253 253 253 253 250 250 250
57021-242 242 242 210 210 210 144 144 144 66 66 66
57022- 6 6 6 2 2 6 2 2 6 2 2 6
57023- 2 2 6 2 2 6 61 42 6 163 110 8
57024-216 158 10 236 178 12 246 190 14 246 190 14
57025-246 190 14 246 190 14 246 190 14 246 190 14
57026-246 190 14 246 190 14 246 190 14 246 190 14
57027-246 190 14 239 182 13 230 174 11 216 158 10
57028-190 142 34 124 112 88 70 70 70 38 38 38
57029- 18 18 18 6 6 6 0 0 0 0 0 0
57030- 0 0 0 0 0 0 0 0 0 0 0 0
57031- 0 0 0 0 0 0 0 0 0 0 0 0
57032- 0 0 0 0 0 0 6 6 6 22 22 22
57033- 62 62 62 168 124 44 206 145 10 224 166 10
57034-236 178 12 239 182 13 242 186 14 242 186 14
57035-246 186 14 246 190 14 246 190 14 246 190 14
57036-246 190 14 246 190 14 246 190 14 246 190 14
57037-246 190 14 246 190 14 246 190 14 246 190 14
57038-246 190 14 236 178 12 216 158 10 175 118 6
57039- 80 54 7 2 2 6 6 6 6 30 30 30
57040- 54 54 54 62 62 62 50 50 50 38 38 38
57041- 14 14 14 2 2 6 2 2 6 2 2 6
57042- 2 2 6 2 2 6 2 2 6 2 2 6
57043- 2 2 6 6 6 6 80 54 7 167 114 7
57044-213 154 11 236 178 12 246 190 14 246 190 14
57045-246 190 14 246 190 14 246 190 14 246 190 14
57046-246 190 14 242 186 14 239 182 13 239 182 13
57047-230 174 11 210 150 10 174 135 50 124 112 88
57048- 82 82 82 54 54 54 34 34 34 18 18 18
57049- 6 6 6 0 0 0 0 0 0 0 0 0
57050- 0 0 0 0 0 0 0 0 0 0 0 0
57051- 0 0 0 0 0 0 0 0 0 0 0 0
57052- 0 0 0 0 0 0 6 6 6 18 18 18
57053- 50 50 50 158 118 36 192 133 9 200 144 11
57054-216 158 10 219 162 10 224 166 10 226 170 11
57055-230 174 11 236 178 12 239 182 13 239 182 13
57056-242 186 14 246 186 14 246 190 14 246 190 14
57057-246 190 14 246 190 14 246 190 14 246 190 14
57058-246 186 14 230 174 11 210 150 10 163 110 8
57059-104 69 6 10 10 10 2 2 6 2 2 6
57060- 2 2 6 2 2 6 2 2 6 2 2 6
57061- 2 2 6 2 2 6 2 2 6 2 2 6
57062- 2 2 6 2 2 6 2 2 6 2 2 6
57063- 2 2 6 6 6 6 91 60 6 167 114 7
57064-206 145 10 230 174 11 242 186 14 246 190 14
57065-246 190 14 246 190 14 246 186 14 242 186 14
57066-239 182 13 230 174 11 224 166 10 213 154 11
57067-180 133 36 124 112 88 86 86 86 58 58 58
57068- 38 38 38 22 22 22 10 10 10 6 6 6
57069- 0 0 0 0 0 0 0 0 0 0 0 0
57070- 0 0 0 0 0 0 0 0 0 0 0 0
57071- 0 0 0 0 0 0 0 0 0 0 0 0
57072- 0 0 0 0 0 0 0 0 0 14 14 14
57073- 34 34 34 70 70 70 138 110 50 158 118 36
57074-167 114 7 180 123 7 192 133 9 197 138 11
57075-200 144 11 206 145 10 213 154 11 219 162 10
57076-224 166 10 230 174 11 239 182 13 242 186 14
57077-246 186 14 246 186 14 246 186 14 246 186 14
57078-239 182 13 216 158 10 185 133 11 152 99 6
57079-104 69 6 18 14 6 2 2 6 2 2 6
57080- 2 2 6 2 2 6 2 2 6 2 2 6
57081- 2 2 6 2 2 6 2 2 6 2 2 6
57082- 2 2 6 2 2 6 2 2 6 2 2 6
57083- 2 2 6 6 6 6 80 54 7 152 99 6
57084-192 133 9 219 162 10 236 178 12 239 182 13
57085-246 186 14 242 186 14 239 182 13 236 178 12
57086-224 166 10 206 145 10 192 133 9 154 121 60
57087- 94 94 94 62 62 62 42 42 42 22 22 22
57088- 14 14 14 6 6 6 0 0 0 0 0 0
57089- 0 0 0 0 0 0 0 0 0 0 0 0
57090- 0 0 0 0 0 0 0 0 0 0 0 0
57091- 0 0 0 0 0 0 0 0 0 0 0 0
57092- 0 0 0 0 0 0 0 0 0 6 6 6
57093- 18 18 18 34 34 34 58 58 58 78 78 78
57094-101 98 89 124 112 88 142 110 46 156 107 11
57095-163 110 8 167 114 7 175 118 6 180 123 7
57096-185 133 11 197 138 11 210 150 10 219 162 10
57097-226 170 11 236 178 12 236 178 12 234 174 13
57098-219 162 10 197 138 11 163 110 8 130 83 6
57099- 91 60 6 10 10 10 2 2 6 2 2 6
57100- 18 18 18 38 38 38 38 38 38 38 38 38
57101- 38 38 38 38 38 38 38 38 38 38 38 38
57102- 38 38 38 38 38 38 26 26 26 2 2 6
57103- 2 2 6 6 6 6 70 47 6 137 92 6
57104-175 118 6 200 144 11 219 162 10 230 174 11
57105-234 174 13 230 174 11 219 162 10 210 150 10
57106-192 133 9 163 110 8 124 112 88 82 82 82
57107- 50 50 50 30 30 30 14 14 14 6 6 6
57108- 0 0 0 0 0 0 0 0 0 0 0 0
57109- 0 0 0 0 0 0 0 0 0 0 0 0
57110- 0 0 0 0 0 0 0 0 0 0 0 0
57111- 0 0 0 0 0 0 0 0 0 0 0 0
57112- 0 0 0 0 0 0 0 0 0 0 0 0
57113- 6 6 6 14 14 14 22 22 22 34 34 34
57114- 42 42 42 58 58 58 74 74 74 86 86 86
57115-101 98 89 122 102 70 130 98 46 121 87 25
57116-137 92 6 152 99 6 163 110 8 180 123 7
57117-185 133 11 197 138 11 206 145 10 200 144 11
57118-180 123 7 156 107 11 130 83 6 104 69 6
57119- 50 34 6 54 54 54 110 110 110 101 98 89
57120- 86 86 86 82 82 82 78 78 78 78 78 78
57121- 78 78 78 78 78 78 78 78 78 78 78 78
57122- 78 78 78 82 82 82 86 86 86 94 94 94
57123-106 106 106 101 101 101 86 66 34 124 80 6
57124-156 107 11 180 123 7 192 133 9 200 144 11
57125-206 145 10 200 144 11 192 133 9 175 118 6
57126-139 102 15 109 106 95 70 70 70 42 42 42
57127- 22 22 22 10 10 10 0 0 0 0 0 0
57128- 0 0 0 0 0 0 0 0 0 0 0 0
57129- 0 0 0 0 0 0 0 0 0 0 0 0
57130- 0 0 0 0 0 0 0 0 0 0 0 0
57131- 0 0 0 0 0 0 0 0 0 0 0 0
57132- 0 0 0 0 0 0 0 0 0 0 0 0
57133- 0 0 0 0 0 0 6 6 6 10 10 10
57134- 14 14 14 22 22 22 30 30 30 38 38 38
57135- 50 50 50 62 62 62 74 74 74 90 90 90
57136-101 98 89 112 100 78 121 87 25 124 80 6
57137-137 92 6 152 99 6 152 99 6 152 99 6
57138-138 86 6 124 80 6 98 70 6 86 66 30
57139-101 98 89 82 82 82 58 58 58 46 46 46
57140- 38 38 38 34 34 34 34 34 34 34 34 34
57141- 34 34 34 34 34 34 34 34 34 34 34 34
57142- 34 34 34 34 34 34 38 38 38 42 42 42
57143- 54 54 54 82 82 82 94 86 76 91 60 6
57144-134 86 6 156 107 11 167 114 7 175 118 6
57145-175 118 6 167 114 7 152 99 6 121 87 25
57146-101 98 89 62 62 62 34 34 34 18 18 18
57147- 6 6 6 0 0 0 0 0 0 0 0 0
57148- 0 0 0 0 0 0 0 0 0 0 0 0
57149- 0 0 0 0 0 0 0 0 0 0 0 0
57150- 0 0 0 0 0 0 0 0 0 0 0 0
57151- 0 0 0 0 0 0 0 0 0 0 0 0
57152- 0 0 0 0 0 0 0 0 0 0 0 0
57153- 0 0 0 0 0 0 0 0 0 0 0 0
57154- 0 0 0 6 6 6 6 6 6 10 10 10
57155- 18 18 18 22 22 22 30 30 30 42 42 42
57156- 50 50 50 66 66 66 86 86 86 101 98 89
57157-106 86 58 98 70 6 104 69 6 104 69 6
57158-104 69 6 91 60 6 82 62 34 90 90 90
57159- 62 62 62 38 38 38 22 22 22 14 14 14
57160- 10 10 10 10 10 10 10 10 10 10 10 10
57161- 10 10 10 10 10 10 6 6 6 10 10 10
57162- 10 10 10 10 10 10 10 10 10 14 14 14
57163- 22 22 22 42 42 42 70 70 70 89 81 66
57164- 80 54 7 104 69 6 124 80 6 137 92 6
57165-134 86 6 116 81 8 100 82 52 86 86 86
57166- 58 58 58 30 30 30 14 14 14 6 6 6
57167- 0 0 0 0 0 0 0 0 0 0 0 0
57168- 0 0 0 0 0 0 0 0 0 0 0 0
57169- 0 0 0 0 0 0 0 0 0 0 0 0
57170- 0 0 0 0 0 0 0 0 0 0 0 0
57171- 0 0 0 0 0 0 0 0 0 0 0 0
57172- 0 0 0 0 0 0 0 0 0 0 0 0
57173- 0 0 0 0 0 0 0 0 0 0 0 0
57174- 0 0 0 0 0 0 0 0 0 0 0 0
57175- 0 0 0 6 6 6 10 10 10 14 14 14
57176- 18 18 18 26 26 26 38 38 38 54 54 54
57177- 70 70 70 86 86 86 94 86 76 89 81 66
57178- 89 81 66 86 86 86 74 74 74 50 50 50
57179- 30 30 30 14 14 14 6 6 6 0 0 0
57180- 0 0 0 0 0 0 0 0 0 0 0 0
57181- 0 0 0 0 0 0 0 0 0 0 0 0
57182- 0 0 0 0 0 0 0 0 0 0 0 0
57183- 6 6 6 18 18 18 34 34 34 58 58 58
57184- 82 82 82 89 81 66 89 81 66 89 81 66
57185- 94 86 66 94 86 76 74 74 74 50 50 50
57186- 26 26 26 14 14 14 6 6 6 0 0 0
57187- 0 0 0 0 0 0 0 0 0 0 0 0
57188- 0 0 0 0 0 0 0 0 0 0 0 0
57189- 0 0 0 0 0 0 0 0 0 0 0 0
57190- 0 0 0 0 0 0 0 0 0 0 0 0
57191- 0 0 0 0 0 0 0 0 0 0 0 0
57192- 0 0 0 0 0 0 0 0 0 0 0 0
57193- 0 0 0 0 0 0 0 0 0 0 0 0
57194- 0 0 0 0 0 0 0 0 0 0 0 0
57195- 0 0 0 0 0 0 0 0 0 0 0 0
57196- 6 6 6 6 6 6 14 14 14 18 18 18
57197- 30 30 30 38 38 38 46 46 46 54 54 54
57198- 50 50 50 42 42 42 30 30 30 18 18 18
57199- 10 10 10 0 0 0 0 0 0 0 0 0
57200- 0 0 0 0 0 0 0 0 0 0 0 0
57201- 0 0 0 0 0 0 0 0 0 0 0 0
57202- 0 0 0 0 0 0 0 0 0 0 0 0
57203- 0 0 0 6 6 6 14 14 14 26 26 26
57204- 38 38 38 50 50 50 58 58 58 58 58 58
57205- 54 54 54 42 42 42 30 30 30 18 18 18
57206- 10 10 10 0 0 0 0 0 0 0 0 0
57207- 0 0 0 0 0 0 0 0 0 0 0 0
57208- 0 0 0 0 0 0 0 0 0 0 0 0
57209- 0 0 0 0 0 0 0 0 0 0 0 0
57210- 0 0 0 0 0 0 0 0 0 0 0 0
57211- 0 0 0 0 0 0 0 0 0 0 0 0
57212- 0 0 0 0 0 0 0 0 0 0 0 0
57213- 0 0 0 0 0 0 0 0 0 0 0 0
57214- 0 0 0 0 0 0 0 0 0 0 0 0
57215- 0 0 0 0 0 0 0 0 0 0 0 0
57216- 0 0 0 0 0 0 0 0 0 6 6 6
57217- 6 6 6 10 10 10 14 14 14 18 18 18
57218- 18 18 18 14 14 14 10 10 10 6 6 6
57219- 0 0 0 0 0 0 0 0 0 0 0 0
57220- 0 0 0 0 0 0 0 0 0 0 0 0
57221- 0 0 0 0 0 0 0 0 0 0 0 0
57222- 0 0 0 0 0 0 0 0 0 0 0 0
57223- 0 0 0 0 0 0 0 0 0 6 6 6
57224- 14 14 14 18 18 18 22 22 22 22 22 22
57225- 18 18 18 14 14 14 10 10 10 6 6 6
57226- 0 0 0 0 0 0 0 0 0 0 0 0
57227- 0 0 0 0 0 0 0 0 0 0 0 0
57228- 0 0 0 0 0 0 0 0 0 0 0 0
57229- 0 0 0 0 0 0 0 0 0 0 0 0
57230- 0 0 0 0 0 0 0 0 0 0 0 0
57231+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57238+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57242+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57244+4 4 4 4 4 4
57245+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57252+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57256+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57257+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57258+4 4 4 4 4 4
57259+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57272+4 4 4 4 4 4
57273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57286+4 4 4 4 4 4
57287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57300+4 4 4 4 4 4
57301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57314+4 4 4 4 4 4
57315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57319+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
57320+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
57321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57324+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
57325+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57326+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
57327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57328+4 4 4 4 4 4
57329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57333+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
57334+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
57335+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57338+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
57339+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
57340+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
57341+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57342+4 4 4 4 4 4
57343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57347+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
57348+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
57349+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57352+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
57353+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
57354+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
57355+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
57356+4 4 4 4 4 4
57357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57360+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
57361+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
57362+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
57363+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
57364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57365+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
57366+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
57367+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
57368+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
57369+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
57370+4 4 4 4 4 4
57371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57374+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
57375+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
57376+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
57377+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
57378+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
57379+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
57380+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
57381+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
57382+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
57383+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
57384+4 4 4 4 4 4
57385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
57388+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
57389+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
57390+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
57391+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
57392+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
57393+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
57394+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
57395+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
57396+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
57397+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
57398+4 4 4 4 4 4
57399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57401+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
57402+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
57403+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
57404+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
57405+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
57406+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
57407+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
57408+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
57409+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
57410+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
57411+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
57412+4 4 4 4 4 4
57413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57415+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
57416+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
57417+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
57418+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
57419+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
57420+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
57421+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
57422+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
57423+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
57424+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
57425+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
57426+4 4 4 4 4 4
57427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57429+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
57430+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
57431+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
57432+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
57433+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
57434+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
57435+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
57436+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
57437+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
57438+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
57439+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
57440+4 4 4 4 4 4
57441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57443+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
57444+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
57445+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
57446+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
57447+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
57448+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
57449+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
57450+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
57451+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
57452+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
57453+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
57454+4 4 4 4 4 4
57455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57456+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
57457+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
57458+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
57459+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
57460+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
57461+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
57462+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
57463+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
57464+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
57465+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
57466+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
57467+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
57468+4 4 4 4 4 4
57469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57470+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
57471+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
57472+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
57473+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
57474+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
57475+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
57476+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
57477+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
57478+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
57479+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
57480+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
57481+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
57482+0 0 0 4 4 4
57483+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
57484+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
57485+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
57486+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
57487+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
57488+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
57489+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
57490+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
57491+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
57492+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
57493+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
57494+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
57495+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
57496+2 0 0 0 0 0
57497+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
57498+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
57499+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
57500+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
57501+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
57502+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
57503+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
57504+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
57505+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
57506+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
57507+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
57508+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
57509+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
57510+37 38 37 0 0 0
57511+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57512+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
57513+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
57514+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
57515+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
57516+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
57517+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
57518+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
57519+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
57520+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
57521+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
57522+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
57523+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
57524+85 115 134 4 0 0
57525+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
57526+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
57527+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
57528+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
57529+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
57530+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
57531+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
57532+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
57533+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
57534+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
57535+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
57536+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
57537+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
57538+60 73 81 4 0 0
57539+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
57540+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
57541+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
57542+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
57543+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
57544+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
57545+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
57546+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
57547+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
57548+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
57549+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
57550+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
57551+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
57552+16 19 21 4 0 0
57553+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
57554+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
57555+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
57556+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
57557+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
57558+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
57559+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
57560+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
57561+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
57562+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
57563+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
57564+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
57565+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
57566+4 0 0 4 3 3
57567+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
57568+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
57569+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
57570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
57571+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
57572+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
57573+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
57574+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
57575+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
57576+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
57577+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
57578+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
57579+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
57580+3 2 2 4 4 4
57581+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
57582+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
57583+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
57584+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
57585+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
57586+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
57587+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
57588+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
57589+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
57590+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
57591+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
57592+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
57593+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
57594+4 4 4 4 4 4
57595+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
57596+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
57597+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
57598+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
57599+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
57600+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
57601+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
57602+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
57603+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
57604+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
57605+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
57606+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
57607+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
57608+4 4 4 4 4 4
57609+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
57610+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
57611+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
57612+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
57613+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
57614+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57615+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
57616+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
57617+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
57618+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
57619+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
57620+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
57621+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
57622+5 5 5 5 5 5
57623+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
57624+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
57625+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
57626+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
57627+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
57628+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57629+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
57630+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
57631+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
57632+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
57633+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
57634+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
57635+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
57636+5 5 5 4 4 4
57637+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
57638+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
57639+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
57640+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
57641+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57642+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
57643+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
57644+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
57645+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
57646+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
57647+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
57648+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
57649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57650+4 4 4 4 4 4
57651+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
57652+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
57653+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
57654+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
57655+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
57656+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57657+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57658+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
57659+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
57660+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
57661+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
57662+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
57663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57664+4 4 4 4 4 4
57665+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
57666+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
57667+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
57668+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
57669+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57670+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
57671+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
57672+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
57673+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
57674+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
57675+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
57676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57678+4 4 4 4 4 4
57679+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
57680+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
57681+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
57682+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
57683+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57684+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57685+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
57686+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
57687+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
57688+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
57689+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
57690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57692+4 4 4 4 4 4
57693+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
57694+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
57695+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
57696+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
57697+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57698+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
57699+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
57700+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
57701+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
57702+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
57703+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57706+4 4 4 4 4 4
57707+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
57708+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
57709+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
57710+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
57711+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
57712+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
57713+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
57714+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
57715+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
57716+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
57717+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
57718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57720+4 4 4 4 4 4
57721+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
57722+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
57723+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
57724+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
57725+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
57726+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
57727+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
57728+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
57729+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
57730+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
57731+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
57732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57734+4 4 4 4 4 4
57735+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
57736+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
57737+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
57738+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57739+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
57740+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
57741+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
57742+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
57743+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
57744+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
57745+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57748+4 4 4 4 4 4
57749+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
57750+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
57751+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
57752+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57753+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57754+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
57755+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
57756+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
57757+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
57758+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
57759+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57762+4 4 4 4 4 4
57763+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
57764+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
57765+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57766+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
57767+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57768+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
57769+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
57770+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
57771+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
57772+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
57773+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57776+4 4 4 4 4 4
57777+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
57778+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
57779+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57780+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
57781+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57782+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
57783+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
57784+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
57785+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57786+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57787+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57790+4 4 4 4 4 4
57791+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57792+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
57793+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
57794+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
57795+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
57796+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
57797+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
57798+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
57799+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57800+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57801+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57804+4 4 4 4 4 4
57805+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
57806+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
57807+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
57808+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
57809+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57810+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
57811+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
57812+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
57813+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57814+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57815+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57818+4 4 4 4 4 4
57819+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
57820+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
57821+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57822+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
57823+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
57824+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
57825+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
57826+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
57827+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57828+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57829+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57832+4 4 4 4 4 4
57833+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
57834+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
57835+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57836+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
57837+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
57838+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
57839+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
57840+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
57841+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
57842+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57843+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57846+4 4 4 4 4 4
57847+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57848+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
57849+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
57850+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
57851+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
57852+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
57853+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
57854+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
57855+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57856+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57857+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57860+4 4 4 4 4 4
57861+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
57862+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
57863+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57864+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
57865+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
57866+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
57867+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
57868+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
57869+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
57870+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57871+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57874+4 4 4 4 4 4
57875+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
57876+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
57877+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
57878+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
57879+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
57880+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
57881+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
57882+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
57883+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57884+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57885+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57888+4 4 4 4 4 4
57889+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57890+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
57891+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
57892+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
57893+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
57894+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
57895+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
57896+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
57897+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57898+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57899+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57902+4 4 4 4 4 4
57903+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57904+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
57905+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
57906+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
57907+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
57908+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
57909+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57910+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
57911+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
57912+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57913+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57916+4 4 4 4 4 4
57917+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57918+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
57919+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
57920+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57921+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
57922+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
57923+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
57924+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
57925+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
57926+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57927+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57930+4 4 4 4 4 4
57931+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
57932+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
57933+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
57934+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
57935+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
57936+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
57937+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
57938+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
57939+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
57940+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57941+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57944+4 4 4 4 4 4
57945+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57946+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
57947+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
57948+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
57949+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
57950+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
57951+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
57952+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
57953+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
57954+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57955+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57958+4 4 4 4 4 4
57959+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
57960+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
57961+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
57962+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
57963+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
57964+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
57965+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
57966+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
57967+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
57968+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57969+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57972+4 4 4 4 4 4
57973+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
57974+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
57975+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
57976+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
57977+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
57978+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
57979+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
57980+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
57981+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
57982+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
57983+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57986+4 4 4 4 4 4
57987+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
57988+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
57989+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
57990+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
57991+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
57992+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
57993+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
57994+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
57995+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
57996+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
57997+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
57998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
57999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58000+4 4 4 4 4 4
58001+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
58002+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58003+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
58004+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
58005+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
58006+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
58007+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
58008+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
58009+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
58010+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
58011+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58014+4 4 4 4 4 4
58015+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
58016+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
58017+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
58018+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
58019+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
58020+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
58021+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58022+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
58023+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
58024+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
58025+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58028+4 4 4 4 4 4
58029+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
58030+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
58031+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
58032+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
58033+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
58034+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
58035+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
58036+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
58037+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
58038+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
58039+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58042+4 4 4 4 4 4
58043+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
58044+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
58045+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58046+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
58047+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
58048+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
58049+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
58050+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
58051+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
58052+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
58053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58056+4 4 4 4 4 4
58057+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58058+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
58059+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
58060+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
58061+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
58062+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
58063+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
58064+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
58065+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
58066+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58070+4 4 4 4 4 4
58071+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
58072+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
58073+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
58074+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
58075+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
58076+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
58077+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
58078+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
58079+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
58080+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
58081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58084+4 4 4 4 4 4
58085+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
58086+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
58087+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
58088+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
58089+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
58090+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
58091+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
58092+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
58093+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
58094+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58098+4 4 4 4 4 4
58099+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
58100+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58101+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
58102+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
58103+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
58104+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
58105+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
58106+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
58107+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
58108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58112+4 4 4 4 4 4
58113+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
58114+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
58115+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
58116+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
58117+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
58118+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
58119+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
58120+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
58121+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
58122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58126+4 4 4 4 4 4
58127+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58128+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
58129+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
58130+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
58131+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
58132+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
58133+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
58134+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
58135+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58137+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58139+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58140+4 4 4 4 4 4
58141+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
58142+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
58143+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
58144+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
58145+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
58146+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
58147+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
58148+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
58149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58151+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58152+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58153+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58154+4 4 4 4 4 4
58155+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
58156+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
58157+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
58158+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
58159+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
58160+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
58161+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
58162+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
58163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58165+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58166+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58167+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58168+4 4 4 4 4 4
58169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58170+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
58171+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58172+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
58173+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
58174+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
58175+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
58176+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
58177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58179+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58180+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58181+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58182+4 4 4 4 4 4
58183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58184+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
58185+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
58186+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
58187+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
58188+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
58189+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
58190+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
58191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58193+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58194+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58195+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58196+4 4 4 4 4 4
58197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58198+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
58199+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
58200+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
58201+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
58202+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
58203+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
58204+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58207+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58208+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58209+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58210+4 4 4 4 4 4
58211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58213+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
58214+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
58215+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
58216+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
58217+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
58218+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
58219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58221+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58222+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58223+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58224+4 4 4 4 4 4
58225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58227+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58228+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58229+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
58230+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
58231+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
58232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58235+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58236+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58237+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58238+4 4 4 4 4 4
58239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58241+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58242+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
58243+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
58244+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
58245+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
58246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58249+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58250+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58251+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58252+4 4 4 4 4 4
58253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58255+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58256+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
58257+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
58258+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
58259+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
58260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58263+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58264+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58266+4 4 4 4 4 4
58267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58270+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
58271+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
58272+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
58273+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
58274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58278+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58280+4 4 4 4 4 4
58281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
58285+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
58286+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
58287+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
58288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58292+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58294+4 4 4 4 4 4
58295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58299+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
58300+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
58301+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
58302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58306+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58308+4 4 4 4 4 4
58309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58313+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
58314+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
58315+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58320+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58322+4 4 4 4 4 4
58323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58327+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
58328+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
58329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58334+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58336+4 4 4 4 4 4
58337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58341+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
58342+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
58343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58348+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
58350+4 4 4 4 4 4
58351diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
58352index fef20db..d28b1ab 100644
58353--- a/drivers/xen/xenfs/xenstored.c
58354+++ b/drivers/xen/xenfs/xenstored.c
58355@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
58356 static int xsd_kva_open(struct inode *inode, struct file *file)
58357 {
58358 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
58359+#ifdef CONFIG_GRKERNSEC_HIDESYM
58360+ NULL);
58361+#else
58362 xen_store_interface);
58363+#endif
58364+
58365 if (!file->private_data)
58366 return -ENOMEM;
58367 return 0;
58368diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
58369index cc1cfae..41158ad 100644
58370--- a/fs/9p/vfs_addr.c
58371+++ b/fs/9p/vfs_addr.c
58372@@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
58373
58374 retval = v9fs_file_write_internal(inode,
58375 v9inode->writeback_fid,
58376- (__force const char __user *)buffer,
58377+ (const char __force_user *)buffer,
58378 len, &offset, 0);
58379 if (retval > 0)
58380 retval = 0;
58381diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
58382index 7fa4f7a..a7ebf8c 100644
58383--- a/fs/9p/vfs_inode.c
58384+++ b/fs/9p/vfs_inode.c
58385@@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58386 void
58387 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
58388 {
58389- char *s = nd_get_link(nd);
58390+ const char *s = nd_get_link(nd);
58391
58392 p9_debug(P9_DEBUG_VFS, " %s %s\n",
58393 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
58394diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
58395index 370b24c..ff0be7b 100644
58396--- a/fs/Kconfig.binfmt
58397+++ b/fs/Kconfig.binfmt
58398@@ -103,7 +103,7 @@ config HAVE_AOUT
58399
58400 config BINFMT_AOUT
58401 tristate "Kernel support for a.out and ECOFF binaries"
58402- depends on HAVE_AOUT
58403+ depends on HAVE_AOUT && BROKEN
58404 ---help---
58405 A.out (Assembler.OUTput) is a set of formats for libraries and
58406 executables used in the earliest versions of UNIX. Linux used
58407diff --git a/fs/afs/inode.c b/fs/afs/inode.c
58408index 2946712..f737435 100644
58409--- a/fs/afs/inode.c
58410+++ b/fs/afs/inode.c
58411@@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58412 struct afs_vnode *vnode;
58413 struct super_block *sb;
58414 struct inode *inode;
58415- static atomic_t afs_autocell_ino;
58416+ static atomic_unchecked_t afs_autocell_ino;
58417
58418 _enter("{%x:%u},%*.*s,",
58419 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
58420@@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
58421 data.fid.unique = 0;
58422 data.fid.vnode = 0;
58423
58424- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
58425+ inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
58426 afs_iget5_autocell_test, afs_iget5_set,
58427 &data);
58428 if (!inode) {
58429diff --git a/fs/aio.c b/fs/aio.c
58430index 0ff7c46..7f5d132 100644
58431--- a/fs/aio.c
58432+++ b/fs/aio.c
58433@@ -388,7 +388,7 @@ static int aio_setup_ring(struct kioctx *ctx)
58434 size += sizeof(struct io_event) * nr_events;
58435
58436 nr_pages = PFN_UP(size);
58437- if (nr_pages < 0)
58438+ if (nr_pages <= 0)
58439 return -EINVAL;
58440
58441 file = aio_private_file(ctx, nr_pages);
58442diff --git a/fs/attr.c b/fs/attr.c
58443index 6530ced..4a827e2 100644
58444--- a/fs/attr.c
58445+++ b/fs/attr.c
58446@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
58447 unsigned long limit;
58448
58449 limit = rlimit(RLIMIT_FSIZE);
58450+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
58451 if (limit != RLIM_INFINITY && offset > limit)
58452 goto out_sig;
58453 if (offset > inode->i_sb->s_maxbytes)
58454diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
58455index 116fd38..c04182da 100644
58456--- a/fs/autofs4/waitq.c
58457+++ b/fs/autofs4/waitq.c
58458@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
58459 {
58460 unsigned long sigpipe, flags;
58461 mm_segment_t fs;
58462- const char *data = (const char *)addr;
58463+ const char __user *data = (const char __force_user *)addr;
58464 ssize_t wr = 0;
58465
58466 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
58467@@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
58468 return 1;
58469 }
58470
58471+#ifdef CONFIG_GRKERNSEC_HIDESYM
58472+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
58473+#endif
58474+
58475 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58476 enum autofs_notify notify)
58477 {
58478@@ -385,7 +389,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
58479
58480 /* If this is a direct mount request create a dummy name */
58481 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
58482+#ifdef CONFIG_GRKERNSEC_HIDESYM
58483+ /* this name does get written to userland via autofs4_write() */
58484+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
58485+#else
58486 qstr.len = sprintf(name, "%p", dentry);
58487+#endif
58488 else {
58489 qstr.len = autofs4_getpath(sbi, dentry, &name);
58490 if (!qstr.len) {
58491diff --git a/fs/befs/endian.h b/fs/befs/endian.h
58492index 2722387..56059b5 100644
58493--- a/fs/befs/endian.h
58494+++ b/fs/befs/endian.h
58495@@ -11,7 +11,7 @@
58496
58497 #include <asm/byteorder.h>
58498
58499-static inline u64
58500+static inline u64 __intentional_overflow(-1)
58501 fs64_to_cpu(const struct super_block *sb, fs64 n)
58502 {
58503 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58504@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
58505 return (__force fs64)cpu_to_be64(n);
58506 }
58507
58508-static inline u32
58509+static inline u32 __intentional_overflow(-1)
58510 fs32_to_cpu(const struct super_block *sb, fs32 n)
58511 {
58512 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58513@@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
58514 return (__force fs32)cpu_to_be32(n);
58515 }
58516
58517-static inline u16
58518+static inline u16 __intentional_overflow(-1)
58519 fs16_to_cpu(const struct super_block *sb, fs16 n)
58520 {
58521 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
58522diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
58523index ca0ba15..0fa3257 100644
58524--- a/fs/binfmt_aout.c
58525+++ b/fs/binfmt_aout.c
58526@@ -16,6 +16,7 @@
58527 #include <linux/string.h>
58528 #include <linux/fs.h>
58529 #include <linux/file.h>
58530+#include <linux/security.h>
58531 #include <linux/stat.h>
58532 #include <linux/fcntl.h>
58533 #include <linux/ptrace.h>
58534@@ -58,6 +59,8 @@ static int aout_core_dump(struct coredump_params *cprm)
58535 #endif
58536 # define START_STACK(u) ((void __user *)u.start_stack)
58537
58538+ memset(&dump, 0, sizeof(dump));
58539+
58540 fs = get_fs();
58541 set_fs(KERNEL_DS);
58542 has_dumped = 1;
58543@@ -68,10 +71,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58544
58545 /* If the size of the dump file exceeds the rlimit, then see what would happen
58546 if we wrote the stack, but not the data area. */
58547+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
58548 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
58549 dump.u_dsize = 0;
58550
58551 /* Make sure we have enough room to write the stack and data areas. */
58552+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
58553 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
58554 dump.u_ssize = 0;
58555
58556@@ -232,6 +237,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
58557 rlim = rlimit(RLIMIT_DATA);
58558 if (rlim >= RLIM_INFINITY)
58559 rlim = ~0;
58560+
58561+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
58562 if (ex.a_data + ex.a_bss > rlim)
58563 return -ENOMEM;
58564
58565@@ -264,6 +271,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
58566
58567 install_exec_creds(bprm);
58568
58569+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58570+ current->mm->pax_flags = 0UL;
58571+#endif
58572+
58573+#ifdef CONFIG_PAX_PAGEEXEC
58574+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
58575+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
58576+
58577+#ifdef CONFIG_PAX_EMUTRAMP
58578+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
58579+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
58580+#endif
58581+
58582+#ifdef CONFIG_PAX_MPROTECT
58583+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
58584+ current->mm->pax_flags |= MF_PAX_MPROTECT;
58585+#endif
58586+
58587+ }
58588+#endif
58589+
58590 if (N_MAGIC(ex) == OMAGIC) {
58591 unsigned long text_addr, map_size;
58592 loff_t pos;
58593@@ -321,7 +349,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
58594 }
58595
58596 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
58597- PROT_READ | PROT_WRITE | PROT_EXEC,
58598+ PROT_READ | PROT_WRITE,
58599 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
58600 fd_offset + ex.a_text);
58601 if (error != N_DATADDR(ex)) {
58602diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
58603index 3892c1a..4e27c04 100644
58604--- a/fs/binfmt_elf.c
58605+++ b/fs/binfmt_elf.c
58606@@ -34,6 +34,7 @@
58607 #include <linux/utsname.h>
58608 #include <linux/coredump.h>
58609 #include <linux/sched.h>
58610+#include <linux/xattr.h>
58611 #include <asm/uaccess.h>
58612 #include <asm/param.h>
58613 #include <asm/page.h>
58614@@ -47,7 +48,7 @@
58615
58616 static int load_elf_binary(struct linux_binprm *bprm);
58617 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
58618- int, int, unsigned long);
58619+ int, int, unsigned long) __intentional_overflow(-1);
58620
58621 #ifdef CONFIG_USELIB
58622 static int load_elf_library(struct file *);
58623@@ -65,6 +66,14 @@ static int elf_core_dump(struct coredump_params *cprm);
58624 #define elf_core_dump NULL
58625 #endif
58626
58627+#ifdef CONFIG_PAX_MPROTECT
58628+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
58629+#endif
58630+
58631+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58632+static void elf_handle_mmap(struct file *file);
58633+#endif
58634+
58635 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
58636 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
58637 #else
58638@@ -84,6 +93,15 @@ static struct linux_binfmt elf_format = {
58639 .load_binary = load_elf_binary,
58640 .load_shlib = load_elf_library,
58641 .core_dump = elf_core_dump,
58642+
58643+#ifdef CONFIG_PAX_MPROTECT
58644+ .handle_mprotect= elf_handle_mprotect,
58645+#endif
58646+
58647+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58648+ .handle_mmap = elf_handle_mmap,
58649+#endif
58650+
58651 .min_coredump = ELF_EXEC_PAGESIZE,
58652 };
58653
58654@@ -91,6 +109,8 @@ static struct linux_binfmt elf_format = {
58655
58656 static int set_brk(unsigned long start, unsigned long end)
58657 {
58658+ unsigned long e = end;
58659+
58660 start = ELF_PAGEALIGN(start);
58661 end = ELF_PAGEALIGN(end);
58662 if (end > start) {
58663@@ -99,7 +119,7 @@ static int set_brk(unsigned long start, unsigned long end)
58664 if (BAD_ADDR(addr))
58665 return addr;
58666 }
58667- current->mm->start_brk = current->mm->brk = end;
58668+ current->mm->start_brk = current->mm->brk = e;
58669 return 0;
58670 }
58671
58672@@ -160,12 +180,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58673 elf_addr_t __user *u_rand_bytes;
58674 const char *k_platform = ELF_PLATFORM;
58675 const char *k_base_platform = ELF_BASE_PLATFORM;
58676- unsigned char k_rand_bytes[16];
58677+ u32 k_rand_bytes[4];
58678 int items;
58679 elf_addr_t *elf_info;
58680 int ei_index = 0;
58681 const struct cred *cred = current_cred();
58682 struct vm_area_struct *vma;
58683+ unsigned long saved_auxv[AT_VECTOR_SIZE];
58684
58685 /*
58686 * In some cases (e.g. Hyper-Threading), we want to avoid L1
58687@@ -207,8 +228,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58688 * Generate 16 random bytes for userspace PRNG seeding.
58689 */
58690 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
58691- u_rand_bytes = (elf_addr_t __user *)
58692- STACK_ALLOC(p, sizeof(k_rand_bytes));
58693+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
58694+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
58695+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
58696+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
58697+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
58698+ u_rand_bytes = (elf_addr_t __user *) p;
58699 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
58700 return -EFAULT;
58701
58702@@ -323,9 +348,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
58703 return -EFAULT;
58704 current->mm->env_end = p;
58705
58706+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
58707+
58708 /* Put the elf_info on the stack in the right place. */
58709 sp = (elf_addr_t __user *)envp + 1;
58710- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
58711+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
58712 return -EFAULT;
58713 return 0;
58714 }
58715@@ -393,15 +420,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
58716 an ELF header */
58717
58718 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58719- struct file *interpreter, unsigned long *interp_map_addr,
58720- unsigned long no_base)
58721+ struct file *interpreter, unsigned long no_base)
58722 {
58723 struct elf_phdr *elf_phdata;
58724 struct elf_phdr *eppnt;
58725- unsigned long load_addr = 0;
58726+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
58727 int load_addr_set = 0;
58728 unsigned long last_bss = 0, elf_bss = 0;
58729- unsigned long error = ~0UL;
58730+ unsigned long error = -EINVAL;
58731 unsigned long total_size;
58732 int retval, i, size;
58733
58734@@ -447,6 +473,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58735 goto out_close;
58736 }
58737
58738+#ifdef CONFIG_PAX_SEGMEXEC
58739+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
58740+ pax_task_size = SEGMEXEC_TASK_SIZE;
58741+#endif
58742+
58743 eppnt = elf_phdata;
58744 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
58745 if (eppnt->p_type == PT_LOAD) {
58746@@ -470,8 +501,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58747 map_addr = elf_map(interpreter, load_addr + vaddr,
58748 eppnt, elf_prot, elf_type, total_size);
58749 total_size = 0;
58750- if (!*interp_map_addr)
58751- *interp_map_addr = map_addr;
58752 error = map_addr;
58753 if (BAD_ADDR(map_addr))
58754 goto out_close;
58755@@ -490,8 +519,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58756 k = load_addr + eppnt->p_vaddr;
58757 if (BAD_ADDR(k) ||
58758 eppnt->p_filesz > eppnt->p_memsz ||
58759- eppnt->p_memsz > TASK_SIZE ||
58760- TASK_SIZE - eppnt->p_memsz < k) {
58761+ eppnt->p_memsz > pax_task_size ||
58762+ pax_task_size - eppnt->p_memsz < k) {
58763 error = -ENOMEM;
58764 goto out_close;
58765 }
58766@@ -530,9 +559,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
58767 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
58768
58769 /* Map the last of the bss segment */
58770- error = vm_brk(elf_bss, last_bss - elf_bss);
58771- if (BAD_ADDR(error))
58772- goto out_close;
58773+ if (last_bss > elf_bss) {
58774+ error = vm_brk(elf_bss, last_bss - elf_bss);
58775+ if (BAD_ADDR(error))
58776+ goto out_close;
58777+ }
58778 }
58779
58780 error = load_addr;
58781@@ -543,6 +574,336 @@ out:
58782 return error;
58783 }
58784
58785+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58786+#ifdef CONFIG_PAX_SOFTMODE
58787+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
58788+{
58789+ unsigned long pax_flags = 0UL;
58790+
58791+#ifdef CONFIG_PAX_PAGEEXEC
58792+ if (elf_phdata->p_flags & PF_PAGEEXEC)
58793+ pax_flags |= MF_PAX_PAGEEXEC;
58794+#endif
58795+
58796+#ifdef CONFIG_PAX_SEGMEXEC
58797+ if (elf_phdata->p_flags & PF_SEGMEXEC)
58798+ pax_flags |= MF_PAX_SEGMEXEC;
58799+#endif
58800+
58801+#ifdef CONFIG_PAX_EMUTRAMP
58802+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58803+ pax_flags |= MF_PAX_EMUTRAMP;
58804+#endif
58805+
58806+#ifdef CONFIG_PAX_MPROTECT
58807+ if (elf_phdata->p_flags & PF_MPROTECT)
58808+ pax_flags |= MF_PAX_MPROTECT;
58809+#endif
58810+
58811+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58812+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
58813+ pax_flags |= MF_PAX_RANDMMAP;
58814+#endif
58815+
58816+ return pax_flags;
58817+}
58818+#endif
58819+
58820+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
58821+{
58822+ unsigned long pax_flags = 0UL;
58823+
58824+#ifdef CONFIG_PAX_PAGEEXEC
58825+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
58826+ pax_flags |= MF_PAX_PAGEEXEC;
58827+#endif
58828+
58829+#ifdef CONFIG_PAX_SEGMEXEC
58830+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
58831+ pax_flags |= MF_PAX_SEGMEXEC;
58832+#endif
58833+
58834+#ifdef CONFIG_PAX_EMUTRAMP
58835+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
58836+ pax_flags |= MF_PAX_EMUTRAMP;
58837+#endif
58838+
58839+#ifdef CONFIG_PAX_MPROTECT
58840+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
58841+ pax_flags |= MF_PAX_MPROTECT;
58842+#endif
58843+
58844+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58845+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
58846+ pax_flags |= MF_PAX_RANDMMAP;
58847+#endif
58848+
58849+ return pax_flags;
58850+}
58851+#endif
58852+
58853+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
58854+#ifdef CONFIG_PAX_SOFTMODE
58855+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
58856+{
58857+ unsigned long pax_flags = 0UL;
58858+
58859+#ifdef CONFIG_PAX_PAGEEXEC
58860+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
58861+ pax_flags |= MF_PAX_PAGEEXEC;
58862+#endif
58863+
58864+#ifdef CONFIG_PAX_SEGMEXEC
58865+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
58866+ pax_flags |= MF_PAX_SEGMEXEC;
58867+#endif
58868+
58869+#ifdef CONFIG_PAX_EMUTRAMP
58870+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
58871+ pax_flags |= MF_PAX_EMUTRAMP;
58872+#endif
58873+
58874+#ifdef CONFIG_PAX_MPROTECT
58875+ if (pax_flags_softmode & MF_PAX_MPROTECT)
58876+ pax_flags |= MF_PAX_MPROTECT;
58877+#endif
58878+
58879+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58880+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
58881+ pax_flags |= MF_PAX_RANDMMAP;
58882+#endif
58883+
58884+ return pax_flags;
58885+}
58886+#endif
58887+
58888+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
58889+{
58890+ unsigned long pax_flags = 0UL;
58891+
58892+#ifdef CONFIG_PAX_PAGEEXEC
58893+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
58894+ pax_flags |= MF_PAX_PAGEEXEC;
58895+#endif
58896+
58897+#ifdef CONFIG_PAX_SEGMEXEC
58898+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
58899+ pax_flags |= MF_PAX_SEGMEXEC;
58900+#endif
58901+
58902+#ifdef CONFIG_PAX_EMUTRAMP
58903+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
58904+ pax_flags |= MF_PAX_EMUTRAMP;
58905+#endif
58906+
58907+#ifdef CONFIG_PAX_MPROTECT
58908+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
58909+ pax_flags |= MF_PAX_MPROTECT;
58910+#endif
58911+
58912+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
58913+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
58914+ pax_flags |= MF_PAX_RANDMMAP;
58915+#endif
58916+
58917+ return pax_flags;
58918+}
58919+#endif
58920+
58921+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
58922+static unsigned long pax_parse_defaults(void)
58923+{
58924+ unsigned long pax_flags = 0UL;
58925+
58926+#ifdef CONFIG_PAX_SOFTMODE
58927+ if (pax_softmode)
58928+ return pax_flags;
58929+#endif
58930+
58931+#ifdef CONFIG_PAX_PAGEEXEC
58932+ pax_flags |= MF_PAX_PAGEEXEC;
58933+#endif
58934+
58935+#ifdef CONFIG_PAX_SEGMEXEC
58936+ pax_flags |= MF_PAX_SEGMEXEC;
58937+#endif
58938+
58939+#ifdef CONFIG_PAX_MPROTECT
58940+ pax_flags |= MF_PAX_MPROTECT;
58941+#endif
58942+
58943+#ifdef CONFIG_PAX_RANDMMAP
58944+ if (randomize_va_space)
58945+ pax_flags |= MF_PAX_RANDMMAP;
58946+#endif
58947+
58948+ return pax_flags;
58949+}
58950+
58951+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
58952+{
58953+ unsigned long pax_flags = PAX_PARSE_FLAGS_FALLBACK;
58954+
58955+#ifdef CONFIG_PAX_EI_PAX
58956+
58957+#ifdef CONFIG_PAX_SOFTMODE
58958+ if (pax_softmode)
58959+ return pax_flags;
58960+#endif
58961+
58962+ pax_flags = 0UL;
58963+
58964+#ifdef CONFIG_PAX_PAGEEXEC
58965+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
58966+ pax_flags |= MF_PAX_PAGEEXEC;
58967+#endif
58968+
58969+#ifdef CONFIG_PAX_SEGMEXEC
58970+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
58971+ pax_flags |= MF_PAX_SEGMEXEC;
58972+#endif
58973+
58974+#ifdef CONFIG_PAX_EMUTRAMP
58975+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
58976+ pax_flags |= MF_PAX_EMUTRAMP;
58977+#endif
58978+
58979+#ifdef CONFIG_PAX_MPROTECT
58980+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
58981+ pax_flags |= MF_PAX_MPROTECT;
58982+#endif
58983+
58984+#ifdef CONFIG_PAX_ASLR
58985+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
58986+ pax_flags |= MF_PAX_RANDMMAP;
58987+#endif
58988+
58989+#endif
58990+
58991+ return pax_flags;
58992+
58993+}
58994+
58995+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
58996+{
58997+
58998+#ifdef CONFIG_PAX_PT_PAX_FLAGS
58999+ unsigned long i;
59000+
59001+ for (i = 0UL; i < elf_ex->e_phnum; i++)
59002+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
59003+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
59004+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
59005+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
59006+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
59007+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
59008+ return PAX_PARSE_FLAGS_FALLBACK;
59009+
59010+#ifdef CONFIG_PAX_SOFTMODE
59011+ if (pax_softmode)
59012+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
59013+ else
59014+#endif
59015+
59016+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
59017+ break;
59018+ }
59019+#endif
59020+
59021+ return PAX_PARSE_FLAGS_FALLBACK;
59022+}
59023+
59024+static unsigned long pax_parse_xattr_pax(struct file * const file)
59025+{
59026+
59027+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
59028+ ssize_t xattr_size, i;
59029+ unsigned char xattr_value[sizeof("pemrs") - 1];
59030+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
59031+
59032+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
59033+ if (xattr_size < 0 || xattr_size > sizeof xattr_value)
59034+ return PAX_PARSE_FLAGS_FALLBACK;
59035+
59036+ for (i = 0; i < xattr_size; i++)
59037+ switch (xattr_value[i]) {
59038+ default:
59039+ return PAX_PARSE_FLAGS_FALLBACK;
59040+
59041+#define parse_flag(option1, option2, flag) \
59042+ case option1: \
59043+ if (pax_flags_hardmode & MF_PAX_##flag) \
59044+ return PAX_PARSE_FLAGS_FALLBACK;\
59045+ pax_flags_hardmode |= MF_PAX_##flag; \
59046+ break; \
59047+ case option2: \
59048+ if (pax_flags_softmode & MF_PAX_##flag) \
59049+ return PAX_PARSE_FLAGS_FALLBACK;\
59050+ pax_flags_softmode |= MF_PAX_##flag; \
59051+ break;
59052+
59053+ parse_flag('p', 'P', PAGEEXEC);
59054+ parse_flag('e', 'E', EMUTRAMP);
59055+ parse_flag('m', 'M', MPROTECT);
59056+ parse_flag('r', 'R', RANDMMAP);
59057+ parse_flag('s', 'S', SEGMEXEC);
59058+
59059+#undef parse_flag
59060+ }
59061+
59062+ if (pax_flags_hardmode & pax_flags_softmode)
59063+ return PAX_PARSE_FLAGS_FALLBACK;
59064+
59065+#ifdef CONFIG_PAX_SOFTMODE
59066+ if (pax_softmode)
59067+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
59068+ else
59069+#endif
59070+
59071+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
59072+#else
59073+ return PAX_PARSE_FLAGS_FALLBACK;
59074+#endif
59075+
59076+}
59077+
59078+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
59079+{
59080+ unsigned long pax_flags, ei_pax_flags, pt_pax_flags, xattr_pax_flags;
59081+
59082+ pax_flags = pax_parse_defaults();
59083+ ei_pax_flags = pax_parse_ei_pax(elf_ex);
59084+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
59085+ xattr_pax_flags = pax_parse_xattr_pax(file);
59086+
59087+ if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59088+ xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK &&
59089+ pt_pax_flags != xattr_pax_flags)
59090+ return -EINVAL;
59091+ if (xattr_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59092+ pax_flags = xattr_pax_flags;
59093+ else if (pt_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59094+ pax_flags = pt_pax_flags;
59095+ else if (ei_pax_flags != PAX_PARSE_FLAGS_FALLBACK)
59096+ pax_flags = ei_pax_flags;
59097+
59098+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
59099+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59100+ if ((__supported_pte_mask & _PAGE_NX))
59101+ pax_flags &= ~MF_PAX_SEGMEXEC;
59102+ else
59103+ pax_flags &= ~MF_PAX_PAGEEXEC;
59104+ }
59105+#endif
59106+
59107+ if (0 > pax_check_flags(&pax_flags))
59108+ return -EINVAL;
59109+
59110+ current->mm->pax_flags = pax_flags;
59111+ return 0;
59112+}
59113+#endif
59114+
59115 /*
59116 * These are the functions used to load ELF style executables and shared
59117 * libraries. There is no binary dependent code anywhere else.
59118@@ -556,6 +917,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
59119 {
59120 unsigned int random_variable = 0;
59121
59122+#ifdef CONFIG_PAX_RANDUSTACK
59123+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
59124+ return stack_top - current->mm->delta_stack;
59125+#endif
59126+
59127 if ((current->flags & PF_RANDOMIZE) &&
59128 !(current->personality & ADDR_NO_RANDOMIZE)) {
59129 random_variable = get_random_int() & STACK_RND_MASK;
59130@@ -574,7 +940,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59131 unsigned long load_addr = 0, load_bias = 0;
59132 int load_addr_set = 0;
59133 char * elf_interpreter = NULL;
59134- unsigned long error;
59135+ unsigned long error = 0;
59136 struct elf_phdr *elf_ppnt, *elf_phdata;
59137 unsigned long elf_bss, elf_brk;
59138 int retval, i;
59139@@ -589,6 +955,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
59140 struct elfhdr elf_ex;
59141 struct elfhdr interp_elf_ex;
59142 } *loc;
59143+ unsigned long pax_task_size;
59144
59145 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
59146 if (!loc) {
59147@@ -726,6 +1093,77 @@ static int load_elf_binary(struct linux_binprm *bprm)
59148 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
59149 may depend on the personality. */
59150 SET_PERSONALITY(loc->elf_ex);
59151+
59152+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59153+ current->mm->pax_flags = 0UL;
59154+#endif
59155+
59156+#ifdef CONFIG_PAX_DLRESOLVE
59157+ current->mm->call_dl_resolve = 0UL;
59158+#endif
59159+
59160+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
59161+ current->mm->call_syscall = 0UL;
59162+#endif
59163+
59164+#ifdef CONFIG_PAX_ASLR
59165+ current->mm->delta_mmap = 0UL;
59166+ current->mm->delta_stack = 0UL;
59167+#endif
59168+
59169+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
59170+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
59171+ send_sig(SIGKILL, current, 0);
59172+ goto out_free_dentry;
59173+ }
59174+#endif
59175+
59176+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
59177+ pax_set_initial_flags(bprm);
59178+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
59179+ if (pax_set_initial_flags_func)
59180+ (pax_set_initial_flags_func)(bprm);
59181+#endif
59182+
59183+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
59184+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
59185+ current->mm->context.user_cs_limit = PAGE_SIZE;
59186+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
59187+ }
59188+#endif
59189+
59190+#ifdef CONFIG_PAX_SEGMEXEC
59191+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
59192+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
59193+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
59194+ pax_task_size = SEGMEXEC_TASK_SIZE;
59195+ current->mm->def_flags |= VM_NOHUGEPAGE;
59196+ } else
59197+#endif
59198+
59199+ pax_task_size = TASK_SIZE;
59200+
59201+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
59202+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59203+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
59204+ put_cpu();
59205+ }
59206+#endif
59207+
59208+#ifdef CONFIG_PAX_ASLR
59209+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59210+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
59211+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
59212+ }
59213+#endif
59214+
59215+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
59216+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
59217+ executable_stack = EXSTACK_DISABLE_X;
59218+ current->personality &= ~READ_IMPLIES_EXEC;
59219+ } else
59220+#endif
59221+
59222 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
59223 current->personality |= READ_IMPLIES_EXEC;
59224
59225@@ -815,6 +1253,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
59226 #else
59227 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
59228 #endif
59229+
59230+#ifdef CONFIG_PAX_RANDMMAP
59231+ /* PaX: randomize base address at the default exe base if requested */
59232+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
59233+#ifdef CONFIG_SPARC64
59234+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
59235+#else
59236+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
59237+#endif
59238+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
59239+ elf_flags |= MAP_FIXED;
59240+ }
59241+#endif
59242+
59243 }
59244
59245 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
59246@@ -847,9 +1299,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
59247 * allowed task size. Note that p_filesz must always be
59248 * <= p_memsz so it is only necessary to check p_memsz.
59249 */
59250- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59251- elf_ppnt->p_memsz > TASK_SIZE ||
59252- TASK_SIZE - elf_ppnt->p_memsz < k) {
59253+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
59254+ elf_ppnt->p_memsz > pax_task_size ||
59255+ pax_task_size - elf_ppnt->p_memsz < k) {
59256 /* set_brk can never work. Avoid overflows. */
59257 send_sig(SIGKILL, current, 0);
59258 retval = -EINVAL;
59259@@ -888,17 +1340,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
59260 goto out_free_dentry;
59261 }
59262 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
59263- send_sig(SIGSEGV, current, 0);
59264- retval = -EFAULT; /* Nobody gets to see this, but.. */
59265- goto out_free_dentry;
59266+ /*
59267+ * This bss-zeroing can fail if the ELF
59268+ * file specifies odd protections. So
59269+ * we don't check the return value
59270+ */
59271 }
59272
59273+#ifdef CONFIG_PAX_RANDMMAP
59274+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
59275+ unsigned long start, size, flags;
59276+ vm_flags_t vm_flags;
59277+
59278+ start = ELF_PAGEALIGN(elf_brk);
59279+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
59280+ flags = MAP_FIXED | MAP_PRIVATE;
59281+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
59282+
59283+ down_write(&current->mm->mmap_sem);
59284+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
59285+ retval = -ENOMEM;
59286+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
59287+// if (current->personality & ADDR_NO_RANDOMIZE)
59288+// vm_flags |= VM_READ | VM_MAYREAD;
59289+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
59290+ retval = IS_ERR_VALUE(start) ? start : 0;
59291+ }
59292+ up_write(&current->mm->mmap_sem);
59293+ if (retval == 0)
59294+ retval = set_brk(start + size, start + size + PAGE_SIZE);
59295+ if (retval < 0) {
59296+ send_sig(SIGKILL, current, 0);
59297+ goto out_free_dentry;
59298+ }
59299+ }
59300+#endif
59301+
59302 if (elf_interpreter) {
59303- unsigned long interp_map_addr = 0;
59304-
59305 elf_entry = load_elf_interp(&loc->interp_elf_ex,
59306 interpreter,
59307- &interp_map_addr,
59308 load_bias);
59309 if (!IS_ERR((void *)elf_entry)) {
59310 /*
59311@@ -1130,7 +1610,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
59312 * Decide what to dump of a segment, part, all or none.
59313 */
59314 static unsigned long vma_dump_size(struct vm_area_struct *vma,
59315- unsigned long mm_flags)
59316+ unsigned long mm_flags, long signr)
59317 {
59318 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
59319
59320@@ -1168,7 +1648,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
59321 if (vma->vm_file == NULL)
59322 return 0;
59323
59324- if (FILTER(MAPPED_PRIVATE))
59325+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
59326 goto whole;
59327
59328 /*
59329@@ -1375,9 +1855,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
59330 {
59331 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
59332 int i = 0;
59333- do
59334+ do {
59335 i += 2;
59336- while (auxv[i - 2] != AT_NULL);
59337+ } while (auxv[i - 2] != AT_NULL);
59338 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
59339 }
59340
59341@@ -1386,7 +1866,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
59342 {
59343 mm_segment_t old_fs = get_fs();
59344 set_fs(KERNEL_DS);
59345- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
59346+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
59347 set_fs(old_fs);
59348 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
59349 }
59350@@ -2010,14 +2490,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
59351 }
59352
59353 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
59354- unsigned long mm_flags)
59355+ struct coredump_params *cprm)
59356 {
59357 struct vm_area_struct *vma;
59358 size_t size = 0;
59359
59360 for (vma = first_vma(current, gate_vma); vma != NULL;
59361 vma = next_vma(vma, gate_vma))
59362- size += vma_dump_size(vma, mm_flags);
59363+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59364 return size;
59365 }
59366
59367@@ -2108,7 +2588,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59368
59369 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
59370
59371- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
59372+ offset += elf_core_vma_data_size(gate_vma, cprm);
59373 offset += elf_core_extra_data_size();
59374 e_shoff = offset;
59375
59376@@ -2136,7 +2616,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59377 phdr.p_offset = offset;
59378 phdr.p_vaddr = vma->vm_start;
59379 phdr.p_paddr = 0;
59380- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
59381+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59382 phdr.p_memsz = vma->vm_end - vma->vm_start;
59383 offset += phdr.p_filesz;
59384 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
59385@@ -2169,7 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
59386 unsigned long addr;
59387 unsigned long end;
59388
59389- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
59390+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
59391
59392 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
59393 struct page *page;
59394@@ -2210,6 +2690,167 @@ out:
59395
59396 #endif /* CONFIG_ELF_CORE */
59397
59398+#ifdef CONFIG_PAX_MPROTECT
59399+/* PaX: non-PIC ELF libraries need relocations on their executable segments
59400+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
59401+ * we'll remove VM_MAYWRITE for good on RELRO segments.
59402+ *
59403+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
59404+ * basis because we want to allow the common case and not the special ones.
59405+ */
59406+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
59407+{
59408+ struct elfhdr elf_h;
59409+ struct elf_phdr elf_p;
59410+ unsigned long i;
59411+ unsigned long oldflags;
59412+ bool is_textrel_rw, is_textrel_rx, is_relro;
59413+
59414+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
59415+ return;
59416+
59417+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
59418+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
59419+
59420+#ifdef CONFIG_PAX_ELFRELOCS
59421+ /* possible TEXTREL */
59422+ is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
59423+ is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
59424+#else
59425+ is_textrel_rw = false;
59426+ is_textrel_rx = false;
59427+#endif
59428+
59429+ /* possible RELRO */
59430+ is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
59431+
59432+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
59433+ return;
59434+
59435+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59436+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59437+
59438+#ifdef CONFIG_PAX_ETEXECRELOCS
59439+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59440+#else
59441+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
59442+#endif
59443+
59444+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
59445+ !elf_check_arch(&elf_h) ||
59446+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59447+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59448+ return;
59449+
59450+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59451+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59452+ return;
59453+ switch (elf_p.p_type) {
59454+ case PT_DYNAMIC:
59455+ if (!is_textrel_rw && !is_textrel_rx)
59456+ continue;
59457+ i = 0UL;
59458+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
59459+ elf_dyn dyn;
59460+
59461+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
59462+ break;
59463+ if (dyn.d_tag == DT_NULL)
59464+ break;
59465+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
59466+ gr_log_textrel(vma);
59467+ if (is_textrel_rw)
59468+ vma->vm_flags |= VM_MAYWRITE;
59469+ else
59470+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
59471+ vma->vm_flags &= ~VM_MAYWRITE;
59472+ break;
59473+ }
59474+ i++;
59475+ }
59476+ is_textrel_rw = false;
59477+ is_textrel_rx = false;
59478+ continue;
59479+
59480+ case PT_GNU_RELRO:
59481+ if (!is_relro)
59482+ continue;
59483+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
59484+ vma->vm_flags &= ~VM_MAYWRITE;
59485+ is_relro = false;
59486+ continue;
59487+
59488+#ifdef CONFIG_PAX_PT_PAX_FLAGS
59489+ case PT_PAX_FLAGS: {
59490+ const char *msg_mprotect = "", *msg_emutramp = "";
59491+ char *buffer_lib, *buffer_exe;
59492+
59493+ if (elf_p.p_flags & PF_NOMPROTECT)
59494+ msg_mprotect = "MPROTECT disabled";
59495+
59496+#ifdef CONFIG_PAX_EMUTRAMP
59497+ if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
59498+ msg_emutramp = "EMUTRAMP enabled";
59499+#endif
59500+
59501+ if (!msg_mprotect[0] && !msg_emutramp[0])
59502+ continue;
59503+
59504+ if (!printk_ratelimit())
59505+ continue;
59506+
59507+ buffer_lib = (char *)__get_free_page(GFP_KERNEL);
59508+ buffer_exe = (char *)__get_free_page(GFP_KERNEL);
59509+ if (buffer_lib && buffer_exe) {
59510+ char *path_lib, *path_exe;
59511+
59512+ path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
59513+ path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
59514+
59515+ pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
59516+ (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
59517+
59518+ }
59519+ free_page((unsigned long)buffer_exe);
59520+ free_page((unsigned long)buffer_lib);
59521+ continue;
59522+ }
59523+#endif
59524+
59525+ }
59526+ }
59527+}
59528+#endif
59529+
59530+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59531+
59532+extern int grsec_enable_log_rwxmaps;
59533+
59534+static void elf_handle_mmap(struct file *file)
59535+{
59536+ struct elfhdr elf_h;
59537+ struct elf_phdr elf_p;
59538+ unsigned long i;
59539+
59540+ if (!grsec_enable_log_rwxmaps)
59541+ return;
59542+
59543+ if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
59544+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
59545+ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
59546+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
59547+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
59548+ return;
59549+
59550+ for (i = 0UL; i < elf_h.e_phnum; i++) {
59551+ if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
59552+ return;
59553+ if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
59554+ gr_log_ptgnustack(file);
59555+ }
59556+}
59557+#endif
59558+
59559 static int __init init_elf_binfmt(void)
59560 {
59561 register_binfmt(&elf_format);
59562diff --git a/fs/block_dev.c b/fs/block_dev.c
59563index 6d72746..536d1db 100644
59564--- a/fs/block_dev.c
59565+++ b/fs/block_dev.c
59566@@ -701,7 +701,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
59567 else if (bdev->bd_contains == bdev)
59568 return true; /* is a whole device which isn't held */
59569
59570- else if (whole->bd_holder == bd_may_claim)
59571+ else if (whole->bd_holder == (void *)bd_may_claim)
59572 return true; /* is a partition of a device that is being partitioned */
59573 else if (whole->bd_holder != NULL)
59574 return false; /* is a partition of a held device */
59575diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
59576index 8bbcc24..6f10d78 100644
59577--- a/fs/btrfs/ctree.c
59578+++ b/fs/btrfs/ctree.c
59579@@ -1174,9 +1174,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
59580 free_extent_buffer(buf);
59581 add_root_to_dirty_list(root);
59582 } else {
59583- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
59584- parent_start = parent->start;
59585- else
59586+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
59587+ if (parent)
59588+ parent_start = parent->start;
59589+ else
59590+ parent_start = 0;
59591+ } else
59592 parent_start = 0;
59593
59594 WARN_ON(trans->transid != btrfs_header_generation(parent));
59595diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
59596index a2e90f8..5135e5f 100644
59597--- a/fs/btrfs/delayed-inode.c
59598+++ b/fs/btrfs/delayed-inode.c
59599@@ -462,7 +462,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
59600
59601 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
59602 {
59603- int seq = atomic_inc_return(&delayed_root->items_seq);
59604+ int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
59605 if ((atomic_dec_return(&delayed_root->items) <
59606 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
59607 waitqueue_active(&delayed_root->wait))
59608@@ -1412,7 +1412,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
59609
59610 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
59611 {
59612- int val = atomic_read(&delayed_root->items_seq);
59613+ int val = atomic_read_unchecked(&delayed_root->items_seq);
59614
59615 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
59616 return 1;
59617@@ -1436,7 +1436,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
59618 int seq;
59619 int ret;
59620
59621- seq = atomic_read(&delayed_root->items_seq);
59622+ seq = atomic_read_unchecked(&delayed_root->items_seq);
59623
59624 ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
59625 if (ret)
59626diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
59627index f70119f..ab5894d 100644
59628--- a/fs/btrfs/delayed-inode.h
59629+++ b/fs/btrfs/delayed-inode.h
59630@@ -43,7 +43,7 @@ struct btrfs_delayed_root {
59631 */
59632 struct list_head prepare_list;
59633 atomic_t items; /* for delayed items */
59634- atomic_t items_seq; /* for delayed items */
59635+ atomic_unchecked_t items_seq; /* for delayed items */
59636 int nodes; /* for delayed nodes */
59637 wait_queue_head_t wait;
59638 };
59639@@ -90,7 +90,7 @@ static inline void btrfs_init_delayed_root(
59640 struct btrfs_delayed_root *delayed_root)
59641 {
59642 atomic_set(&delayed_root->items, 0);
59643- atomic_set(&delayed_root->items_seq, 0);
59644+ atomic_set_unchecked(&delayed_root->items_seq, 0);
59645 delayed_root->nodes = 0;
59646 spin_lock_init(&delayed_root->lock);
59647 init_waitqueue_head(&delayed_root->wait);
59648diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
59649index b765d41..5a8b0c3 100644
59650--- a/fs/btrfs/ioctl.c
59651+++ b/fs/btrfs/ioctl.c
59652@@ -3975,9 +3975,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
59653 for (i = 0; i < num_types; i++) {
59654 struct btrfs_space_info *tmp;
59655
59656+ /* Don't copy in more than we allocated */
59657 if (!slot_count)
59658 break;
59659
59660+ slot_count--;
59661+
59662 info = NULL;
59663 rcu_read_lock();
59664 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
59665@@ -3999,10 +4002,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
59666 memcpy(dest, &space, sizeof(space));
59667 dest++;
59668 space_args.total_spaces++;
59669- slot_count--;
59670 }
59671- if (!slot_count)
59672- break;
59673 }
59674 up_read(&info->groups_sem);
59675 }
59676diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
59677index c4124de..d7613eb6 100644
59678--- a/fs/btrfs/super.c
59679+++ b/fs/btrfs/super.c
59680@@ -270,7 +270,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
59681 function, line, errstr);
59682 return;
59683 }
59684- ACCESS_ONCE(trans->transaction->aborted) = errno;
59685+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
59686 /* Wake up anybody who may be waiting on this transaction */
59687 wake_up(&root->fs_info->transaction_wait);
59688 wake_up(&root->fs_info->transaction_blocked_wait);
59689diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
59690index 12e5355..cdf30c6 100644
59691--- a/fs/btrfs/sysfs.c
59692+++ b/fs/btrfs/sysfs.c
59693@@ -475,7 +475,7 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
59694 for (set = 0; set < FEAT_MAX; set++) {
59695 int i;
59696 struct attribute *attrs[2];
59697- struct attribute_group agroup = {
59698+ attribute_group_no_const agroup = {
59699 .name = "features",
59700 .attrs = attrs,
59701 };
59702diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
59703index e2e798a..f454c18 100644
59704--- a/fs/btrfs/tree-log.h
59705+++ b/fs/btrfs/tree-log.h
59706@@ -41,7 +41,7 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx)
59707 static inline void btrfs_set_log_full_commit(struct btrfs_fs_info *fs_info,
59708 struct btrfs_trans_handle *trans)
59709 {
59710- ACCESS_ONCE(fs_info->last_trans_log_full_commit) = trans->transid;
59711+ ACCESS_ONCE_RW(fs_info->last_trans_log_full_commit) = trans->transid;
59712 }
59713
59714 static inline int btrfs_need_log_full_commit(struct btrfs_fs_info *fs_info,
59715diff --git a/fs/buffer.c b/fs/buffer.c
59716index 72daaa5..60ffeb9 100644
59717--- a/fs/buffer.c
59718+++ b/fs/buffer.c
59719@@ -3432,7 +3432,7 @@ void __init buffer_init(void)
59720 bh_cachep = kmem_cache_create("buffer_head",
59721 sizeof(struct buffer_head), 0,
59722 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
59723- SLAB_MEM_SPREAD),
59724+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
59725 NULL);
59726
59727 /*
59728diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
59729index fbb08e9..0fda764 100644
59730--- a/fs/cachefiles/bind.c
59731+++ b/fs/cachefiles/bind.c
59732@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
59733 args);
59734
59735 /* start by checking things over */
59736- ASSERT(cache->fstop_percent >= 0 &&
59737- cache->fstop_percent < cache->fcull_percent &&
59738+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
59739 cache->fcull_percent < cache->frun_percent &&
59740 cache->frun_percent < 100);
59741
59742- ASSERT(cache->bstop_percent >= 0 &&
59743- cache->bstop_percent < cache->bcull_percent &&
59744+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
59745 cache->bcull_percent < cache->brun_percent &&
59746 cache->brun_percent < 100);
59747
59748diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
59749index ce1b115..4a6852c 100644
59750--- a/fs/cachefiles/daemon.c
59751+++ b/fs/cachefiles/daemon.c
59752@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
59753 if (n > buflen)
59754 return -EMSGSIZE;
59755
59756- if (copy_to_user(_buffer, buffer, n) != 0)
59757+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
59758 return -EFAULT;
59759
59760 return n;
59761@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
59762 if (test_bit(CACHEFILES_DEAD, &cache->flags))
59763 return -EIO;
59764
59765- if (datalen < 0 || datalen > PAGE_SIZE - 1)
59766+ if (datalen > PAGE_SIZE - 1)
59767 return -EOPNOTSUPP;
59768
59769 /* drag the command string into the kernel so we can parse it */
59770@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
59771 if (args[0] != '%' || args[1] != '\0')
59772 return -EINVAL;
59773
59774- if (fstop < 0 || fstop >= cache->fcull_percent)
59775+ if (fstop >= cache->fcull_percent)
59776 return cachefiles_daemon_range_error(cache, args);
59777
59778 cache->fstop_percent = fstop;
59779@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
59780 if (args[0] != '%' || args[1] != '\0')
59781 return -EINVAL;
59782
59783- if (bstop < 0 || bstop >= cache->bcull_percent)
59784+ if (bstop >= cache->bcull_percent)
59785 return cachefiles_daemon_range_error(cache, args);
59786
59787 cache->bstop_percent = bstop;
59788diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
59789index 8c52472..c4e3a69 100644
59790--- a/fs/cachefiles/internal.h
59791+++ b/fs/cachefiles/internal.h
59792@@ -66,7 +66,7 @@ struct cachefiles_cache {
59793 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
59794 struct rb_root active_nodes; /* active nodes (can't be culled) */
59795 rwlock_t active_lock; /* lock for active_nodes */
59796- atomic_t gravecounter; /* graveyard uniquifier */
59797+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
59798 unsigned frun_percent; /* when to stop culling (% files) */
59799 unsigned fcull_percent; /* when to start culling (% files) */
59800 unsigned fstop_percent; /* when to stop allocating (% files) */
59801@@ -178,19 +178,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
59802 * proc.c
59803 */
59804 #ifdef CONFIG_CACHEFILES_HISTOGRAM
59805-extern atomic_t cachefiles_lookup_histogram[HZ];
59806-extern atomic_t cachefiles_mkdir_histogram[HZ];
59807-extern atomic_t cachefiles_create_histogram[HZ];
59808+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59809+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59810+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
59811
59812 extern int __init cachefiles_proc_init(void);
59813 extern void cachefiles_proc_cleanup(void);
59814 static inline
59815-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
59816+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
59817 {
59818 unsigned long jif = jiffies - start_jif;
59819 if (jif >= HZ)
59820 jif = HZ - 1;
59821- atomic_inc(&histogram[jif]);
59822+ atomic_inc_unchecked(&histogram[jif]);
59823 }
59824
59825 #else
59826diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
59827index dad7d95..07475af 100644
59828--- a/fs/cachefiles/namei.c
59829+++ b/fs/cachefiles/namei.c
59830@@ -312,7 +312,7 @@ try_again:
59831 /* first step is to make up a grave dentry in the graveyard */
59832 sprintf(nbuffer, "%08x%08x",
59833 (uint32_t) get_seconds(),
59834- (uint32_t) atomic_inc_return(&cache->gravecounter));
59835+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
59836
59837 /* do the multiway lock magic */
59838 trap = lock_rename(cache->graveyard, dir);
59839diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
59840index eccd339..4c1d995 100644
59841--- a/fs/cachefiles/proc.c
59842+++ b/fs/cachefiles/proc.c
59843@@ -14,9 +14,9 @@
59844 #include <linux/seq_file.h>
59845 #include "internal.h"
59846
59847-atomic_t cachefiles_lookup_histogram[HZ];
59848-atomic_t cachefiles_mkdir_histogram[HZ];
59849-atomic_t cachefiles_create_histogram[HZ];
59850+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
59851+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
59852+atomic_unchecked_t cachefiles_create_histogram[HZ];
59853
59854 /*
59855 * display the latency histogram
59856@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
59857 return 0;
59858 default:
59859 index = (unsigned long) v - 3;
59860- x = atomic_read(&cachefiles_lookup_histogram[index]);
59861- y = atomic_read(&cachefiles_mkdir_histogram[index]);
59862- z = atomic_read(&cachefiles_create_histogram[index]);
59863+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
59864+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
59865+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
59866 if (x == 0 && y == 0 && z == 0)
59867 return 0;
59868
59869diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
59870index 25e745b..220e604 100644
59871--- a/fs/cachefiles/rdwr.c
59872+++ b/fs/cachefiles/rdwr.c
59873@@ -937,7 +937,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
59874 old_fs = get_fs();
59875 set_fs(KERNEL_DS);
59876 ret = file->f_op->write(
59877- file, (const void __user *) data, len, &pos);
59878+ file, (const void __force_user *) data, len, &pos);
59879 set_fs(old_fs);
59880 kunmap(page);
59881 file_end_write(file);
59882diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
59883index c29d6ae..719b9bb 100644
59884--- a/fs/ceph/dir.c
59885+++ b/fs/ceph/dir.c
59886@@ -129,6 +129,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
59887 struct dentry *dentry, *last;
59888 struct ceph_dentry_info *di;
59889 int err = 0;
59890+ char d_name[DNAME_INLINE_LEN];
59891+ const unsigned char *name;
59892
59893 /* claim ref on last dentry we returned */
59894 last = fi->dentry;
59895@@ -192,7 +194,12 @@ more:
59896
59897 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
59898 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
59899- if (!dir_emit(ctx, dentry->d_name.name,
59900+ name = dentry->d_name.name;
59901+ if (name == dentry->d_iname) {
59902+ memcpy(d_name, name, dentry->d_name.len);
59903+ name = d_name;
59904+ }
59905+ if (!dir_emit(ctx, name,
59906 dentry->d_name.len,
59907 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
59908 dentry->d_inode->i_mode >> 12)) {
59909@@ -250,7 +257,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
59910 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
59911 struct ceph_mds_client *mdsc = fsc->mdsc;
59912 unsigned frag = fpos_frag(ctx->pos);
59913- int off = fpos_off(ctx->pos);
59914+ unsigned int off = fpos_off(ctx->pos);
59915 int err;
59916 u32 ftype;
59917 struct ceph_mds_reply_info_parsed *rinfo;
59918diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
59919index a822a6e..4644256 100644
59920--- a/fs/ceph/ioctl.c
59921+++ b/fs/ceph/ioctl.c
59922@@ -41,7 +41,7 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
59923 /* validate striping parameters */
59924 if ((l->object_size & ~PAGE_MASK) ||
59925 (l->stripe_unit & ~PAGE_MASK) ||
59926- (l->stripe_unit != 0 &&
59927+ ((unsigned)l->stripe_unit != 0 &&
59928 ((unsigned)l->object_size % (unsigned)l->stripe_unit)))
59929 return -EINVAL;
59930
59931diff --git a/fs/ceph/super.c b/fs/ceph/super.c
59932index f6e1237..796ffd1 100644
59933--- a/fs/ceph/super.c
59934+++ b/fs/ceph/super.c
59935@@ -895,7 +895,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
59936 /*
59937 * construct our own bdi so we can control readahead, etc.
59938 */
59939-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
59940+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
59941
59942 static int ceph_register_bdi(struct super_block *sb,
59943 struct ceph_fs_client *fsc)
59944@@ -912,7 +912,7 @@ static int ceph_register_bdi(struct super_block *sb,
59945 default_backing_dev_info.ra_pages;
59946
59947 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
59948- atomic_long_inc_return(&bdi_seq));
59949+ atomic_long_inc_return_unchecked(&bdi_seq));
59950 if (!err)
59951 sb->s_bdi = &fsc->backing_dev_info;
59952 return err;
59953diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
59954index 44ec726..bcb06a3 100644
59955--- a/fs/cifs/cifs_debug.c
59956+++ b/fs/cifs/cifs_debug.c
59957@@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59958
59959 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
59960 #ifdef CONFIG_CIFS_STATS2
59961- atomic_set(&totBufAllocCount, 0);
59962- atomic_set(&totSmBufAllocCount, 0);
59963+ atomic_set_unchecked(&totBufAllocCount, 0);
59964+ atomic_set_unchecked(&totSmBufAllocCount, 0);
59965 #endif /* CONFIG_CIFS_STATS2 */
59966 spin_lock(&cifs_tcp_ses_lock);
59967 list_for_each(tmp1, &cifs_tcp_ses_list) {
59968@@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
59969 tcon = list_entry(tmp3,
59970 struct cifs_tcon,
59971 tcon_list);
59972- atomic_set(&tcon->num_smbs_sent, 0);
59973+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
59974 if (server->ops->clear_stats)
59975 server->ops->clear_stats(tcon);
59976 }
59977@@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59978 smBufAllocCount.counter, cifs_min_small);
59979 #ifdef CONFIG_CIFS_STATS2
59980 seq_printf(m, "Total Large %d Small %d Allocations\n",
59981- atomic_read(&totBufAllocCount),
59982- atomic_read(&totSmBufAllocCount));
59983+ atomic_read_unchecked(&totBufAllocCount),
59984+ atomic_read_unchecked(&totSmBufAllocCount));
59985 #endif /* CONFIG_CIFS_STATS2 */
59986
59987 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
59988@@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
59989 if (tcon->need_reconnect)
59990 seq_puts(m, "\tDISCONNECTED ");
59991 seq_printf(m, "\nSMBs: %d",
59992- atomic_read(&tcon->num_smbs_sent));
59993+ atomic_read_unchecked(&tcon->num_smbs_sent));
59994 if (server->ops->print_stats)
59995 server->ops->print_stats(m, tcon);
59996 }
59997diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
59998index 889b984..fcb8431 100644
59999--- a/fs/cifs/cifsfs.c
60000+++ b/fs/cifs/cifsfs.c
60001@@ -1092,7 +1092,7 @@ cifs_init_request_bufs(void)
60002 */
60003 cifs_req_cachep = kmem_cache_create("cifs_request",
60004 CIFSMaxBufSize + max_hdr_size, 0,
60005- SLAB_HWCACHE_ALIGN, NULL);
60006+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
60007 if (cifs_req_cachep == NULL)
60008 return -ENOMEM;
60009
60010@@ -1119,7 +1119,7 @@ cifs_init_request_bufs(void)
60011 efficient to alloc 1 per page off the slab compared to 17K (5page)
60012 alloc of large cifs buffers even when page debugging is on */
60013 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
60014- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
60015+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
60016 NULL);
60017 if (cifs_sm_req_cachep == NULL) {
60018 mempool_destroy(cifs_req_poolp);
60019@@ -1204,8 +1204,8 @@ init_cifs(void)
60020 atomic_set(&bufAllocCount, 0);
60021 atomic_set(&smBufAllocCount, 0);
60022 #ifdef CONFIG_CIFS_STATS2
60023- atomic_set(&totBufAllocCount, 0);
60024- atomic_set(&totSmBufAllocCount, 0);
60025+ atomic_set_unchecked(&totBufAllocCount, 0);
60026+ atomic_set_unchecked(&totSmBufAllocCount, 0);
60027 #endif /* CONFIG_CIFS_STATS2 */
60028
60029 atomic_set(&midCount, 0);
60030diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
60031index 25b8392..01e46dc 100644
60032--- a/fs/cifs/cifsglob.h
60033+++ b/fs/cifs/cifsglob.h
60034@@ -821,35 +821,35 @@ struct cifs_tcon {
60035 __u16 Flags; /* optional support bits */
60036 enum statusEnum tidStatus;
60037 #ifdef CONFIG_CIFS_STATS
60038- atomic_t num_smbs_sent;
60039+ atomic_unchecked_t num_smbs_sent;
60040 union {
60041 struct {
60042- atomic_t num_writes;
60043- atomic_t num_reads;
60044- atomic_t num_flushes;
60045- atomic_t num_oplock_brks;
60046- atomic_t num_opens;
60047- atomic_t num_closes;
60048- atomic_t num_deletes;
60049- atomic_t num_mkdirs;
60050- atomic_t num_posixopens;
60051- atomic_t num_posixmkdirs;
60052- atomic_t num_rmdirs;
60053- atomic_t num_renames;
60054- atomic_t num_t2renames;
60055- atomic_t num_ffirst;
60056- atomic_t num_fnext;
60057- atomic_t num_fclose;
60058- atomic_t num_hardlinks;
60059- atomic_t num_symlinks;
60060- atomic_t num_locks;
60061- atomic_t num_acl_get;
60062- atomic_t num_acl_set;
60063+ atomic_unchecked_t num_writes;
60064+ atomic_unchecked_t num_reads;
60065+ atomic_unchecked_t num_flushes;
60066+ atomic_unchecked_t num_oplock_brks;
60067+ atomic_unchecked_t num_opens;
60068+ atomic_unchecked_t num_closes;
60069+ atomic_unchecked_t num_deletes;
60070+ atomic_unchecked_t num_mkdirs;
60071+ atomic_unchecked_t num_posixopens;
60072+ atomic_unchecked_t num_posixmkdirs;
60073+ atomic_unchecked_t num_rmdirs;
60074+ atomic_unchecked_t num_renames;
60075+ atomic_unchecked_t num_t2renames;
60076+ atomic_unchecked_t num_ffirst;
60077+ atomic_unchecked_t num_fnext;
60078+ atomic_unchecked_t num_fclose;
60079+ atomic_unchecked_t num_hardlinks;
60080+ atomic_unchecked_t num_symlinks;
60081+ atomic_unchecked_t num_locks;
60082+ atomic_unchecked_t num_acl_get;
60083+ atomic_unchecked_t num_acl_set;
60084 } cifs_stats;
60085 #ifdef CONFIG_CIFS_SMB2
60086 struct {
60087- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60088- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60089+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
60090+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
60091 } smb2_stats;
60092 #endif /* CONFIG_CIFS_SMB2 */
60093 } stats;
60094@@ -1190,7 +1190,7 @@ convert_delimiter(char *path, char delim)
60095 }
60096
60097 #ifdef CONFIG_CIFS_STATS
60098-#define cifs_stats_inc atomic_inc
60099+#define cifs_stats_inc atomic_inc_unchecked
60100
60101 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
60102 unsigned int bytes)
60103@@ -1557,8 +1557,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
60104 /* Various Debug counters */
60105 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
60106 #ifdef CONFIG_CIFS_STATS2
60107-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
60108-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
60109+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
60110+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
60111 #endif
60112 GLOBAL_EXTERN atomic_t smBufAllocCount;
60113 GLOBAL_EXTERN atomic_t midCount;
60114diff --git a/fs/cifs/file.c b/fs/cifs/file.c
60115index 5f29354..359bc0d 100644
60116--- a/fs/cifs/file.c
60117+++ b/fs/cifs/file.c
60118@@ -2056,10 +2056,14 @@ static int cifs_writepages(struct address_space *mapping,
60119 index = mapping->writeback_index; /* Start from prev offset */
60120 end = -1;
60121 } else {
60122- index = wbc->range_start >> PAGE_CACHE_SHIFT;
60123- end = wbc->range_end >> PAGE_CACHE_SHIFT;
60124- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
60125+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
60126 range_whole = true;
60127+ index = 0;
60128+ end = ULONG_MAX;
60129+ } else {
60130+ index = wbc->range_start >> PAGE_CACHE_SHIFT;
60131+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
60132+ }
60133 scanned = true;
60134 }
60135 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
60136diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
60137index b7415d5..3984ec0 100644
60138--- a/fs/cifs/misc.c
60139+++ b/fs/cifs/misc.c
60140@@ -170,7 +170,7 @@ cifs_buf_get(void)
60141 memset(ret_buf, 0, buf_size + 3);
60142 atomic_inc(&bufAllocCount);
60143 #ifdef CONFIG_CIFS_STATS2
60144- atomic_inc(&totBufAllocCount);
60145+ atomic_inc_unchecked(&totBufAllocCount);
60146 #endif /* CONFIG_CIFS_STATS2 */
60147 }
60148
60149@@ -205,7 +205,7 @@ cifs_small_buf_get(void)
60150 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
60151 atomic_inc(&smBufAllocCount);
60152 #ifdef CONFIG_CIFS_STATS2
60153- atomic_inc(&totSmBufAllocCount);
60154+ atomic_inc_unchecked(&totSmBufAllocCount);
60155 #endif /* CONFIG_CIFS_STATS2 */
60156
60157 }
60158diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
60159index 52131d8..fd79e97 100644
60160--- a/fs/cifs/smb1ops.c
60161+++ b/fs/cifs/smb1ops.c
60162@@ -626,27 +626,27 @@ static void
60163 cifs_clear_stats(struct cifs_tcon *tcon)
60164 {
60165 #ifdef CONFIG_CIFS_STATS
60166- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
60167- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
60168- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
60169- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60170- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
60171- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
60172- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60173- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
60174- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
60175- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
60176- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
60177- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
60178- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
60179- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
60180- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
60181- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
60182- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
60183- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
60184- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
60185- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
60186- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
60187+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
60188+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
60189+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
60190+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
60191+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
60192+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
60193+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
60194+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
60195+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
60196+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
60197+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
60198+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
60199+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
60200+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
60201+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
60202+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
60203+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
60204+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
60205+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
60206+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
60207+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
60208 #endif
60209 }
60210
60211@@ -655,36 +655,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
60212 {
60213 #ifdef CONFIG_CIFS_STATS
60214 seq_printf(m, " Oplocks breaks: %d",
60215- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
60216+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
60217 seq_printf(m, "\nReads: %d Bytes: %llu",
60218- atomic_read(&tcon->stats.cifs_stats.num_reads),
60219+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
60220 (long long)(tcon->bytes_read));
60221 seq_printf(m, "\nWrites: %d Bytes: %llu",
60222- atomic_read(&tcon->stats.cifs_stats.num_writes),
60223+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
60224 (long long)(tcon->bytes_written));
60225 seq_printf(m, "\nFlushes: %d",
60226- atomic_read(&tcon->stats.cifs_stats.num_flushes));
60227+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
60228 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
60229- atomic_read(&tcon->stats.cifs_stats.num_locks),
60230- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
60231- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
60232+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
60233+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
60234+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
60235 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
60236- atomic_read(&tcon->stats.cifs_stats.num_opens),
60237- atomic_read(&tcon->stats.cifs_stats.num_closes),
60238- atomic_read(&tcon->stats.cifs_stats.num_deletes));
60239+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
60240+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
60241+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
60242 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
60243- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
60244- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
60245+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
60246+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
60247 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
60248- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
60249- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
60250+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
60251+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
60252 seq_printf(m, "\nRenames: %d T2 Renames %d",
60253- atomic_read(&tcon->stats.cifs_stats.num_renames),
60254- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
60255+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
60256+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
60257 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
60258- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
60259- atomic_read(&tcon->stats.cifs_stats.num_fnext),
60260- atomic_read(&tcon->stats.cifs_stats.num_fclose));
60261+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
60262+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
60263+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
60264 #endif
60265 }
60266
60267diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
60268index f522193..586121b 100644
60269--- a/fs/cifs/smb2ops.c
60270+++ b/fs/cifs/smb2ops.c
60271@@ -414,8 +414,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
60272 #ifdef CONFIG_CIFS_STATS
60273 int i;
60274 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
60275- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
60276- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
60277+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
60278+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
60279 }
60280 #endif
60281 }
60282@@ -455,65 +455,65 @@ static void
60283 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
60284 {
60285 #ifdef CONFIG_CIFS_STATS
60286- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
60287- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
60288+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
60289+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
60290 seq_printf(m, "\nNegotiates: %d sent %d failed",
60291- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
60292- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
60293+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
60294+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
60295 seq_printf(m, "\nSessionSetups: %d sent %d failed",
60296- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
60297- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
60298+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
60299+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
60300 seq_printf(m, "\nLogoffs: %d sent %d failed",
60301- atomic_read(&sent[SMB2_LOGOFF_HE]),
60302- atomic_read(&failed[SMB2_LOGOFF_HE]));
60303+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
60304+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
60305 seq_printf(m, "\nTreeConnects: %d sent %d failed",
60306- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
60307- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
60308+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
60309+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
60310 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
60311- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
60312- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
60313+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
60314+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
60315 seq_printf(m, "\nCreates: %d sent %d failed",
60316- atomic_read(&sent[SMB2_CREATE_HE]),
60317- atomic_read(&failed[SMB2_CREATE_HE]));
60318+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
60319+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
60320 seq_printf(m, "\nCloses: %d sent %d failed",
60321- atomic_read(&sent[SMB2_CLOSE_HE]),
60322- atomic_read(&failed[SMB2_CLOSE_HE]));
60323+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
60324+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
60325 seq_printf(m, "\nFlushes: %d sent %d failed",
60326- atomic_read(&sent[SMB2_FLUSH_HE]),
60327- atomic_read(&failed[SMB2_FLUSH_HE]));
60328+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
60329+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
60330 seq_printf(m, "\nReads: %d sent %d failed",
60331- atomic_read(&sent[SMB2_READ_HE]),
60332- atomic_read(&failed[SMB2_READ_HE]));
60333+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
60334+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
60335 seq_printf(m, "\nWrites: %d sent %d failed",
60336- atomic_read(&sent[SMB2_WRITE_HE]),
60337- atomic_read(&failed[SMB2_WRITE_HE]));
60338+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
60339+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
60340 seq_printf(m, "\nLocks: %d sent %d failed",
60341- atomic_read(&sent[SMB2_LOCK_HE]),
60342- atomic_read(&failed[SMB2_LOCK_HE]));
60343+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
60344+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
60345 seq_printf(m, "\nIOCTLs: %d sent %d failed",
60346- atomic_read(&sent[SMB2_IOCTL_HE]),
60347- atomic_read(&failed[SMB2_IOCTL_HE]));
60348+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
60349+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
60350 seq_printf(m, "\nCancels: %d sent %d failed",
60351- atomic_read(&sent[SMB2_CANCEL_HE]),
60352- atomic_read(&failed[SMB2_CANCEL_HE]));
60353+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
60354+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
60355 seq_printf(m, "\nEchos: %d sent %d failed",
60356- atomic_read(&sent[SMB2_ECHO_HE]),
60357- atomic_read(&failed[SMB2_ECHO_HE]));
60358+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
60359+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
60360 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
60361- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
60362- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
60363+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
60364+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
60365 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
60366- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
60367- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
60368+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
60369+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
60370 seq_printf(m, "\nQueryInfos: %d sent %d failed",
60371- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
60372- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
60373+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
60374+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
60375 seq_printf(m, "\nSetInfos: %d sent %d failed",
60376- atomic_read(&sent[SMB2_SET_INFO_HE]),
60377- atomic_read(&failed[SMB2_SET_INFO_HE]));
60378+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
60379+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
60380 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
60381- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
60382- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
60383+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
60384+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
60385 #endif
60386 }
60387
60388diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
60389index 74b3a66..0c709f3 100644
60390--- a/fs/cifs/smb2pdu.c
60391+++ b/fs/cifs/smb2pdu.c
60392@@ -2143,8 +2143,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
60393 default:
60394 cifs_dbg(VFS, "info level %u isn't supported\n",
60395 srch_inf->info_level);
60396- rc = -EINVAL;
60397- goto qdir_exit;
60398+ return -EINVAL;
60399 }
60400
60401 req->FileIndex = cpu_to_le32(index);
60402diff --git a/fs/coda/cache.c b/fs/coda/cache.c
60403index 278f8fd..e69c52d 100644
60404--- a/fs/coda/cache.c
60405+++ b/fs/coda/cache.c
60406@@ -24,7 +24,7 @@
60407 #include "coda_linux.h"
60408 #include "coda_cache.h"
60409
60410-static atomic_t permission_epoch = ATOMIC_INIT(0);
60411+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
60412
60413 /* replace or extend an acl cache hit */
60414 void coda_cache_enter(struct inode *inode, int mask)
60415@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
60416 struct coda_inode_info *cii = ITOC(inode);
60417
60418 spin_lock(&cii->c_lock);
60419- cii->c_cached_epoch = atomic_read(&permission_epoch);
60420+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
60421 if (!uid_eq(cii->c_uid, current_fsuid())) {
60422 cii->c_uid = current_fsuid();
60423 cii->c_cached_perm = mask;
60424@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
60425 {
60426 struct coda_inode_info *cii = ITOC(inode);
60427 spin_lock(&cii->c_lock);
60428- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
60429+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
60430 spin_unlock(&cii->c_lock);
60431 }
60432
60433 /* remove all acl caches */
60434 void coda_cache_clear_all(struct super_block *sb)
60435 {
60436- atomic_inc(&permission_epoch);
60437+ atomic_inc_unchecked(&permission_epoch);
60438 }
60439
60440
60441@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
60442 spin_lock(&cii->c_lock);
60443 hit = (mask & cii->c_cached_perm) == mask &&
60444 uid_eq(cii->c_uid, current_fsuid()) &&
60445- cii->c_cached_epoch == atomic_read(&permission_epoch);
60446+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
60447 spin_unlock(&cii->c_lock);
60448
60449 return hit;
60450diff --git a/fs/compat.c b/fs/compat.c
60451index 66d3d3c..9c10175 100644
60452--- a/fs/compat.c
60453+++ b/fs/compat.c
60454@@ -54,7 +54,7 @@
60455 #include <asm/ioctls.h>
60456 #include "internal.h"
60457
60458-int compat_log = 1;
60459+int compat_log = 0;
60460
60461 int compat_printk(const char *fmt, ...)
60462 {
60463@@ -512,7 +512,7 @@ COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
60464
60465 set_fs(KERNEL_DS);
60466 /* The __user pointer cast is valid because of the set_fs() */
60467- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
60468+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
60469 set_fs(oldfs);
60470 /* truncating is ok because it's a user address */
60471 if (!ret)
60472@@ -562,7 +562,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
60473 goto out;
60474
60475 ret = -EINVAL;
60476- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
60477+ if (nr_segs > UIO_MAXIOV)
60478 goto out;
60479 if (nr_segs > fast_segs) {
60480 ret = -ENOMEM;
60481@@ -850,6 +850,7 @@ struct compat_old_linux_dirent {
60482 struct compat_readdir_callback {
60483 struct dir_context ctx;
60484 struct compat_old_linux_dirent __user *dirent;
60485+ struct file * file;
60486 int result;
60487 };
60488
60489@@ -867,6 +868,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
60490 buf->result = -EOVERFLOW;
60491 return -EOVERFLOW;
60492 }
60493+
60494+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60495+ return 0;
60496+
60497 buf->result++;
60498 dirent = buf->dirent;
60499 if (!access_ok(VERIFY_WRITE, dirent,
60500@@ -898,6 +903,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
60501 if (!f.file)
60502 return -EBADF;
60503
60504+ buf.file = f.file;
60505 error = iterate_dir(f.file, &buf.ctx);
60506 if (buf.result)
60507 error = buf.result;
60508@@ -917,6 +923,7 @@ struct compat_getdents_callback {
60509 struct dir_context ctx;
60510 struct compat_linux_dirent __user *current_dir;
60511 struct compat_linux_dirent __user *previous;
60512+ struct file * file;
60513 int count;
60514 int error;
60515 };
60516@@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
60517 buf->error = -EOVERFLOW;
60518 return -EOVERFLOW;
60519 }
60520+
60521+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60522+ return 0;
60523+
60524 dirent = buf->previous;
60525 if (dirent) {
60526 if (__put_user(offset, &dirent->d_off))
60527@@ -983,6 +994,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
60528 if (!f.file)
60529 return -EBADF;
60530
60531+ buf.file = f.file;
60532 error = iterate_dir(f.file, &buf.ctx);
60533 if (error >= 0)
60534 error = buf.error;
60535@@ -1003,6 +1015,7 @@ struct compat_getdents_callback64 {
60536 struct dir_context ctx;
60537 struct linux_dirent64 __user *current_dir;
60538 struct linux_dirent64 __user *previous;
60539+ struct file * file;
60540 int count;
60541 int error;
60542 };
60543@@ -1019,6 +1032,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
60544 buf->error = -EINVAL; /* only used if we fail.. */
60545 if (reclen > buf->count)
60546 return -EINVAL;
60547+
60548+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
60549+ return 0;
60550+
60551 dirent = buf->previous;
60552
60553 if (dirent) {
60554@@ -1068,6 +1085,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
60555 if (!f.file)
60556 return -EBADF;
60557
60558+ buf.file = f.file;
60559 error = iterate_dir(f.file, &buf.ctx);
60560 if (error >= 0)
60561 error = buf.error;
60562diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
60563index 4d24d17..4f8c09e 100644
60564--- a/fs/compat_binfmt_elf.c
60565+++ b/fs/compat_binfmt_elf.c
60566@@ -30,11 +30,13 @@
60567 #undef elf_phdr
60568 #undef elf_shdr
60569 #undef elf_note
60570+#undef elf_dyn
60571 #undef elf_addr_t
60572 #define elfhdr elf32_hdr
60573 #define elf_phdr elf32_phdr
60574 #define elf_shdr elf32_shdr
60575 #define elf_note elf32_note
60576+#define elf_dyn Elf32_Dyn
60577 #define elf_addr_t Elf32_Addr
60578
60579 /*
60580diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
60581index afec645..9c65620 100644
60582--- a/fs/compat_ioctl.c
60583+++ b/fs/compat_ioctl.c
60584@@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
60585 return -EFAULT;
60586 if (__get_user(udata, &ss32->iomem_base))
60587 return -EFAULT;
60588- ss.iomem_base = compat_ptr(udata);
60589+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
60590 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
60591 __get_user(ss.port_high, &ss32->port_high))
60592 return -EFAULT;
60593@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
60594 for (i = 0; i < nmsgs; i++) {
60595 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
60596 return -EFAULT;
60597- if (get_user(datap, &umsgs[i].buf) ||
60598- put_user(compat_ptr(datap), &tmsgs[i].buf))
60599+ if (get_user(datap, (compat_caddr_t __user *)&umsgs[i].buf) ||
60600+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
60601 return -EFAULT;
60602 }
60603 return sys_ioctl(fd, cmd, (unsigned long)tdata);
60604@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
60605 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
60606 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
60607 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
60608- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
60609+ copy_in_user(p->l_pad, p32->l_pad, 4*sizeof(u32)))
60610 return -EFAULT;
60611
60612 return ioctl_preallocate(file, p);
60613@@ -1618,8 +1618,8 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
60614 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
60615 {
60616 unsigned int a, b;
60617- a = *(unsigned int *)p;
60618- b = *(unsigned int *)q;
60619+ a = *(const unsigned int *)p;
60620+ b = *(const unsigned int *)q;
60621 if (a > b)
60622 return 1;
60623 if (a < b)
60624diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
60625index 668dcab..daebcd6 100644
60626--- a/fs/configfs/dir.c
60627+++ b/fs/configfs/dir.c
60628@@ -1548,7 +1548,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60629 }
60630 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
60631 struct configfs_dirent *next;
60632- const char *name;
60633+ const unsigned char * name;
60634+ char d_name[sizeof(next->s_dentry->d_iname)];
60635 int len;
60636 struct inode *inode = NULL;
60637
60638@@ -1557,7 +1558,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
60639 continue;
60640
60641 name = configfs_get_name(next);
60642- len = strlen(name);
60643+ if (next->s_dentry && name == next->s_dentry->d_iname) {
60644+ len = next->s_dentry->d_name.len;
60645+ memcpy(d_name, name, len);
60646+ name = d_name;
60647+ } else
60648+ len = strlen(name);
60649
60650 /*
60651 * We'll have a dentry and an inode for
60652diff --git a/fs/coredump.c b/fs/coredump.c
60653index a93f7e6..d58bcbe 100644
60654--- a/fs/coredump.c
60655+++ b/fs/coredump.c
60656@@ -442,8 +442,8 @@ static void wait_for_dump_helpers(struct file *file)
60657 struct pipe_inode_info *pipe = file->private_data;
60658
60659 pipe_lock(pipe);
60660- pipe->readers++;
60661- pipe->writers--;
60662+ atomic_inc(&pipe->readers);
60663+ atomic_dec(&pipe->writers);
60664 wake_up_interruptible_sync(&pipe->wait);
60665 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60666 pipe_unlock(pipe);
60667@@ -452,11 +452,11 @@ static void wait_for_dump_helpers(struct file *file)
60668 * We actually want wait_event_freezable() but then we need
60669 * to clear TIF_SIGPENDING and improve dump_interrupted().
60670 */
60671- wait_event_interruptible(pipe->wait, pipe->readers == 1);
60672+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
60673
60674 pipe_lock(pipe);
60675- pipe->readers--;
60676- pipe->writers++;
60677+ atomic_dec(&pipe->readers);
60678+ atomic_inc(&pipe->writers);
60679 pipe_unlock(pipe);
60680 }
60681
60682@@ -503,7 +503,9 @@ void do_coredump(const siginfo_t *siginfo)
60683 struct files_struct *displaced;
60684 bool need_nonrelative = false;
60685 bool core_dumped = false;
60686- static atomic_t core_dump_count = ATOMIC_INIT(0);
60687+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
60688+ long signr = siginfo->si_signo;
60689+ int dumpable;
60690 struct coredump_params cprm = {
60691 .siginfo = siginfo,
60692 .regs = signal_pt_regs(),
60693@@ -516,12 +518,17 @@ void do_coredump(const siginfo_t *siginfo)
60694 .mm_flags = mm->flags,
60695 };
60696
60697- audit_core_dumps(siginfo->si_signo);
60698+ audit_core_dumps(signr);
60699+
60700+ dumpable = __get_dumpable(cprm.mm_flags);
60701+
60702+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
60703+ gr_handle_brute_attach(dumpable);
60704
60705 binfmt = mm->binfmt;
60706 if (!binfmt || !binfmt->core_dump)
60707 goto fail;
60708- if (!__get_dumpable(cprm.mm_flags))
60709+ if (!dumpable)
60710 goto fail;
60711
60712 cred = prepare_creds();
60713@@ -540,7 +547,7 @@ void do_coredump(const siginfo_t *siginfo)
60714 need_nonrelative = true;
60715 }
60716
60717- retval = coredump_wait(siginfo->si_signo, &core_state);
60718+ retval = coredump_wait(signr, &core_state);
60719 if (retval < 0)
60720 goto fail_creds;
60721
60722@@ -583,7 +590,7 @@ void do_coredump(const siginfo_t *siginfo)
60723 }
60724 cprm.limit = RLIM_INFINITY;
60725
60726- dump_count = atomic_inc_return(&core_dump_count);
60727+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
60728 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
60729 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
60730 task_tgid_vnr(current), current->comm);
60731@@ -615,6 +622,8 @@ void do_coredump(const siginfo_t *siginfo)
60732 } else {
60733 struct inode *inode;
60734
60735+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
60736+
60737 if (cprm.limit < binfmt->min_coredump)
60738 goto fail_unlock;
60739
60740@@ -673,7 +682,7 @@ close_fail:
60741 filp_close(cprm.file, NULL);
60742 fail_dropcount:
60743 if (ispipe)
60744- atomic_dec(&core_dump_count);
60745+ atomic_dec_unchecked(&core_dump_count);
60746 fail_unlock:
60747 kfree(cn.corename);
60748 coredump_finish(mm, core_dumped);
60749@@ -694,6 +703,8 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
60750 struct file *file = cprm->file;
60751 loff_t pos = file->f_pos;
60752 ssize_t n;
60753+
60754+ gr_learn_resource(current, RLIMIT_CORE, cprm->written + nr, 1);
60755 if (cprm->written + nr > cprm->limit)
60756 return 0;
60757 while (nr) {
60758diff --git a/fs/dcache.c b/fs/dcache.c
60759index 34b40be8..2003532 100644
60760--- a/fs/dcache.c
60761+++ b/fs/dcache.c
60762@@ -478,7 +478,7 @@ static void __dentry_kill(struct dentry *dentry)
60763 * dentry_iput drops the locks, at which point nobody (except
60764 * transient RCU lookups) can reach this dentry.
60765 */
60766- BUG_ON((int)dentry->d_lockref.count > 0);
60767+ BUG_ON((int)__lockref_read(&dentry->d_lockref) > 0);
60768 this_cpu_dec(nr_dentry);
60769 if (dentry->d_op && dentry->d_op->d_release)
60770 dentry->d_op->d_release(dentry);
60771@@ -531,7 +531,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry)
60772 struct dentry *parent = dentry->d_parent;
60773 if (IS_ROOT(dentry))
60774 return NULL;
60775- if (unlikely((int)dentry->d_lockref.count < 0))
60776+ if (unlikely((int)__lockref_read(&dentry->d_lockref) < 0))
60777 return NULL;
60778 if (likely(spin_trylock(&parent->d_lock)))
60779 return parent;
60780@@ -608,7 +608,7 @@ repeat:
60781 dentry->d_flags |= DCACHE_REFERENCED;
60782 dentry_lru_add(dentry);
60783
60784- dentry->d_lockref.count--;
60785+ __lockref_dec(&dentry->d_lockref);
60786 spin_unlock(&dentry->d_lock);
60787 return;
60788
60789@@ -663,7 +663,7 @@ int d_invalidate(struct dentry * dentry)
60790 * We also need to leave mountpoints alone,
60791 * directory or not.
60792 */
60793- if (dentry->d_lockref.count > 1 && dentry->d_inode) {
60794+ if (__lockref_read(&dentry->d_lockref) > 1 && dentry->d_inode) {
60795 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
60796 spin_unlock(&dentry->d_lock);
60797 return -EBUSY;
60798@@ -679,7 +679,7 @@ EXPORT_SYMBOL(d_invalidate);
60799 /* This must be called with d_lock held */
60800 static inline void __dget_dlock(struct dentry *dentry)
60801 {
60802- dentry->d_lockref.count++;
60803+ __lockref_inc(&dentry->d_lockref);
60804 }
60805
60806 static inline void __dget(struct dentry *dentry)
60807@@ -720,8 +720,8 @@ repeat:
60808 goto repeat;
60809 }
60810 rcu_read_unlock();
60811- BUG_ON(!ret->d_lockref.count);
60812- ret->d_lockref.count++;
60813+ BUG_ON(!__lockref_read(&ret->d_lockref));
60814+ __lockref_inc(&ret->d_lockref);
60815 spin_unlock(&ret->d_lock);
60816 return ret;
60817 }
60818@@ -798,7 +798,7 @@ restart:
60819 spin_lock(&inode->i_lock);
60820 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
60821 spin_lock(&dentry->d_lock);
60822- if (!dentry->d_lockref.count) {
60823+ if (!__lockref_read(&dentry->d_lockref)) {
60824 /*
60825 * inform the fs via d_prune that this dentry
60826 * is about to be unhashed and destroyed.
60827@@ -841,7 +841,7 @@ static void shrink_dentry_list(struct list_head *list)
60828 * We found an inuse dentry which was not removed from
60829 * the LRU because of laziness during lookup. Do not free it.
60830 */
60831- if ((int)dentry->d_lockref.count > 0) {
60832+ if ((int)__lockref_read(&dentry->d_lockref) > 0) {
60833 spin_unlock(&dentry->d_lock);
60834 if (parent)
60835 spin_unlock(&parent->d_lock);
60836@@ -879,8 +879,8 @@ static void shrink_dentry_list(struct list_head *list)
60837 dentry = parent;
60838 while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
60839 parent = lock_parent(dentry);
60840- if (dentry->d_lockref.count != 1) {
60841- dentry->d_lockref.count--;
60842+ if (__lockref_read(&dentry->d_lockref) != 1) {
60843+ __lockref_inc(&dentry->d_lockref);
60844 spin_unlock(&dentry->d_lock);
60845 if (parent)
60846 spin_unlock(&parent->d_lock);
60847@@ -920,7 +920,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
60848 * counts, just remove them from the LRU. Otherwise give them
60849 * another pass through the LRU.
60850 */
60851- if (dentry->d_lockref.count) {
60852+ if (__lockref_read(&dentry->d_lockref) > 0) {
60853 d_lru_isolate(dentry);
60854 spin_unlock(&dentry->d_lock);
60855 return LRU_REMOVED;
60856@@ -1149,6 +1149,7 @@ out_unlock:
60857 return;
60858
60859 rename_retry:
60860+ done_seqretry(&rename_lock, seq);
60861 if (!retry)
60862 return;
60863 seq = 1;
60864@@ -1255,7 +1256,7 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
60865 } else {
60866 if (dentry->d_flags & DCACHE_LRU_LIST)
60867 d_lru_del(dentry);
60868- if (!dentry->d_lockref.count) {
60869+ if (!__lockref_read(&dentry->d_lockref)) {
60870 d_shrink_add(dentry, &data->dispose);
60871 data->found++;
60872 }
60873@@ -1303,7 +1304,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60874 return D_WALK_CONTINUE;
60875
60876 /* root with refcount 1 is fine */
60877- if (dentry == _data && dentry->d_lockref.count == 1)
60878+ if (dentry == _data && __lockref_read(&dentry->d_lockref) == 1)
60879 return D_WALK_CONTINUE;
60880
60881 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
60882@@ -1312,7 +1313,7 @@ static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
60883 dentry->d_inode ?
60884 dentry->d_inode->i_ino : 0UL,
60885 dentry,
60886- dentry->d_lockref.count,
60887+ __lockref_read(&dentry->d_lockref),
60888 dentry->d_sb->s_type->name,
60889 dentry->d_sb->s_id);
60890 WARN_ON(1);
60891@@ -1438,7 +1439,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60892 */
60893 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
60894 if (name->len > DNAME_INLINE_LEN-1) {
60895- dname = kmalloc(name->len + 1, GFP_KERNEL);
60896+ dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
60897 if (!dname) {
60898 kmem_cache_free(dentry_cache, dentry);
60899 return NULL;
60900@@ -1456,7 +1457,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
60901 smp_wmb();
60902 dentry->d_name.name = dname;
60903
60904- dentry->d_lockref.count = 1;
60905+ __lockref_set(&dentry->d_lockref, 1);
60906 dentry->d_flags = 0;
60907 spin_lock_init(&dentry->d_lock);
60908 seqcount_init(&dentry->d_seq);
60909@@ -2196,7 +2197,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
60910 goto next;
60911 }
60912
60913- dentry->d_lockref.count++;
60914+ __lockref_inc(&dentry->d_lockref);
60915 found = dentry;
60916 spin_unlock(&dentry->d_lock);
60917 break;
60918@@ -2295,7 +2296,7 @@ again:
60919 spin_lock(&dentry->d_lock);
60920 inode = dentry->d_inode;
60921 isdir = S_ISDIR(inode->i_mode);
60922- if (dentry->d_lockref.count == 1) {
60923+ if (__lockref_read(&dentry->d_lockref) == 1) {
60924 if (!spin_trylock(&inode->i_lock)) {
60925 spin_unlock(&dentry->d_lock);
60926 cpu_relax();
60927@@ -3307,7 +3308,7 @@ static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
60928
60929 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
60930 dentry->d_flags |= DCACHE_GENOCIDE;
60931- dentry->d_lockref.count--;
60932+ __lockref_dec(&dentry->d_lockref);
60933 }
60934 }
60935 return D_WALK_CONTINUE;
60936@@ -3423,7 +3424,8 @@ void __init vfs_caches_init(unsigned long mempages)
60937 mempages -= reserve;
60938
60939 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
60940- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
60941+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
60942+ SLAB_NO_SANITIZE, NULL);
60943
60944 dcache_init();
60945 inode_init();
60946diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
60947index 1e3b99d..6512101 100644
60948--- a/fs/debugfs/inode.c
60949+++ b/fs/debugfs/inode.c
60950@@ -416,7 +416,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
60951 */
60952 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
60953 {
60954+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
60955+ return __create_file(name, S_IFDIR | S_IRWXU,
60956+#else
60957 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
60958+#endif
60959 parent, NULL, NULL);
60960 }
60961 EXPORT_SYMBOL_GPL(debugfs_create_dir);
60962diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
60963index 57ee4c5..ecb13b0 100644
60964--- a/fs/ecryptfs/inode.c
60965+++ b/fs/ecryptfs/inode.c
60966@@ -673,7 +673,7 @@ static char *ecryptfs_readlink_lower(struct dentry *dentry, size_t *bufsiz)
60967 old_fs = get_fs();
60968 set_fs(get_ds());
60969 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
60970- (char __user *)lower_buf,
60971+ (char __force_user *)lower_buf,
60972 PATH_MAX);
60973 set_fs(old_fs);
60974 if (rc < 0)
60975diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
60976index e4141f2..d8263e8 100644
60977--- a/fs/ecryptfs/miscdev.c
60978+++ b/fs/ecryptfs/miscdev.c
60979@@ -304,7 +304,7 @@ check_list:
60980 goto out_unlock_msg_ctx;
60981 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
60982 if (msg_ctx->msg) {
60983- if (copy_to_user(&buf[i], packet_length, packet_length_size))
60984+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
60985 goto out_unlock_msg_ctx;
60986 i += packet_length_size;
60987 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
60988diff --git a/fs/exec.c b/fs/exec.c
60989index a2b42a9..1e924b3 100644
60990--- a/fs/exec.c
60991+++ b/fs/exec.c
60992@@ -56,8 +56,20 @@
60993 #include <linux/pipe_fs_i.h>
60994 #include <linux/oom.h>
60995 #include <linux/compat.h>
60996+#include <linux/random.h>
60997+#include <linux/seq_file.h>
60998+#include <linux/coredump.h>
60999+#include <linux/mman.h>
61000+
61001+#ifdef CONFIG_PAX_REFCOUNT
61002+#include <linux/kallsyms.h>
61003+#include <linux/kdebug.h>
61004+#endif
61005+
61006+#include <trace/events/fs.h>
61007
61008 #include <asm/uaccess.h>
61009+#include <asm/sections.h>
61010 #include <asm/mmu_context.h>
61011 #include <asm/tlb.h>
61012
61013@@ -66,19 +78,34 @@
61014
61015 #include <trace/events/sched.h>
61016
61017+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
61018+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
61019+{
61020+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
61021+}
61022+#endif
61023+
61024+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
61025+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
61026+EXPORT_SYMBOL(pax_set_initial_flags_func);
61027+#endif
61028+
61029 int suid_dumpable = 0;
61030
61031 static LIST_HEAD(formats);
61032 static DEFINE_RWLOCK(binfmt_lock);
61033
61034+extern int gr_process_kernel_exec_ban(void);
61035+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
61036+
61037 void __register_binfmt(struct linux_binfmt * fmt, int insert)
61038 {
61039 BUG_ON(!fmt);
61040 if (WARN_ON(!fmt->load_binary))
61041 return;
61042 write_lock(&binfmt_lock);
61043- insert ? list_add(&fmt->lh, &formats) :
61044- list_add_tail(&fmt->lh, &formats);
61045+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
61046+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
61047 write_unlock(&binfmt_lock);
61048 }
61049
61050@@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
61051 void unregister_binfmt(struct linux_binfmt * fmt)
61052 {
61053 write_lock(&binfmt_lock);
61054- list_del(&fmt->lh);
61055+ pax_list_del((struct list_head *)&fmt->lh);
61056 write_unlock(&binfmt_lock);
61057 }
61058
61059@@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61060 int write)
61061 {
61062 struct page *page;
61063- int ret;
61064
61065-#ifdef CONFIG_STACK_GROWSUP
61066- if (write) {
61067- ret = expand_downwards(bprm->vma, pos);
61068- if (ret < 0)
61069- return NULL;
61070- }
61071-#endif
61072- ret = get_user_pages(current, bprm->mm, pos,
61073- 1, write, 1, &page, NULL);
61074- if (ret <= 0)
61075+ if (0 > expand_downwards(bprm->vma, pos))
61076+ return NULL;
61077+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
61078 return NULL;
61079
61080 if (write) {
61081@@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
61082 if (size <= ARG_MAX)
61083 return page;
61084
61085+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61086+ // only allow 512KB for argv+env on suid/sgid binaries
61087+ // to prevent easy ASLR exhaustion
61088+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
61089+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
61090+ (size > (512 * 1024))) {
61091+ put_page(page);
61092+ return NULL;
61093+ }
61094+#endif
61095+
61096 /*
61097 * Limit to 1/4-th the stack size for the argv+env strings.
61098 * This ensures that:
61099@@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61100 vma->vm_end = STACK_TOP_MAX;
61101 vma->vm_start = vma->vm_end - PAGE_SIZE;
61102 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
61103+
61104+#ifdef CONFIG_PAX_SEGMEXEC
61105+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
61106+#endif
61107+
61108 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
61109 INIT_LIST_HEAD(&vma->anon_vma_chain);
61110
61111@@ -279,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
61112 mm->stack_vm = mm->total_vm = 1;
61113 up_write(&mm->mmap_sem);
61114 bprm->p = vma->vm_end - sizeof(void *);
61115+
61116+#ifdef CONFIG_PAX_RANDUSTACK
61117+ if (randomize_va_space)
61118+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
61119+#endif
61120+
61121 return 0;
61122 err:
61123 up_write(&mm->mmap_sem);
61124@@ -395,7 +436,7 @@ struct user_arg_ptr {
61125 } ptr;
61126 };
61127
61128-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61129+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61130 {
61131 const char __user *native;
61132
61133@@ -404,14 +445,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
61134 compat_uptr_t compat;
61135
61136 if (get_user(compat, argv.ptr.compat + nr))
61137- return ERR_PTR(-EFAULT);
61138+ return (const char __force_user *)ERR_PTR(-EFAULT);
61139
61140 return compat_ptr(compat);
61141 }
61142 #endif
61143
61144 if (get_user(native, argv.ptr.native + nr))
61145- return ERR_PTR(-EFAULT);
61146+ return (const char __force_user *)ERR_PTR(-EFAULT);
61147
61148 return native;
61149 }
61150@@ -430,7 +471,7 @@ static int count(struct user_arg_ptr argv, int max)
61151 if (!p)
61152 break;
61153
61154- if (IS_ERR(p))
61155+ if (IS_ERR((const char __force_kernel *)p))
61156 return -EFAULT;
61157
61158 if (i >= max)
61159@@ -465,7 +506,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
61160
61161 ret = -EFAULT;
61162 str = get_user_arg_ptr(argv, argc);
61163- if (IS_ERR(str))
61164+ if (IS_ERR((const char __force_kernel *)str))
61165 goto out;
61166
61167 len = strnlen_user(str, MAX_ARG_STRLEN);
61168@@ -547,7 +588,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
61169 int r;
61170 mm_segment_t oldfs = get_fs();
61171 struct user_arg_ptr argv = {
61172- .ptr.native = (const char __user *const __user *)__argv,
61173+ .ptr.native = (const char __user * const __force_user *)__argv,
61174 };
61175
61176 set_fs(KERNEL_DS);
61177@@ -582,7 +623,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61178 unsigned long new_end = old_end - shift;
61179 struct mmu_gather tlb;
61180
61181- BUG_ON(new_start > new_end);
61182+ if (new_start >= new_end || new_start < mmap_min_addr)
61183+ return -ENOMEM;
61184
61185 /*
61186 * ensure there are no vmas between where we want to go
61187@@ -591,6 +633,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
61188 if (vma != find_vma(mm, new_start))
61189 return -EFAULT;
61190
61191+#ifdef CONFIG_PAX_SEGMEXEC
61192+ BUG_ON(pax_find_mirror_vma(vma));
61193+#endif
61194+
61195 /*
61196 * cover the whole range: [new_start, old_end)
61197 */
61198@@ -671,10 +717,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61199 stack_top = arch_align_stack(stack_top);
61200 stack_top = PAGE_ALIGN(stack_top);
61201
61202- if (unlikely(stack_top < mmap_min_addr) ||
61203- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
61204- return -ENOMEM;
61205-
61206 stack_shift = vma->vm_end - stack_top;
61207
61208 bprm->p -= stack_shift;
61209@@ -686,8 +728,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
61210 bprm->exec -= stack_shift;
61211
61212 down_write(&mm->mmap_sem);
61213+
61214+ /* Move stack pages down in memory. */
61215+ if (stack_shift) {
61216+ ret = shift_arg_pages(vma, stack_shift);
61217+ if (ret)
61218+ goto out_unlock;
61219+ }
61220+
61221 vm_flags = VM_STACK_FLAGS;
61222
61223+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61224+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
61225+ vm_flags &= ~VM_EXEC;
61226+
61227+#ifdef CONFIG_PAX_MPROTECT
61228+ if (mm->pax_flags & MF_PAX_MPROTECT)
61229+ vm_flags &= ~VM_MAYEXEC;
61230+#endif
61231+
61232+ }
61233+#endif
61234+
61235 /*
61236 * Adjust stack execute permissions; explicitly enable for
61237 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
61238@@ -706,13 +768,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
61239 goto out_unlock;
61240 BUG_ON(prev != vma);
61241
61242- /* Move stack pages down in memory. */
61243- if (stack_shift) {
61244- ret = shift_arg_pages(vma, stack_shift);
61245- if (ret)
61246- goto out_unlock;
61247- }
61248-
61249 /* mprotect_fixup is overkill to remove the temporary stack flags */
61250 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
61251
61252@@ -736,6 +791,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
61253 #endif
61254 current->mm->start_stack = bprm->p;
61255 ret = expand_stack(vma, stack_base);
61256+
61257+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
61258+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
61259+ unsigned long size;
61260+ vm_flags_t vm_flags;
61261+
61262+ size = STACK_TOP - vma->vm_end;
61263+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
61264+
61265+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
61266+
61267+#ifdef CONFIG_X86
61268+ if (!ret) {
61269+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
61270+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
61271+ }
61272+#endif
61273+
61274+ }
61275+#endif
61276+
61277 if (ret)
61278 ret = -EFAULT;
61279
61280@@ -771,6 +847,8 @@ static struct file *do_open_exec(struct filename *name)
61281
61282 fsnotify_open(file);
61283
61284+ trace_open_exec(name->name);
61285+
61286 err = deny_write_access(file);
61287 if (err)
61288 goto exit;
61289@@ -800,7 +878,7 @@ int kernel_read(struct file *file, loff_t offset,
61290 old_fs = get_fs();
61291 set_fs(get_ds());
61292 /* The cast to a user pointer is valid due to the set_fs() */
61293- result = vfs_read(file, (void __user *)addr, count, &pos);
61294+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
61295 set_fs(old_fs);
61296 return result;
61297 }
61298@@ -845,6 +923,7 @@ static int exec_mmap(struct mm_struct *mm)
61299 tsk->mm = mm;
61300 tsk->active_mm = mm;
61301 activate_mm(active_mm, mm);
61302+ populate_stack();
61303 tsk->mm->vmacache_seqnum = 0;
61304 vmacache_flush(tsk);
61305 task_unlock(tsk);
61306@@ -1243,7 +1322,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
61307 }
61308 rcu_read_unlock();
61309
61310- if (p->fs->users > n_fs)
61311+ if (atomic_read(&p->fs->users) > n_fs)
61312 bprm->unsafe |= LSM_UNSAFE_SHARE;
61313 else
61314 p->fs->in_exec = 1;
61315@@ -1419,6 +1498,31 @@ static int exec_binprm(struct linux_binprm *bprm)
61316 return ret;
61317 }
61318
61319+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61320+static DEFINE_PER_CPU(u64, exec_counter);
61321+static int __init init_exec_counters(void)
61322+{
61323+ unsigned int cpu;
61324+
61325+ for_each_possible_cpu(cpu) {
61326+ per_cpu(exec_counter, cpu) = (u64)cpu;
61327+ }
61328+
61329+ return 0;
61330+}
61331+early_initcall(init_exec_counters);
61332+static inline void increment_exec_counter(void)
61333+{
61334+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
61335+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
61336+}
61337+#else
61338+static inline void increment_exec_counter(void) {}
61339+#endif
61340+
61341+extern void gr_handle_exec_args(struct linux_binprm *bprm,
61342+ struct user_arg_ptr argv);
61343+
61344 /*
61345 * sys_execve() executes a new program.
61346 */
61347@@ -1426,6 +1530,11 @@ static int do_execve_common(struct filename *filename,
61348 struct user_arg_ptr argv,
61349 struct user_arg_ptr envp)
61350 {
61351+#ifdef CONFIG_GRKERNSEC
61352+ struct file *old_exec_file;
61353+ struct acl_subject_label *old_acl;
61354+ struct rlimit old_rlim[RLIM_NLIMITS];
61355+#endif
61356 struct linux_binprm *bprm;
61357 struct file *file;
61358 struct files_struct *displaced;
61359@@ -1434,6 +1543,8 @@ static int do_execve_common(struct filename *filename,
61360 if (IS_ERR(filename))
61361 return PTR_ERR(filename);
61362
61363+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
61364+
61365 /*
61366 * We move the actual failure in case of RLIMIT_NPROC excess from
61367 * set*uid() to execve() because too many poorly written programs
61368@@ -1471,11 +1582,21 @@ static int do_execve_common(struct filename *filename,
61369 if (IS_ERR(file))
61370 goto out_unmark;
61371
61372+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
61373+ retval = -EPERM;
61374+ goto out_unmark;
61375+ }
61376+
61377 sched_exec();
61378
61379 bprm->file = file;
61380 bprm->filename = bprm->interp = filename->name;
61381
61382+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
61383+ retval = -EACCES;
61384+ goto out_unmark;
61385+ }
61386+
61387 retval = bprm_mm_init(bprm);
61388 if (retval)
61389 goto out_unmark;
61390@@ -1492,24 +1613,70 @@ static int do_execve_common(struct filename *filename,
61391 if (retval < 0)
61392 goto out;
61393
61394+#ifdef CONFIG_GRKERNSEC
61395+ old_acl = current->acl;
61396+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
61397+ old_exec_file = current->exec_file;
61398+ get_file(file);
61399+ current->exec_file = file;
61400+#endif
61401+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61402+ /* limit suid stack to 8MB
61403+ * we saved the old limits above and will restore them if this exec fails
61404+ */
61405+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
61406+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
61407+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
61408+#endif
61409+
61410+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
61411+ retval = -EPERM;
61412+ goto out_fail;
61413+ }
61414+
61415+ if (!gr_tpe_allow(file)) {
61416+ retval = -EACCES;
61417+ goto out_fail;
61418+ }
61419+
61420+ if (gr_check_crash_exec(file)) {
61421+ retval = -EACCES;
61422+ goto out_fail;
61423+ }
61424+
61425+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
61426+ bprm->unsafe);
61427+ if (retval < 0)
61428+ goto out_fail;
61429+
61430 retval = copy_strings_kernel(1, &bprm->filename, bprm);
61431 if (retval < 0)
61432- goto out;
61433+ goto out_fail;
61434
61435 bprm->exec = bprm->p;
61436 retval = copy_strings(bprm->envc, envp, bprm);
61437 if (retval < 0)
61438- goto out;
61439+ goto out_fail;
61440
61441 retval = copy_strings(bprm->argc, argv, bprm);
61442 if (retval < 0)
61443- goto out;
61444+ goto out_fail;
61445+
61446+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
61447+
61448+ gr_handle_exec_args(bprm, argv);
61449
61450 retval = exec_binprm(bprm);
61451 if (retval < 0)
61452- goto out;
61453+ goto out_fail;
61454+#ifdef CONFIG_GRKERNSEC
61455+ if (old_exec_file)
61456+ fput(old_exec_file);
61457+#endif
61458
61459 /* execve succeeded */
61460+
61461+ increment_exec_counter();
61462 current->fs->in_exec = 0;
61463 current->in_execve = 0;
61464 acct_update_integrals(current);
61465@@ -1520,6 +1687,14 @@ static int do_execve_common(struct filename *filename,
61466 put_files_struct(displaced);
61467 return retval;
61468
61469+out_fail:
61470+#ifdef CONFIG_GRKERNSEC
61471+ current->acl = old_acl;
61472+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
61473+ fput(current->exec_file);
61474+ current->exec_file = old_exec_file;
61475+#endif
61476+
61477 out:
61478 if (bprm->mm) {
61479 acct_arg_size(bprm, 0);
61480@@ -1611,3 +1786,312 @@ COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
61481 return compat_do_execve(getname(filename), argv, envp);
61482 }
61483 #endif
61484+
61485+int pax_check_flags(unsigned long *flags)
61486+{
61487+ int retval = 0;
61488+
61489+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
61490+ if (*flags & MF_PAX_SEGMEXEC)
61491+ {
61492+ *flags &= ~MF_PAX_SEGMEXEC;
61493+ retval = -EINVAL;
61494+ }
61495+#endif
61496+
61497+ if ((*flags & MF_PAX_PAGEEXEC)
61498+
61499+#ifdef CONFIG_PAX_PAGEEXEC
61500+ && (*flags & MF_PAX_SEGMEXEC)
61501+#endif
61502+
61503+ )
61504+ {
61505+ *flags &= ~MF_PAX_PAGEEXEC;
61506+ retval = -EINVAL;
61507+ }
61508+
61509+ if ((*flags & MF_PAX_MPROTECT)
61510+
61511+#ifdef CONFIG_PAX_MPROTECT
61512+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61513+#endif
61514+
61515+ )
61516+ {
61517+ *flags &= ~MF_PAX_MPROTECT;
61518+ retval = -EINVAL;
61519+ }
61520+
61521+ if ((*flags & MF_PAX_EMUTRAMP)
61522+
61523+#ifdef CONFIG_PAX_EMUTRAMP
61524+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
61525+#endif
61526+
61527+ )
61528+ {
61529+ *flags &= ~MF_PAX_EMUTRAMP;
61530+ retval = -EINVAL;
61531+ }
61532+
61533+ return retval;
61534+}
61535+
61536+EXPORT_SYMBOL(pax_check_flags);
61537+
61538+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
61539+char *pax_get_path(const struct path *path, char *buf, int buflen)
61540+{
61541+ char *pathname = d_path(path, buf, buflen);
61542+
61543+ if (IS_ERR(pathname))
61544+ goto toolong;
61545+
61546+ pathname = mangle_path(buf, pathname, "\t\n\\");
61547+ if (!pathname)
61548+ goto toolong;
61549+
61550+ *pathname = 0;
61551+ return buf;
61552+
61553+toolong:
61554+ return "<path too long>";
61555+}
61556+EXPORT_SYMBOL(pax_get_path);
61557+
61558+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
61559+{
61560+ struct task_struct *tsk = current;
61561+ struct mm_struct *mm = current->mm;
61562+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
61563+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
61564+ char *path_exec = NULL;
61565+ char *path_fault = NULL;
61566+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
61567+ siginfo_t info = { };
61568+
61569+ if (buffer_exec && buffer_fault) {
61570+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
61571+
61572+ down_read(&mm->mmap_sem);
61573+ vma = mm->mmap;
61574+ while (vma && (!vma_exec || !vma_fault)) {
61575+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
61576+ vma_exec = vma;
61577+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
61578+ vma_fault = vma;
61579+ vma = vma->vm_next;
61580+ }
61581+ if (vma_exec)
61582+ path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
61583+ if (vma_fault) {
61584+ start = vma_fault->vm_start;
61585+ end = vma_fault->vm_end;
61586+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
61587+ if (vma_fault->vm_file)
61588+ path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
61589+ else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
61590+ path_fault = "<heap>";
61591+ else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
61592+ path_fault = "<stack>";
61593+ else
61594+ path_fault = "<anonymous mapping>";
61595+ }
61596+ up_read(&mm->mmap_sem);
61597+ }
61598+ if (tsk->signal->curr_ip)
61599+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
61600+ else
61601+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
61602+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
61603+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
61604+ free_page((unsigned long)buffer_exec);
61605+ free_page((unsigned long)buffer_fault);
61606+ pax_report_insns(regs, pc, sp);
61607+ info.si_signo = SIGKILL;
61608+ info.si_errno = 0;
61609+ info.si_code = SI_KERNEL;
61610+ info.si_pid = 0;
61611+ info.si_uid = 0;
61612+ do_coredump(&info);
61613+}
61614+#endif
61615+
61616+#ifdef CONFIG_PAX_REFCOUNT
61617+void pax_report_refcount_overflow(struct pt_regs *regs)
61618+{
61619+ if (current->signal->curr_ip)
61620+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
61621+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
61622+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61623+ else
61624+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
61625+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
61626+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
61627+ preempt_disable();
61628+ show_regs(regs);
61629+ preempt_enable();
61630+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
61631+}
61632+#endif
61633+
61634+#ifdef CONFIG_PAX_USERCOPY
61635+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
61636+static noinline int check_stack_object(const void *obj, unsigned long len)
61637+{
61638+ const void * const stack = task_stack_page(current);
61639+ const void * const stackend = stack + THREAD_SIZE;
61640+
61641+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61642+ const void *frame = NULL;
61643+ const void *oldframe;
61644+#endif
61645+
61646+ if (obj + len < obj)
61647+ return -1;
61648+
61649+ if (obj + len <= stack || stackend <= obj)
61650+ return 0;
61651+
61652+ if (obj < stack || stackend < obj + len)
61653+ return -1;
61654+
61655+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
61656+ oldframe = __builtin_frame_address(1);
61657+ if (oldframe)
61658+ frame = __builtin_frame_address(2);
61659+ /*
61660+ low ----------------------------------------------> high
61661+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
61662+ ^----------------^
61663+ allow copies only within here
61664+ */
61665+ while (stack <= frame && frame < stackend) {
61666+ /* if obj + len extends past the last frame, this
61667+ check won't pass and the next frame will be 0,
61668+ causing us to bail out and correctly report
61669+ the copy as invalid
61670+ */
61671+ if (obj + len <= frame)
61672+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
61673+ oldframe = frame;
61674+ frame = *(const void * const *)frame;
61675+ }
61676+ return -1;
61677+#else
61678+ return 1;
61679+#endif
61680+}
61681+
61682+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
61683+{
61684+ if (current->signal->curr_ip)
61685+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61686+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61687+ else
61688+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
61689+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
61690+ dump_stack();
61691+ gr_handle_kernel_exploit();
61692+ do_group_exit(SIGKILL);
61693+}
61694+#endif
61695+
61696+#ifdef CONFIG_PAX_USERCOPY
61697+
61698+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
61699+{
61700+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
61701+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
61702+#ifdef CONFIG_MODULES
61703+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
61704+#else
61705+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
61706+#endif
61707+
61708+#else
61709+ unsigned long textlow = (unsigned long)_stext;
61710+ unsigned long texthigh = (unsigned long)_etext;
61711+
61712+#ifdef CONFIG_X86_64
61713+ /* check against linear mapping as well */
61714+ if (high > (unsigned long)__va(__pa(textlow)) &&
61715+ low < (unsigned long)__va(__pa(texthigh)))
61716+ return true;
61717+#endif
61718+
61719+#endif
61720+
61721+ if (high <= textlow || low >= texthigh)
61722+ return false;
61723+ else
61724+ return true;
61725+}
61726+#endif
61727+
61728+void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size)
61729+{
61730+#ifdef CONFIG_PAX_USERCOPY
61731+ const char *type;
61732+#endif
61733+
61734+#ifndef CONFIG_STACK_GROWSUP
61735+ unsigned long stackstart = (unsigned long)task_stack_page(current);
61736+ unsigned long currentsp = (unsigned long)&stackstart;
61737+ if (unlikely((currentsp < stackstart + 512 ||
61738+ currentsp >= stackstart + THREAD_SIZE) && !in_interrupt()))
61739+ BUG();
61740+#endif
61741+
61742+#ifndef CONFIG_PAX_USERCOPY_DEBUG
61743+ if (const_size)
61744+ return;
61745+#endif
61746+
61747+#ifdef CONFIG_PAX_USERCOPY
61748+ if (!n)
61749+ return;
61750+
61751+ type = check_heap_object(ptr, n);
61752+ if (!type) {
61753+ int ret = check_stack_object(ptr, n);
61754+ if (ret == 1 || ret == 2)
61755+ return;
61756+ if (ret == 0) {
61757+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
61758+ type = "<kernel text>";
61759+ else
61760+ return;
61761+ } else
61762+ type = "<process stack>";
61763+ }
61764+
61765+ pax_report_usercopy(ptr, n, to_user, type);
61766+#endif
61767+
61768+}
61769+EXPORT_SYMBOL(__check_object_size);
61770+
61771+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
61772+void pax_track_stack(void)
61773+{
61774+ unsigned long sp = (unsigned long)&sp;
61775+ if (sp < current_thread_info()->lowest_stack &&
61776+ sp > (unsigned long)task_stack_page(current))
61777+ current_thread_info()->lowest_stack = sp;
61778+ if (unlikely((sp & ~(THREAD_SIZE - 1)) < (THREAD_SIZE/16)))
61779+ BUG();
61780+}
61781+EXPORT_SYMBOL(pax_track_stack);
61782+#endif
61783+
61784+#ifdef CONFIG_PAX_SIZE_OVERFLOW
61785+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
61786+{
61787+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
61788+ dump_stack();
61789+ do_group_exit(SIGKILL);
61790+}
61791+EXPORT_SYMBOL(report_size_overflow);
61792+#endif
61793diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
61794index 9f9992b..8b59411 100644
61795--- a/fs/ext2/balloc.c
61796+++ b/fs/ext2/balloc.c
61797@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
61798
61799 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61800 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61801- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61802+ if (free_blocks < root_blocks + 1 &&
61803 !uid_eq(sbi->s_resuid, current_fsuid()) &&
61804 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61805- !in_group_p (sbi->s_resgid))) {
61806+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61807 return 0;
61808 }
61809 return 1;
61810diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
61811index 9142614..97484fa 100644
61812--- a/fs/ext2/xattr.c
61813+++ b/fs/ext2/xattr.c
61814@@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
61815 struct buffer_head *bh = NULL;
61816 struct ext2_xattr_entry *entry;
61817 char *end;
61818- size_t rest = buffer_size;
61819+ size_t rest = buffer_size, total_size = 0;
61820 int error;
61821
61822 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
61823@@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
61824 buffer += size;
61825 }
61826 rest -= size;
61827+ total_size += size;
61828 }
61829 }
61830- error = buffer_size - rest; /* total size */
61831+ error = total_size;
61832
61833 cleanup:
61834 brelse(bh);
61835diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
61836index 158b5d4..2432610 100644
61837--- a/fs/ext3/balloc.c
61838+++ b/fs/ext3/balloc.c
61839@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
61840
61841 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
61842 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
61843- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
61844+ if (free_blocks < root_blocks + 1 &&
61845 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
61846 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
61847- !in_group_p (sbi->s_resgid))) {
61848+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
61849 return 0;
61850 }
61851 return 1;
61852diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
61853index c6874be..f8a6ae8 100644
61854--- a/fs/ext3/xattr.c
61855+++ b/fs/ext3/xattr.c
61856@@ -330,7 +330,7 @@ static int
61857 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61858 char *buffer, size_t buffer_size)
61859 {
61860- size_t rest = buffer_size;
61861+ size_t rest = buffer_size, total_size = 0;
61862
61863 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
61864 const struct xattr_handler *handler =
61865@@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
61866 buffer += size;
61867 }
61868 rest -= size;
61869+ total_size += size;
61870 }
61871 }
61872- return buffer_size - rest;
61873+ return total_size;
61874 }
61875
61876 static int
61877diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
61878index e069155..b825b08 100644
61879--- a/fs/ext4/balloc.c
61880+++ b/fs/ext4/balloc.c
61881@@ -557,8 +557,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
61882 /* Hm, nope. Are (enough) root reserved clusters available? */
61883 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
61884 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
61885- capable(CAP_SYS_RESOURCE) ||
61886- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
61887+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
61888+ capable_nolog(CAP_SYS_RESOURCE)) {
61889
61890 if (free_clusters >= (nclusters + dirty_clusters +
61891 resv_clusters))
61892diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
61893index 96ac9d3..1c30e7e6 100644
61894--- a/fs/ext4/ext4.h
61895+++ b/fs/ext4/ext4.h
61896@@ -1275,19 +1275,19 @@ struct ext4_sb_info {
61897 unsigned long s_mb_last_start;
61898
61899 /* stats for buddy allocator */
61900- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
61901- atomic_t s_bal_success; /* we found long enough chunks */
61902- atomic_t s_bal_allocated; /* in blocks */
61903- atomic_t s_bal_ex_scanned; /* total extents scanned */
61904- atomic_t s_bal_goals; /* goal hits */
61905- atomic_t s_bal_breaks; /* too long searches */
61906- atomic_t s_bal_2orders; /* 2^order hits */
61907+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
61908+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
61909+ atomic_unchecked_t s_bal_allocated; /* in blocks */
61910+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
61911+ atomic_unchecked_t s_bal_goals; /* goal hits */
61912+ atomic_unchecked_t s_bal_breaks; /* too long searches */
61913+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
61914 spinlock_t s_bal_lock;
61915 unsigned long s_mb_buddies_generated;
61916 unsigned long long s_mb_generation_time;
61917- atomic_t s_mb_lost_chunks;
61918- atomic_t s_mb_preallocated;
61919- atomic_t s_mb_discarded;
61920+ atomic_unchecked_t s_mb_lost_chunks;
61921+ atomic_unchecked_t s_mb_preallocated;
61922+ atomic_unchecked_t s_mb_discarded;
61923 atomic_t s_lock_busy;
61924
61925 /* locality groups */
61926diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
61927index 8b0f9ef..cb9f620 100644
61928--- a/fs/ext4/mballoc.c
61929+++ b/fs/ext4/mballoc.c
61930@@ -1901,7 +1901,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
61931 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
61932
61933 if (EXT4_SB(sb)->s_mb_stats)
61934- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
61935+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
61936
61937 break;
61938 }
61939@@ -2211,7 +2211,7 @@ repeat:
61940 ac->ac_status = AC_STATUS_CONTINUE;
61941 ac->ac_flags |= EXT4_MB_HINT_FIRST;
61942 cr = 3;
61943- atomic_inc(&sbi->s_mb_lost_chunks);
61944+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
61945 goto repeat;
61946 }
61947 }
61948@@ -2717,25 +2717,25 @@ int ext4_mb_release(struct super_block *sb)
61949 if (sbi->s_mb_stats) {
61950 ext4_msg(sb, KERN_INFO,
61951 "mballoc: %u blocks %u reqs (%u success)",
61952- atomic_read(&sbi->s_bal_allocated),
61953- atomic_read(&sbi->s_bal_reqs),
61954- atomic_read(&sbi->s_bal_success));
61955+ atomic_read_unchecked(&sbi->s_bal_allocated),
61956+ atomic_read_unchecked(&sbi->s_bal_reqs),
61957+ atomic_read_unchecked(&sbi->s_bal_success));
61958 ext4_msg(sb, KERN_INFO,
61959 "mballoc: %u extents scanned, %u goal hits, "
61960 "%u 2^N hits, %u breaks, %u lost",
61961- atomic_read(&sbi->s_bal_ex_scanned),
61962- atomic_read(&sbi->s_bal_goals),
61963- atomic_read(&sbi->s_bal_2orders),
61964- atomic_read(&sbi->s_bal_breaks),
61965- atomic_read(&sbi->s_mb_lost_chunks));
61966+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
61967+ atomic_read_unchecked(&sbi->s_bal_goals),
61968+ atomic_read_unchecked(&sbi->s_bal_2orders),
61969+ atomic_read_unchecked(&sbi->s_bal_breaks),
61970+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
61971 ext4_msg(sb, KERN_INFO,
61972 "mballoc: %lu generated and it took %Lu",
61973 sbi->s_mb_buddies_generated,
61974 sbi->s_mb_generation_time);
61975 ext4_msg(sb, KERN_INFO,
61976 "mballoc: %u preallocated, %u discarded",
61977- atomic_read(&sbi->s_mb_preallocated),
61978- atomic_read(&sbi->s_mb_discarded));
61979+ atomic_read_unchecked(&sbi->s_mb_preallocated),
61980+ atomic_read_unchecked(&sbi->s_mb_discarded));
61981 }
61982
61983 free_percpu(sbi->s_locality_groups);
61984@@ -3192,16 +3192,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
61985 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
61986
61987 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
61988- atomic_inc(&sbi->s_bal_reqs);
61989- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61990+ atomic_inc_unchecked(&sbi->s_bal_reqs);
61991+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
61992 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
61993- atomic_inc(&sbi->s_bal_success);
61994- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
61995+ atomic_inc_unchecked(&sbi->s_bal_success);
61996+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
61997 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
61998 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
61999- atomic_inc(&sbi->s_bal_goals);
62000+ atomic_inc_unchecked(&sbi->s_bal_goals);
62001 if (ac->ac_found > sbi->s_mb_max_to_scan)
62002- atomic_inc(&sbi->s_bal_breaks);
62003+ atomic_inc_unchecked(&sbi->s_bal_breaks);
62004 }
62005
62006 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
62007@@ -3628,7 +3628,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
62008 trace_ext4_mb_new_inode_pa(ac, pa);
62009
62010 ext4_mb_use_inode_pa(ac, pa);
62011- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
62012+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
62013
62014 ei = EXT4_I(ac->ac_inode);
62015 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62016@@ -3688,7 +3688,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
62017 trace_ext4_mb_new_group_pa(ac, pa);
62018
62019 ext4_mb_use_group_pa(ac, pa);
62020- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62021+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
62022
62023 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
62024 lg = ac->ac_lg;
62025@@ -3777,7 +3777,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
62026 * from the bitmap and continue.
62027 */
62028 }
62029- atomic_add(free, &sbi->s_mb_discarded);
62030+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
62031
62032 return err;
62033 }
62034@@ -3795,7 +3795,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
62035 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
62036 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
62037 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
62038- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62039+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
62040 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
62041
62042 return 0;
62043diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
62044index 8313ca3..8a37d08 100644
62045--- a/fs/ext4/mmp.c
62046+++ b/fs/ext4/mmp.c
62047@@ -111,7 +111,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
62048 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
62049 const char *function, unsigned int line, const char *msg)
62050 {
62051- __ext4_warning(sb, function, line, msg);
62052+ __ext4_warning(sb, function, line, "%s", msg);
62053 __ext4_warning(sb, function, line,
62054 "MMP failure info: last update time: %llu, last update "
62055 "node: %s, last update device: %s\n",
62056diff --git a/fs/ext4/super.c b/fs/ext4/super.c
62057index b1f0ac7..77e9a05 100644
62058--- a/fs/ext4/super.c
62059+++ b/fs/ext4/super.c
62060@@ -1274,7 +1274,7 @@ static ext4_fsblk_t get_sb_block(void **data)
62061 }
62062
62063 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
62064-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62065+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
62066 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
62067
62068 #ifdef CONFIG_QUOTA
62069@@ -2454,7 +2454,7 @@ struct ext4_attr {
62070 int offset;
62071 int deprecated_val;
62072 } u;
62073-};
62074+} __do_const;
62075
62076 static int parse_strtoull(const char *buf,
62077 unsigned long long max, unsigned long long *value)
62078diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
62079index 2d1e5803..1b082d415 100644
62080--- a/fs/ext4/xattr.c
62081+++ b/fs/ext4/xattr.c
62082@@ -399,7 +399,7 @@ static int
62083 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62084 char *buffer, size_t buffer_size)
62085 {
62086- size_t rest = buffer_size;
62087+ size_t rest = buffer_size, total_size = 0;
62088
62089 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
62090 const struct xattr_handler *handler =
62091@@ -416,9 +416,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
62092 buffer += size;
62093 }
62094 rest -= size;
62095+ total_size += size;
62096 }
62097 }
62098- return buffer_size - rest;
62099+ return total_size;
62100 }
62101
62102 static int
62103diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
62104index 6df8d3d..b8b92c2 100644
62105--- a/fs/fat/namei_vfat.c
62106+++ b/fs/fat/namei_vfat.c
62107@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
62108 }
62109
62110 alias = d_find_alias(inode);
62111- if (alias && !vfat_d_anon_disconn(alias)) {
62112+ /*
62113+ * Checking "alias->d_parent == dentry->d_parent" to make sure
62114+ * FS is not corrupted (especially double linked dir).
62115+ */
62116+ if (alias && alias->d_parent == dentry->d_parent &&
62117+ !vfat_d_anon_disconn(alias)) {
62118 /*
62119 * This inode has non anonymous-DCACHE_DISCONNECTED
62120 * dentry. This means, the user did ->lookup() by an
62121@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
62122
62123 out:
62124 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62125- dentry->d_time = dentry->d_parent->d_inode->i_version;
62126- dentry = d_splice_alias(inode, dentry);
62127- if (dentry)
62128- dentry->d_time = dentry->d_parent->d_inode->i_version;
62129- return dentry;
62130-
62131+ if (!inode)
62132+ dentry->d_time = dir->i_version;
62133+ return d_splice_alias(inode, dentry);
62134 error:
62135 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62136 return ERR_PTR(err);
62137@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
62138 inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
62139 /* timestamp is already written, so mark_inode_dirty() is unneeded. */
62140
62141- dentry->d_time = dentry->d_parent->d_inode->i_version;
62142 d_instantiate(dentry, inode);
62143 out:
62144 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62145@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
62146 clear_nlink(inode);
62147 inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
62148 fat_detach(inode);
62149+ dentry->d_time = dir->i_version;
62150 out:
62151 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62152
62153@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
62154 clear_nlink(inode);
62155 inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
62156 fat_detach(inode);
62157+ dentry->d_time = dir->i_version;
62158 out:
62159 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62160
62161@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
62162 inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
62163 /* timestamp is already written, so mark_inode_dirty() is unneeded. */
62164
62165- dentry->d_time = dentry->d_parent->d_inode->i_version;
62166 d_instantiate(dentry, inode);
62167
62168 mutex_unlock(&MSDOS_SB(sb)->s_lock);
62169diff --git a/fs/fcntl.c b/fs/fcntl.c
62170index 22d1c3d..600cf7e 100644
62171--- a/fs/fcntl.c
62172+++ b/fs/fcntl.c
62173@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
62174 if (err)
62175 return err;
62176
62177+ if (gr_handle_chroot_fowner(pid, type))
62178+ return -ENOENT;
62179+ if (gr_check_protected_task_fowner(pid, type))
62180+ return -EACCES;
62181+
62182 f_modown(filp, pid, type, force);
62183 return 0;
62184 }
62185diff --git a/fs/fhandle.c b/fs/fhandle.c
62186index 999ff5c..ac037c9 100644
62187--- a/fs/fhandle.c
62188+++ b/fs/fhandle.c
62189@@ -8,6 +8,7 @@
62190 #include <linux/fs_struct.h>
62191 #include <linux/fsnotify.h>
62192 #include <linux/personality.h>
62193+#include <linux/grsecurity.h>
62194 #include <asm/uaccess.h>
62195 #include "internal.h"
62196 #include "mount.h"
62197@@ -67,8 +68,7 @@ static long do_sys_name_to_handle(struct path *path,
62198 } else
62199 retval = 0;
62200 /* copy the mount id */
62201- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
62202- sizeof(*mnt_id)) ||
62203+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
62204 copy_to_user(ufh, handle,
62205 sizeof(struct file_handle) + handle_bytes))
62206 retval = -EFAULT;
62207@@ -175,7 +175,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
62208 * the directory. Ideally we would like CAP_DAC_SEARCH.
62209 * But we don't have that
62210 */
62211- if (!capable(CAP_DAC_READ_SEARCH)) {
62212+ if (!capable(CAP_DAC_READ_SEARCH) || !gr_chroot_fhandle()) {
62213 retval = -EPERM;
62214 goto out_err;
62215 }
62216diff --git a/fs/file.c b/fs/file.c
62217index 66923fe..2849783 100644
62218--- a/fs/file.c
62219+++ b/fs/file.c
62220@@ -16,6 +16,7 @@
62221 #include <linux/slab.h>
62222 #include <linux/vmalloc.h>
62223 #include <linux/file.h>
62224+#include <linux/security.h>
62225 #include <linux/fdtable.h>
62226 #include <linux/bitops.h>
62227 #include <linux/interrupt.h>
62228@@ -139,7 +140,7 @@ out:
62229 * Return <0 error code on error; 1 on successful completion.
62230 * The files->file_lock should be held on entry, and will be held on exit.
62231 */
62232-static int expand_fdtable(struct files_struct *files, int nr)
62233+static int expand_fdtable(struct files_struct *files, unsigned int nr)
62234 __releases(files->file_lock)
62235 __acquires(files->file_lock)
62236 {
62237@@ -184,7 +185,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
62238 * expanded and execution may have blocked.
62239 * The files->file_lock should be held on entry, and will be held on exit.
62240 */
62241-static int expand_files(struct files_struct *files, int nr)
62242+static int expand_files(struct files_struct *files, unsigned int nr)
62243 {
62244 struct fdtable *fdt;
62245
62246@@ -799,6 +800,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
62247 if (!file)
62248 return __close_fd(files, fd);
62249
62250+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
62251 if (fd >= rlimit(RLIMIT_NOFILE))
62252 return -EBADF;
62253
62254@@ -825,6 +827,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
62255 if (unlikely(oldfd == newfd))
62256 return -EINVAL;
62257
62258+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
62259 if (newfd >= rlimit(RLIMIT_NOFILE))
62260 return -EBADF;
62261
62262@@ -880,6 +883,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
62263 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
62264 {
62265 int err;
62266+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
62267 if (from >= rlimit(RLIMIT_NOFILE))
62268 return -EINVAL;
62269 err = alloc_fd(from, flags);
62270diff --git a/fs/filesystems.c b/fs/filesystems.c
62271index 5797d45..7d7d79a 100644
62272--- a/fs/filesystems.c
62273+++ b/fs/filesystems.c
62274@@ -275,7 +275,11 @@ struct file_system_type *get_fs_type(const char *name)
62275 int len = dot ? dot - name : strlen(name);
62276
62277 fs = __get_fs_type(name, len);
62278+#ifdef CONFIG_GRKERNSEC_MODHARDEN
62279+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
62280+#else
62281 if (!fs && (request_module("fs-%.*s", len, name) == 0))
62282+#endif
62283 fs = __get_fs_type(name, len);
62284
62285 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
62286diff --git a/fs/fs_struct.c b/fs/fs_struct.c
62287index 7dca743..543d620 100644
62288--- a/fs/fs_struct.c
62289+++ b/fs/fs_struct.c
62290@@ -4,6 +4,7 @@
62291 #include <linux/path.h>
62292 #include <linux/slab.h>
62293 #include <linux/fs_struct.h>
62294+#include <linux/grsecurity.h>
62295 #include "internal.h"
62296
62297 /*
62298@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
62299 write_seqcount_begin(&fs->seq);
62300 old_root = fs->root;
62301 fs->root = *path;
62302+ gr_set_chroot_entries(current, path);
62303 write_seqcount_end(&fs->seq);
62304 spin_unlock(&fs->lock);
62305 if (old_root.dentry)
62306@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
62307 int hits = 0;
62308 spin_lock(&fs->lock);
62309 write_seqcount_begin(&fs->seq);
62310+ /* this root replacement is only done by pivot_root,
62311+ leave grsec's chroot tagging alone for this task
62312+ so that a pivoted root isn't treated as a chroot
62313+ */
62314 hits += replace_path(&fs->root, old_root, new_root);
62315 hits += replace_path(&fs->pwd, old_root, new_root);
62316 write_seqcount_end(&fs->seq);
62317@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
62318 task_lock(tsk);
62319 spin_lock(&fs->lock);
62320 tsk->fs = NULL;
62321- kill = !--fs->users;
62322+ gr_clear_chroot_entries(tsk);
62323+ kill = !atomic_dec_return(&fs->users);
62324 spin_unlock(&fs->lock);
62325 task_unlock(tsk);
62326 if (kill)
62327@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62328 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
62329 /* We don't need to lock fs - think why ;-) */
62330 if (fs) {
62331- fs->users = 1;
62332+ atomic_set(&fs->users, 1);
62333 fs->in_exec = 0;
62334 spin_lock_init(&fs->lock);
62335 seqcount_init(&fs->seq);
62336@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
62337 spin_lock(&old->lock);
62338 fs->root = old->root;
62339 path_get(&fs->root);
62340+ /* instead of calling gr_set_chroot_entries here,
62341+ we call it from every caller of this function
62342+ */
62343 fs->pwd = old->pwd;
62344 path_get(&fs->pwd);
62345 spin_unlock(&old->lock);
62346@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
62347
62348 task_lock(current);
62349 spin_lock(&fs->lock);
62350- kill = !--fs->users;
62351+ kill = !atomic_dec_return(&fs->users);
62352 current->fs = new_fs;
62353+ gr_set_chroot_entries(current, &new_fs->root);
62354 spin_unlock(&fs->lock);
62355 task_unlock(current);
62356
62357@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
62358
62359 int current_umask(void)
62360 {
62361- return current->fs->umask;
62362+ return current->fs->umask | gr_acl_umask();
62363 }
62364 EXPORT_SYMBOL(current_umask);
62365
62366 /* to be mentioned only in INIT_TASK */
62367 struct fs_struct init_fs = {
62368- .users = 1,
62369+ .users = ATOMIC_INIT(1),
62370 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
62371 .seq = SEQCNT_ZERO(init_fs.seq),
62372 .umask = 0022,
62373diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
62374index 89acec7..a575262 100644
62375--- a/fs/fscache/cookie.c
62376+++ b/fs/fscache/cookie.c
62377@@ -19,7 +19,7 @@
62378
62379 struct kmem_cache *fscache_cookie_jar;
62380
62381-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
62382+static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
62383
62384 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
62385 static int fscache_alloc_object(struct fscache_cache *cache,
62386@@ -69,11 +69,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
62387 parent ? (char *) parent->def->name : "<no-parent>",
62388 def->name, netfs_data, enable);
62389
62390- fscache_stat(&fscache_n_acquires);
62391+ fscache_stat_unchecked(&fscache_n_acquires);
62392
62393 /* if there's no parent cookie, then we don't create one here either */
62394 if (!parent) {
62395- fscache_stat(&fscache_n_acquires_null);
62396+ fscache_stat_unchecked(&fscache_n_acquires_null);
62397 _leave(" [no parent]");
62398 return NULL;
62399 }
62400@@ -88,7 +88,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62401 /* allocate and initialise a cookie */
62402 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
62403 if (!cookie) {
62404- fscache_stat(&fscache_n_acquires_oom);
62405+ fscache_stat_unchecked(&fscache_n_acquires_oom);
62406 _leave(" [ENOMEM]");
62407 return NULL;
62408 }
62409@@ -115,13 +115,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
62410
62411 switch (cookie->def->type) {
62412 case FSCACHE_COOKIE_TYPE_INDEX:
62413- fscache_stat(&fscache_n_cookie_index);
62414+ fscache_stat_unchecked(&fscache_n_cookie_index);
62415 break;
62416 case FSCACHE_COOKIE_TYPE_DATAFILE:
62417- fscache_stat(&fscache_n_cookie_data);
62418+ fscache_stat_unchecked(&fscache_n_cookie_data);
62419 break;
62420 default:
62421- fscache_stat(&fscache_n_cookie_special);
62422+ fscache_stat_unchecked(&fscache_n_cookie_special);
62423 break;
62424 }
62425
62426@@ -135,7 +135,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62427 } else {
62428 atomic_dec(&parent->n_children);
62429 __fscache_cookie_put(cookie);
62430- fscache_stat(&fscache_n_acquires_nobufs);
62431+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
62432 _leave(" = NULL");
62433 return NULL;
62434 }
62435@@ -144,7 +144,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
62436 }
62437 }
62438
62439- fscache_stat(&fscache_n_acquires_ok);
62440+ fscache_stat_unchecked(&fscache_n_acquires_ok);
62441 _leave(" = %p", cookie);
62442 return cookie;
62443 }
62444@@ -213,7 +213,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
62445 cache = fscache_select_cache_for_object(cookie->parent);
62446 if (!cache) {
62447 up_read(&fscache_addremove_sem);
62448- fscache_stat(&fscache_n_acquires_no_cache);
62449+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
62450 _leave(" = -ENOMEDIUM [no cache]");
62451 return -ENOMEDIUM;
62452 }
62453@@ -297,14 +297,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
62454 object = cache->ops->alloc_object(cache, cookie);
62455 fscache_stat_d(&fscache_n_cop_alloc_object);
62456 if (IS_ERR(object)) {
62457- fscache_stat(&fscache_n_object_no_alloc);
62458+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
62459 ret = PTR_ERR(object);
62460 goto error;
62461 }
62462
62463- fscache_stat(&fscache_n_object_alloc);
62464+ fscache_stat_unchecked(&fscache_n_object_alloc);
62465
62466- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
62467+ object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
62468
62469 _debug("ALLOC OBJ%x: %s {%lx}",
62470 object->debug_id, cookie->def->name, object->events);
62471@@ -418,7 +418,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
62472
62473 _enter("{%s}", cookie->def->name);
62474
62475- fscache_stat(&fscache_n_invalidates);
62476+ fscache_stat_unchecked(&fscache_n_invalidates);
62477
62478 /* Only permit invalidation of data files. Invalidating an index will
62479 * require the caller to release all its attachments to the tree rooted
62480@@ -476,10 +476,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
62481 {
62482 struct fscache_object *object;
62483
62484- fscache_stat(&fscache_n_updates);
62485+ fscache_stat_unchecked(&fscache_n_updates);
62486
62487 if (!cookie) {
62488- fscache_stat(&fscache_n_updates_null);
62489+ fscache_stat_unchecked(&fscache_n_updates_null);
62490 _leave(" [no cookie]");
62491 return;
62492 }
62493@@ -580,12 +580,12 @@ EXPORT_SYMBOL(__fscache_disable_cookie);
62494 */
62495 void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
62496 {
62497- fscache_stat(&fscache_n_relinquishes);
62498+ fscache_stat_unchecked(&fscache_n_relinquishes);
62499 if (retire)
62500- fscache_stat(&fscache_n_relinquishes_retire);
62501+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
62502
62503 if (!cookie) {
62504- fscache_stat(&fscache_n_relinquishes_null);
62505+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
62506 _leave(" [no cookie]");
62507 return;
62508 }
62509@@ -686,7 +686,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
62510 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
62511 goto inconsistent;
62512
62513- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
62514+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
62515
62516 __fscache_use_cookie(cookie);
62517 if (fscache_submit_op(object, op) < 0)
62518diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
62519index 7872a62..d91b19f 100644
62520--- a/fs/fscache/internal.h
62521+++ b/fs/fscache/internal.h
62522@@ -137,8 +137,8 @@ extern void fscache_operation_gc(struct work_struct *);
62523 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
62524 extern int fscache_wait_for_operation_activation(struct fscache_object *,
62525 struct fscache_operation *,
62526- atomic_t *,
62527- atomic_t *,
62528+ atomic_unchecked_t *,
62529+ atomic_unchecked_t *,
62530 void (*)(struct fscache_operation *));
62531 extern void fscache_invalidate_writes(struct fscache_cookie *);
62532
62533@@ -157,101 +157,101 @@ extern void fscache_proc_cleanup(void);
62534 * stats.c
62535 */
62536 #ifdef CONFIG_FSCACHE_STATS
62537-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62538-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62539+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
62540+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
62541
62542-extern atomic_t fscache_n_op_pend;
62543-extern atomic_t fscache_n_op_run;
62544-extern atomic_t fscache_n_op_enqueue;
62545-extern atomic_t fscache_n_op_deferred_release;
62546-extern atomic_t fscache_n_op_release;
62547-extern atomic_t fscache_n_op_gc;
62548-extern atomic_t fscache_n_op_cancelled;
62549-extern atomic_t fscache_n_op_rejected;
62550+extern atomic_unchecked_t fscache_n_op_pend;
62551+extern atomic_unchecked_t fscache_n_op_run;
62552+extern atomic_unchecked_t fscache_n_op_enqueue;
62553+extern atomic_unchecked_t fscache_n_op_deferred_release;
62554+extern atomic_unchecked_t fscache_n_op_release;
62555+extern atomic_unchecked_t fscache_n_op_gc;
62556+extern atomic_unchecked_t fscache_n_op_cancelled;
62557+extern atomic_unchecked_t fscache_n_op_rejected;
62558
62559-extern atomic_t fscache_n_attr_changed;
62560-extern atomic_t fscache_n_attr_changed_ok;
62561-extern atomic_t fscache_n_attr_changed_nobufs;
62562-extern atomic_t fscache_n_attr_changed_nomem;
62563-extern atomic_t fscache_n_attr_changed_calls;
62564+extern atomic_unchecked_t fscache_n_attr_changed;
62565+extern atomic_unchecked_t fscache_n_attr_changed_ok;
62566+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
62567+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
62568+extern atomic_unchecked_t fscache_n_attr_changed_calls;
62569
62570-extern atomic_t fscache_n_allocs;
62571-extern atomic_t fscache_n_allocs_ok;
62572-extern atomic_t fscache_n_allocs_wait;
62573-extern atomic_t fscache_n_allocs_nobufs;
62574-extern atomic_t fscache_n_allocs_intr;
62575-extern atomic_t fscache_n_allocs_object_dead;
62576-extern atomic_t fscache_n_alloc_ops;
62577-extern atomic_t fscache_n_alloc_op_waits;
62578+extern atomic_unchecked_t fscache_n_allocs;
62579+extern atomic_unchecked_t fscache_n_allocs_ok;
62580+extern atomic_unchecked_t fscache_n_allocs_wait;
62581+extern atomic_unchecked_t fscache_n_allocs_nobufs;
62582+extern atomic_unchecked_t fscache_n_allocs_intr;
62583+extern atomic_unchecked_t fscache_n_allocs_object_dead;
62584+extern atomic_unchecked_t fscache_n_alloc_ops;
62585+extern atomic_unchecked_t fscache_n_alloc_op_waits;
62586
62587-extern atomic_t fscache_n_retrievals;
62588-extern atomic_t fscache_n_retrievals_ok;
62589-extern atomic_t fscache_n_retrievals_wait;
62590-extern atomic_t fscache_n_retrievals_nodata;
62591-extern atomic_t fscache_n_retrievals_nobufs;
62592-extern atomic_t fscache_n_retrievals_intr;
62593-extern atomic_t fscache_n_retrievals_nomem;
62594-extern atomic_t fscache_n_retrievals_object_dead;
62595-extern atomic_t fscache_n_retrieval_ops;
62596-extern atomic_t fscache_n_retrieval_op_waits;
62597+extern atomic_unchecked_t fscache_n_retrievals;
62598+extern atomic_unchecked_t fscache_n_retrievals_ok;
62599+extern atomic_unchecked_t fscache_n_retrievals_wait;
62600+extern atomic_unchecked_t fscache_n_retrievals_nodata;
62601+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
62602+extern atomic_unchecked_t fscache_n_retrievals_intr;
62603+extern atomic_unchecked_t fscache_n_retrievals_nomem;
62604+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
62605+extern atomic_unchecked_t fscache_n_retrieval_ops;
62606+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
62607
62608-extern atomic_t fscache_n_stores;
62609-extern atomic_t fscache_n_stores_ok;
62610-extern atomic_t fscache_n_stores_again;
62611-extern atomic_t fscache_n_stores_nobufs;
62612-extern atomic_t fscache_n_stores_oom;
62613-extern atomic_t fscache_n_store_ops;
62614-extern atomic_t fscache_n_store_calls;
62615-extern atomic_t fscache_n_store_pages;
62616-extern atomic_t fscache_n_store_radix_deletes;
62617-extern atomic_t fscache_n_store_pages_over_limit;
62618+extern atomic_unchecked_t fscache_n_stores;
62619+extern atomic_unchecked_t fscache_n_stores_ok;
62620+extern atomic_unchecked_t fscache_n_stores_again;
62621+extern atomic_unchecked_t fscache_n_stores_nobufs;
62622+extern atomic_unchecked_t fscache_n_stores_oom;
62623+extern atomic_unchecked_t fscache_n_store_ops;
62624+extern atomic_unchecked_t fscache_n_store_calls;
62625+extern atomic_unchecked_t fscache_n_store_pages;
62626+extern atomic_unchecked_t fscache_n_store_radix_deletes;
62627+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
62628
62629-extern atomic_t fscache_n_store_vmscan_not_storing;
62630-extern atomic_t fscache_n_store_vmscan_gone;
62631-extern atomic_t fscache_n_store_vmscan_busy;
62632-extern atomic_t fscache_n_store_vmscan_cancelled;
62633-extern atomic_t fscache_n_store_vmscan_wait;
62634+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
62635+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
62636+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
62637+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
62638+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
62639
62640-extern atomic_t fscache_n_marks;
62641-extern atomic_t fscache_n_uncaches;
62642+extern atomic_unchecked_t fscache_n_marks;
62643+extern atomic_unchecked_t fscache_n_uncaches;
62644
62645-extern atomic_t fscache_n_acquires;
62646-extern atomic_t fscache_n_acquires_null;
62647-extern atomic_t fscache_n_acquires_no_cache;
62648-extern atomic_t fscache_n_acquires_ok;
62649-extern atomic_t fscache_n_acquires_nobufs;
62650-extern atomic_t fscache_n_acquires_oom;
62651+extern atomic_unchecked_t fscache_n_acquires;
62652+extern atomic_unchecked_t fscache_n_acquires_null;
62653+extern atomic_unchecked_t fscache_n_acquires_no_cache;
62654+extern atomic_unchecked_t fscache_n_acquires_ok;
62655+extern atomic_unchecked_t fscache_n_acquires_nobufs;
62656+extern atomic_unchecked_t fscache_n_acquires_oom;
62657
62658-extern atomic_t fscache_n_invalidates;
62659-extern atomic_t fscache_n_invalidates_run;
62660+extern atomic_unchecked_t fscache_n_invalidates;
62661+extern atomic_unchecked_t fscache_n_invalidates_run;
62662
62663-extern atomic_t fscache_n_updates;
62664-extern atomic_t fscache_n_updates_null;
62665-extern atomic_t fscache_n_updates_run;
62666+extern atomic_unchecked_t fscache_n_updates;
62667+extern atomic_unchecked_t fscache_n_updates_null;
62668+extern atomic_unchecked_t fscache_n_updates_run;
62669
62670-extern atomic_t fscache_n_relinquishes;
62671-extern atomic_t fscache_n_relinquishes_null;
62672-extern atomic_t fscache_n_relinquishes_waitcrt;
62673-extern atomic_t fscache_n_relinquishes_retire;
62674+extern atomic_unchecked_t fscache_n_relinquishes;
62675+extern atomic_unchecked_t fscache_n_relinquishes_null;
62676+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
62677+extern atomic_unchecked_t fscache_n_relinquishes_retire;
62678
62679-extern atomic_t fscache_n_cookie_index;
62680-extern atomic_t fscache_n_cookie_data;
62681-extern atomic_t fscache_n_cookie_special;
62682+extern atomic_unchecked_t fscache_n_cookie_index;
62683+extern atomic_unchecked_t fscache_n_cookie_data;
62684+extern atomic_unchecked_t fscache_n_cookie_special;
62685
62686-extern atomic_t fscache_n_object_alloc;
62687-extern atomic_t fscache_n_object_no_alloc;
62688-extern atomic_t fscache_n_object_lookups;
62689-extern atomic_t fscache_n_object_lookups_negative;
62690-extern atomic_t fscache_n_object_lookups_positive;
62691-extern atomic_t fscache_n_object_lookups_timed_out;
62692-extern atomic_t fscache_n_object_created;
62693-extern atomic_t fscache_n_object_avail;
62694-extern atomic_t fscache_n_object_dead;
62695+extern atomic_unchecked_t fscache_n_object_alloc;
62696+extern atomic_unchecked_t fscache_n_object_no_alloc;
62697+extern atomic_unchecked_t fscache_n_object_lookups;
62698+extern atomic_unchecked_t fscache_n_object_lookups_negative;
62699+extern atomic_unchecked_t fscache_n_object_lookups_positive;
62700+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
62701+extern atomic_unchecked_t fscache_n_object_created;
62702+extern atomic_unchecked_t fscache_n_object_avail;
62703+extern atomic_unchecked_t fscache_n_object_dead;
62704
62705-extern atomic_t fscache_n_checkaux_none;
62706-extern atomic_t fscache_n_checkaux_okay;
62707-extern atomic_t fscache_n_checkaux_update;
62708-extern atomic_t fscache_n_checkaux_obsolete;
62709+extern atomic_unchecked_t fscache_n_checkaux_none;
62710+extern atomic_unchecked_t fscache_n_checkaux_okay;
62711+extern atomic_unchecked_t fscache_n_checkaux_update;
62712+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
62713
62714 extern atomic_t fscache_n_cop_alloc_object;
62715 extern atomic_t fscache_n_cop_lookup_object;
62716@@ -276,6 +276,11 @@ static inline void fscache_stat(atomic_t *stat)
62717 atomic_inc(stat);
62718 }
62719
62720+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
62721+{
62722+ atomic_inc_unchecked(stat);
62723+}
62724+
62725 static inline void fscache_stat_d(atomic_t *stat)
62726 {
62727 atomic_dec(stat);
62728@@ -288,6 +293,7 @@ extern const struct file_operations fscache_stats_fops;
62729
62730 #define __fscache_stat(stat) (NULL)
62731 #define fscache_stat(stat) do {} while (0)
62732+#define fscache_stat_unchecked(stat) do {} while (0)
62733 #define fscache_stat_d(stat) do {} while (0)
62734 #endif
62735
62736diff --git a/fs/fscache/object.c b/fs/fscache/object.c
62737index da032da..0076ce7 100644
62738--- a/fs/fscache/object.c
62739+++ b/fs/fscache/object.c
62740@@ -454,7 +454,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62741 _debug("LOOKUP \"%s\" in \"%s\"",
62742 cookie->def->name, object->cache->tag->name);
62743
62744- fscache_stat(&fscache_n_object_lookups);
62745+ fscache_stat_unchecked(&fscache_n_object_lookups);
62746 fscache_stat(&fscache_n_cop_lookup_object);
62747 ret = object->cache->ops->lookup_object(object);
62748 fscache_stat_d(&fscache_n_cop_lookup_object);
62749@@ -464,7 +464,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
62750 if (ret == -ETIMEDOUT) {
62751 /* probably stuck behind another object, so move this one to
62752 * the back of the queue */
62753- fscache_stat(&fscache_n_object_lookups_timed_out);
62754+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
62755 _leave(" [timeout]");
62756 return NO_TRANSIT;
62757 }
62758@@ -492,7 +492,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
62759 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
62760
62761 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62762- fscache_stat(&fscache_n_object_lookups_negative);
62763+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
62764
62765 /* Allow write requests to begin stacking up and read requests to begin
62766 * returning ENODATA.
62767@@ -527,7 +527,7 @@ void fscache_obtained_object(struct fscache_object *object)
62768 /* if we were still looking up, then we must have a positive lookup
62769 * result, in which case there may be data available */
62770 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
62771- fscache_stat(&fscache_n_object_lookups_positive);
62772+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
62773
62774 /* We do (presumably) have data */
62775 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
62776@@ -539,7 +539,7 @@ void fscache_obtained_object(struct fscache_object *object)
62777 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
62778 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
62779 } else {
62780- fscache_stat(&fscache_n_object_created);
62781+ fscache_stat_unchecked(&fscache_n_object_created);
62782 }
62783
62784 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
62785@@ -575,7 +575,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
62786 fscache_stat_d(&fscache_n_cop_lookup_complete);
62787
62788 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
62789- fscache_stat(&fscache_n_object_avail);
62790+ fscache_stat_unchecked(&fscache_n_object_avail);
62791
62792 _leave("");
62793 return transit_to(JUMPSTART_DEPS);
62794@@ -722,7 +722,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
62795
62796 /* this just shifts the object release to the work processor */
62797 fscache_put_object(object);
62798- fscache_stat(&fscache_n_object_dead);
62799+ fscache_stat_unchecked(&fscache_n_object_dead);
62800
62801 _leave("");
62802 return transit_to(OBJECT_DEAD);
62803@@ -887,7 +887,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62804 enum fscache_checkaux result;
62805
62806 if (!object->cookie->def->check_aux) {
62807- fscache_stat(&fscache_n_checkaux_none);
62808+ fscache_stat_unchecked(&fscache_n_checkaux_none);
62809 return FSCACHE_CHECKAUX_OKAY;
62810 }
62811
62812@@ -896,17 +896,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
62813 switch (result) {
62814 /* entry okay as is */
62815 case FSCACHE_CHECKAUX_OKAY:
62816- fscache_stat(&fscache_n_checkaux_okay);
62817+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
62818 break;
62819
62820 /* entry requires update */
62821 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
62822- fscache_stat(&fscache_n_checkaux_update);
62823+ fscache_stat_unchecked(&fscache_n_checkaux_update);
62824 break;
62825
62826 /* entry requires deletion */
62827 case FSCACHE_CHECKAUX_OBSOLETE:
62828- fscache_stat(&fscache_n_checkaux_obsolete);
62829+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
62830 break;
62831
62832 default:
62833@@ -993,7 +993,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
62834 {
62835 const struct fscache_state *s;
62836
62837- fscache_stat(&fscache_n_invalidates_run);
62838+ fscache_stat_unchecked(&fscache_n_invalidates_run);
62839 fscache_stat(&fscache_n_cop_invalidate_object);
62840 s = _fscache_invalidate_object(object, event);
62841 fscache_stat_d(&fscache_n_cop_invalidate_object);
62842@@ -1008,7 +1008,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
62843 {
62844 _enter("{OBJ%x},%d", object->debug_id, event);
62845
62846- fscache_stat(&fscache_n_updates_run);
62847+ fscache_stat_unchecked(&fscache_n_updates_run);
62848 fscache_stat(&fscache_n_cop_update_object);
62849 object->cache->ops->update_object(object);
62850 fscache_stat_d(&fscache_n_cop_update_object);
62851diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
62852index e7b87a0..a85d47a 100644
62853--- a/fs/fscache/operation.c
62854+++ b/fs/fscache/operation.c
62855@@ -17,7 +17,7 @@
62856 #include <linux/slab.h>
62857 #include "internal.h"
62858
62859-atomic_t fscache_op_debug_id;
62860+atomic_unchecked_t fscache_op_debug_id;
62861 EXPORT_SYMBOL(fscache_op_debug_id);
62862
62863 /**
62864@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
62865 ASSERTCMP(atomic_read(&op->usage), >, 0);
62866 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
62867
62868- fscache_stat(&fscache_n_op_enqueue);
62869+ fscache_stat_unchecked(&fscache_n_op_enqueue);
62870 switch (op->flags & FSCACHE_OP_TYPE) {
62871 case FSCACHE_OP_ASYNC:
62872 _debug("queue async");
62873@@ -72,7 +72,7 @@ static void fscache_run_op(struct fscache_object *object,
62874 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
62875 if (op->processor)
62876 fscache_enqueue_operation(op);
62877- fscache_stat(&fscache_n_op_run);
62878+ fscache_stat_unchecked(&fscache_n_op_run);
62879 }
62880
62881 /*
62882@@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62883 if (object->n_in_progress > 0) {
62884 atomic_inc(&op->usage);
62885 list_add_tail(&op->pend_link, &object->pending_ops);
62886- fscache_stat(&fscache_n_op_pend);
62887+ fscache_stat_unchecked(&fscache_n_op_pend);
62888 } else if (!list_empty(&object->pending_ops)) {
62889 atomic_inc(&op->usage);
62890 list_add_tail(&op->pend_link, &object->pending_ops);
62891- fscache_stat(&fscache_n_op_pend);
62892+ fscache_stat_unchecked(&fscache_n_op_pend);
62893 fscache_start_operations(object);
62894 } else {
62895 ASSERTCMP(object->n_in_progress, ==, 0);
62896@@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
62897 object->n_exclusive++; /* reads and writes must wait */
62898 atomic_inc(&op->usage);
62899 list_add_tail(&op->pend_link, &object->pending_ops);
62900- fscache_stat(&fscache_n_op_pend);
62901+ fscache_stat_unchecked(&fscache_n_op_pend);
62902 ret = 0;
62903 } else {
62904 /* If we're in any other state, there must have been an I/O
62905@@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_object *object,
62906 if (object->n_exclusive > 0) {
62907 atomic_inc(&op->usage);
62908 list_add_tail(&op->pend_link, &object->pending_ops);
62909- fscache_stat(&fscache_n_op_pend);
62910+ fscache_stat_unchecked(&fscache_n_op_pend);
62911 } else if (!list_empty(&object->pending_ops)) {
62912 atomic_inc(&op->usage);
62913 list_add_tail(&op->pend_link, &object->pending_ops);
62914- fscache_stat(&fscache_n_op_pend);
62915+ fscache_stat_unchecked(&fscache_n_op_pend);
62916 fscache_start_operations(object);
62917 } else {
62918 ASSERTCMP(object->n_exclusive, ==, 0);
62919@@ -227,10 +227,10 @@ int fscache_submit_op(struct fscache_object *object,
62920 object->n_ops++;
62921 atomic_inc(&op->usage);
62922 list_add_tail(&op->pend_link, &object->pending_ops);
62923- fscache_stat(&fscache_n_op_pend);
62924+ fscache_stat_unchecked(&fscache_n_op_pend);
62925 ret = 0;
62926 } else if (fscache_object_is_dying(object)) {
62927- fscache_stat(&fscache_n_op_rejected);
62928+ fscache_stat_unchecked(&fscache_n_op_rejected);
62929 op->state = FSCACHE_OP_ST_CANCELLED;
62930 ret = -ENOBUFS;
62931 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
62932@@ -309,7 +309,7 @@ int fscache_cancel_op(struct fscache_operation *op,
62933 ret = -EBUSY;
62934 if (op->state == FSCACHE_OP_ST_PENDING) {
62935 ASSERT(!list_empty(&op->pend_link));
62936- fscache_stat(&fscache_n_op_cancelled);
62937+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62938 list_del_init(&op->pend_link);
62939 if (do_cancel)
62940 do_cancel(op);
62941@@ -341,7 +341,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
62942 while (!list_empty(&object->pending_ops)) {
62943 op = list_entry(object->pending_ops.next,
62944 struct fscache_operation, pend_link);
62945- fscache_stat(&fscache_n_op_cancelled);
62946+ fscache_stat_unchecked(&fscache_n_op_cancelled);
62947 list_del_init(&op->pend_link);
62948
62949 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
62950@@ -413,7 +413,7 @@ void fscache_put_operation(struct fscache_operation *op)
62951 op->state, ==, FSCACHE_OP_ST_CANCELLED);
62952 op->state = FSCACHE_OP_ST_DEAD;
62953
62954- fscache_stat(&fscache_n_op_release);
62955+ fscache_stat_unchecked(&fscache_n_op_release);
62956
62957 if (op->release) {
62958 op->release(op);
62959@@ -432,7 +432,7 @@ void fscache_put_operation(struct fscache_operation *op)
62960 * lock, and defer it otherwise */
62961 if (!spin_trylock(&object->lock)) {
62962 _debug("defer put");
62963- fscache_stat(&fscache_n_op_deferred_release);
62964+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
62965
62966 cache = object->cache;
62967 spin_lock(&cache->op_gc_list_lock);
62968@@ -485,7 +485,7 @@ void fscache_operation_gc(struct work_struct *work)
62969
62970 _debug("GC DEFERRED REL OBJ%x OP%x",
62971 object->debug_id, op->debug_id);
62972- fscache_stat(&fscache_n_op_gc);
62973+ fscache_stat_unchecked(&fscache_n_op_gc);
62974
62975 ASSERTCMP(atomic_read(&op->usage), ==, 0);
62976 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
62977diff --git a/fs/fscache/page.c b/fs/fscache/page.c
62978index de33b3f..8be4d29 100644
62979--- a/fs/fscache/page.c
62980+++ b/fs/fscache/page.c
62981@@ -74,7 +74,7 @@ try_again:
62982 val = radix_tree_lookup(&cookie->stores, page->index);
62983 if (!val) {
62984 rcu_read_unlock();
62985- fscache_stat(&fscache_n_store_vmscan_not_storing);
62986+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
62987 __fscache_uncache_page(cookie, page);
62988 return true;
62989 }
62990@@ -104,11 +104,11 @@ try_again:
62991 spin_unlock(&cookie->stores_lock);
62992
62993 if (xpage) {
62994- fscache_stat(&fscache_n_store_vmscan_cancelled);
62995- fscache_stat(&fscache_n_store_radix_deletes);
62996+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
62997+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
62998 ASSERTCMP(xpage, ==, page);
62999 } else {
63000- fscache_stat(&fscache_n_store_vmscan_gone);
63001+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
63002 }
63003
63004 wake_up_bit(&cookie->flags, 0);
63005@@ -123,11 +123,11 @@ page_busy:
63006 * sleeping on memory allocation, so we may need to impose a timeout
63007 * too. */
63008 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
63009- fscache_stat(&fscache_n_store_vmscan_busy);
63010+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
63011 return false;
63012 }
63013
63014- fscache_stat(&fscache_n_store_vmscan_wait);
63015+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
63016 if (!release_page_wait_timeout(cookie, page))
63017 _debug("fscache writeout timeout page: %p{%lx}",
63018 page, page->index);
63019@@ -156,7 +156,7 @@ static void fscache_end_page_write(struct fscache_object *object,
63020 FSCACHE_COOKIE_STORING_TAG);
63021 if (!radix_tree_tag_get(&cookie->stores, page->index,
63022 FSCACHE_COOKIE_PENDING_TAG)) {
63023- fscache_stat(&fscache_n_store_radix_deletes);
63024+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
63025 xpage = radix_tree_delete(&cookie->stores, page->index);
63026 }
63027 spin_unlock(&cookie->stores_lock);
63028@@ -177,7 +177,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
63029
63030 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
63031
63032- fscache_stat(&fscache_n_attr_changed_calls);
63033+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
63034
63035 if (fscache_object_is_active(object)) {
63036 fscache_stat(&fscache_n_cop_attr_changed);
63037@@ -204,11 +204,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63038
63039 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63040
63041- fscache_stat(&fscache_n_attr_changed);
63042+ fscache_stat_unchecked(&fscache_n_attr_changed);
63043
63044 op = kzalloc(sizeof(*op), GFP_KERNEL);
63045 if (!op) {
63046- fscache_stat(&fscache_n_attr_changed_nomem);
63047+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
63048 _leave(" = -ENOMEM");
63049 return -ENOMEM;
63050 }
63051@@ -230,7 +230,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
63052 if (fscache_submit_exclusive_op(object, op) < 0)
63053 goto nobufs_dec;
63054 spin_unlock(&cookie->lock);
63055- fscache_stat(&fscache_n_attr_changed_ok);
63056+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
63057 fscache_put_operation(op);
63058 _leave(" = 0");
63059 return 0;
63060@@ -242,7 +242,7 @@ nobufs:
63061 kfree(op);
63062 if (wake_cookie)
63063 __fscache_wake_unused_cookie(cookie);
63064- fscache_stat(&fscache_n_attr_changed_nobufs);
63065+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
63066 _leave(" = %d", -ENOBUFS);
63067 return -ENOBUFS;
63068 }
63069@@ -281,7 +281,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
63070 /* allocate a retrieval operation and attempt to submit it */
63071 op = kzalloc(sizeof(*op), GFP_NOIO);
63072 if (!op) {
63073- fscache_stat(&fscache_n_retrievals_nomem);
63074+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63075 return NULL;
63076 }
63077
63078@@ -311,12 +311,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
63079 return 0;
63080 }
63081
63082- fscache_stat(&fscache_n_retrievals_wait);
63083+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
63084
63085 jif = jiffies;
63086 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
63087 TASK_INTERRUPTIBLE) != 0) {
63088- fscache_stat(&fscache_n_retrievals_intr);
63089+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63090 _leave(" = -ERESTARTSYS");
63091 return -ERESTARTSYS;
63092 }
63093@@ -345,8 +345,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
63094 */
63095 int fscache_wait_for_operation_activation(struct fscache_object *object,
63096 struct fscache_operation *op,
63097- atomic_t *stat_op_waits,
63098- atomic_t *stat_object_dead,
63099+ atomic_unchecked_t *stat_op_waits,
63100+ atomic_unchecked_t *stat_object_dead,
63101 void (*do_cancel)(struct fscache_operation *))
63102 {
63103 int ret;
63104@@ -356,7 +356,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63105
63106 _debug(">>> WT");
63107 if (stat_op_waits)
63108- fscache_stat(stat_op_waits);
63109+ fscache_stat_unchecked(stat_op_waits);
63110 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
63111 TASK_INTERRUPTIBLE) != 0) {
63112 ret = fscache_cancel_op(op, do_cancel);
63113@@ -373,7 +373,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
63114 check_if_dead:
63115 if (op->state == FSCACHE_OP_ST_CANCELLED) {
63116 if (stat_object_dead)
63117- fscache_stat(stat_object_dead);
63118+ fscache_stat_unchecked(stat_object_dead);
63119 _leave(" = -ENOBUFS [cancelled]");
63120 return -ENOBUFS;
63121 }
63122@@ -381,7 +381,7 @@ check_if_dead:
63123 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
63124 fscache_cancel_op(op, do_cancel);
63125 if (stat_object_dead)
63126- fscache_stat(stat_object_dead);
63127+ fscache_stat_unchecked(stat_object_dead);
63128 return -ENOBUFS;
63129 }
63130 return 0;
63131@@ -409,7 +409,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63132
63133 _enter("%p,%p,,,", cookie, page);
63134
63135- fscache_stat(&fscache_n_retrievals);
63136+ fscache_stat_unchecked(&fscache_n_retrievals);
63137
63138 if (hlist_empty(&cookie->backing_objects))
63139 goto nobufs;
63140@@ -451,7 +451,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63141 goto nobufs_unlock_dec;
63142 spin_unlock(&cookie->lock);
63143
63144- fscache_stat(&fscache_n_retrieval_ops);
63145+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63146
63147 /* pin the netfs read context in case we need to do the actual netfs
63148 * read because we've encountered a cache read failure */
63149@@ -482,15 +482,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
63150
63151 error:
63152 if (ret == -ENOMEM)
63153- fscache_stat(&fscache_n_retrievals_nomem);
63154+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63155 else if (ret == -ERESTARTSYS)
63156- fscache_stat(&fscache_n_retrievals_intr);
63157+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63158 else if (ret == -ENODATA)
63159- fscache_stat(&fscache_n_retrievals_nodata);
63160+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63161 else if (ret < 0)
63162- fscache_stat(&fscache_n_retrievals_nobufs);
63163+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63164 else
63165- fscache_stat(&fscache_n_retrievals_ok);
63166+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63167
63168 fscache_put_retrieval(op);
63169 _leave(" = %d", ret);
63170@@ -505,7 +505,7 @@ nobufs_unlock:
63171 __fscache_wake_unused_cookie(cookie);
63172 kfree(op);
63173 nobufs:
63174- fscache_stat(&fscache_n_retrievals_nobufs);
63175+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63176 _leave(" = -ENOBUFS");
63177 return -ENOBUFS;
63178 }
63179@@ -544,7 +544,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63180
63181 _enter("%p,,%d,,,", cookie, *nr_pages);
63182
63183- fscache_stat(&fscache_n_retrievals);
63184+ fscache_stat_unchecked(&fscache_n_retrievals);
63185
63186 if (hlist_empty(&cookie->backing_objects))
63187 goto nobufs;
63188@@ -582,7 +582,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63189 goto nobufs_unlock_dec;
63190 spin_unlock(&cookie->lock);
63191
63192- fscache_stat(&fscache_n_retrieval_ops);
63193+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
63194
63195 /* pin the netfs read context in case we need to do the actual netfs
63196 * read because we've encountered a cache read failure */
63197@@ -613,15 +613,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
63198
63199 error:
63200 if (ret == -ENOMEM)
63201- fscache_stat(&fscache_n_retrievals_nomem);
63202+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
63203 else if (ret == -ERESTARTSYS)
63204- fscache_stat(&fscache_n_retrievals_intr);
63205+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
63206 else if (ret == -ENODATA)
63207- fscache_stat(&fscache_n_retrievals_nodata);
63208+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
63209 else if (ret < 0)
63210- fscache_stat(&fscache_n_retrievals_nobufs);
63211+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63212 else
63213- fscache_stat(&fscache_n_retrievals_ok);
63214+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
63215
63216 fscache_put_retrieval(op);
63217 _leave(" = %d", ret);
63218@@ -636,7 +636,7 @@ nobufs_unlock:
63219 if (wake_cookie)
63220 __fscache_wake_unused_cookie(cookie);
63221 nobufs:
63222- fscache_stat(&fscache_n_retrievals_nobufs);
63223+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
63224 _leave(" = -ENOBUFS");
63225 return -ENOBUFS;
63226 }
63227@@ -661,7 +661,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63228
63229 _enter("%p,%p,,,", cookie, page);
63230
63231- fscache_stat(&fscache_n_allocs);
63232+ fscache_stat_unchecked(&fscache_n_allocs);
63233
63234 if (hlist_empty(&cookie->backing_objects))
63235 goto nobufs;
63236@@ -695,7 +695,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63237 goto nobufs_unlock_dec;
63238 spin_unlock(&cookie->lock);
63239
63240- fscache_stat(&fscache_n_alloc_ops);
63241+ fscache_stat_unchecked(&fscache_n_alloc_ops);
63242
63243 ret = fscache_wait_for_operation_activation(
63244 object, &op->op,
63245@@ -712,11 +712,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
63246
63247 error:
63248 if (ret == -ERESTARTSYS)
63249- fscache_stat(&fscache_n_allocs_intr);
63250+ fscache_stat_unchecked(&fscache_n_allocs_intr);
63251 else if (ret < 0)
63252- fscache_stat(&fscache_n_allocs_nobufs);
63253+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63254 else
63255- fscache_stat(&fscache_n_allocs_ok);
63256+ fscache_stat_unchecked(&fscache_n_allocs_ok);
63257
63258 fscache_put_retrieval(op);
63259 _leave(" = %d", ret);
63260@@ -730,7 +730,7 @@ nobufs_unlock:
63261 if (wake_cookie)
63262 __fscache_wake_unused_cookie(cookie);
63263 nobufs:
63264- fscache_stat(&fscache_n_allocs_nobufs);
63265+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
63266 _leave(" = -ENOBUFS");
63267 return -ENOBUFS;
63268 }
63269@@ -806,7 +806,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63270
63271 spin_lock(&cookie->stores_lock);
63272
63273- fscache_stat(&fscache_n_store_calls);
63274+ fscache_stat_unchecked(&fscache_n_store_calls);
63275
63276 /* find a page to store */
63277 page = NULL;
63278@@ -817,7 +817,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63279 page = results[0];
63280 _debug("gang %d [%lx]", n, page->index);
63281 if (page->index > op->store_limit) {
63282- fscache_stat(&fscache_n_store_pages_over_limit);
63283+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
63284 goto superseded;
63285 }
63286
63287@@ -829,7 +829,7 @@ static void fscache_write_op(struct fscache_operation *_op)
63288 spin_unlock(&cookie->stores_lock);
63289 spin_unlock(&object->lock);
63290
63291- fscache_stat(&fscache_n_store_pages);
63292+ fscache_stat_unchecked(&fscache_n_store_pages);
63293 fscache_stat(&fscache_n_cop_write_page);
63294 ret = object->cache->ops->write_page(op, page);
63295 fscache_stat_d(&fscache_n_cop_write_page);
63296@@ -933,7 +933,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63297 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63298 ASSERT(PageFsCache(page));
63299
63300- fscache_stat(&fscache_n_stores);
63301+ fscache_stat_unchecked(&fscache_n_stores);
63302
63303 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
63304 _leave(" = -ENOBUFS [invalidating]");
63305@@ -992,7 +992,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63306 spin_unlock(&cookie->stores_lock);
63307 spin_unlock(&object->lock);
63308
63309- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
63310+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
63311 op->store_limit = object->store_limit;
63312
63313 __fscache_use_cookie(cookie);
63314@@ -1001,8 +1001,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63315
63316 spin_unlock(&cookie->lock);
63317 radix_tree_preload_end();
63318- fscache_stat(&fscache_n_store_ops);
63319- fscache_stat(&fscache_n_stores_ok);
63320+ fscache_stat_unchecked(&fscache_n_store_ops);
63321+ fscache_stat_unchecked(&fscache_n_stores_ok);
63322
63323 /* the work queue now carries its own ref on the object */
63324 fscache_put_operation(&op->op);
63325@@ -1010,14 +1010,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
63326 return 0;
63327
63328 already_queued:
63329- fscache_stat(&fscache_n_stores_again);
63330+ fscache_stat_unchecked(&fscache_n_stores_again);
63331 already_pending:
63332 spin_unlock(&cookie->stores_lock);
63333 spin_unlock(&object->lock);
63334 spin_unlock(&cookie->lock);
63335 radix_tree_preload_end();
63336 kfree(op);
63337- fscache_stat(&fscache_n_stores_ok);
63338+ fscache_stat_unchecked(&fscache_n_stores_ok);
63339 _leave(" = 0");
63340 return 0;
63341
63342@@ -1039,14 +1039,14 @@ nobufs:
63343 kfree(op);
63344 if (wake_cookie)
63345 __fscache_wake_unused_cookie(cookie);
63346- fscache_stat(&fscache_n_stores_nobufs);
63347+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
63348 _leave(" = -ENOBUFS");
63349 return -ENOBUFS;
63350
63351 nomem_free:
63352 kfree(op);
63353 nomem:
63354- fscache_stat(&fscache_n_stores_oom);
63355+ fscache_stat_unchecked(&fscache_n_stores_oom);
63356 _leave(" = -ENOMEM");
63357 return -ENOMEM;
63358 }
63359@@ -1064,7 +1064,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
63360 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
63361 ASSERTCMP(page, !=, NULL);
63362
63363- fscache_stat(&fscache_n_uncaches);
63364+ fscache_stat_unchecked(&fscache_n_uncaches);
63365
63366 /* cache withdrawal may beat us to it */
63367 if (!PageFsCache(page))
63368@@ -1115,7 +1115,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
63369 struct fscache_cookie *cookie = op->op.object->cookie;
63370
63371 #ifdef CONFIG_FSCACHE_STATS
63372- atomic_inc(&fscache_n_marks);
63373+ atomic_inc_unchecked(&fscache_n_marks);
63374 #endif
63375
63376 _debug("- mark %p{%lx}", page, page->index);
63377diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
63378index 40d13c7..ddf52b9 100644
63379--- a/fs/fscache/stats.c
63380+++ b/fs/fscache/stats.c
63381@@ -18,99 +18,99 @@
63382 /*
63383 * operation counters
63384 */
63385-atomic_t fscache_n_op_pend;
63386-atomic_t fscache_n_op_run;
63387-atomic_t fscache_n_op_enqueue;
63388-atomic_t fscache_n_op_requeue;
63389-atomic_t fscache_n_op_deferred_release;
63390-atomic_t fscache_n_op_release;
63391-atomic_t fscache_n_op_gc;
63392-atomic_t fscache_n_op_cancelled;
63393-atomic_t fscache_n_op_rejected;
63394+atomic_unchecked_t fscache_n_op_pend;
63395+atomic_unchecked_t fscache_n_op_run;
63396+atomic_unchecked_t fscache_n_op_enqueue;
63397+atomic_unchecked_t fscache_n_op_requeue;
63398+atomic_unchecked_t fscache_n_op_deferred_release;
63399+atomic_unchecked_t fscache_n_op_release;
63400+atomic_unchecked_t fscache_n_op_gc;
63401+atomic_unchecked_t fscache_n_op_cancelled;
63402+atomic_unchecked_t fscache_n_op_rejected;
63403
63404-atomic_t fscache_n_attr_changed;
63405-atomic_t fscache_n_attr_changed_ok;
63406-atomic_t fscache_n_attr_changed_nobufs;
63407-atomic_t fscache_n_attr_changed_nomem;
63408-atomic_t fscache_n_attr_changed_calls;
63409+atomic_unchecked_t fscache_n_attr_changed;
63410+atomic_unchecked_t fscache_n_attr_changed_ok;
63411+atomic_unchecked_t fscache_n_attr_changed_nobufs;
63412+atomic_unchecked_t fscache_n_attr_changed_nomem;
63413+atomic_unchecked_t fscache_n_attr_changed_calls;
63414
63415-atomic_t fscache_n_allocs;
63416-atomic_t fscache_n_allocs_ok;
63417-atomic_t fscache_n_allocs_wait;
63418-atomic_t fscache_n_allocs_nobufs;
63419-atomic_t fscache_n_allocs_intr;
63420-atomic_t fscache_n_allocs_object_dead;
63421-atomic_t fscache_n_alloc_ops;
63422-atomic_t fscache_n_alloc_op_waits;
63423+atomic_unchecked_t fscache_n_allocs;
63424+atomic_unchecked_t fscache_n_allocs_ok;
63425+atomic_unchecked_t fscache_n_allocs_wait;
63426+atomic_unchecked_t fscache_n_allocs_nobufs;
63427+atomic_unchecked_t fscache_n_allocs_intr;
63428+atomic_unchecked_t fscache_n_allocs_object_dead;
63429+atomic_unchecked_t fscache_n_alloc_ops;
63430+atomic_unchecked_t fscache_n_alloc_op_waits;
63431
63432-atomic_t fscache_n_retrievals;
63433-atomic_t fscache_n_retrievals_ok;
63434-atomic_t fscache_n_retrievals_wait;
63435-atomic_t fscache_n_retrievals_nodata;
63436-atomic_t fscache_n_retrievals_nobufs;
63437-atomic_t fscache_n_retrievals_intr;
63438-atomic_t fscache_n_retrievals_nomem;
63439-atomic_t fscache_n_retrievals_object_dead;
63440-atomic_t fscache_n_retrieval_ops;
63441-atomic_t fscache_n_retrieval_op_waits;
63442+atomic_unchecked_t fscache_n_retrievals;
63443+atomic_unchecked_t fscache_n_retrievals_ok;
63444+atomic_unchecked_t fscache_n_retrievals_wait;
63445+atomic_unchecked_t fscache_n_retrievals_nodata;
63446+atomic_unchecked_t fscache_n_retrievals_nobufs;
63447+atomic_unchecked_t fscache_n_retrievals_intr;
63448+atomic_unchecked_t fscache_n_retrievals_nomem;
63449+atomic_unchecked_t fscache_n_retrievals_object_dead;
63450+atomic_unchecked_t fscache_n_retrieval_ops;
63451+atomic_unchecked_t fscache_n_retrieval_op_waits;
63452
63453-atomic_t fscache_n_stores;
63454-atomic_t fscache_n_stores_ok;
63455-atomic_t fscache_n_stores_again;
63456-atomic_t fscache_n_stores_nobufs;
63457-atomic_t fscache_n_stores_oom;
63458-atomic_t fscache_n_store_ops;
63459-atomic_t fscache_n_store_calls;
63460-atomic_t fscache_n_store_pages;
63461-atomic_t fscache_n_store_radix_deletes;
63462-atomic_t fscache_n_store_pages_over_limit;
63463+atomic_unchecked_t fscache_n_stores;
63464+atomic_unchecked_t fscache_n_stores_ok;
63465+atomic_unchecked_t fscache_n_stores_again;
63466+atomic_unchecked_t fscache_n_stores_nobufs;
63467+atomic_unchecked_t fscache_n_stores_oom;
63468+atomic_unchecked_t fscache_n_store_ops;
63469+atomic_unchecked_t fscache_n_store_calls;
63470+atomic_unchecked_t fscache_n_store_pages;
63471+atomic_unchecked_t fscache_n_store_radix_deletes;
63472+atomic_unchecked_t fscache_n_store_pages_over_limit;
63473
63474-atomic_t fscache_n_store_vmscan_not_storing;
63475-atomic_t fscache_n_store_vmscan_gone;
63476-atomic_t fscache_n_store_vmscan_busy;
63477-atomic_t fscache_n_store_vmscan_cancelled;
63478-atomic_t fscache_n_store_vmscan_wait;
63479+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
63480+atomic_unchecked_t fscache_n_store_vmscan_gone;
63481+atomic_unchecked_t fscache_n_store_vmscan_busy;
63482+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
63483+atomic_unchecked_t fscache_n_store_vmscan_wait;
63484
63485-atomic_t fscache_n_marks;
63486-atomic_t fscache_n_uncaches;
63487+atomic_unchecked_t fscache_n_marks;
63488+atomic_unchecked_t fscache_n_uncaches;
63489
63490-atomic_t fscache_n_acquires;
63491-atomic_t fscache_n_acquires_null;
63492-atomic_t fscache_n_acquires_no_cache;
63493-atomic_t fscache_n_acquires_ok;
63494-atomic_t fscache_n_acquires_nobufs;
63495-atomic_t fscache_n_acquires_oom;
63496+atomic_unchecked_t fscache_n_acquires;
63497+atomic_unchecked_t fscache_n_acquires_null;
63498+atomic_unchecked_t fscache_n_acquires_no_cache;
63499+atomic_unchecked_t fscache_n_acquires_ok;
63500+atomic_unchecked_t fscache_n_acquires_nobufs;
63501+atomic_unchecked_t fscache_n_acquires_oom;
63502
63503-atomic_t fscache_n_invalidates;
63504-atomic_t fscache_n_invalidates_run;
63505+atomic_unchecked_t fscache_n_invalidates;
63506+atomic_unchecked_t fscache_n_invalidates_run;
63507
63508-atomic_t fscache_n_updates;
63509-atomic_t fscache_n_updates_null;
63510-atomic_t fscache_n_updates_run;
63511+atomic_unchecked_t fscache_n_updates;
63512+atomic_unchecked_t fscache_n_updates_null;
63513+atomic_unchecked_t fscache_n_updates_run;
63514
63515-atomic_t fscache_n_relinquishes;
63516-atomic_t fscache_n_relinquishes_null;
63517-atomic_t fscache_n_relinquishes_waitcrt;
63518-atomic_t fscache_n_relinquishes_retire;
63519+atomic_unchecked_t fscache_n_relinquishes;
63520+atomic_unchecked_t fscache_n_relinquishes_null;
63521+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
63522+atomic_unchecked_t fscache_n_relinquishes_retire;
63523
63524-atomic_t fscache_n_cookie_index;
63525-atomic_t fscache_n_cookie_data;
63526-atomic_t fscache_n_cookie_special;
63527+atomic_unchecked_t fscache_n_cookie_index;
63528+atomic_unchecked_t fscache_n_cookie_data;
63529+atomic_unchecked_t fscache_n_cookie_special;
63530
63531-atomic_t fscache_n_object_alloc;
63532-atomic_t fscache_n_object_no_alloc;
63533-atomic_t fscache_n_object_lookups;
63534-atomic_t fscache_n_object_lookups_negative;
63535-atomic_t fscache_n_object_lookups_positive;
63536-atomic_t fscache_n_object_lookups_timed_out;
63537-atomic_t fscache_n_object_created;
63538-atomic_t fscache_n_object_avail;
63539-atomic_t fscache_n_object_dead;
63540+atomic_unchecked_t fscache_n_object_alloc;
63541+atomic_unchecked_t fscache_n_object_no_alloc;
63542+atomic_unchecked_t fscache_n_object_lookups;
63543+atomic_unchecked_t fscache_n_object_lookups_negative;
63544+atomic_unchecked_t fscache_n_object_lookups_positive;
63545+atomic_unchecked_t fscache_n_object_lookups_timed_out;
63546+atomic_unchecked_t fscache_n_object_created;
63547+atomic_unchecked_t fscache_n_object_avail;
63548+atomic_unchecked_t fscache_n_object_dead;
63549
63550-atomic_t fscache_n_checkaux_none;
63551-atomic_t fscache_n_checkaux_okay;
63552-atomic_t fscache_n_checkaux_update;
63553-atomic_t fscache_n_checkaux_obsolete;
63554+atomic_unchecked_t fscache_n_checkaux_none;
63555+atomic_unchecked_t fscache_n_checkaux_okay;
63556+atomic_unchecked_t fscache_n_checkaux_update;
63557+atomic_unchecked_t fscache_n_checkaux_obsolete;
63558
63559 atomic_t fscache_n_cop_alloc_object;
63560 atomic_t fscache_n_cop_lookup_object;
63561@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
63562 seq_puts(m, "FS-Cache statistics\n");
63563
63564 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
63565- atomic_read(&fscache_n_cookie_index),
63566- atomic_read(&fscache_n_cookie_data),
63567- atomic_read(&fscache_n_cookie_special));
63568+ atomic_read_unchecked(&fscache_n_cookie_index),
63569+ atomic_read_unchecked(&fscache_n_cookie_data),
63570+ atomic_read_unchecked(&fscache_n_cookie_special));
63571
63572 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
63573- atomic_read(&fscache_n_object_alloc),
63574- atomic_read(&fscache_n_object_no_alloc),
63575- atomic_read(&fscache_n_object_avail),
63576- atomic_read(&fscache_n_object_dead));
63577+ atomic_read_unchecked(&fscache_n_object_alloc),
63578+ atomic_read_unchecked(&fscache_n_object_no_alloc),
63579+ atomic_read_unchecked(&fscache_n_object_avail),
63580+ atomic_read_unchecked(&fscache_n_object_dead));
63581 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
63582- atomic_read(&fscache_n_checkaux_none),
63583- atomic_read(&fscache_n_checkaux_okay),
63584- atomic_read(&fscache_n_checkaux_update),
63585- atomic_read(&fscache_n_checkaux_obsolete));
63586+ atomic_read_unchecked(&fscache_n_checkaux_none),
63587+ atomic_read_unchecked(&fscache_n_checkaux_okay),
63588+ atomic_read_unchecked(&fscache_n_checkaux_update),
63589+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
63590
63591 seq_printf(m, "Pages : mrk=%u unc=%u\n",
63592- atomic_read(&fscache_n_marks),
63593- atomic_read(&fscache_n_uncaches));
63594+ atomic_read_unchecked(&fscache_n_marks),
63595+ atomic_read_unchecked(&fscache_n_uncaches));
63596
63597 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
63598 " oom=%u\n",
63599- atomic_read(&fscache_n_acquires),
63600- atomic_read(&fscache_n_acquires_null),
63601- atomic_read(&fscache_n_acquires_no_cache),
63602- atomic_read(&fscache_n_acquires_ok),
63603- atomic_read(&fscache_n_acquires_nobufs),
63604- atomic_read(&fscache_n_acquires_oom));
63605+ atomic_read_unchecked(&fscache_n_acquires),
63606+ atomic_read_unchecked(&fscache_n_acquires_null),
63607+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
63608+ atomic_read_unchecked(&fscache_n_acquires_ok),
63609+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
63610+ atomic_read_unchecked(&fscache_n_acquires_oom));
63611
63612 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
63613- atomic_read(&fscache_n_object_lookups),
63614- atomic_read(&fscache_n_object_lookups_negative),
63615- atomic_read(&fscache_n_object_lookups_positive),
63616- atomic_read(&fscache_n_object_created),
63617- atomic_read(&fscache_n_object_lookups_timed_out));
63618+ atomic_read_unchecked(&fscache_n_object_lookups),
63619+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
63620+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
63621+ atomic_read_unchecked(&fscache_n_object_created),
63622+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
63623
63624 seq_printf(m, "Invals : n=%u run=%u\n",
63625- atomic_read(&fscache_n_invalidates),
63626- atomic_read(&fscache_n_invalidates_run));
63627+ atomic_read_unchecked(&fscache_n_invalidates),
63628+ atomic_read_unchecked(&fscache_n_invalidates_run));
63629
63630 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
63631- atomic_read(&fscache_n_updates),
63632- atomic_read(&fscache_n_updates_null),
63633- atomic_read(&fscache_n_updates_run));
63634+ atomic_read_unchecked(&fscache_n_updates),
63635+ atomic_read_unchecked(&fscache_n_updates_null),
63636+ atomic_read_unchecked(&fscache_n_updates_run));
63637
63638 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
63639- atomic_read(&fscache_n_relinquishes),
63640- atomic_read(&fscache_n_relinquishes_null),
63641- atomic_read(&fscache_n_relinquishes_waitcrt),
63642- atomic_read(&fscache_n_relinquishes_retire));
63643+ atomic_read_unchecked(&fscache_n_relinquishes),
63644+ atomic_read_unchecked(&fscache_n_relinquishes_null),
63645+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
63646+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
63647
63648 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
63649- atomic_read(&fscache_n_attr_changed),
63650- atomic_read(&fscache_n_attr_changed_ok),
63651- atomic_read(&fscache_n_attr_changed_nobufs),
63652- atomic_read(&fscache_n_attr_changed_nomem),
63653- atomic_read(&fscache_n_attr_changed_calls));
63654+ atomic_read_unchecked(&fscache_n_attr_changed),
63655+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
63656+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
63657+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
63658+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
63659
63660 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
63661- atomic_read(&fscache_n_allocs),
63662- atomic_read(&fscache_n_allocs_ok),
63663- atomic_read(&fscache_n_allocs_wait),
63664- atomic_read(&fscache_n_allocs_nobufs),
63665- atomic_read(&fscache_n_allocs_intr));
63666+ atomic_read_unchecked(&fscache_n_allocs),
63667+ atomic_read_unchecked(&fscache_n_allocs_ok),
63668+ atomic_read_unchecked(&fscache_n_allocs_wait),
63669+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
63670+ atomic_read_unchecked(&fscache_n_allocs_intr));
63671 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
63672- atomic_read(&fscache_n_alloc_ops),
63673- atomic_read(&fscache_n_alloc_op_waits),
63674- atomic_read(&fscache_n_allocs_object_dead));
63675+ atomic_read_unchecked(&fscache_n_alloc_ops),
63676+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
63677+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
63678
63679 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
63680 " int=%u oom=%u\n",
63681- atomic_read(&fscache_n_retrievals),
63682- atomic_read(&fscache_n_retrievals_ok),
63683- atomic_read(&fscache_n_retrievals_wait),
63684- atomic_read(&fscache_n_retrievals_nodata),
63685- atomic_read(&fscache_n_retrievals_nobufs),
63686- atomic_read(&fscache_n_retrievals_intr),
63687- atomic_read(&fscache_n_retrievals_nomem));
63688+ atomic_read_unchecked(&fscache_n_retrievals),
63689+ atomic_read_unchecked(&fscache_n_retrievals_ok),
63690+ atomic_read_unchecked(&fscache_n_retrievals_wait),
63691+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
63692+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
63693+ atomic_read_unchecked(&fscache_n_retrievals_intr),
63694+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
63695 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
63696- atomic_read(&fscache_n_retrieval_ops),
63697- atomic_read(&fscache_n_retrieval_op_waits),
63698- atomic_read(&fscache_n_retrievals_object_dead));
63699+ atomic_read_unchecked(&fscache_n_retrieval_ops),
63700+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
63701+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
63702
63703 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
63704- atomic_read(&fscache_n_stores),
63705- atomic_read(&fscache_n_stores_ok),
63706- atomic_read(&fscache_n_stores_again),
63707- atomic_read(&fscache_n_stores_nobufs),
63708- atomic_read(&fscache_n_stores_oom));
63709+ atomic_read_unchecked(&fscache_n_stores),
63710+ atomic_read_unchecked(&fscache_n_stores_ok),
63711+ atomic_read_unchecked(&fscache_n_stores_again),
63712+ atomic_read_unchecked(&fscache_n_stores_nobufs),
63713+ atomic_read_unchecked(&fscache_n_stores_oom));
63714 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
63715- atomic_read(&fscache_n_store_ops),
63716- atomic_read(&fscache_n_store_calls),
63717- atomic_read(&fscache_n_store_pages),
63718- atomic_read(&fscache_n_store_radix_deletes),
63719- atomic_read(&fscache_n_store_pages_over_limit));
63720+ atomic_read_unchecked(&fscache_n_store_ops),
63721+ atomic_read_unchecked(&fscache_n_store_calls),
63722+ atomic_read_unchecked(&fscache_n_store_pages),
63723+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
63724+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
63725
63726 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
63727- atomic_read(&fscache_n_store_vmscan_not_storing),
63728- atomic_read(&fscache_n_store_vmscan_gone),
63729- atomic_read(&fscache_n_store_vmscan_busy),
63730- atomic_read(&fscache_n_store_vmscan_cancelled),
63731- atomic_read(&fscache_n_store_vmscan_wait));
63732+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
63733+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
63734+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
63735+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
63736+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
63737
63738 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
63739- atomic_read(&fscache_n_op_pend),
63740- atomic_read(&fscache_n_op_run),
63741- atomic_read(&fscache_n_op_enqueue),
63742- atomic_read(&fscache_n_op_cancelled),
63743- atomic_read(&fscache_n_op_rejected));
63744+ atomic_read_unchecked(&fscache_n_op_pend),
63745+ atomic_read_unchecked(&fscache_n_op_run),
63746+ atomic_read_unchecked(&fscache_n_op_enqueue),
63747+ atomic_read_unchecked(&fscache_n_op_cancelled),
63748+ atomic_read_unchecked(&fscache_n_op_rejected));
63749 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
63750- atomic_read(&fscache_n_op_deferred_release),
63751- atomic_read(&fscache_n_op_release),
63752- atomic_read(&fscache_n_op_gc));
63753+ atomic_read_unchecked(&fscache_n_op_deferred_release),
63754+ atomic_read_unchecked(&fscache_n_op_release),
63755+ atomic_read_unchecked(&fscache_n_op_gc));
63756
63757 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
63758 atomic_read(&fscache_n_cop_alloc_object),
63759diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
63760index 966ace8..030a03a 100644
63761--- a/fs/fuse/cuse.c
63762+++ b/fs/fuse/cuse.c
63763@@ -611,10 +611,12 @@ static int __init cuse_init(void)
63764 INIT_LIST_HEAD(&cuse_conntbl[i]);
63765
63766 /* inherit and extend fuse_dev_operations */
63767- cuse_channel_fops = fuse_dev_operations;
63768- cuse_channel_fops.owner = THIS_MODULE;
63769- cuse_channel_fops.open = cuse_channel_open;
63770- cuse_channel_fops.release = cuse_channel_release;
63771+ pax_open_kernel();
63772+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
63773+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
63774+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
63775+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
63776+ pax_close_kernel();
63777
63778 cuse_class = class_create(THIS_MODULE, "cuse");
63779 if (IS_ERR(cuse_class))
63780diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
63781index ca88731..8e9c55d 100644
63782--- a/fs/fuse/dev.c
63783+++ b/fs/fuse/dev.c
63784@@ -1318,7 +1318,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63785 ret = 0;
63786 pipe_lock(pipe);
63787
63788- if (!pipe->readers) {
63789+ if (!atomic_read(&pipe->readers)) {
63790 send_sig(SIGPIPE, current, 0);
63791 if (!ret)
63792 ret = -EPIPE;
63793@@ -1347,7 +1347,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
63794 page_nr++;
63795 ret += buf->len;
63796
63797- if (pipe->files)
63798+ if (atomic_read(&pipe->files))
63799 do_wakeup = 1;
63800 }
63801
63802diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
63803index de1d84a..fd69c0c 100644
63804--- a/fs/fuse/dir.c
63805+++ b/fs/fuse/dir.c
63806@@ -1479,7 +1479,7 @@ static char *read_link(struct dentry *dentry)
63807 return link;
63808 }
63809
63810-static void free_link(char *link)
63811+static void free_link(const char *link)
63812 {
63813 if (!IS_ERR(link))
63814 free_page((unsigned long) link);
63815diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
63816index fd62cae..3494dfa 100644
63817--- a/fs/hostfs/hostfs_kern.c
63818+++ b/fs/hostfs/hostfs_kern.c
63819@@ -908,7 +908,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
63820
63821 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
63822 {
63823- char *s = nd_get_link(nd);
63824+ const char *s = nd_get_link(nd);
63825 if (!IS_ERR(s))
63826 __putname(s);
63827 }
63828diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
63829index 1e2872b..7aea000 100644
63830--- a/fs/hugetlbfs/inode.c
63831+++ b/fs/hugetlbfs/inode.c
63832@@ -154,6 +154,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63833 struct mm_struct *mm = current->mm;
63834 struct vm_area_struct *vma;
63835 struct hstate *h = hstate_file(file);
63836+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
63837 struct vm_unmapped_area_info info;
63838
63839 if (len & ~huge_page_mask(h))
63840@@ -167,17 +168,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
63841 return addr;
63842 }
63843
63844+#ifdef CONFIG_PAX_RANDMMAP
63845+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
63846+#endif
63847+
63848 if (addr) {
63849 addr = ALIGN(addr, huge_page_size(h));
63850 vma = find_vma(mm, addr);
63851- if (TASK_SIZE - len >= addr &&
63852- (!vma || addr + len <= vma->vm_start))
63853+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
63854 return addr;
63855 }
63856
63857 info.flags = 0;
63858 info.length = len;
63859 info.low_limit = TASK_UNMAPPED_BASE;
63860+
63861+#ifdef CONFIG_PAX_RANDMMAP
63862+ if (mm->pax_flags & MF_PAX_RANDMMAP)
63863+ info.low_limit += mm->delta_mmap;
63864+#endif
63865+
63866 info.high_limit = TASK_SIZE;
63867 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
63868 info.align_offset = 0;
63869@@ -919,7 +929,7 @@ static struct file_system_type hugetlbfs_fs_type = {
63870 };
63871 MODULE_ALIAS_FS("hugetlbfs");
63872
63873-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63874+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63875
63876 static int can_do_hugetlb_shm(void)
63877 {
63878diff --git a/fs/inode.c b/fs/inode.c
63879index 26753ba..d19eb34 100644
63880--- a/fs/inode.c
63881+++ b/fs/inode.c
63882@@ -840,16 +840,20 @@ unsigned int get_next_ino(void)
63883 unsigned int *p = &get_cpu_var(last_ino);
63884 unsigned int res = *p;
63885
63886+start:
63887+
63888 #ifdef CONFIG_SMP
63889 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
63890- static atomic_t shared_last_ino;
63891- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
63892+ static atomic_unchecked_t shared_last_ino;
63893+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
63894
63895 res = next - LAST_INO_BATCH;
63896 }
63897 #endif
63898
63899- *p = ++res;
63900+ if (unlikely(!++res))
63901+ goto start; /* never zero */
63902+ *p = res;
63903 put_cpu_var(last_ino);
63904 return res;
63905 }
63906diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
63907index 4a6cf28..d3a29d3 100644
63908--- a/fs/jffs2/erase.c
63909+++ b/fs/jffs2/erase.c
63910@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
63911 struct jffs2_unknown_node marker = {
63912 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
63913 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63914- .totlen = cpu_to_je32(c->cleanmarker_size)
63915+ .totlen = cpu_to_je32(c->cleanmarker_size),
63916+ .hdr_crc = cpu_to_je32(0)
63917 };
63918
63919 jffs2_prealloc_raw_node_refs(c, jeb, 1);
63920diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
63921index 09ed551..45684f8 100644
63922--- a/fs/jffs2/wbuf.c
63923+++ b/fs/jffs2/wbuf.c
63924@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
63925 {
63926 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
63927 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
63928- .totlen = constant_cpu_to_je32(8)
63929+ .totlen = constant_cpu_to_je32(8),
63930+ .hdr_crc = constant_cpu_to_je32(0)
63931 };
63932
63933 /*
63934diff --git a/fs/jfs/super.c b/fs/jfs/super.c
63935index adf8cb0..bb935fa 100644
63936--- a/fs/jfs/super.c
63937+++ b/fs/jfs/super.c
63938@@ -893,7 +893,7 @@ static int __init init_jfs_fs(void)
63939
63940 jfs_inode_cachep =
63941 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
63942- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
63943+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
63944 init_once);
63945 if (jfs_inode_cachep == NULL)
63946 return -ENOMEM;
63947diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
63948index a693f5b..82276a1 100644
63949--- a/fs/kernfs/dir.c
63950+++ b/fs/kernfs/dir.c
63951@@ -182,7 +182,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
63952 *
63953 * Returns 31 bit hash of ns + name (so it fits in an off_t )
63954 */
63955-static unsigned int kernfs_name_hash(const char *name, const void *ns)
63956+static unsigned int kernfs_name_hash(const unsigned char *name, const void *ns)
63957 {
63958 unsigned long hash = init_name_hash();
63959 unsigned int len = strlen(name);
63960diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
63961index 4429d6d..9831f52 100644
63962--- a/fs/kernfs/file.c
63963+++ b/fs/kernfs/file.c
63964@@ -34,7 +34,7 @@ static DEFINE_MUTEX(kernfs_open_file_mutex);
63965
63966 struct kernfs_open_node {
63967 atomic_t refcnt;
63968- atomic_t event;
63969+ atomic_unchecked_t event;
63970 wait_queue_head_t poll;
63971 struct list_head files; /* goes through kernfs_open_file.list */
63972 };
63973@@ -163,7 +163,7 @@ static int kernfs_seq_show(struct seq_file *sf, void *v)
63974 {
63975 struct kernfs_open_file *of = sf->private;
63976
63977- of->event = atomic_read(&of->kn->attr.open->event);
63978+ of->event = atomic_read_unchecked(&of->kn->attr.open->event);
63979
63980 return of->kn->attr.ops->seq_show(sf, v);
63981 }
63982@@ -375,12 +375,12 @@ static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
63983 return ret;
63984 }
63985
63986-static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63987- void *buf, int len, int write)
63988+static ssize_t kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
63989+ void *buf, size_t len, int write)
63990 {
63991 struct file *file = vma->vm_file;
63992 struct kernfs_open_file *of = kernfs_of(file);
63993- int ret;
63994+ ssize_t ret;
63995
63996 if (!of->vm_ops)
63997 return -EINVAL;
63998@@ -581,7 +581,7 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
63999 return -ENOMEM;
64000
64001 atomic_set(&new_on->refcnt, 0);
64002- atomic_set(&new_on->event, 1);
64003+ atomic_set_unchecked(&new_on->event, 1);
64004 init_waitqueue_head(&new_on->poll);
64005 INIT_LIST_HEAD(&new_on->files);
64006 goto retry;
64007@@ -787,7 +787,7 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
64008
64009 kernfs_put_active(kn);
64010
64011- if (of->event != atomic_read(&on->event))
64012+ if (of->event != atomic_read_unchecked(&on->event))
64013 goto trigger;
64014
64015 return DEFAULT_POLLMASK;
64016@@ -818,7 +818,7 @@ repeat:
64017
64018 on = kn->attr.open;
64019 if (on) {
64020- atomic_inc(&on->event);
64021+ atomic_inc_unchecked(&on->event);
64022 wake_up_interruptible(&on->poll);
64023 }
64024
64025diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
64026index 8a19889..4c3069a 100644
64027--- a/fs/kernfs/symlink.c
64028+++ b/fs/kernfs/symlink.c
64029@@ -128,7 +128,7 @@ static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
64030 static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
64031 void *cookie)
64032 {
64033- char *page = nd_get_link(nd);
64034+ const char *page = nd_get_link(nd);
64035 if (!IS_ERR(page))
64036 free_page((unsigned long)page);
64037 }
64038diff --git a/fs/libfs.c b/fs/libfs.c
64039index 88e3e00..979c262 100644
64040--- a/fs/libfs.c
64041+++ b/fs/libfs.c
64042@@ -160,6 +160,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64043
64044 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
64045 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
64046+ char d_name[sizeof(next->d_iname)];
64047+ const unsigned char *name;
64048+
64049 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
64050 if (!simple_positive(next)) {
64051 spin_unlock(&next->d_lock);
64052@@ -168,7 +171,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
64053
64054 spin_unlock(&next->d_lock);
64055 spin_unlock(&dentry->d_lock);
64056- if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
64057+ name = next->d_name.name;
64058+ if (name == next->d_iname) {
64059+ memcpy(d_name, name, next->d_name.len);
64060+ name = d_name;
64061+ }
64062+ if (!dir_emit(ctx, name, next->d_name.len,
64063 next->d_inode->i_ino, dt_type(next->d_inode)))
64064 return 0;
64065 spin_lock(&dentry->d_lock);
64066@@ -1027,7 +1035,7 @@ EXPORT_SYMBOL(noop_fsync);
64067 void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
64068 void *cookie)
64069 {
64070- char *s = nd_get_link(nd);
64071+ const char *s = nd_get_link(nd);
64072 if (!IS_ERR(s))
64073 kfree(s);
64074 }
64075diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
64076index acd3947..1f896e2 100644
64077--- a/fs/lockd/clntproc.c
64078+++ b/fs/lockd/clntproc.c
64079@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
64080 /*
64081 * Cookie counter for NLM requests
64082 */
64083-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
64084+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
64085
64086 void nlmclnt_next_cookie(struct nlm_cookie *c)
64087 {
64088- u32 cookie = atomic_inc_return(&nlm_cookie);
64089+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
64090
64091 memcpy(c->data, &cookie, 4);
64092 c->len=4;
64093diff --git a/fs/locks.c b/fs/locks.c
64094index bb08857..f65e8bf 100644
64095--- a/fs/locks.c
64096+++ b/fs/locks.c
64097@@ -2350,7 +2350,7 @@ void locks_remove_file(struct file *filp)
64098 locks_remove_posix(filp, filp);
64099
64100 if (filp->f_op->flock) {
64101- struct file_lock fl = {
64102+ struct file_lock flock = {
64103 .fl_owner = filp,
64104 .fl_pid = current->tgid,
64105 .fl_file = filp,
64106@@ -2358,9 +2358,9 @@ void locks_remove_file(struct file *filp)
64107 .fl_type = F_UNLCK,
64108 .fl_end = OFFSET_MAX,
64109 };
64110- filp->f_op->flock(filp, F_SETLKW, &fl);
64111- if (fl.fl_ops && fl.fl_ops->fl_release_private)
64112- fl.fl_ops->fl_release_private(&fl);
64113+ filp->f_op->flock(filp, F_SETLKW, &flock);
64114+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
64115+ flock.fl_ops->fl_release_private(&flock);
64116 }
64117
64118 spin_lock(&inode->i_lock);
64119diff --git a/fs/mount.h b/fs/mount.h
64120index 6740a62..ccb472f 100644
64121--- a/fs/mount.h
64122+++ b/fs/mount.h
64123@@ -11,7 +11,7 @@ struct mnt_namespace {
64124 u64 seq; /* Sequence number to prevent loops */
64125 wait_queue_head_t poll;
64126 u64 event;
64127-};
64128+} __randomize_layout;
64129
64130 struct mnt_pcp {
64131 int mnt_count;
64132@@ -57,7 +57,7 @@ struct mount {
64133 int mnt_expiry_mark; /* true if marked for expiry */
64134 struct hlist_head mnt_pins;
64135 struct path mnt_ex_mountpoint;
64136-};
64137+} __randomize_layout;
64138
64139 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
64140
64141diff --git a/fs/namei.c b/fs/namei.c
64142index bb02687..79cba2c 100644
64143--- a/fs/namei.c
64144+++ b/fs/namei.c
64145@@ -331,17 +331,32 @@ int generic_permission(struct inode *inode, int mask)
64146 if (ret != -EACCES)
64147 return ret;
64148
64149+#ifdef CONFIG_GRKERNSEC
64150+ /* we'll block if we have to log due to a denied capability use */
64151+ if (mask & MAY_NOT_BLOCK)
64152+ return -ECHILD;
64153+#endif
64154+
64155 if (S_ISDIR(inode->i_mode)) {
64156 /* DACs are overridable for directories */
64157- if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64158- return 0;
64159 if (!(mask & MAY_WRITE))
64160- if (capable_wrt_inode_uidgid(inode,
64161- CAP_DAC_READ_SEARCH))
64162+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64163+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64164 return 0;
64165+ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64166+ return 0;
64167 return -EACCES;
64168 }
64169 /*
64170+ * Searching includes executable on directories, else just read.
64171+ */
64172+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64173+ if (mask == MAY_READ)
64174+ if (capable_wrt_inode_uidgid_nolog(inode, CAP_DAC_OVERRIDE) ||
64175+ capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64176+ return 0;
64177+
64178+ /*
64179 * Read/write DACs are always overridable.
64180 * Executable DACs are overridable when there is
64181 * at least one exec bit set.
64182@@ -350,14 +365,6 @@ int generic_permission(struct inode *inode, int mask)
64183 if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
64184 return 0;
64185
64186- /*
64187- * Searching includes executable on directories, else just read.
64188- */
64189- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
64190- if (mask == MAY_READ)
64191- if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
64192- return 0;
64193-
64194 return -EACCES;
64195 }
64196 EXPORT_SYMBOL(generic_permission);
64197@@ -823,7 +830,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64198 {
64199 struct dentry *dentry = link->dentry;
64200 int error;
64201- char *s;
64202+ const char *s;
64203
64204 BUG_ON(nd->flags & LOOKUP_RCU);
64205
64206@@ -844,6 +851,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
64207 if (error)
64208 goto out_put_nd_path;
64209
64210+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
64211+ dentry->d_inode, dentry, nd->path.mnt)) {
64212+ error = -EACCES;
64213+ goto out_put_nd_path;
64214+ }
64215+
64216 nd->last_type = LAST_BIND;
64217 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
64218 error = PTR_ERR(*p);
64219@@ -1607,6 +1620,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
64220 if (res)
64221 break;
64222 res = walk_component(nd, path, LOOKUP_FOLLOW);
64223+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
64224+ res = -EACCES;
64225 put_link(nd, &link, cookie);
64226 } while (res > 0);
64227
64228@@ -1679,7 +1694,7 @@ EXPORT_SYMBOL(full_name_hash);
64229 static inline u64 hash_name(const char *name)
64230 {
64231 unsigned long a, b, adata, bdata, mask, hash, len;
64232- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64233+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
64234
64235 hash = a = 0;
64236 len = -sizeof(unsigned long);
64237@@ -1968,6 +1983,8 @@ static int path_lookupat(int dfd, const char *name,
64238 if (err)
64239 break;
64240 err = lookup_last(nd, &path);
64241+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
64242+ err = -EACCES;
64243 put_link(nd, &link, cookie);
64244 }
64245 }
64246@@ -1975,6 +1992,13 @@ static int path_lookupat(int dfd, const char *name,
64247 if (!err)
64248 err = complete_walk(nd);
64249
64250+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
64251+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64252+ path_put(&nd->path);
64253+ err = -ENOENT;
64254+ }
64255+ }
64256+
64257 if (!err && nd->flags & LOOKUP_DIRECTORY) {
64258 if (!d_can_lookup(nd->path.dentry)) {
64259 path_put(&nd->path);
64260@@ -2002,8 +2026,15 @@ static int filename_lookup(int dfd, struct filename *name,
64261 retval = path_lookupat(dfd, name->name,
64262 flags | LOOKUP_REVAL, nd);
64263
64264- if (likely(!retval))
64265+ if (likely(!retval)) {
64266 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
64267+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
64268+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
64269+ path_put(&nd->path);
64270+ return -ENOENT;
64271+ }
64272+ }
64273+ }
64274 return retval;
64275 }
64276
64277@@ -2585,6 +2616,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
64278 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
64279 return -EPERM;
64280
64281+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
64282+ return -EPERM;
64283+ if (gr_handle_rawio(inode))
64284+ return -EPERM;
64285+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
64286+ return -EACCES;
64287+
64288 return 0;
64289 }
64290
64291@@ -2816,7 +2854,7 @@ looked_up:
64292 * cleared otherwise prior to returning.
64293 */
64294 static int lookup_open(struct nameidata *nd, struct path *path,
64295- struct file *file,
64296+ struct path *link, struct file *file,
64297 const struct open_flags *op,
64298 bool got_write, int *opened)
64299 {
64300@@ -2851,6 +2889,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64301 /* Negative dentry, just create the file */
64302 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
64303 umode_t mode = op->mode;
64304+
64305+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
64306+ error = -EACCES;
64307+ goto out_dput;
64308+ }
64309+
64310+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
64311+ error = -EACCES;
64312+ goto out_dput;
64313+ }
64314+
64315 if (!IS_POSIXACL(dir->d_inode))
64316 mode &= ~current_umask();
64317 /*
64318@@ -2872,6 +2921,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
64319 nd->flags & LOOKUP_EXCL);
64320 if (error)
64321 goto out_dput;
64322+ else
64323+ gr_handle_create(dentry, nd->path.mnt);
64324 }
64325 out_no_open:
64326 path->dentry = dentry;
64327@@ -2886,7 +2937,7 @@ out_dput:
64328 /*
64329 * Handle the last step of open()
64330 */
64331-static int do_last(struct nameidata *nd, struct path *path,
64332+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
64333 struct file *file, const struct open_flags *op,
64334 int *opened, struct filename *name)
64335 {
64336@@ -2936,6 +2987,15 @@ static int do_last(struct nameidata *nd, struct path *path,
64337 if (error)
64338 return error;
64339
64340+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
64341+ error = -ENOENT;
64342+ goto out;
64343+ }
64344+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64345+ error = -EACCES;
64346+ goto out;
64347+ }
64348+
64349 audit_inode(name, dir, LOOKUP_PARENT);
64350 error = -EISDIR;
64351 /* trailing slashes? */
64352@@ -2955,7 +3015,7 @@ retry_lookup:
64353 */
64354 }
64355 mutex_lock(&dir->d_inode->i_mutex);
64356- error = lookup_open(nd, path, file, op, got_write, opened);
64357+ error = lookup_open(nd, path, link, file, op, got_write, opened);
64358 mutex_unlock(&dir->d_inode->i_mutex);
64359
64360 if (error <= 0) {
64361@@ -2979,11 +3039,28 @@ retry_lookup:
64362 goto finish_open_created;
64363 }
64364
64365+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
64366+ error = -ENOENT;
64367+ goto exit_dput;
64368+ }
64369+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
64370+ error = -EACCES;
64371+ goto exit_dput;
64372+ }
64373+
64374 /*
64375 * create/update audit record if it already exists.
64376 */
64377- if (d_is_positive(path->dentry))
64378+ if (d_is_positive(path->dentry)) {
64379+ /* only check if O_CREAT is specified, all other checks need to go
64380+ into may_open */
64381+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
64382+ error = -EACCES;
64383+ goto exit_dput;
64384+ }
64385+
64386 audit_inode(name, path->dentry, 0);
64387+ }
64388
64389 /*
64390 * If atomic_open() acquired write access it is dropped now due to
64391@@ -3024,6 +3101,11 @@ finish_lookup:
64392 }
64393 }
64394 BUG_ON(inode != path->dentry->d_inode);
64395+ /* if we're resolving a symlink to another symlink */
64396+ if (link && gr_handle_symlink_owner(link, inode)) {
64397+ error = -EACCES;
64398+ goto out;
64399+ }
64400 return 1;
64401 }
64402
64403@@ -3033,7 +3115,6 @@ finish_lookup:
64404 save_parent.dentry = nd->path.dentry;
64405 save_parent.mnt = mntget(path->mnt);
64406 nd->path.dentry = path->dentry;
64407-
64408 }
64409 nd->inode = inode;
64410 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
64411@@ -3043,7 +3124,18 @@ finish_open:
64412 path_put(&save_parent);
64413 return error;
64414 }
64415+
64416+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
64417+ error = -ENOENT;
64418+ goto out;
64419+ }
64420+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
64421+ error = -EACCES;
64422+ goto out;
64423+ }
64424+
64425 audit_inode(name, nd->path.dentry, 0);
64426+
64427 error = -EISDIR;
64428 if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
64429 goto out;
64430@@ -3207,7 +3299,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64431 if (unlikely(error))
64432 goto out;
64433
64434- error = do_last(nd, &path, file, op, &opened, pathname);
64435+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
64436 while (unlikely(error > 0)) { /* trailing symlink */
64437 struct path link = path;
64438 void *cookie;
64439@@ -3225,7 +3317,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
64440 error = follow_link(&link, nd, &cookie);
64441 if (unlikely(error))
64442 break;
64443- error = do_last(nd, &path, file, op, &opened, pathname);
64444+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
64445 put_link(nd, &link, cookie);
64446 }
64447 out:
64448@@ -3325,9 +3417,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
64449 goto unlock;
64450
64451 error = -EEXIST;
64452- if (d_is_positive(dentry))
64453+ if (d_is_positive(dentry)) {
64454+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt))
64455+ error = -ENOENT;
64456 goto fail;
64457-
64458+ }
64459 /*
64460 * Special case - lookup gave negative, but... we had foo/bar/
64461 * From the vfs_mknod() POV we just have a negative dentry -
64462@@ -3379,6 +3473,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
64463 }
64464 EXPORT_SYMBOL(user_path_create);
64465
64466+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
64467+{
64468+ struct filename *tmp = getname(pathname);
64469+ struct dentry *res;
64470+ if (IS_ERR(tmp))
64471+ return ERR_CAST(tmp);
64472+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
64473+ if (IS_ERR(res))
64474+ putname(tmp);
64475+ else
64476+ *to = tmp;
64477+ return res;
64478+}
64479+
64480 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
64481 {
64482 int error = may_create(dir, dentry);
64483@@ -3442,6 +3550,17 @@ retry:
64484
64485 if (!IS_POSIXACL(path.dentry->d_inode))
64486 mode &= ~current_umask();
64487+
64488+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
64489+ error = -EPERM;
64490+ goto out;
64491+ }
64492+
64493+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
64494+ error = -EACCES;
64495+ goto out;
64496+ }
64497+
64498 error = security_path_mknod(&path, dentry, mode, dev);
64499 if (error)
64500 goto out;
64501@@ -3457,6 +3576,8 @@ retry:
64502 error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
64503 break;
64504 }
64505+ if (!error)
64506+ gr_handle_create(dentry, path.mnt);
64507 out:
64508 done_path_create(&path, dentry);
64509 if (retry_estale(error, lookup_flags)) {
64510@@ -3511,9 +3632,16 @@ retry:
64511
64512 if (!IS_POSIXACL(path.dentry->d_inode))
64513 mode &= ~current_umask();
64514+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
64515+ error = -EACCES;
64516+ goto out;
64517+ }
64518 error = security_path_mkdir(&path, dentry, mode);
64519 if (!error)
64520 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
64521+ if (!error)
64522+ gr_handle_create(dentry, path.mnt);
64523+out:
64524 done_path_create(&path, dentry);
64525 if (retry_estale(error, lookup_flags)) {
64526 lookup_flags |= LOOKUP_REVAL;
64527@@ -3596,6 +3724,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
64528 struct filename *name;
64529 struct dentry *dentry;
64530 struct nameidata nd;
64531+ ino_t saved_ino = 0;
64532+ dev_t saved_dev = 0;
64533 unsigned int lookup_flags = 0;
64534 retry:
64535 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64536@@ -3628,10 +3758,21 @@ retry:
64537 error = -ENOENT;
64538 goto exit3;
64539 }
64540+
64541+ saved_ino = dentry->d_inode->i_ino;
64542+ saved_dev = gr_get_dev_from_dentry(dentry);
64543+
64544+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
64545+ error = -EACCES;
64546+ goto exit3;
64547+ }
64548+
64549 error = security_path_rmdir(&nd.path, dentry);
64550 if (error)
64551 goto exit3;
64552 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
64553+ if (!error && (saved_dev || saved_ino))
64554+ gr_handle_delete(saved_ino, saved_dev);
64555 exit3:
64556 dput(dentry);
64557 exit2:
64558@@ -3722,6 +3863,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
64559 struct nameidata nd;
64560 struct inode *inode = NULL;
64561 struct inode *delegated_inode = NULL;
64562+ ino_t saved_ino = 0;
64563+ dev_t saved_dev = 0;
64564 unsigned int lookup_flags = 0;
64565 retry:
64566 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
64567@@ -3748,10 +3891,22 @@ retry_deleg:
64568 if (d_is_negative(dentry))
64569 goto slashes;
64570 ihold(inode);
64571+
64572+ if (inode->i_nlink <= 1) {
64573+ saved_ino = inode->i_ino;
64574+ saved_dev = gr_get_dev_from_dentry(dentry);
64575+ }
64576+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
64577+ error = -EACCES;
64578+ goto exit2;
64579+ }
64580+
64581 error = security_path_unlink(&nd.path, dentry);
64582 if (error)
64583 goto exit2;
64584 error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
64585+ if (!error && (saved_ino || saved_dev))
64586+ gr_handle_delete(saved_ino, saved_dev);
64587 exit2:
64588 dput(dentry);
64589 }
64590@@ -3840,9 +3995,17 @@ retry:
64591 if (IS_ERR(dentry))
64592 goto out_putname;
64593
64594+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
64595+ error = -EACCES;
64596+ goto out;
64597+ }
64598+
64599 error = security_path_symlink(&path, dentry, from->name);
64600 if (!error)
64601 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
64602+ if (!error)
64603+ gr_handle_create(dentry, path.mnt);
64604+out:
64605 done_path_create(&path, dentry);
64606 if (retry_estale(error, lookup_flags)) {
64607 lookup_flags |= LOOKUP_REVAL;
64608@@ -3946,6 +4109,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
64609 struct dentry *new_dentry;
64610 struct path old_path, new_path;
64611 struct inode *delegated_inode = NULL;
64612+ struct filename *to = NULL;
64613 int how = 0;
64614 int error;
64615
64616@@ -3969,7 +4133,7 @@ retry:
64617 if (error)
64618 return error;
64619
64620- new_dentry = user_path_create(newdfd, newname, &new_path,
64621+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
64622 (how & LOOKUP_REVAL));
64623 error = PTR_ERR(new_dentry);
64624 if (IS_ERR(new_dentry))
64625@@ -3981,11 +4145,28 @@ retry:
64626 error = may_linkat(&old_path);
64627 if (unlikely(error))
64628 goto out_dput;
64629+
64630+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
64631+ old_path.dentry->d_inode,
64632+ old_path.dentry->d_inode->i_mode, to)) {
64633+ error = -EACCES;
64634+ goto out_dput;
64635+ }
64636+
64637+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
64638+ old_path.dentry, old_path.mnt, to)) {
64639+ error = -EACCES;
64640+ goto out_dput;
64641+ }
64642+
64643 error = security_path_link(old_path.dentry, &new_path, new_dentry);
64644 if (error)
64645 goto out_dput;
64646 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode);
64647+ if (!error)
64648+ gr_handle_create(new_dentry, new_path.mnt);
64649 out_dput:
64650+ putname(to);
64651 done_path_create(&new_path, new_dentry);
64652 if (delegated_inode) {
64653 error = break_deleg_wait(&delegated_inode);
64654@@ -4296,6 +4477,12 @@ retry_deleg:
64655 if (new_dentry == trap)
64656 goto exit5;
64657
64658+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
64659+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
64660+ to, flags);
64661+ if (error)
64662+ goto exit5;
64663+
64664 error = security_path_rename(&oldnd.path, old_dentry,
64665 &newnd.path, new_dentry, flags);
64666 if (error)
64667@@ -4303,6 +4490,9 @@ retry_deleg:
64668 error = vfs_rename(old_dir->d_inode, old_dentry,
64669 new_dir->d_inode, new_dentry,
64670 &delegated_inode, flags);
64671+ if (!error)
64672+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
64673+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0, flags);
64674 exit5:
64675 dput(new_dentry);
64676 exit4:
64677@@ -4345,14 +4535,24 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
64678
64679 int readlink_copy(char __user *buffer, int buflen, const char *link)
64680 {
64681+ char tmpbuf[64];
64682+ const char *newlink;
64683 int len = PTR_ERR(link);
64684+
64685 if (IS_ERR(link))
64686 goto out;
64687
64688 len = strlen(link);
64689 if (len > (unsigned) buflen)
64690 len = buflen;
64691- if (copy_to_user(buffer, link, len))
64692+
64693+ if (len < sizeof(tmpbuf)) {
64694+ memcpy(tmpbuf, link, len);
64695+ newlink = tmpbuf;
64696+ } else
64697+ newlink = link;
64698+
64699+ if (copy_to_user(buffer, newlink, len))
64700 len = -EFAULT;
64701 out:
64702 return len;
64703diff --git a/fs/namespace.c b/fs/namespace.c
64704index 550dbff..c4ad324 100644
64705--- a/fs/namespace.c
64706+++ b/fs/namespace.c
64707@@ -1362,6 +1362,9 @@ static int do_umount(struct mount *mnt, int flags)
64708 if (!(sb->s_flags & MS_RDONLY))
64709 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
64710 up_write(&sb->s_umount);
64711+
64712+ gr_log_remount(mnt->mnt_devname, retval);
64713+
64714 return retval;
64715 }
64716
64717@@ -1384,6 +1387,9 @@ static int do_umount(struct mount *mnt, int flags)
64718 }
64719 unlock_mount_hash();
64720 namespace_unlock();
64721+
64722+ gr_log_unmount(mnt->mnt_devname, retval);
64723+
64724 return retval;
64725 }
64726
64727@@ -1403,7 +1409,7 @@ static inline bool may_mount(void)
64728 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
64729 */
64730
64731-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
64732+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
64733 {
64734 struct path path;
64735 struct mount *mnt;
64736@@ -1445,7 +1451,7 @@ out:
64737 /*
64738 * The 2.0 compatible umount. No flags.
64739 */
64740-SYSCALL_DEFINE1(oldumount, char __user *, name)
64741+SYSCALL_DEFINE1(oldumount, const char __user *, name)
64742 {
64743 return sys_umount(name, 0);
64744 }
64745@@ -2494,6 +2500,16 @@ long do_mount(const char *dev_name, const char *dir_name,
64746 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
64747 MS_STRICTATIME);
64748
64749+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
64750+ retval = -EPERM;
64751+ goto dput_out;
64752+ }
64753+
64754+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
64755+ retval = -EPERM;
64756+ goto dput_out;
64757+ }
64758+
64759 if (flags & MS_REMOUNT)
64760 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
64761 data_page);
64762@@ -2508,6 +2524,9 @@ long do_mount(const char *dev_name, const char *dir_name,
64763 dev_name, data_page);
64764 dput_out:
64765 path_put(&path);
64766+
64767+ gr_log_mount(dev_name, dir_name, retval);
64768+
64769 return retval;
64770 }
64771
64772@@ -2525,7 +2544,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
64773 * number incrementing at 10Ghz will take 12,427 years to wrap which
64774 * is effectively never, so we can ignore the possibility.
64775 */
64776-static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
64777+static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
64778
64779 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64780 {
64781@@ -2540,7 +2559,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64782 kfree(new_ns);
64783 return ERR_PTR(ret);
64784 }
64785- new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
64786+ new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
64787 atomic_set(&new_ns->count, 1);
64788 new_ns->root = NULL;
64789 INIT_LIST_HEAD(&new_ns->list);
64790@@ -2550,7 +2569,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
64791 return new_ns;
64792 }
64793
64794-struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64795+__latent_entropy struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
64796 struct user_namespace *user_ns, struct fs_struct *new_fs)
64797 {
64798 struct mnt_namespace *new_ns;
64799@@ -2671,8 +2690,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
64800 }
64801 EXPORT_SYMBOL(mount_subtree);
64802
64803-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
64804- char __user *, type, unsigned long, flags, void __user *, data)
64805+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
64806+ const char __user *, type, unsigned long, flags, void __user *, data)
64807 {
64808 int ret;
64809 char *kernel_type;
64810@@ -2785,6 +2804,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
64811 if (error)
64812 goto out2;
64813
64814+ if (gr_handle_chroot_pivot()) {
64815+ error = -EPERM;
64816+ goto out2;
64817+ }
64818+
64819 get_fs_root(current->fs, &root);
64820 old_mp = lock_mount(&old);
64821 error = PTR_ERR(old_mp);
64822@@ -3056,7 +3080,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
64823 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
64824 return -EPERM;
64825
64826- if (fs->users != 1)
64827+ if (atomic_read(&fs->users) != 1)
64828 return -EINVAL;
64829
64830 get_mnt_ns(mnt_ns);
64831diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
64832index f4ccfe6..a5cf064 100644
64833--- a/fs/nfs/callback_xdr.c
64834+++ b/fs/nfs/callback_xdr.c
64835@@ -51,7 +51,7 @@ struct callback_op {
64836 callback_decode_arg_t decode_args;
64837 callback_encode_res_t encode_res;
64838 long res_maxsize;
64839-};
64840+} __do_const;
64841
64842 static struct callback_op callback_ops[];
64843
64844diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
64845index 0689aa5..299386e 100644
64846--- a/fs/nfs/inode.c
64847+++ b/fs/nfs/inode.c
64848@@ -1228,16 +1228,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
64849 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
64850 }
64851
64852-static atomic_long_t nfs_attr_generation_counter;
64853+static atomic_long_unchecked_t nfs_attr_generation_counter;
64854
64855 static unsigned long nfs_read_attr_generation_counter(void)
64856 {
64857- return atomic_long_read(&nfs_attr_generation_counter);
64858+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
64859 }
64860
64861 unsigned long nfs_inc_attr_generation_counter(void)
64862 {
64863- return atomic_long_inc_return(&nfs_attr_generation_counter);
64864+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
64865 }
64866
64867 void nfs_fattr_init(struct nfs_fattr *fattr)
64868diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
64869index 1d3cb47..2b8ed89 100644
64870--- a/fs/nfsd/nfs4proc.c
64871+++ b/fs/nfsd/nfs4proc.c
64872@@ -1155,7 +1155,7 @@ struct nfsd4_operation {
64873 nfsd4op_rsize op_rsize_bop;
64874 stateid_getter op_get_currentstateid;
64875 stateid_setter op_set_currentstateid;
64876-};
64877+} __do_const;
64878
64879 static struct nfsd4_operation nfsd4_ops[];
64880
64881diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
64882index 353aac8..32035ee 100644
64883--- a/fs/nfsd/nfs4xdr.c
64884+++ b/fs/nfsd/nfs4xdr.c
64885@@ -1534,7 +1534,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
64886
64887 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
64888
64889-static nfsd4_dec nfsd4_dec_ops[] = {
64890+static const nfsd4_dec nfsd4_dec_ops[] = {
64891 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
64892 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
64893 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
64894diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
64895index ff95676..96cf3f62 100644
64896--- a/fs/nfsd/nfscache.c
64897+++ b/fs/nfsd/nfscache.c
64898@@ -527,17 +527,20 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64899 {
64900 struct svc_cacherep *rp = rqstp->rq_cacherep;
64901 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
64902- int len;
64903+ long len;
64904 size_t bufsize = 0;
64905
64906 if (!rp)
64907 return;
64908
64909- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
64910- len >>= 2;
64911+ if (statp) {
64912+ len = (char*)statp - (char*)resv->iov_base;
64913+ len = resv->iov_len - len;
64914+ len >>= 2;
64915+ }
64916
64917 /* Don't cache excessive amounts of data and XDR failures */
64918- if (!statp || len > (256 >> 2)) {
64919+ if (!statp || len > (256 >> 2) || len < 0) {
64920 nfsd_reply_cache_free(rp);
64921 return;
64922 }
64923@@ -545,7 +548,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
64924 switch (cachetype) {
64925 case RC_REPLSTAT:
64926 if (len != 1)
64927- printk("nfsd: RC_REPLSTAT/reply len %d!\n",len);
64928+ printk("nfsd: RC_REPLSTAT/reply len %ld!\n",len);
64929 rp->c_replstat = *statp;
64930 break;
64931 case RC_REPLBUFF:
64932diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
64933index 6ab077b..5ac7f0b 100644
64934--- a/fs/nfsd/vfs.c
64935+++ b/fs/nfsd/vfs.c
64936@@ -855,7 +855,7 @@ __be32 nfsd_readv(struct file *file, loff_t offset, struct kvec *vec, int vlen,
64937
64938 oldfs = get_fs();
64939 set_fs(KERNEL_DS);
64940- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
64941+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
64942 set_fs(oldfs);
64943 return nfsd_finish_read(file, count, host_err);
64944 }
64945@@ -943,7 +943,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
64946
64947 /* Write the data. */
64948 oldfs = get_fs(); set_fs(KERNEL_DS);
64949- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
64950+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
64951 set_fs(oldfs);
64952 if (host_err < 0)
64953 goto out_nfserr;
64954@@ -1485,7 +1485,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
64955 */
64956
64957 oldfs = get_fs(); set_fs(KERNEL_DS);
64958- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
64959+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
64960 set_fs(oldfs);
64961
64962 if (host_err < 0)
64963diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
64964index 52ccd34..7a6b202 100644
64965--- a/fs/nls/nls_base.c
64966+++ b/fs/nls/nls_base.c
64967@@ -234,21 +234,25 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
64968
64969 int __register_nls(struct nls_table *nls, struct module *owner)
64970 {
64971- struct nls_table ** tmp = &tables;
64972+ struct nls_table *tmp = tables;
64973
64974 if (nls->next)
64975 return -EBUSY;
64976
64977- nls->owner = owner;
64978+ pax_open_kernel();
64979+ *(void **)&nls->owner = owner;
64980+ pax_close_kernel();
64981 spin_lock(&nls_lock);
64982- while (*tmp) {
64983- if (nls == *tmp) {
64984+ while (tmp) {
64985+ if (nls == tmp) {
64986 spin_unlock(&nls_lock);
64987 return -EBUSY;
64988 }
64989- tmp = &(*tmp)->next;
64990+ tmp = tmp->next;
64991 }
64992- nls->next = tables;
64993+ pax_open_kernel();
64994+ *(struct nls_table **)&nls->next = tables;
64995+ pax_close_kernel();
64996 tables = nls;
64997 spin_unlock(&nls_lock);
64998 return 0;
64999@@ -257,12 +261,14 @@ EXPORT_SYMBOL(__register_nls);
65000
65001 int unregister_nls(struct nls_table * nls)
65002 {
65003- struct nls_table ** tmp = &tables;
65004+ struct nls_table * const * tmp = &tables;
65005
65006 spin_lock(&nls_lock);
65007 while (*tmp) {
65008 if (nls == *tmp) {
65009- *tmp = nls->next;
65010+ pax_open_kernel();
65011+ *(struct nls_table **)tmp = nls->next;
65012+ pax_close_kernel();
65013 spin_unlock(&nls_lock);
65014 return 0;
65015 }
65016@@ -272,7 +278,7 @@ int unregister_nls(struct nls_table * nls)
65017 return -EINVAL;
65018 }
65019
65020-static struct nls_table *find_nls(char *charset)
65021+static struct nls_table *find_nls(const char *charset)
65022 {
65023 struct nls_table *nls;
65024 spin_lock(&nls_lock);
65025@@ -288,7 +294,7 @@ static struct nls_table *find_nls(char *charset)
65026 return nls;
65027 }
65028
65029-struct nls_table *load_nls(char *charset)
65030+struct nls_table *load_nls(const char *charset)
65031 {
65032 return try_then_request_module(find_nls(charset), "nls_%s", charset);
65033 }
65034diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
65035index 162b3f1..6076a7c 100644
65036--- a/fs/nls/nls_euc-jp.c
65037+++ b/fs/nls/nls_euc-jp.c
65038@@ -560,8 +560,10 @@ static int __init init_nls_euc_jp(void)
65039 p_nls = load_nls("cp932");
65040
65041 if (p_nls) {
65042- table.charset2upper = p_nls->charset2upper;
65043- table.charset2lower = p_nls->charset2lower;
65044+ pax_open_kernel();
65045+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65046+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65047+ pax_close_kernel();
65048 return register_nls(&table);
65049 }
65050
65051diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
65052index a80a741..7b96e1b 100644
65053--- a/fs/nls/nls_koi8-ru.c
65054+++ b/fs/nls/nls_koi8-ru.c
65055@@ -62,8 +62,10 @@ static int __init init_nls_koi8_ru(void)
65056 p_nls = load_nls("koi8-u");
65057
65058 if (p_nls) {
65059- table.charset2upper = p_nls->charset2upper;
65060- table.charset2lower = p_nls->charset2lower;
65061+ pax_open_kernel();
65062+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
65063+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
65064+ pax_close_kernel();
65065 return register_nls(&table);
65066 }
65067
65068diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
65069index c991616..5ae51af 100644
65070--- a/fs/notify/fanotify/fanotify_user.c
65071+++ b/fs/notify/fanotify/fanotify_user.c
65072@@ -216,8 +216,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
65073
65074 fd = fanotify_event_metadata.fd;
65075 ret = -EFAULT;
65076- if (copy_to_user(buf, &fanotify_event_metadata,
65077- fanotify_event_metadata.event_len))
65078+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
65079+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
65080 goto out_close_fd;
65081
65082 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
65083diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
65084index 0f88bc0..7d888d7 100644
65085--- a/fs/notify/inotify/inotify_fsnotify.c
65086+++ b/fs/notify/inotify/inotify_fsnotify.c
65087@@ -165,8 +165,10 @@ static void inotify_free_group_priv(struct fsnotify_group *group)
65088 /* ideally the idr is empty and we won't hit the BUG in the callback */
65089 idr_for_each(&group->inotify_data.idr, idr_callback, group);
65090 idr_destroy(&group->inotify_data.idr);
65091- atomic_dec(&group->inotify_data.user->inotify_devs);
65092- free_uid(group->inotify_data.user);
65093+ if (group->inotify_data.user) {
65094+ atomic_dec(&group->inotify_data.user->inotify_devs);
65095+ free_uid(group->inotify_data.user);
65096+ }
65097 }
65098
65099 static void inotify_free_event(struct fsnotify_event *fsn_event)
65100diff --git a/fs/notify/notification.c b/fs/notify/notification.c
65101index a95d8e0..a91a5fd 100644
65102--- a/fs/notify/notification.c
65103+++ b/fs/notify/notification.c
65104@@ -48,7 +48,7 @@
65105 #include <linux/fsnotify_backend.h>
65106 #include "fsnotify.h"
65107
65108-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65109+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65110
65111 /**
65112 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
65113@@ -56,7 +56,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
65114 */
65115 u32 fsnotify_get_cookie(void)
65116 {
65117- return atomic_inc_return(&fsnotify_sync_cookie);
65118+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
65119 }
65120 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
65121
65122diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
65123index 9e38daf..5727cae 100644
65124--- a/fs/ntfs/dir.c
65125+++ b/fs/ntfs/dir.c
65126@@ -1310,7 +1310,7 @@ find_next_index_buffer:
65127 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
65128 ~(s64)(ndir->itype.index.block_size - 1)));
65129 /* Bounds checks. */
65130- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65131+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
65132 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
65133 "inode 0x%lx or driver bug.", vdir->i_ino);
65134 goto err_out;
65135diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
65136index f5ec1ce..807fd78 100644
65137--- a/fs/ntfs/file.c
65138+++ b/fs/ntfs/file.c
65139@@ -1279,7 +1279,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
65140 char *addr;
65141 size_t total = 0;
65142 unsigned len;
65143- int left;
65144+ unsigned left;
65145
65146 do {
65147 len = PAGE_CACHE_SIZE - ofs;
65148diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
65149index 6c3296e..c0b99f0 100644
65150--- a/fs/ntfs/super.c
65151+++ b/fs/ntfs/super.c
65152@@ -688,7 +688,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65153 if (!silent)
65154 ntfs_error(sb, "Primary boot sector is invalid.");
65155 } else if (!silent)
65156- ntfs_error(sb, read_err_str, "primary");
65157+ ntfs_error(sb, read_err_str, "%s", "primary");
65158 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
65159 if (bh_primary)
65160 brelse(bh_primary);
65161@@ -704,7 +704,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65162 goto hotfix_primary_boot_sector;
65163 brelse(bh_backup);
65164 } else if (!silent)
65165- ntfs_error(sb, read_err_str, "backup");
65166+ ntfs_error(sb, read_err_str, "%s", "backup");
65167 /* Try to read NT3.51- backup boot sector. */
65168 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
65169 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
65170@@ -715,7 +715,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
65171 "sector.");
65172 brelse(bh_backup);
65173 } else if (!silent)
65174- ntfs_error(sb, read_err_str, "backup");
65175+ ntfs_error(sb, read_err_str, "%s", "backup");
65176 /* We failed. Cleanup and return. */
65177 if (bh_primary)
65178 brelse(bh_primary);
65179diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
65180index 0440134..d52c93a 100644
65181--- a/fs/ocfs2/localalloc.c
65182+++ b/fs/ocfs2/localalloc.c
65183@@ -1320,7 +1320,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
65184 goto bail;
65185 }
65186
65187- atomic_inc(&osb->alloc_stats.moves);
65188+ atomic_inc_unchecked(&osb->alloc_stats.moves);
65189
65190 bail:
65191 if (handle)
65192diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
65193index 8add6f1..b931e04 100644
65194--- a/fs/ocfs2/namei.c
65195+++ b/fs/ocfs2/namei.c
65196@@ -158,7 +158,7 @@ bail_add:
65197 * NOTE: This dentry already has ->d_op set from
65198 * ocfs2_get_parent() and ocfs2_get_dentry()
65199 */
65200- if (ret)
65201+ if (!IS_ERR_OR_NULL(ret))
65202 dentry = ret;
65203
65204 status = ocfs2_dentry_attach_lock(dentry, inode,
65205diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
65206index bbec539..7b266d5 100644
65207--- a/fs/ocfs2/ocfs2.h
65208+++ b/fs/ocfs2/ocfs2.h
65209@@ -236,11 +236,11 @@ enum ocfs2_vol_state
65210
65211 struct ocfs2_alloc_stats
65212 {
65213- atomic_t moves;
65214- atomic_t local_data;
65215- atomic_t bitmap_data;
65216- atomic_t bg_allocs;
65217- atomic_t bg_extends;
65218+ atomic_unchecked_t moves;
65219+ atomic_unchecked_t local_data;
65220+ atomic_unchecked_t bitmap_data;
65221+ atomic_unchecked_t bg_allocs;
65222+ atomic_unchecked_t bg_extends;
65223 };
65224
65225 enum ocfs2_local_alloc_state
65226diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
65227index 0cb889a..6a26b24 100644
65228--- a/fs/ocfs2/suballoc.c
65229+++ b/fs/ocfs2/suballoc.c
65230@@ -867,7 +867,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
65231 mlog_errno(status);
65232 goto bail;
65233 }
65234- atomic_inc(&osb->alloc_stats.bg_extends);
65235+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
65236
65237 /* You should never ask for this much metadata */
65238 BUG_ON(bits_wanted >
65239@@ -2014,7 +2014,7 @@ int ocfs2_claim_metadata(handle_t *handle,
65240 mlog_errno(status);
65241 goto bail;
65242 }
65243- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65244+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65245
65246 *suballoc_loc = res.sr_bg_blkno;
65247 *suballoc_bit_start = res.sr_bit_offset;
65248@@ -2180,7 +2180,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
65249 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
65250 res->sr_bits);
65251
65252- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65253+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65254
65255 BUG_ON(res->sr_bits != 1);
65256
65257@@ -2222,7 +2222,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
65258 mlog_errno(status);
65259 goto bail;
65260 }
65261- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65262+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
65263
65264 BUG_ON(res.sr_bits != 1);
65265
65266@@ -2326,7 +2326,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65267 cluster_start,
65268 num_clusters);
65269 if (!status)
65270- atomic_inc(&osb->alloc_stats.local_data);
65271+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
65272 } else {
65273 if (min_clusters > (osb->bitmap_cpg - 1)) {
65274 /* The only paths asking for contiguousness
65275@@ -2352,7 +2352,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
65276 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
65277 res.sr_bg_blkno,
65278 res.sr_bit_offset);
65279- atomic_inc(&osb->alloc_stats.bitmap_data);
65280+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
65281 *num_clusters = res.sr_bits;
65282 }
65283 }
65284diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
65285index 4142546..69375a9 100644
65286--- a/fs/ocfs2/super.c
65287+++ b/fs/ocfs2/super.c
65288@@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
65289 "%10s => GlobalAllocs: %d LocalAllocs: %d "
65290 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
65291 "Stats",
65292- atomic_read(&osb->alloc_stats.bitmap_data),
65293- atomic_read(&osb->alloc_stats.local_data),
65294- atomic_read(&osb->alloc_stats.bg_allocs),
65295- atomic_read(&osb->alloc_stats.moves),
65296- atomic_read(&osb->alloc_stats.bg_extends));
65297+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
65298+ atomic_read_unchecked(&osb->alloc_stats.local_data),
65299+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
65300+ atomic_read_unchecked(&osb->alloc_stats.moves),
65301+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
65302
65303 out += snprintf(buf + out, len - out,
65304 "%10s => State: %u Descriptor: %llu Size: %u bits "
65305@@ -2100,11 +2100,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
65306
65307 mutex_init(&osb->system_file_mutex);
65308
65309- atomic_set(&osb->alloc_stats.moves, 0);
65310- atomic_set(&osb->alloc_stats.local_data, 0);
65311- atomic_set(&osb->alloc_stats.bitmap_data, 0);
65312- atomic_set(&osb->alloc_stats.bg_allocs, 0);
65313- atomic_set(&osb->alloc_stats.bg_extends, 0);
65314+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
65315+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
65316+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
65317+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
65318+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
65319
65320 /* Copy the blockcheck stats from the superblock probe */
65321 osb->osb_ecc_stats = *stats;
65322diff --git a/fs/open.c b/fs/open.c
65323index d6fd3ac..6ccf474 100644
65324--- a/fs/open.c
65325+++ b/fs/open.c
65326@@ -32,6 +32,8 @@
65327 #include <linux/dnotify.h>
65328 #include <linux/compat.h>
65329
65330+#define CREATE_TRACE_POINTS
65331+#include <trace/events/fs.h>
65332 #include "internal.h"
65333
65334 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
65335@@ -103,6 +105,8 @@ long vfs_truncate(struct path *path, loff_t length)
65336 error = locks_verify_truncate(inode, NULL, length);
65337 if (!error)
65338 error = security_path_truncate(path);
65339+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
65340+ error = -EACCES;
65341 if (!error)
65342 error = do_truncate(path->dentry, length, 0, NULL);
65343
65344@@ -187,6 +191,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
65345 error = locks_verify_truncate(inode, f.file, length);
65346 if (!error)
65347 error = security_path_truncate(&f.file->f_path);
65348+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
65349+ error = -EACCES;
65350 if (!error)
65351 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
65352 sb_end_write(inode->i_sb);
65353@@ -380,6 +386,9 @@ retry:
65354 if (__mnt_is_readonly(path.mnt))
65355 res = -EROFS;
65356
65357+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
65358+ res = -EACCES;
65359+
65360 out_path_release:
65361 path_put(&path);
65362 if (retry_estale(res, lookup_flags)) {
65363@@ -411,6 +420,8 @@ retry:
65364 if (error)
65365 goto dput_and_out;
65366
65367+ gr_log_chdir(path.dentry, path.mnt);
65368+
65369 set_fs_pwd(current->fs, &path);
65370
65371 dput_and_out:
65372@@ -440,6 +451,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
65373 goto out_putf;
65374
65375 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
65376+
65377+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
65378+ error = -EPERM;
65379+
65380+ if (!error)
65381+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
65382+
65383 if (!error)
65384 set_fs_pwd(current->fs, &f.file->f_path);
65385 out_putf:
65386@@ -469,7 +487,13 @@ retry:
65387 if (error)
65388 goto dput_and_out;
65389
65390+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
65391+ goto dput_and_out;
65392+
65393 set_fs_root(current->fs, &path);
65394+
65395+ gr_handle_chroot_chdir(&path);
65396+
65397 error = 0;
65398 dput_and_out:
65399 path_put(&path);
65400@@ -493,6 +517,16 @@ static int chmod_common(struct path *path, umode_t mode)
65401 return error;
65402 retry_deleg:
65403 mutex_lock(&inode->i_mutex);
65404+
65405+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
65406+ error = -EACCES;
65407+ goto out_unlock;
65408+ }
65409+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
65410+ error = -EACCES;
65411+ goto out_unlock;
65412+ }
65413+
65414 error = security_path_chmod(path, mode);
65415 if (error)
65416 goto out_unlock;
65417@@ -558,6 +592,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
65418 uid = make_kuid(current_user_ns(), user);
65419 gid = make_kgid(current_user_ns(), group);
65420
65421+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
65422+ return -EACCES;
65423+
65424 newattrs.ia_valid = ATTR_CTIME;
65425 if (user != (uid_t) -1) {
65426 if (!uid_valid(uid))
65427@@ -983,6 +1020,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
65428 } else {
65429 fsnotify_open(f);
65430 fd_install(fd, f);
65431+ trace_do_sys_open(tmp->name, flags, mode);
65432 }
65433 }
65434 putname(tmp);
65435diff --git a/fs/pipe.c b/fs/pipe.c
65436index 21981e5..3d5f55c 100644
65437--- a/fs/pipe.c
65438+++ b/fs/pipe.c
65439@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
65440
65441 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
65442 {
65443- if (pipe->files)
65444+ if (atomic_read(&pipe->files))
65445 mutex_lock_nested(&pipe->mutex, subclass);
65446 }
65447
65448@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
65449
65450 void pipe_unlock(struct pipe_inode_info *pipe)
65451 {
65452- if (pipe->files)
65453+ if (atomic_read(&pipe->files))
65454 mutex_unlock(&pipe->mutex);
65455 }
65456 EXPORT_SYMBOL(pipe_unlock);
65457@@ -292,9 +292,9 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
65458 }
65459 if (bufs) /* More to do? */
65460 continue;
65461- if (!pipe->writers)
65462+ if (!atomic_read(&pipe->writers))
65463 break;
65464- if (!pipe->waiting_writers) {
65465+ if (!atomic_read(&pipe->waiting_writers)) {
65466 /* syscall merging: Usually we must not sleep
65467 * if O_NONBLOCK is set, or if we got some data.
65468 * But if a writer sleeps in kernel space, then
65469@@ -351,7 +351,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65470
65471 __pipe_lock(pipe);
65472
65473- if (!pipe->readers) {
65474+ if (!atomic_read(&pipe->readers)) {
65475 send_sig(SIGPIPE, current, 0);
65476 ret = -EPIPE;
65477 goto out;
65478@@ -387,7 +387,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65479 for (;;) {
65480 int bufs;
65481
65482- if (!pipe->readers) {
65483+ if (!atomic_read(&pipe->readers)) {
65484 send_sig(SIGPIPE, current, 0);
65485 if (!ret)
65486 ret = -EPIPE;
65487@@ -455,9 +455,9 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
65488 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65489 do_wakeup = 0;
65490 }
65491- pipe->waiting_writers++;
65492+ atomic_inc(&pipe->waiting_writers);
65493 pipe_wait(pipe);
65494- pipe->waiting_writers--;
65495+ atomic_dec(&pipe->waiting_writers);
65496 }
65497 out:
65498 __pipe_unlock(pipe);
65499@@ -512,7 +512,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65500 mask = 0;
65501 if (filp->f_mode & FMODE_READ) {
65502 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
65503- if (!pipe->writers && filp->f_version != pipe->w_counter)
65504+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
65505 mask |= POLLHUP;
65506 }
65507
65508@@ -522,7 +522,7 @@ pipe_poll(struct file *filp, poll_table *wait)
65509 * Most Unices do not set POLLERR for FIFOs but on Linux they
65510 * behave exactly like pipes for poll().
65511 */
65512- if (!pipe->readers)
65513+ if (!atomic_read(&pipe->readers))
65514 mask |= POLLERR;
65515 }
65516
65517@@ -534,7 +534,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
65518 int kill = 0;
65519
65520 spin_lock(&inode->i_lock);
65521- if (!--pipe->files) {
65522+ if (atomic_dec_and_test(&pipe->files)) {
65523 inode->i_pipe = NULL;
65524 kill = 1;
65525 }
65526@@ -551,11 +551,11 @@ pipe_release(struct inode *inode, struct file *file)
65527
65528 __pipe_lock(pipe);
65529 if (file->f_mode & FMODE_READ)
65530- pipe->readers--;
65531+ atomic_dec(&pipe->readers);
65532 if (file->f_mode & FMODE_WRITE)
65533- pipe->writers--;
65534+ atomic_dec(&pipe->writers);
65535
65536- if (pipe->readers || pipe->writers) {
65537+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
65538 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
65539 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
65540 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
65541@@ -620,7 +620,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
65542 kfree(pipe);
65543 }
65544
65545-static struct vfsmount *pipe_mnt __read_mostly;
65546+struct vfsmount *pipe_mnt __read_mostly;
65547
65548 /*
65549 * pipefs_dname() is called from d_path().
65550@@ -650,8 +650,9 @@ static struct inode * get_pipe_inode(void)
65551 goto fail_iput;
65552
65553 inode->i_pipe = pipe;
65554- pipe->files = 2;
65555- pipe->readers = pipe->writers = 1;
65556+ atomic_set(&pipe->files, 2);
65557+ atomic_set(&pipe->readers, 1);
65558+ atomic_set(&pipe->writers, 1);
65559 inode->i_fop = &pipefifo_fops;
65560
65561 /*
65562@@ -830,17 +831,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
65563 spin_lock(&inode->i_lock);
65564 if (inode->i_pipe) {
65565 pipe = inode->i_pipe;
65566- pipe->files++;
65567+ atomic_inc(&pipe->files);
65568 spin_unlock(&inode->i_lock);
65569 } else {
65570 spin_unlock(&inode->i_lock);
65571 pipe = alloc_pipe_info();
65572 if (!pipe)
65573 return -ENOMEM;
65574- pipe->files = 1;
65575+ atomic_set(&pipe->files, 1);
65576 spin_lock(&inode->i_lock);
65577 if (unlikely(inode->i_pipe)) {
65578- inode->i_pipe->files++;
65579+ atomic_inc(&inode->i_pipe->files);
65580 spin_unlock(&inode->i_lock);
65581 free_pipe_info(pipe);
65582 pipe = inode->i_pipe;
65583@@ -865,10 +866,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
65584 * opened, even when there is no process writing the FIFO.
65585 */
65586 pipe->r_counter++;
65587- if (pipe->readers++ == 0)
65588+ if (atomic_inc_return(&pipe->readers) == 1)
65589 wake_up_partner(pipe);
65590
65591- if (!is_pipe && !pipe->writers) {
65592+ if (!is_pipe && !atomic_read(&pipe->writers)) {
65593 if ((filp->f_flags & O_NONBLOCK)) {
65594 /* suppress POLLHUP until we have
65595 * seen a writer */
65596@@ -887,14 +888,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
65597 * errno=ENXIO when there is no process reading the FIFO.
65598 */
65599 ret = -ENXIO;
65600- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
65601+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
65602 goto err;
65603
65604 pipe->w_counter++;
65605- if (!pipe->writers++)
65606+ if (atomic_inc_return(&pipe->writers) == 1)
65607 wake_up_partner(pipe);
65608
65609- if (!is_pipe && !pipe->readers) {
65610+ if (!is_pipe && !atomic_read(&pipe->readers)) {
65611 if (wait_for_partner(pipe, &pipe->r_counter))
65612 goto err_wr;
65613 }
65614@@ -908,11 +909,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
65615 * the process can at least talk to itself.
65616 */
65617
65618- pipe->readers++;
65619- pipe->writers++;
65620+ atomic_inc(&pipe->readers);
65621+ atomic_inc(&pipe->writers);
65622 pipe->r_counter++;
65623 pipe->w_counter++;
65624- if (pipe->readers == 1 || pipe->writers == 1)
65625+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
65626 wake_up_partner(pipe);
65627 break;
65628
65629@@ -926,13 +927,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
65630 return 0;
65631
65632 err_rd:
65633- if (!--pipe->readers)
65634+ if (atomic_dec_and_test(&pipe->readers))
65635 wake_up_interruptible(&pipe->wait);
65636 ret = -ERESTARTSYS;
65637 goto err;
65638
65639 err_wr:
65640- if (!--pipe->writers)
65641+ if (atomic_dec_and_test(&pipe->writers))
65642 wake_up_interruptible(&pipe->wait);
65643 ret = -ERESTARTSYS;
65644 goto err;
65645diff --git a/fs/posix_acl.c b/fs/posix_acl.c
65646index 0855f77..6787d50 100644
65647--- a/fs/posix_acl.c
65648+++ b/fs/posix_acl.c
65649@@ -20,6 +20,7 @@
65650 #include <linux/xattr.h>
65651 #include <linux/export.h>
65652 #include <linux/user_namespace.h>
65653+#include <linux/grsecurity.h>
65654
65655 struct posix_acl **acl_by_type(struct inode *inode, int type)
65656 {
65657@@ -277,7 +278,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, umode_t *mode_p)
65658 }
65659 }
65660 if (mode_p)
65661- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65662+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65663 return not_equiv;
65664 }
65665 EXPORT_SYMBOL(posix_acl_equiv_mode);
65666@@ -427,7 +428,7 @@ static int posix_acl_create_masq(struct posix_acl *acl, umode_t *mode_p)
65667 mode &= (group_obj->e_perm << 3) | ~S_IRWXG;
65668 }
65669
65670- *mode_p = (*mode_p & ~S_IRWXUGO) | mode;
65671+ *mode_p = ((*mode_p & ~S_IRWXUGO) | mode) & ~gr_acl_umask();
65672 return not_equiv;
65673 }
65674
65675@@ -485,6 +486,8 @@ __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
65676 struct posix_acl *clone = posix_acl_clone(*acl, gfp);
65677 int err = -ENOMEM;
65678 if (clone) {
65679+ *mode_p &= ~gr_acl_umask();
65680+
65681 err = posix_acl_create_masq(clone, mode_p);
65682 if (err < 0) {
65683 posix_acl_release(clone);
65684@@ -659,11 +662,12 @@ struct posix_acl *
65685 posix_acl_from_xattr(struct user_namespace *user_ns,
65686 const void *value, size_t size)
65687 {
65688- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
65689- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
65690+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
65691+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
65692 int count;
65693 struct posix_acl *acl;
65694 struct posix_acl_entry *acl_e;
65695+ umode_t umask = gr_acl_umask();
65696
65697 if (!value)
65698 return NULL;
65699@@ -689,12 +693,18 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65700
65701 switch(acl_e->e_tag) {
65702 case ACL_USER_OBJ:
65703+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65704+ break;
65705 case ACL_GROUP_OBJ:
65706 case ACL_MASK:
65707+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65708+ break;
65709 case ACL_OTHER:
65710+ acl_e->e_perm &= ~(umask & S_IRWXO);
65711 break;
65712
65713 case ACL_USER:
65714+ acl_e->e_perm &= ~((umask & S_IRWXU) >> 6);
65715 acl_e->e_uid =
65716 make_kuid(user_ns,
65717 le32_to_cpu(entry->e_id));
65718@@ -702,6 +712,7 @@ posix_acl_from_xattr(struct user_namespace *user_ns,
65719 goto fail;
65720 break;
65721 case ACL_GROUP:
65722+ acl_e->e_perm &= ~((umask & S_IRWXG) >> 3);
65723 acl_e->e_gid =
65724 make_kgid(user_ns,
65725 le32_to_cpu(entry->e_id));
65726diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
65727index 2183fcf..3c32a98 100644
65728--- a/fs/proc/Kconfig
65729+++ b/fs/proc/Kconfig
65730@@ -30,7 +30,7 @@ config PROC_FS
65731
65732 config PROC_KCORE
65733 bool "/proc/kcore support" if !ARM
65734- depends on PROC_FS && MMU
65735+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
65736 help
65737 Provides a virtual ELF core file of the live kernel. This can
65738 be read with gdb and other ELF tools. No modifications can be
65739@@ -38,8 +38,8 @@ config PROC_KCORE
65740
65741 config PROC_VMCORE
65742 bool "/proc/vmcore support"
65743- depends on PROC_FS && CRASH_DUMP
65744- default y
65745+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
65746+ default n
65747 help
65748 Exports the dump image of crashed kernel in ELF format.
65749
65750@@ -63,8 +63,8 @@ config PROC_SYSCTL
65751 limited in memory.
65752
65753 config PROC_PAGE_MONITOR
65754- default y
65755- depends on PROC_FS && MMU
65756+ default n
65757+ depends on PROC_FS && MMU && !GRKERNSEC
65758 bool "Enable /proc page monitoring" if EXPERT
65759 help
65760 Various /proc files exist to monitor process memory utilization:
65761diff --git a/fs/proc/array.c b/fs/proc/array.c
65762index cd3653e..9b9b79a 100644
65763--- a/fs/proc/array.c
65764+++ b/fs/proc/array.c
65765@@ -60,6 +60,7 @@
65766 #include <linux/tty.h>
65767 #include <linux/string.h>
65768 #include <linux/mman.h>
65769+#include <linux/grsecurity.h>
65770 #include <linux/proc_fs.h>
65771 #include <linux/ioport.h>
65772 #include <linux/uaccess.h>
65773@@ -347,6 +348,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
65774 seq_putc(m, '\n');
65775 }
65776
65777+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65778+static inline void task_pax(struct seq_file *m, struct task_struct *p)
65779+{
65780+ if (p->mm)
65781+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
65782+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
65783+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
65784+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
65785+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
65786+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
65787+ else
65788+ seq_printf(m, "PaX:\t-----\n");
65789+}
65790+#endif
65791+
65792 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65793 struct pid *pid, struct task_struct *task)
65794 {
65795@@ -365,9 +381,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
65796 task_cpus_allowed(m, task);
65797 cpuset_task_status_allowed(m, task);
65798 task_context_switch_counts(m, task);
65799+
65800+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
65801+ task_pax(m, task);
65802+#endif
65803+
65804+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
65805+ task_grsec_rbac(m, task);
65806+#endif
65807+
65808 return 0;
65809 }
65810
65811+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65812+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65813+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65814+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65815+#endif
65816+
65817 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65818 struct pid *pid, struct task_struct *task, int whole)
65819 {
65820@@ -389,6 +420,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65821 char tcomm[sizeof(task->comm)];
65822 unsigned long flags;
65823
65824+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65825+ if (current->exec_id != m->exec_id) {
65826+ gr_log_badprocpid("stat");
65827+ return 0;
65828+ }
65829+#endif
65830+
65831 state = *get_task_state(task);
65832 vsize = eip = esp = 0;
65833 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
65834@@ -459,6 +497,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65835 gtime = task_gtime(task);
65836 }
65837
65838+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65839+ if (PAX_RAND_FLAGS(mm)) {
65840+ eip = 0;
65841+ esp = 0;
65842+ wchan = 0;
65843+ }
65844+#endif
65845+#ifdef CONFIG_GRKERNSEC_HIDESYM
65846+ wchan = 0;
65847+ eip =0;
65848+ esp =0;
65849+#endif
65850+
65851 /* scale priority and nice values from timeslices to -20..20 */
65852 /* to make it look like a "normal" Unix priority/nice value */
65853 priority = task_prio(task);
65854@@ -490,9 +541,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65855 seq_put_decimal_ull(m, ' ', vsize);
65856 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
65857 seq_put_decimal_ull(m, ' ', rsslim);
65858+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65859+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
65860+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
65861+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
65862+#else
65863 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
65864 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
65865 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
65866+#endif
65867 seq_put_decimal_ull(m, ' ', esp);
65868 seq_put_decimal_ull(m, ' ', eip);
65869 /* The signal information here is obsolete.
65870@@ -514,7 +571,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
65871 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
65872 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
65873
65874- if (mm && permitted) {
65875+ if (mm && permitted
65876+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65877+ && !PAX_RAND_FLAGS(mm)
65878+#endif
65879+ ) {
65880 seq_put_decimal_ull(m, ' ', mm->start_data);
65881 seq_put_decimal_ull(m, ' ', mm->end_data);
65882 seq_put_decimal_ull(m, ' ', mm->start_brk);
65883@@ -552,8 +613,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65884 struct pid *pid, struct task_struct *task)
65885 {
65886 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
65887- struct mm_struct *mm = get_task_mm(task);
65888+ struct mm_struct *mm;
65889
65890+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65891+ if (current->exec_id != m->exec_id) {
65892+ gr_log_badprocpid("statm");
65893+ return 0;
65894+ }
65895+#endif
65896+ mm = get_task_mm(task);
65897 if (mm) {
65898 size = task_statm(mm, &shared, &text, &data, &resident);
65899 mmput(mm);
65900@@ -576,6 +644,20 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
65901 return 0;
65902 }
65903
65904+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
65905+int proc_pid_ipaddr(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task)
65906+{
65907+ unsigned long flags;
65908+ u32 curr_ip = 0;
65909+
65910+ if (lock_task_sighand(task, &flags)) {
65911+ curr_ip = task->signal->curr_ip;
65912+ unlock_task_sighand(task, &flags);
65913+ }
65914+ return seq_printf(m, "%pI4\n", &curr_ip);
65915+}
65916+#endif
65917+
65918 #ifdef CONFIG_CHECKPOINT_RESTORE
65919 static struct pid *
65920 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
65921diff --git a/fs/proc/base.c b/fs/proc/base.c
65922index baf852b..03fe930 100644
65923--- a/fs/proc/base.c
65924+++ b/fs/proc/base.c
65925@@ -113,6 +113,14 @@ struct pid_entry {
65926 union proc_op op;
65927 };
65928
65929+struct getdents_callback {
65930+ struct linux_dirent __user * current_dir;
65931+ struct linux_dirent __user * previous;
65932+ struct file * file;
65933+ int count;
65934+ int error;
65935+};
65936+
65937 #define NOD(NAME, MODE, IOP, FOP, OP) { \
65938 .name = (NAME), \
65939 .len = sizeof(NAME) - 1, \
65940@@ -208,12 +216,28 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
65941 return 0;
65942 }
65943
65944+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65945+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
65946+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
65947+ _mm->pax_flags & MF_PAX_SEGMEXEC))
65948+#endif
65949+
65950 static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65951 struct pid *pid, struct task_struct *task)
65952 {
65953 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
65954 if (mm && !IS_ERR(mm)) {
65955 unsigned int nwords = 0;
65956+
65957+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
65958+ /* allow if we're currently ptracing this task */
65959+ if (PAX_RAND_FLAGS(mm) &&
65960+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
65961+ mmput(mm);
65962+ return 0;
65963+ }
65964+#endif
65965+
65966 do {
65967 nwords += 2;
65968 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
65969@@ -225,7 +249,7 @@ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
65970 }
65971
65972
65973-#ifdef CONFIG_KALLSYMS
65974+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65975 /*
65976 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
65977 * Returns the resolved symbol. If that fails, simply return the address.
65978@@ -265,7 +289,7 @@ static void unlock_trace(struct task_struct *task)
65979 mutex_unlock(&task->signal->cred_guard_mutex);
65980 }
65981
65982-#ifdef CONFIG_STACKTRACE
65983+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
65984
65985 #define MAX_STACK_TRACE_DEPTH 64
65986
65987@@ -487,7 +511,7 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
65988 return 0;
65989 }
65990
65991-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
65992+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
65993 static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65994 struct pid *pid, struct task_struct *task)
65995 {
65996@@ -517,7 +541,7 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
65997 /************************************************************************/
65998
65999 /* permission checks */
66000-static int proc_fd_access_allowed(struct inode *inode)
66001+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
66002 {
66003 struct task_struct *task;
66004 int allowed = 0;
66005@@ -527,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
66006 */
66007 task = get_proc_task(inode);
66008 if (task) {
66009- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66010+ if (log)
66011+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
66012+ else
66013+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
66014 put_task_struct(task);
66015 }
66016 return allowed;
66017@@ -558,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
66018 struct task_struct *task,
66019 int hide_pid_min)
66020 {
66021+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66022+ return false;
66023+
66024+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66025+ rcu_read_lock();
66026+ {
66027+ const struct cred *tmpcred = current_cred();
66028+ const struct cred *cred = __task_cred(task);
66029+
66030+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
66031+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66032+ || in_group_p(grsec_proc_gid)
66033+#endif
66034+ ) {
66035+ rcu_read_unlock();
66036+ return true;
66037+ }
66038+ }
66039+ rcu_read_unlock();
66040+
66041+ if (!pid->hide_pid)
66042+ return false;
66043+#endif
66044+
66045 if (pid->hide_pid < hide_pid_min)
66046 return true;
66047 if (in_group_p(pid->pid_gid))
66048 return true;
66049+
66050 return ptrace_may_access(task, PTRACE_MODE_READ);
66051 }
66052
66053@@ -579,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
66054 put_task_struct(task);
66055
66056 if (!has_perms) {
66057+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66058+ {
66059+#else
66060 if (pid->hide_pid == 2) {
66061+#endif
66062 /*
66063 * Let's make getdents(), stat(), and open()
66064 * consistent with each other. If a process
66065@@ -640,6 +696,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66066 if (!task)
66067 return -ESRCH;
66068
66069+ if (gr_acl_handle_procpidmem(task)) {
66070+ put_task_struct(task);
66071+ return -EPERM;
66072+ }
66073+
66074 mm = mm_access(task, mode);
66075 put_task_struct(task);
66076
66077@@ -655,6 +716,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
66078
66079 file->private_data = mm;
66080
66081+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66082+ file->f_version = current->exec_id;
66083+#endif
66084+
66085 return 0;
66086 }
66087
66088@@ -676,6 +741,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66089 ssize_t copied;
66090 char *page;
66091
66092+#ifdef CONFIG_GRKERNSEC
66093+ if (write)
66094+ return -EPERM;
66095+#endif
66096+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66097+ if (file->f_version != current->exec_id) {
66098+ gr_log_badprocpid("mem");
66099+ return 0;
66100+ }
66101+#endif
66102+
66103 if (!mm)
66104 return 0;
66105
66106@@ -688,7 +764,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
66107 goto free;
66108
66109 while (count > 0) {
66110- int this_len = min_t(int, count, PAGE_SIZE);
66111+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
66112
66113 if (write && copy_from_user(page, buf, this_len)) {
66114 copied = -EFAULT;
66115@@ -780,6 +856,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66116 if (!mm)
66117 return 0;
66118
66119+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66120+ if (file->f_version != current->exec_id) {
66121+ gr_log_badprocpid("environ");
66122+ return 0;
66123+ }
66124+#endif
66125+
66126 page = (char *)__get_free_page(GFP_TEMPORARY);
66127 if (!page)
66128 return -ENOMEM;
66129@@ -789,7 +872,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
66130 goto free;
66131 while (count > 0) {
66132 size_t this_len, max_len;
66133- int retval;
66134+ ssize_t retval;
66135
66136 if (src >= (mm->env_end - mm->env_start))
66137 break;
66138@@ -1403,7 +1486,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
66139 int error = -EACCES;
66140
66141 /* Are we allowed to snoop on the tasks file descriptors? */
66142- if (!proc_fd_access_allowed(inode))
66143+ if (!proc_fd_access_allowed(inode, 0))
66144 goto out;
66145
66146 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66147@@ -1447,8 +1530,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
66148 struct path path;
66149
66150 /* Are we allowed to snoop on the tasks file descriptors? */
66151- if (!proc_fd_access_allowed(inode))
66152- goto out;
66153+ /* logging this is needed for learning on chromium to work properly,
66154+ but we don't want to flood the logs from 'ps' which does a readlink
66155+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
66156+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
66157+ */
66158+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
66159+ if (!proc_fd_access_allowed(inode,0))
66160+ goto out;
66161+ } else {
66162+ if (!proc_fd_access_allowed(inode,1))
66163+ goto out;
66164+ }
66165
66166 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
66167 if (error)
66168@@ -1498,7 +1591,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
66169 rcu_read_lock();
66170 cred = __task_cred(task);
66171 inode->i_uid = cred->euid;
66172+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66173+ inode->i_gid = grsec_proc_gid;
66174+#else
66175 inode->i_gid = cred->egid;
66176+#endif
66177 rcu_read_unlock();
66178 }
66179 security_task_to_inode(task, inode);
66180@@ -1534,10 +1631,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
66181 return -ENOENT;
66182 }
66183 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66184+#ifdef CONFIG_GRKERNSEC_PROC_USER
66185+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66186+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66187+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66188+#endif
66189 task_dumpable(task)) {
66190 cred = __task_cred(task);
66191 stat->uid = cred->euid;
66192+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66193+ stat->gid = grsec_proc_gid;
66194+#else
66195 stat->gid = cred->egid;
66196+#endif
66197 }
66198 }
66199 rcu_read_unlock();
66200@@ -1575,11 +1681,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
66201
66202 if (task) {
66203 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
66204+#ifdef CONFIG_GRKERNSEC_PROC_USER
66205+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
66206+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66207+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
66208+#endif
66209 task_dumpable(task)) {
66210 rcu_read_lock();
66211 cred = __task_cred(task);
66212 inode->i_uid = cred->euid;
66213+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66214+ inode->i_gid = grsec_proc_gid;
66215+#else
66216 inode->i_gid = cred->egid;
66217+#endif
66218 rcu_read_unlock();
66219 } else {
66220 inode->i_uid = GLOBAL_ROOT_UID;
66221@@ -2114,6 +2229,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
66222 if (!task)
66223 goto out_no_task;
66224
66225+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66226+ goto out;
66227+
66228 /*
66229 * Yes, it does not scale. And it should not. Don't add
66230 * new entries into /proc/<tgid>/ without very good reasons.
66231@@ -2144,6 +2262,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
66232 if (!task)
66233 return -ENOENT;
66234
66235+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66236+ goto out;
66237+
66238 if (!dir_emit_dots(file, ctx))
66239 goto out;
66240
66241@@ -2535,7 +2656,7 @@ static const struct pid_entry tgid_base_stuff[] = {
66242 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
66243 #endif
66244 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66245-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66246+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66247 ONE("syscall", S_IRUSR, proc_pid_syscall),
66248 #endif
66249 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66250@@ -2560,10 +2681,10 @@ static const struct pid_entry tgid_base_stuff[] = {
66251 #ifdef CONFIG_SECURITY
66252 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66253 #endif
66254-#ifdef CONFIG_KALLSYMS
66255+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66256 ONE("wchan", S_IRUGO, proc_pid_wchan),
66257 #endif
66258-#ifdef CONFIG_STACKTRACE
66259+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66260 ONE("stack", S_IRUSR, proc_pid_stack),
66261 #endif
66262 #ifdef CONFIG_SCHEDSTATS
66263@@ -2597,6 +2718,9 @@ static const struct pid_entry tgid_base_stuff[] = {
66264 #ifdef CONFIG_HARDWALL
66265 ONE("hardwall", S_IRUGO, proc_pid_hardwall),
66266 #endif
66267+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66268+ ONE("ipaddr", S_IRUSR, proc_pid_ipaddr),
66269+#endif
66270 #ifdef CONFIG_USER_NS
66271 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
66272 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
66273@@ -2727,7 +2851,14 @@ static int proc_pid_instantiate(struct inode *dir,
66274 if (!inode)
66275 goto out;
66276
66277+#ifdef CONFIG_GRKERNSEC_PROC_USER
66278+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
66279+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66280+ inode->i_gid = grsec_proc_gid;
66281+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
66282+#else
66283 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
66284+#endif
66285 inode->i_op = &proc_tgid_base_inode_operations;
66286 inode->i_fop = &proc_tgid_base_operations;
66287 inode->i_flags|=S_IMMUTABLE;
66288@@ -2765,7 +2896,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
66289 if (!task)
66290 goto out;
66291
66292+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
66293+ goto out_put_task;
66294+
66295 result = proc_pid_instantiate(dir, dentry, task, NULL);
66296+out_put_task:
66297 put_task_struct(task);
66298 out:
66299 return ERR_PTR(result);
66300@@ -2879,7 +3014,7 @@ static const struct pid_entry tid_base_stuff[] = {
66301 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
66302 #endif
66303 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
66304-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
66305+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66306 ONE("syscall", S_IRUSR, proc_pid_syscall),
66307 #endif
66308 ONE("cmdline", S_IRUGO, proc_pid_cmdline),
66309@@ -2906,10 +3041,10 @@ static const struct pid_entry tid_base_stuff[] = {
66310 #ifdef CONFIG_SECURITY
66311 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
66312 #endif
66313-#ifdef CONFIG_KALLSYMS
66314+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66315 ONE("wchan", S_IRUGO, proc_pid_wchan),
66316 #endif
66317-#ifdef CONFIG_STACKTRACE
66318+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66319 ONE("stack", S_IRUSR, proc_pid_stack),
66320 #endif
66321 #ifdef CONFIG_SCHEDSTATS
66322diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
66323index cbd82df..c0407d2 100644
66324--- a/fs/proc/cmdline.c
66325+++ b/fs/proc/cmdline.c
66326@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
66327
66328 static int __init proc_cmdline_init(void)
66329 {
66330+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66331+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
66332+#else
66333 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
66334+#endif
66335 return 0;
66336 }
66337 fs_initcall(proc_cmdline_init);
66338diff --git a/fs/proc/devices.c b/fs/proc/devices.c
66339index 50493ed..248166b 100644
66340--- a/fs/proc/devices.c
66341+++ b/fs/proc/devices.c
66342@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
66343
66344 static int __init proc_devices_init(void)
66345 {
66346+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66347+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
66348+#else
66349 proc_create("devices", 0, NULL, &proc_devinfo_operations);
66350+#endif
66351 return 0;
66352 }
66353 fs_initcall(proc_devices_init);
66354diff --git a/fs/proc/fd.c b/fs/proc/fd.c
66355index 955bb55..71948bd 100644
66356--- a/fs/proc/fd.c
66357+++ b/fs/proc/fd.c
66358@@ -26,7 +26,8 @@ static int seq_show(struct seq_file *m, void *v)
66359 if (!task)
66360 return -ENOENT;
66361
66362- files = get_files_struct(task);
66363+ if (!gr_acl_handle_procpidmem(task))
66364+ files = get_files_struct(task);
66365 put_task_struct(task);
66366
66367 if (files) {
66368@@ -285,11 +286,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
66369 */
66370 int proc_fd_permission(struct inode *inode, int mask)
66371 {
66372+ struct task_struct *task;
66373 int rv = generic_permission(inode, mask);
66374- if (rv == 0)
66375- return 0;
66376+
66377 if (task_tgid(current) == proc_pid(inode))
66378 rv = 0;
66379+
66380+ task = get_proc_task(inode);
66381+ if (task == NULL)
66382+ return rv;
66383+
66384+ if (gr_acl_handle_procpidmem(task))
66385+ rv = -EACCES;
66386+
66387+ put_task_struct(task);
66388+
66389 return rv;
66390 }
66391
66392diff --git a/fs/proc/generic.c b/fs/proc/generic.c
66393index 317b726..e329aed 100644
66394--- a/fs/proc/generic.c
66395+++ b/fs/proc/generic.c
66396@@ -23,6 +23,7 @@
66397 #include <linux/bitops.h>
66398 #include <linux/spinlock.h>
66399 #include <linux/completion.h>
66400+#include <linux/grsecurity.h>
66401 #include <asm/uaccess.h>
66402
66403 #include "internal.h"
66404@@ -207,6 +208,15 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
66405 return proc_lookup_de(PDE(dir), dir, dentry);
66406 }
66407
66408+struct dentry *proc_lookup_restrict(struct inode *dir, struct dentry *dentry,
66409+ unsigned int flags)
66410+{
66411+ if (gr_proc_is_restricted())
66412+ return ERR_PTR(-EACCES);
66413+
66414+ return proc_lookup_de(PDE(dir), dir, dentry);
66415+}
66416+
66417 /*
66418 * This returns non-zero if at EOF, so that the /proc
66419 * root directory can use this and check if it should
66420@@ -264,6 +274,16 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
66421 return proc_readdir_de(PDE(inode), file, ctx);
66422 }
66423
66424+int proc_readdir_restrict(struct file *file, struct dir_context *ctx)
66425+{
66426+ struct inode *inode = file_inode(file);
66427+
66428+ if (gr_proc_is_restricted())
66429+ return -EACCES;
66430+
66431+ return proc_readdir_de(PDE(inode), file, ctx);
66432+}
66433+
66434 /*
66435 * These are the generic /proc directory operations. They
66436 * use the in-memory "struct proc_dir_entry" tree to parse
66437@@ -275,6 +295,12 @@ static const struct file_operations proc_dir_operations = {
66438 .iterate = proc_readdir,
66439 };
66440
66441+static const struct file_operations proc_dir_restricted_operations = {
66442+ .llseek = generic_file_llseek,
66443+ .read = generic_read_dir,
66444+ .iterate = proc_readdir_restrict,
66445+};
66446+
66447 /*
66448 * proc directories can do almost nothing..
66449 */
66450@@ -284,6 +310,12 @@ static const struct inode_operations proc_dir_inode_operations = {
66451 .setattr = proc_notify_change,
66452 };
66453
66454+static const struct inode_operations proc_dir_restricted_inode_operations = {
66455+ .lookup = proc_lookup_restrict,
66456+ .getattr = proc_getattr,
66457+ .setattr = proc_notify_change,
66458+};
66459+
66460 static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp)
66461 {
66462 struct proc_dir_entry *tmp;
66463@@ -294,8 +326,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
66464 return ret;
66465
66466 if (S_ISDIR(dp->mode)) {
66467- dp->proc_fops = &proc_dir_operations;
66468- dp->proc_iops = &proc_dir_inode_operations;
66469+ if (dp->restricted) {
66470+ dp->proc_fops = &proc_dir_restricted_operations;
66471+ dp->proc_iops = &proc_dir_restricted_inode_operations;
66472+ } else {
66473+ dp->proc_fops = &proc_dir_operations;
66474+ dp->proc_iops = &proc_dir_inode_operations;
66475+ }
66476 dir->nlink++;
66477 } else if (S_ISLNK(dp->mode)) {
66478 dp->proc_iops = &proc_link_inode_operations;
66479@@ -407,6 +444,27 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
66480 }
66481 EXPORT_SYMBOL_GPL(proc_mkdir_data);
66482
66483+struct proc_dir_entry *proc_mkdir_data_restrict(const char *name, umode_t mode,
66484+ struct proc_dir_entry *parent, void *data)
66485+{
66486+ struct proc_dir_entry *ent;
66487+
66488+ if (mode == 0)
66489+ mode = S_IRUGO | S_IXUGO;
66490+
66491+ ent = __proc_create(&parent, name, S_IFDIR | mode, 2);
66492+ if (ent) {
66493+ ent->data = data;
66494+ ent->restricted = 1;
66495+ if (proc_register(parent, ent) < 0) {
66496+ kfree(ent);
66497+ ent = NULL;
66498+ }
66499+ }
66500+ return ent;
66501+}
66502+EXPORT_SYMBOL_GPL(proc_mkdir_data_restrict);
66503+
66504 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
66505 struct proc_dir_entry *parent)
66506 {
66507@@ -421,6 +479,13 @@ struct proc_dir_entry *proc_mkdir(const char *name,
66508 }
66509 EXPORT_SYMBOL(proc_mkdir);
66510
66511+struct proc_dir_entry *proc_mkdir_restrict(const char *name,
66512+ struct proc_dir_entry *parent)
66513+{
66514+ return proc_mkdir_data_restrict(name, 0, parent, NULL);
66515+}
66516+EXPORT_SYMBOL(proc_mkdir_restrict);
66517+
66518 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
66519 struct proc_dir_entry *parent,
66520 const struct file_operations *proc_fops,
66521diff --git a/fs/proc/inode.c b/fs/proc/inode.c
66522index 333080d..0a35ec4 100644
66523--- a/fs/proc/inode.c
66524+++ b/fs/proc/inode.c
66525@@ -23,11 +23,17 @@
66526 #include <linux/slab.h>
66527 #include <linux/mount.h>
66528 #include <linux/magic.h>
66529+#include <linux/grsecurity.h>
66530
66531 #include <asm/uaccess.h>
66532
66533 #include "internal.h"
66534
66535+#ifdef CONFIG_PROC_SYSCTL
66536+extern const struct inode_operations proc_sys_inode_operations;
66537+extern const struct inode_operations proc_sys_dir_operations;
66538+#endif
66539+
66540 static void proc_evict_inode(struct inode *inode)
66541 {
66542 struct proc_dir_entry *de;
66543@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
66544 ns = PROC_I(inode)->ns.ns;
66545 if (ns_ops && ns)
66546 ns_ops->put(ns);
66547+
66548+#ifdef CONFIG_PROC_SYSCTL
66549+ if (inode->i_op == &proc_sys_inode_operations ||
66550+ inode->i_op == &proc_sys_dir_operations)
66551+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
66552+#endif
66553+
66554 }
66555
66556 static struct kmem_cache * proc_inode_cachep;
66557@@ -413,7 +426,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
66558 if (de->mode) {
66559 inode->i_mode = de->mode;
66560 inode->i_uid = de->uid;
66561+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66562+ inode->i_gid = grsec_proc_gid;
66563+#else
66564 inode->i_gid = de->gid;
66565+#endif
66566 }
66567 if (de->size)
66568 inode->i_size = de->size;
66569diff --git a/fs/proc/internal.h b/fs/proc/internal.h
66570index 7da13e4..68d0981 100644
66571--- a/fs/proc/internal.h
66572+++ b/fs/proc/internal.h
66573@@ -46,9 +46,10 @@ struct proc_dir_entry {
66574 struct completion *pde_unload_completion;
66575 struct list_head pde_openers; /* who did ->open, but not ->release */
66576 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
66577+ u8 restricted; /* a directory in /proc/net that should be restricted via GRKERNSEC_PROC */
66578 u8 namelen;
66579 char name[];
66580-};
66581+} __randomize_layout;
66582
66583 union proc_op {
66584 int (*proc_get_link)(struct dentry *, struct path *);
66585@@ -66,7 +67,7 @@ struct proc_inode {
66586 struct ctl_table *sysctl_entry;
66587 struct proc_ns ns;
66588 struct inode vfs_inode;
66589-};
66590+} __randomize_layout;
66591
66592 /*
66593 * General functions
66594@@ -154,6 +155,10 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
66595 struct pid *, struct task_struct *);
66596 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
66597 struct pid *, struct task_struct *);
66598+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
66599+extern int proc_pid_ipaddr(struct seq_file *, struct pid_namespace *,
66600+ struct pid *, struct task_struct *);
66601+#endif
66602
66603 /*
66604 * base.c
66605@@ -178,9 +183,11 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
66606 * generic.c
66607 */
66608 extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
66609+extern struct dentry *proc_lookup_restrict(struct inode *, struct dentry *, unsigned int);
66610 extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
66611 struct dentry *);
66612 extern int proc_readdir(struct file *, struct dir_context *);
66613+extern int proc_readdir_restrict(struct file *, struct dir_context *);
66614 extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
66615
66616 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
66617diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
66618index a352d57..cb94a5c 100644
66619--- a/fs/proc/interrupts.c
66620+++ b/fs/proc/interrupts.c
66621@@ -47,7 +47,11 @@ static const struct file_operations proc_interrupts_operations = {
66622
66623 static int __init proc_interrupts_init(void)
66624 {
66625+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66626+ proc_create_grsec("interrupts", 0, NULL, &proc_interrupts_operations);
66627+#else
66628 proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
66629+#endif
66630 return 0;
66631 }
66632 fs_initcall(proc_interrupts_init);
66633diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
66634index 6df8d07..3321060 100644
66635--- a/fs/proc/kcore.c
66636+++ b/fs/proc/kcore.c
66637@@ -483,9 +483,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66638 * the addresses in the elf_phdr on our list.
66639 */
66640 start = kc_offset_to_vaddr(*fpos - elf_buflen);
66641- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
66642+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
66643+ if (tsz > buflen)
66644 tsz = buflen;
66645-
66646+
66647 while (buflen) {
66648 struct kcore_list *m;
66649
66650@@ -514,20 +515,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66651 kfree(elf_buf);
66652 } else {
66653 if (kern_addr_valid(start)) {
66654- unsigned long n;
66655+ char *elf_buf;
66656+ mm_segment_t oldfs;
66657
66658- n = copy_to_user(buffer, (char *)start, tsz);
66659- /*
66660- * We cannot distinguish between fault on source
66661- * and fault on destination. When this happens
66662- * we clear too and hope it will trigger the
66663- * EFAULT again.
66664- */
66665- if (n) {
66666- if (clear_user(buffer + tsz - n,
66667- n))
66668+ elf_buf = kmalloc(tsz, GFP_KERNEL);
66669+ if (!elf_buf)
66670+ return -ENOMEM;
66671+ oldfs = get_fs();
66672+ set_fs(KERNEL_DS);
66673+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
66674+ set_fs(oldfs);
66675+ if (copy_to_user(buffer, elf_buf, tsz)) {
66676+ kfree(elf_buf);
66677 return -EFAULT;
66678+ }
66679 }
66680+ set_fs(oldfs);
66681+ kfree(elf_buf);
66682 } else {
66683 if (clear_user(buffer, tsz))
66684 return -EFAULT;
66685@@ -547,6 +551,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
66686
66687 static int open_kcore(struct inode *inode, struct file *filp)
66688 {
66689+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66690+ return -EPERM;
66691+#endif
66692 if (!capable(CAP_SYS_RAWIO))
66693 return -EPERM;
66694 if (kcore_need_update)
66695diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
66696index aa1eee0..03dda72 100644
66697--- a/fs/proc/meminfo.c
66698+++ b/fs/proc/meminfo.c
66699@@ -187,7 +187,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
66700 vmi.used >> 10,
66701 vmi.largest_chunk >> 10
66702 #ifdef CONFIG_MEMORY_FAILURE
66703- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66704+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
66705 #endif
66706 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
66707 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
66708diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
66709index d4a3574..b421ce9 100644
66710--- a/fs/proc/nommu.c
66711+++ b/fs/proc/nommu.c
66712@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
66713
66714 if (file) {
66715 seq_pad(m, ' ');
66716- seq_path(m, &file->f_path, "");
66717+ seq_path(m, &file->f_path, "\n\\");
66718 }
66719
66720 seq_putc(m, '\n');
66721diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
66722index a63af3e..b4f262a 100644
66723--- a/fs/proc/proc_net.c
66724+++ b/fs/proc/proc_net.c
66725@@ -23,9 +23,27 @@
66726 #include <linux/nsproxy.h>
66727 #include <net/net_namespace.h>
66728 #include <linux/seq_file.h>
66729+#include <linux/grsecurity.h>
66730
66731 #include "internal.h"
66732
66733+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66734+static struct seq_operations *ipv6_seq_ops_addr;
66735+
66736+void register_ipv6_seq_ops_addr(struct seq_operations *addr)
66737+{
66738+ ipv6_seq_ops_addr = addr;
66739+}
66740+
66741+void unregister_ipv6_seq_ops_addr(void)
66742+{
66743+ ipv6_seq_ops_addr = NULL;
66744+}
66745+
66746+EXPORT_SYMBOL_GPL(register_ipv6_seq_ops_addr);
66747+EXPORT_SYMBOL_GPL(unregister_ipv6_seq_ops_addr);
66748+#endif
66749+
66750 static inline struct net *PDE_NET(struct proc_dir_entry *pde)
66751 {
66752 return pde->parent->data;
66753@@ -36,6 +54,8 @@ static struct net *get_proc_net(const struct inode *inode)
66754 return maybe_get_net(PDE_NET(PDE(inode)));
66755 }
66756
66757+extern const struct seq_operations dev_seq_ops;
66758+
66759 int seq_open_net(struct inode *ino, struct file *f,
66760 const struct seq_operations *ops, int size)
66761 {
66762@@ -44,6 +64,14 @@ int seq_open_net(struct inode *ino, struct file *f,
66763
66764 BUG_ON(size < sizeof(*p));
66765
66766+ /* only permit access to /proc/net/dev */
66767+ if (
66768+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
66769+ ops != ipv6_seq_ops_addr &&
66770+#endif
66771+ ops != &dev_seq_ops && gr_proc_is_restricted())
66772+ return -EACCES;
66773+
66774 net = get_proc_net(ino);
66775 if (net == NULL)
66776 return -ENXIO;
66777@@ -66,6 +94,9 @@ int single_open_net(struct inode *inode, struct file *file,
66778 int err;
66779 struct net *net;
66780
66781+ if (gr_proc_is_restricted())
66782+ return -EACCES;
66783+
66784 err = -ENXIO;
66785 net = get_proc_net(inode);
66786 if (net == NULL)
66787diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
66788index f92d5dd..26398ac 100644
66789--- a/fs/proc/proc_sysctl.c
66790+++ b/fs/proc/proc_sysctl.c
66791@@ -11,13 +11,21 @@
66792 #include <linux/namei.h>
66793 #include <linux/mm.h>
66794 #include <linux/module.h>
66795+#include <linux/nsproxy.h>
66796+#ifdef CONFIG_GRKERNSEC
66797+#include <net/net_namespace.h>
66798+#endif
66799 #include "internal.h"
66800
66801+extern int gr_handle_chroot_sysctl(const int op);
66802+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66803+ const int op);
66804+
66805 static const struct dentry_operations proc_sys_dentry_operations;
66806 static const struct file_operations proc_sys_file_operations;
66807-static const struct inode_operations proc_sys_inode_operations;
66808+const struct inode_operations proc_sys_inode_operations;
66809 static const struct file_operations proc_sys_dir_file_operations;
66810-static const struct inode_operations proc_sys_dir_operations;
66811+const struct inode_operations proc_sys_dir_operations;
66812
66813 void proc_sys_poll_notify(struct ctl_table_poll *poll)
66814 {
66815@@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
66816
66817 err = NULL;
66818 d_set_d_op(dentry, &proc_sys_dentry_operations);
66819+
66820+ gr_handle_proc_create(dentry, inode);
66821+
66822 d_add(dentry, inode);
66823
66824 out:
66825@@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66826 struct inode *inode = file_inode(filp);
66827 struct ctl_table_header *head = grab_header(inode);
66828 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
66829+ int op = write ? MAY_WRITE : MAY_READ;
66830 ssize_t error;
66831 size_t res;
66832
66833@@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66834 * and won't be until we finish.
66835 */
66836 error = -EPERM;
66837- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
66838+ if (sysctl_perm(head, table, op))
66839 goto out;
66840
66841 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
66842@@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
66843 if (!table->proc_handler)
66844 goto out;
66845
66846+#ifdef CONFIG_GRKERNSEC
66847+ error = -EPERM;
66848+ if (gr_handle_chroot_sysctl(op))
66849+ goto out;
66850+ dget(filp->f_path.dentry);
66851+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
66852+ dput(filp->f_path.dentry);
66853+ goto out;
66854+ }
66855+ dput(filp->f_path.dentry);
66856+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
66857+ goto out;
66858+ if (write) {
66859+ if (current->nsproxy->net_ns != table->extra2) {
66860+ if (!capable(CAP_SYS_ADMIN))
66861+ goto out;
66862+ } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
66863+ goto out;
66864+ }
66865+#endif
66866+
66867 /* careful: calling conventions are nasty here */
66868 res = count;
66869 error = table->proc_handler(table, write, buf, &res, ppos);
66870@@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
66871 return false;
66872 } else {
66873 d_set_d_op(child, &proc_sys_dentry_operations);
66874+
66875+ gr_handle_proc_create(child, inode);
66876+
66877 d_add(child, inode);
66878 }
66879 } else {
66880@@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, struct ctl_table *table,
66881 if ((*pos)++ < ctx->pos)
66882 return true;
66883
66884+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
66885+ return 0;
66886+
66887 if (unlikely(S_ISLNK(table->mode)))
66888 res = proc_sys_link_fill_cache(file, ctx, head, table);
66889 else
66890@@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
66891 if (IS_ERR(head))
66892 return PTR_ERR(head);
66893
66894+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
66895+ return -ENOENT;
66896+
66897 generic_fillattr(inode, stat);
66898 if (table)
66899 stat->mode = (stat->mode & S_IFMT) | table->mode;
66900@@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
66901 .llseek = generic_file_llseek,
66902 };
66903
66904-static const struct inode_operations proc_sys_inode_operations = {
66905+const struct inode_operations proc_sys_inode_operations = {
66906 .permission = proc_sys_permission,
66907 .setattr = proc_sys_setattr,
66908 .getattr = proc_sys_getattr,
66909 };
66910
66911-static const struct inode_operations proc_sys_dir_operations = {
66912+const struct inode_operations proc_sys_dir_operations = {
66913 .lookup = proc_sys_lookup,
66914 .permission = proc_sys_permission,
66915 .setattr = proc_sys_setattr,
66916@@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
66917 static struct ctl_dir *new_dir(struct ctl_table_set *set,
66918 const char *name, int namelen)
66919 {
66920- struct ctl_table *table;
66921+ ctl_table_no_const *table;
66922 struct ctl_dir *new;
66923 struct ctl_node *node;
66924 char *new_name;
66925@@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
66926 return NULL;
66927
66928 node = (struct ctl_node *)(new + 1);
66929- table = (struct ctl_table *)(node + 1);
66930+ table = (ctl_table_no_const *)(node + 1);
66931 new_name = (char *)(table + 2);
66932 memcpy(new_name, name, namelen);
66933 new_name[namelen] = '\0';
66934@@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
66935 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
66936 struct ctl_table_root *link_root)
66937 {
66938- struct ctl_table *link_table, *entry, *link;
66939+ ctl_table_no_const *link_table, *link;
66940+ struct ctl_table *entry;
66941 struct ctl_table_header *links;
66942 struct ctl_node *node;
66943 char *link_name;
66944@@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
66945 return NULL;
66946
66947 node = (struct ctl_node *)(links + 1);
66948- link_table = (struct ctl_table *)(node + nr_entries);
66949+ link_table = (ctl_table_no_const *)(node + nr_entries);
66950 link_name = (char *)&link_table[nr_entries + 1];
66951
66952 for (link = link_table, entry = table; entry->procname; link++, entry++) {
66953@@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66954 struct ctl_table_header ***subheader, struct ctl_table_set *set,
66955 struct ctl_table *table)
66956 {
66957- struct ctl_table *ctl_table_arg = NULL;
66958- struct ctl_table *entry, *files;
66959+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
66960+ struct ctl_table *entry;
66961 int nr_files = 0;
66962 int nr_dirs = 0;
66963 int err = -ENOMEM;
66964@@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66965 nr_files++;
66966 }
66967
66968- files = table;
66969 /* If there are mixed files and directories we need a new table */
66970 if (nr_dirs && nr_files) {
66971- struct ctl_table *new;
66972+ ctl_table_no_const *new;
66973 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
66974 GFP_KERNEL);
66975 if (!files)
66976@@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
66977 /* Register everything except a directory full of subdirectories */
66978 if (nr_files || !nr_dirs) {
66979 struct ctl_table_header *header;
66980- header = __register_sysctl_table(set, path, files);
66981+ header = __register_sysctl_table(set, path, files ? files : table);
66982 if (!header) {
66983 kfree(ctl_table_arg);
66984 goto out;
66985diff --git a/fs/proc/root.c b/fs/proc/root.c
66986index 094e44d..085a877 100644
66987--- a/fs/proc/root.c
66988+++ b/fs/proc/root.c
66989@@ -188,7 +188,15 @@ void __init proc_root_init(void)
66990 proc_mkdir("openprom", NULL);
66991 #endif
66992 proc_tty_init();
66993+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66994+#ifdef CONFIG_GRKERNSEC_PROC_USER
66995+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
66996+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66997+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
66998+#endif
66999+#else
67000 proc_mkdir("bus", NULL);
67001+#endif
67002 proc_sys_init();
67003 }
67004
67005diff --git a/fs/proc/stat.c b/fs/proc/stat.c
67006index bf2d03f..f058f9c 100644
67007--- a/fs/proc/stat.c
67008+++ b/fs/proc/stat.c
67009@@ -11,6 +11,7 @@
67010 #include <linux/irqnr.h>
67011 #include <linux/cputime.h>
67012 #include <linux/tick.h>
67013+#include <linux/grsecurity.h>
67014
67015 #ifndef arch_irq_stat_cpu
67016 #define arch_irq_stat_cpu(cpu) 0
67017@@ -87,6 +88,18 @@ static int show_stat(struct seq_file *p, void *v)
67018 u64 sum_softirq = 0;
67019 unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
67020 struct timespec boottime;
67021+ int unrestricted = 1;
67022+
67023+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67024+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67025+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
67026+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67027+ && !in_group_p(grsec_proc_gid)
67028+#endif
67029+ )
67030+ unrestricted = 0;
67031+#endif
67032+#endif
67033
67034 user = nice = system = idle = iowait =
67035 irq = softirq = steal = 0;
67036@@ -99,23 +112,25 @@ static int show_stat(struct seq_file *p, void *v)
67037 nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67038 system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67039 idle += get_idle_time(i);
67040- iowait += get_iowait_time(i);
67041- irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67042- softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67043- steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67044- guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67045- guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67046- sum += kstat_cpu_irqs_sum(i);
67047- sum += arch_irq_stat_cpu(i);
67048+ if (unrestricted) {
67049+ iowait += get_iowait_time(i);
67050+ irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67051+ softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67052+ steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67053+ guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67054+ guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67055+ sum += kstat_cpu_irqs_sum(i);
67056+ sum += arch_irq_stat_cpu(i);
67057+ for (j = 0; j < NR_SOFTIRQS; j++) {
67058+ unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67059
67060- for (j = 0; j < NR_SOFTIRQS; j++) {
67061- unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
67062-
67063- per_softirq_sums[j] += softirq_stat;
67064- sum_softirq += softirq_stat;
67065+ per_softirq_sums[j] += softirq_stat;
67066+ sum_softirq += softirq_stat;
67067+ }
67068 }
67069 }
67070- sum += arch_irq_stat();
67071+ if (unrestricted)
67072+ sum += arch_irq_stat();
67073
67074 seq_puts(p, "cpu ");
67075 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67076@@ -136,12 +151,14 @@ static int show_stat(struct seq_file *p, void *v)
67077 nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
67078 system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
67079 idle = get_idle_time(i);
67080- iowait = get_iowait_time(i);
67081- irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67082- softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67083- steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67084- guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67085- guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67086+ if (unrestricted) {
67087+ iowait = get_iowait_time(i);
67088+ irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
67089+ softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
67090+ steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
67091+ guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
67092+ guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
67093+ }
67094 seq_printf(p, "cpu%d", i);
67095 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
67096 seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
67097@@ -159,7 +176,7 @@ static int show_stat(struct seq_file *p, void *v)
67098
67099 /* sum again ? it could be updated? */
67100 for_each_irq_nr(j)
67101- seq_put_decimal_ull(p, ' ', kstat_irqs(j));
67102+ seq_put_decimal_ull(p, ' ', unrestricted ? kstat_irqs(j) : 0ULL);
67103
67104 seq_printf(p,
67105 "\nctxt %llu\n"
67106@@ -167,11 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
67107 "processes %lu\n"
67108 "procs_running %lu\n"
67109 "procs_blocked %lu\n",
67110- nr_context_switches(),
67111+ unrestricted ? nr_context_switches() : 0ULL,
67112 (unsigned long)jif,
67113- total_forks,
67114- nr_running(),
67115- nr_iowait());
67116+ unrestricted ? total_forks : 0UL,
67117+ unrestricted ? nr_running() : 0UL,
67118+ unrestricted ? nr_iowait() : 0UL);
67119
67120 seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);
67121
67122diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
67123index c341568..75852a2 100644
67124--- a/fs/proc/task_mmu.c
67125+++ b/fs/proc/task_mmu.c
67126@@ -13,12 +13,19 @@
67127 #include <linux/swap.h>
67128 #include <linux/swapops.h>
67129 #include <linux/mmu_notifier.h>
67130+#include <linux/grsecurity.h>
67131
67132 #include <asm/elf.h>
67133 #include <asm/uaccess.h>
67134 #include <asm/tlbflush.h>
67135 #include "internal.h"
67136
67137+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67138+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
67139+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
67140+ _mm->pax_flags & MF_PAX_SEGMEXEC))
67141+#endif
67142+
67143 void task_mem(struct seq_file *m, struct mm_struct *mm)
67144 {
67145 unsigned long data, text, lib, swap;
67146@@ -54,8 +61,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67147 "VmExe:\t%8lu kB\n"
67148 "VmLib:\t%8lu kB\n"
67149 "VmPTE:\t%8lu kB\n"
67150- "VmSwap:\t%8lu kB\n",
67151- hiwater_vm << (PAGE_SHIFT-10),
67152+ "VmSwap:\t%8lu kB\n"
67153+
67154+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67155+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
67156+#endif
67157+
67158+ ,hiwater_vm << (PAGE_SHIFT-10),
67159 total_vm << (PAGE_SHIFT-10),
67160 mm->locked_vm << (PAGE_SHIFT-10),
67161 mm->pinned_vm << (PAGE_SHIFT-10),
67162@@ -65,7 +77,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67163 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
67164 (PTRS_PER_PTE * sizeof(pte_t) *
67165 atomic_long_read(&mm->nr_ptes)) >> 10,
67166- swap << (PAGE_SHIFT-10));
67167+ swap << (PAGE_SHIFT-10)
67168+
67169+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67170+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67171+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
67172+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
67173+#else
67174+ , mm->context.user_cs_base
67175+ , mm->context.user_cs_limit
67176+#endif
67177+#endif
67178+
67179+ );
67180 }
67181
67182 unsigned long task_vsize(struct mm_struct *mm)
67183@@ -271,13 +295,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67184 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
67185 }
67186
67187- /* We don't show the stack guard page in /proc/maps */
67188+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67189+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
67190+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
67191+#else
67192 start = vma->vm_start;
67193- if (stack_guard_page_start(vma, start))
67194- start += PAGE_SIZE;
67195 end = vma->vm_end;
67196- if (stack_guard_page_end(vma, end))
67197- end -= PAGE_SIZE;
67198+#endif
67199
67200 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
67201 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
67202@@ -287,7 +311,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67203 flags & VM_WRITE ? 'w' : '-',
67204 flags & VM_EXEC ? 'x' : '-',
67205 flags & VM_MAYSHARE ? 's' : 'p',
67206+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67207+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
67208+#else
67209 pgoff,
67210+#endif
67211 MAJOR(dev), MINOR(dev), ino);
67212
67213 /*
67214@@ -296,7 +324,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67215 */
67216 if (file) {
67217 seq_pad(m, ' ');
67218- seq_path(m, &file->f_path, "\n");
67219+ seq_path(m, &file->f_path, "\n\\");
67220 goto done;
67221 }
67222
67223@@ -328,8 +356,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
67224 * Thread stack in /proc/PID/task/TID/maps or
67225 * the main process stack.
67226 */
67227- if (!is_pid || (vma->vm_start <= mm->start_stack &&
67228- vma->vm_end >= mm->start_stack)) {
67229+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
67230+ (vma->vm_start <= mm->start_stack &&
67231+ vma->vm_end >= mm->start_stack)) {
67232 name = "[stack]";
67233 } else {
67234 /* Thread stack in /proc/PID/maps */
67235@@ -353,6 +382,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
67236 struct proc_maps_private *priv = m->private;
67237 struct task_struct *task = priv->task;
67238
67239+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67240+ if (current->exec_id != m->exec_id) {
67241+ gr_log_badprocpid("maps");
67242+ return 0;
67243+ }
67244+#endif
67245+
67246 show_map_vma(m, vma, is_pid);
67247
67248 if (m->count < m->size) /* vma is copied successfully */
67249@@ -593,12 +629,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67250 .private = &mss,
67251 };
67252
67253+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67254+ if (current->exec_id != m->exec_id) {
67255+ gr_log_badprocpid("smaps");
67256+ return 0;
67257+ }
67258+#endif
67259 memset(&mss, 0, sizeof mss);
67260- mss.vma = vma;
67261- /* mmap_sem is held in m_start */
67262- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67263- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67264-
67265+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67266+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
67267+#endif
67268+ mss.vma = vma;
67269+ /* mmap_sem is held in m_start */
67270+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
67271+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
67272+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67273+ }
67274+#endif
67275 show_map_vma(m, vma, is_pid);
67276
67277 seq_printf(m,
67278@@ -616,7 +663,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
67279 "KernelPageSize: %8lu kB\n"
67280 "MMUPageSize: %8lu kB\n"
67281 "Locked: %8lu kB\n",
67282+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67283+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
67284+#else
67285 (vma->vm_end - vma->vm_start) >> 10,
67286+#endif
67287 mss.resident >> 10,
67288 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
67289 mss.shared_clean >> 10,
67290@@ -1422,6 +1473,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67291 char buffer[64];
67292 int nid;
67293
67294+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67295+ if (current->exec_id != m->exec_id) {
67296+ gr_log_badprocpid("numa_maps");
67297+ return 0;
67298+ }
67299+#endif
67300+
67301 if (!mm)
67302 return 0;
67303
67304@@ -1439,11 +1497,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
67305 mpol_to_str(buffer, sizeof(buffer), pol);
67306 mpol_cond_put(pol);
67307
67308+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67309+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
67310+#else
67311 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
67312+#endif
67313
67314 if (file) {
67315 seq_puts(m, " file=");
67316- seq_path(m, &file->f_path, "\n\t= ");
67317+ seq_path(m, &file->f_path, "\n\t\\= ");
67318 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
67319 seq_puts(m, " heap");
67320 } else {
67321diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
67322index 678455d..ebd3245 100644
67323--- a/fs/proc/task_nommu.c
67324+++ b/fs/proc/task_nommu.c
67325@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
67326 else
67327 bytes += kobjsize(mm);
67328
67329- if (current->fs && current->fs->users > 1)
67330+ if (current->fs && atomic_read(&current->fs->users) > 1)
67331 sbytes += kobjsize(current->fs);
67332 else
67333 bytes += kobjsize(current->fs);
67334@@ -161,7 +161,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
67335
67336 if (file) {
67337 seq_pad(m, ' ');
67338- seq_path(m, &file->f_path, "");
67339+ seq_path(m, &file->f_path, "\n\\");
67340 } else if (mm) {
67341 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
67342
67343diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
67344index a90d6d35..d08047c 100644
67345--- a/fs/proc/vmcore.c
67346+++ b/fs/proc/vmcore.c
67347@@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
67348 nr_bytes = count;
67349
67350 /* If pfn is not ram, return zeros for sparse dump files */
67351- if (pfn_is_ram(pfn) == 0)
67352- memset(buf, 0, nr_bytes);
67353- else {
67354+ if (pfn_is_ram(pfn) == 0) {
67355+ if (userbuf) {
67356+ if (clear_user((char __force_user *)buf, nr_bytes))
67357+ return -EFAULT;
67358+ } else
67359+ memset(buf, 0, nr_bytes);
67360+ } else {
67361 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
67362 offset, userbuf);
67363 if (tmp < 0)
67364@@ -170,7 +174,7 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
67365 static int copy_to(void *target, void *src, size_t size, int userbuf)
67366 {
67367 if (userbuf) {
67368- if (copy_to_user((char __user *) target, src, size))
67369+ if (copy_to_user((char __force_user *) target, src, size))
67370 return -EFAULT;
67371 } else {
67372 memcpy(target, src, size);
67373@@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67374 if (*fpos < m->offset + m->size) {
67375 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
67376 start = m->paddr + *fpos - m->offset;
67377- tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
67378+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
67379 if (tmp < 0)
67380 return tmp;
67381 buflen -= tsz;
67382@@ -253,7 +257,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
67383 static ssize_t read_vmcore(struct file *file, char __user *buffer,
67384 size_t buflen, loff_t *fpos)
67385 {
67386- return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
67387+ return __read_vmcore((__force_kernel char *) buffer, buflen, fpos, 1);
67388 }
67389
67390 /*
67391diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
67392index d3fb2b6..43a8140 100644
67393--- a/fs/qnx6/qnx6.h
67394+++ b/fs/qnx6/qnx6.h
67395@@ -74,7 +74,7 @@ enum {
67396 BYTESEX_BE,
67397 };
67398
67399-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67400+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
67401 {
67402 if (sbi->s_bytesex == BYTESEX_LE)
67403 return le64_to_cpu((__force __le64)n);
67404@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
67405 return (__force __fs64)cpu_to_be64(n);
67406 }
67407
67408-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67409+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
67410 {
67411 if (sbi->s_bytesex == BYTESEX_LE)
67412 return le32_to_cpu((__force __le32)n);
67413diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
67414index bb2869f..d34ada8 100644
67415--- a/fs/quota/netlink.c
67416+++ b/fs/quota/netlink.c
67417@@ -44,7 +44,7 @@ static struct genl_family quota_genl_family = {
67418 void quota_send_warning(struct kqid qid, dev_t dev,
67419 const char warntype)
67420 {
67421- static atomic_t seq;
67422+ static atomic_unchecked_t seq;
67423 struct sk_buff *skb;
67424 void *msg_head;
67425 int ret;
67426@@ -60,7 +60,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
67427 "VFS: Not enough memory to send quota warning.\n");
67428 return;
67429 }
67430- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
67431+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
67432 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
67433 if (!msg_head) {
67434 printk(KERN_ERR
67435diff --git a/fs/read_write.c b/fs/read_write.c
67436index 009d854..16ce214 100644
67437--- a/fs/read_write.c
67438+++ b/fs/read_write.c
67439@@ -495,7 +495,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
67440
67441 old_fs = get_fs();
67442 set_fs(get_ds());
67443- p = (__force const char __user *)buf;
67444+ p = (const char __force_user *)buf;
67445 if (count > MAX_RW_COUNT)
67446 count = MAX_RW_COUNT;
67447 if (file->f_op->write)
67448diff --git a/fs/readdir.c b/fs/readdir.c
67449index 33fd922..e0d6094 100644
67450--- a/fs/readdir.c
67451+++ b/fs/readdir.c
67452@@ -18,6 +18,7 @@
67453 #include <linux/security.h>
67454 #include <linux/syscalls.h>
67455 #include <linux/unistd.h>
67456+#include <linux/namei.h>
67457
67458 #include <asm/uaccess.h>
67459
67460@@ -71,6 +72,7 @@ struct old_linux_dirent {
67461 struct readdir_callback {
67462 struct dir_context ctx;
67463 struct old_linux_dirent __user * dirent;
67464+ struct file * file;
67465 int result;
67466 };
67467
67468@@ -88,6 +90,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
67469 buf->result = -EOVERFLOW;
67470 return -EOVERFLOW;
67471 }
67472+
67473+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67474+ return 0;
67475+
67476 buf->result++;
67477 dirent = buf->dirent;
67478 if (!access_ok(VERIFY_WRITE, dirent,
67479@@ -119,6 +125,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
67480 if (!f.file)
67481 return -EBADF;
67482
67483+ buf.file = f.file;
67484 error = iterate_dir(f.file, &buf.ctx);
67485 if (buf.result)
67486 error = buf.result;
67487@@ -144,6 +151,7 @@ struct getdents_callback {
67488 struct dir_context ctx;
67489 struct linux_dirent __user * current_dir;
67490 struct linux_dirent __user * previous;
67491+ struct file * file;
67492 int count;
67493 int error;
67494 };
67495@@ -165,6 +173,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
67496 buf->error = -EOVERFLOW;
67497 return -EOVERFLOW;
67498 }
67499+
67500+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67501+ return 0;
67502+
67503 dirent = buf->previous;
67504 if (dirent) {
67505 if (__put_user(offset, &dirent->d_off))
67506@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
67507 if (!f.file)
67508 return -EBADF;
67509
67510+ buf.file = f.file;
67511 error = iterate_dir(f.file, &buf.ctx);
67512 if (error >= 0)
67513 error = buf.error;
67514@@ -228,6 +241,7 @@ struct getdents_callback64 {
67515 struct dir_context ctx;
67516 struct linux_dirent64 __user * current_dir;
67517 struct linux_dirent64 __user * previous;
67518+ struct file *file;
67519 int count;
67520 int error;
67521 };
67522@@ -243,6 +257,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
67523 buf->error = -EINVAL; /* only used if we fail.. */
67524 if (reclen > buf->count)
67525 return -EINVAL;
67526+
67527+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
67528+ return 0;
67529+
67530 dirent = buf->previous;
67531 if (dirent) {
67532 if (__put_user(offset, &dirent->d_off))
67533@@ -290,6 +308,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
67534 if (!f.file)
67535 return -EBADF;
67536
67537+ buf.file = f.file;
67538 error = iterate_dir(f.file, &buf.ctx);
67539 if (error >= 0)
67540 error = buf.error;
67541diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
67542index 9c02d96..6562c10 100644
67543--- a/fs/reiserfs/do_balan.c
67544+++ b/fs/reiserfs/do_balan.c
67545@@ -1887,7 +1887,7 @@ void do_balance(struct tree_balance *tb, struct item_head *ih,
67546 return;
67547 }
67548
67549- atomic_inc(&fs_generation(tb->tb_sb));
67550+ atomic_inc_unchecked(&fs_generation(tb->tb_sb));
67551 do_balance_starts(tb);
67552
67553 /*
67554diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
67555index aca73dd..e3c558d 100644
67556--- a/fs/reiserfs/item_ops.c
67557+++ b/fs/reiserfs/item_ops.c
67558@@ -724,18 +724,18 @@ static void errcatch_print_vi(struct virtual_item *vi)
67559 }
67560
67561 static struct item_operations errcatch_ops = {
67562- errcatch_bytes_number,
67563- errcatch_decrement_key,
67564- errcatch_is_left_mergeable,
67565- errcatch_print_item,
67566- errcatch_check_item,
67567+ .bytes_number = errcatch_bytes_number,
67568+ .decrement_key = errcatch_decrement_key,
67569+ .is_left_mergeable = errcatch_is_left_mergeable,
67570+ .print_item = errcatch_print_item,
67571+ .check_item = errcatch_check_item,
67572
67573- errcatch_create_vi,
67574- errcatch_check_left,
67575- errcatch_check_right,
67576- errcatch_part_size,
67577- errcatch_unit_num,
67578- errcatch_print_vi
67579+ .create_vi = errcatch_create_vi,
67580+ .check_left = errcatch_check_left,
67581+ .check_right = errcatch_check_right,
67582+ .part_size = errcatch_part_size,
67583+ .unit_num = errcatch_unit_num,
67584+ .print_vi = errcatch_print_vi
67585 };
67586
67587 #if ! (TYPE_STAT_DATA == 0 && TYPE_INDIRECT == 1 && TYPE_DIRECT == 2 && TYPE_DIRENTRY == 3)
67588diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
67589index 621b9f3..af527fd 100644
67590--- a/fs/reiserfs/procfs.c
67591+++ b/fs/reiserfs/procfs.c
67592@@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
67593 "SMALL_TAILS " : "NO_TAILS ",
67594 replay_only(sb) ? "REPLAY_ONLY " : "",
67595 convert_reiserfs(sb) ? "CONV " : "",
67596- atomic_read(&r->s_generation_counter),
67597+ atomic_read_unchecked(&r->s_generation_counter),
67598 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
67599 SF(s_do_balance), SF(s_unneeded_left_neighbor),
67600 SF(s_good_search_by_key_reada), SF(s_bmaps),
67601diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
67602index 735c2c2..81b91af 100644
67603--- a/fs/reiserfs/reiserfs.h
67604+++ b/fs/reiserfs/reiserfs.h
67605@@ -573,7 +573,7 @@ struct reiserfs_sb_info {
67606 /* Comment? -Hans */
67607 wait_queue_head_t s_wait;
67608 /* increased by one every time the tree gets re-balanced */
67609- atomic_t s_generation_counter;
67610+ atomic_unchecked_t s_generation_counter;
67611
67612 /* File system properties. Currently holds on-disk FS format */
67613 unsigned long s_properties;
67614@@ -2294,7 +2294,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
67615 #define REISERFS_USER_MEM 1 /* user memory mode */
67616
67617 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
67618-#define get_generation(s) atomic_read (&fs_generation(s))
67619+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
67620 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
67621 #define __fs_changed(gen,s) (gen != get_generation (s))
67622 #define fs_changed(gen,s) \
67623diff --git a/fs/select.c b/fs/select.c
67624index 467bb1c..cf9d65a 100644
67625--- a/fs/select.c
67626+++ b/fs/select.c
67627@@ -20,6 +20,7 @@
67628 #include <linux/export.h>
67629 #include <linux/slab.h>
67630 #include <linux/poll.h>
67631+#include <linux/security.h>
67632 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
67633 #include <linux/file.h>
67634 #include <linux/fdtable.h>
67635@@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
67636 struct poll_list *walk = head;
67637 unsigned long todo = nfds;
67638
67639+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
67640 if (nfds > rlimit(RLIMIT_NOFILE))
67641 return -EINVAL;
67642
67643diff --git a/fs/seq_file.c b/fs/seq_file.c
67644index 3857b72..0b7281e 100644
67645--- a/fs/seq_file.c
67646+++ b/fs/seq_file.c
67647@@ -12,6 +12,8 @@
67648 #include <linux/slab.h>
67649 #include <linux/cred.h>
67650 #include <linux/mm.h>
67651+#include <linux/sched.h>
67652+#include <linux/grsecurity.h>
67653
67654 #include <asm/uaccess.h>
67655 #include <asm/page.h>
67656@@ -34,12 +36,7 @@ static void seq_set_overflow(struct seq_file *m)
67657
67658 static void *seq_buf_alloc(unsigned long size)
67659 {
67660- void *buf;
67661-
67662- buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
67663- if (!buf && size > PAGE_SIZE)
67664- buf = vmalloc(size);
67665- return buf;
67666+ return kmalloc(size, GFP_KERNEL | GFP_USERCOPY);
67667 }
67668
67669 /**
67670@@ -72,6 +69,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
67671 #ifdef CONFIG_USER_NS
67672 p->user_ns = file->f_cred->user_ns;
67673 #endif
67674+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
67675+ p->exec_id = current->exec_id;
67676+#endif
67677
67678 /*
67679 * Wrappers around seq_open(e.g. swaps_open) need to be
67680@@ -94,6 +94,16 @@ int seq_open(struct file *file, const struct seq_operations *op)
67681 }
67682 EXPORT_SYMBOL(seq_open);
67683
67684+
67685+int seq_open_restrict(struct file *file, const struct seq_operations *op)
67686+{
67687+ if (gr_proc_is_restricted())
67688+ return -EACCES;
67689+
67690+ return seq_open(file, op);
67691+}
67692+EXPORT_SYMBOL(seq_open_restrict);
67693+
67694 static int traverse(struct seq_file *m, loff_t offset)
67695 {
67696 loff_t pos = 0, index;
67697@@ -165,7 +175,7 @@ Eoverflow:
67698 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
67699 {
67700 struct seq_file *m = file->private_data;
67701- size_t copied = 0;
67702+ ssize_t copied = 0;
67703 loff_t pos;
67704 size_t n;
67705 void *p;
67706@@ -596,7 +606,7 @@ static void single_stop(struct seq_file *p, void *v)
67707 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
67708 void *data)
67709 {
67710- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
67711+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
67712 int res = -ENOMEM;
67713
67714 if (op) {
67715@@ -632,6 +642,17 @@ int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
67716 }
67717 EXPORT_SYMBOL(single_open_size);
67718
67719+int single_open_restrict(struct file *file, int (*show)(struct seq_file *, void *),
67720+ void *data)
67721+{
67722+ if (gr_proc_is_restricted())
67723+ return -EACCES;
67724+
67725+ return single_open(file, show, data);
67726+}
67727+EXPORT_SYMBOL(single_open_restrict);
67728+
67729+
67730 int single_release(struct inode *inode, struct file *file)
67731 {
67732 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
67733diff --git a/fs/splice.c b/fs/splice.c
67734index f5cb9ba..8ddb1e9 100644
67735--- a/fs/splice.c
67736+++ b/fs/splice.c
67737@@ -193,7 +193,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67738 pipe_lock(pipe);
67739
67740 for (;;) {
67741- if (!pipe->readers) {
67742+ if (!atomic_read(&pipe->readers)) {
67743 send_sig(SIGPIPE, current, 0);
67744 if (!ret)
67745 ret = -EPIPE;
67746@@ -216,7 +216,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67747 page_nr++;
67748 ret += buf->len;
67749
67750- if (pipe->files)
67751+ if (atomic_read(&pipe->files))
67752 do_wakeup = 1;
67753
67754 if (!--spd->nr_pages)
67755@@ -247,9 +247,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
67756 do_wakeup = 0;
67757 }
67758
67759- pipe->waiting_writers++;
67760+ atomic_inc(&pipe->waiting_writers);
67761 pipe_wait(pipe);
67762- pipe->waiting_writers--;
67763+ atomic_dec(&pipe->waiting_writers);
67764 }
67765
67766 pipe_unlock(pipe);
67767@@ -576,7 +576,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
67768 old_fs = get_fs();
67769 set_fs(get_ds());
67770 /* The cast to a user pointer is valid due to the set_fs() */
67771- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
67772+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
67773 set_fs(old_fs);
67774
67775 return res;
67776@@ -591,7 +591,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
67777 old_fs = get_fs();
67778 set_fs(get_ds());
67779 /* The cast to a user pointer is valid due to the set_fs() */
67780- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
67781+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
67782 set_fs(old_fs);
67783
67784 return res;
67785@@ -644,7 +644,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
67786 goto err;
67787
67788 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
67789- vec[i].iov_base = (void __user *) page_address(page);
67790+ vec[i].iov_base = (void __force_user *) page_address(page);
67791 vec[i].iov_len = this_len;
67792 spd.pages[i] = page;
67793 spd.nr_pages++;
67794@@ -783,7 +783,7 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67795 ops->release(pipe, buf);
67796 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67797 pipe->nrbufs--;
67798- if (pipe->files)
67799+ if (atomic_read(&pipe->files))
67800 sd->need_wakeup = true;
67801 }
67802
67803@@ -807,10 +807,10 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des
67804 static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
67805 {
67806 while (!pipe->nrbufs) {
67807- if (!pipe->writers)
67808+ if (!atomic_read(&pipe->writers))
67809 return 0;
67810
67811- if (!pipe->waiting_writers && sd->num_spliced)
67812+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
67813 return 0;
67814
67815 if (sd->flags & SPLICE_F_NONBLOCK)
67816@@ -1040,7 +1040,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
67817 ops->release(pipe, buf);
67818 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
67819 pipe->nrbufs--;
67820- if (pipe->files)
67821+ if (atomic_read(&pipe->files))
67822 sd.need_wakeup = true;
67823 } else {
67824 buf->offset += ret;
67825@@ -1200,7 +1200,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
67826 * out of the pipe right after the splice_to_pipe(). So set
67827 * PIPE_READERS appropriately.
67828 */
67829- pipe->readers = 1;
67830+ atomic_set(&pipe->readers, 1);
67831
67832 current->splice_pipe = pipe;
67833 }
67834@@ -1496,6 +1496,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
67835
67836 partial[buffers].offset = off;
67837 partial[buffers].len = plen;
67838+ partial[buffers].private = 0;
67839
67840 off = 0;
67841 len -= plen;
67842@@ -1732,9 +1733,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67843 ret = -ERESTARTSYS;
67844 break;
67845 }
67846- if (!pipe->writers)
67847+ if (!atomic_read(&pipe->writers))
67848 break;
67849- if (!pipe->waiting_writers) {
67850+ if (!atomic_read(&pipe->waiting_writers)) {
67851 if (flags & SPLICE_F_NONBLOCK) {
67852 ret = -EAGAIN;
67853 break;
67854@@ -1766,7 +1767,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67855 pipe_lock(pipe);
67856
67857 while (pipe->nrbufs >= pipe->buffers) {
67858- if (!pipe->readers) {
67859+ if (!atomic_read(&pipe->readers)) {
67860 send_sig(SIGPIPE, current, 0);
67861 ret = -EPIPE;
67862 break;
67863@@ -1779,9 +1780,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
67864 ret = -ERESTARTSYS;
67865 break;
67866 }
67867- pipe->waiting_writers++;
67868+ atomic_inc(&pipe->waiting_writers);
67869 pipe_wait(pipe);
67870- pipe->waiting_writers--;
67871+ atomic_dec(&pipe->waiting_writers);
67872 }
67873
67874 pipe_unlock(pipe);
67875@@ -1817,14 +1818,14 @@ retry:
67876 pipe_double_lock(ipipe, opipe);
67877
67878 do {
67879- if (!opipe->readers) {
67880+ if (!atomic_read(&opipe->readers)) {
67881 send_sig(SIGPIPE, current, 0);
67882 if (!ret)
67883 ret = -EPIPE;
67884 break;
67885 }
67886
67887- if (!ipipe->nrbufs && !ipipe->writers)
67888+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
67889 break;
67890
67891 /*
67892@@ -1921,7 +1922,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67893 pipe_double_lock(ipipe, opipe);
67894
67895 do {
67896- if (!opipe->readers) {
67897+ if (!atomic_read(&opipe->readers)) {
67898 send_sig(SIGPIPE, current, 0);
67899 if (!ret)
67900 ret = -EPIPE;
67901@@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
67902 * return EAGAIN if we have the potential of some data in the
67903 * future, otherwise just return 0
67904 */
67905- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
67906+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
67907 ret = -EAGAIN;
67908
67909 pipe_unlock(ipipe);
67910diff --git a/fs/stat.c b/fs/stat.c
67911index ae0c3ce..9ee641c 100644
67912--- a/fs/stat.c
67913+++ b/fs/stat.c
67914@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
67915 stat->gid = inode->i_gid;
67916 stat->rdev = inode->i_rdev;
67917 stat->size = i_size_read(inode);
67918- stat->atime = inode->i_atime;
67919- stat->mtime = inode->i_mtime;
67920+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67921+ stat->atime = inode->i_ctime;
67922+ stat->mtime = inode->i_ctime;
67923+ } else {
67924+ stat->atime = inode->i_atime;
67925+ stat->mtime = inode->i_mtime;
67926+ }
67927 stat->ctime = inode->i_ctime;
67928 stat->blksize = (1 << inode->i_blkbits);
67929 stat->blocks = inode->i_blocks;
67930@@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
67931 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
67932 {
67933 struct inode *inode = path->dentry->d_inode;
67934+ int retval;
67935
67936- if (inode->i_op->getattr)
67937- return inode->i_op->getattr(path->mnt, path->dentry, stat);
67938+ if (inode->i_op->getattr) {
67939+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
67940+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
67941+ stat->atime = stat->ctime;
67942+ stat->mtime = stat->ctime;
67943+ }
67944+ return retval;
67945+ }
67946
67947 generic_fillattr(inode, stat);
67948 return 0;
67949diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
67950index 0b45ff4..847de5b 100644
67951--- a/fs/sysfs/dir.c
67952+++ b/fs/sysfs/dir.c
67953@@ -41,9 +41,16 @@ void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
67954 int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67955 {
67956 struct kernfs_node *parent, *kn;
67957+ const char *name;
67958+ umode_t mode = S_IRWXU | S_IRUGO | S_IXUGO;
67959+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67960+ const char *parent_name;
67961+#endif
67962
67963 BUG_ON(!kobj);
67964
67965+ name = kobject_name(kobj);
67966+
67967 if (kobj->parent)
67968 parent = kobj->parent->sd;
67969 else
67970@@ -52,11 +59,22 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
67971 if (!parent)
67972 return -ENOENT;
67973
67974- kn = kernfs_create_dir_ns(parent, kobject_name(kobj),
67975- S_IRWXU | S_IRUGO | S_IXUGO, kobj, ns);
67976+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
67977+ parent_name = parent->name;
67978+ mode = S_IRWXU;
67979+
67980+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
67981+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
67982+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
67983+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
67984+ mode = S_IRWXU | S_IRUGO | S_IXUGO;
67985+#endif
67986+
67987+ kn = kernfs_create_dir_ns(parent, name,
67988+ mode, kobj, ns);
67989 if (IS_ERR(kn)) {
67990 if (PTR_ERR(kn) == -EEXIST)
67991- sysfs_warn_dup(parent, kobject_name(kobj));
67992+ sysfs_warn_dup(parent, name);
67993 return PTR_ERR(kn);
67994 }
67995
67996diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
67997index 69d4889..a810bd4 100644
67998--- a/fs/sysv/sysv.h
67999+++ b/fs/sysv/sysv.h
68000@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
68001 #endif
68002 }
68003
68004-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68005+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
68006 {
68007 if (sbi->s_bytesex == BYTESEX_PDP)
68008 return PDP_swab((__force __u32)n);
68009diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
68010index fb08b0c..65fcc7e 100644
68011--- a/fs/ubifs/io.c
68012+++ b/fs/ubifs/io.c
68013@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
68014 return err;
68015 }
68016
68017-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68018+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
68019 {
68020 int err;
68021
68022diff --git a/fs/udf/misc.c b/fs/udf/misc.c
68023index c175b4d..8f36a16 100644
68024--- a/fs/udf/misc.c
68025+++ b/fs/udf/misc.c
68026@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
68027
68028 u8 udf_tag_checksum(const struct tag *t)
68029 {
68030- u8 *data = (u8 *)t;
68031+ const u8 *data = (const u8 *)t;
68032 u8 checksum = 0;
68033 int i;
68034 for (i = 0; i < sizeof(struct tag); ++i)
68035diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
68036index 8d974c4..b82f6ec 100644
68037--- a/fs/ufs/swab.h
68038+++ b/fs/ufs/swab.h
68039@@ -22,7 +22,7 @@ enum {
68040 BYTESEX_BE
68041 };
68042
68043-static inline u64
68044+static inline u64 __intentional_overflow(-1)
68045 fs64_to_cpu(struct super_block *sbp, __fs64 n)
68046 {
68047 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68048@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
68049 return (__force __fs64)cpu_to_be64(n);
68050 }
68051
68052-static inline u32
68053+static inline u32 __intentional_overflow(-1)
68054 fs32_to_cpu(struct super_block *sbp, __fs32 n)
68055 {
68056 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
68057diff --git a/fs/utimes.c b/fs/utimes.c
68058index aa138d6..5f3a811 100644
68059--- a/fs/utimes.c
68060+++ b/fs/utimes.c
68061@@ -1,6 +1,7 @@
68062 #include <linux/compiler.h>
68063 #include <linux/file.h>
68064 #include <linux/fs.h>
68065+#include <linux/security.h>
68066 #include <linux/linkage.h>
68067 #include <linux/mount.h>
68068 #include <linux/namei.h>
68069@@ -103,6 +104,12 @@ static int utimes_common(struct path *path, struct timespec *times)
68070 }
68071 }
68072 retry_deleg:
68073+
68074+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
68075+ error = -EACCES;
68076+ goto mnt_drop_write_and_out;
68077+ }
68078+
68079 mutex_lock(&inode->i_mutex);
68080 error = notify_change(path->dentry, &newattrs, &delegated_inode);
68081 mutex_unlock(&inode->i_mutex);
68082diff --git a/fs/xattr.c b/fs/xattr.c
68083index c69e6d4..cc56af5 100644
68084--- a/fs/xattr.c
68085+++ b/fs/xattr.c
68086@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
68087 return rc;
68088 }
68089
68090+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
68091+ssize_t
68092+pax_getxattr(struct dentry *dentry, void *value, size_t size)
68093+{
68094+ struct inode *inode = dentry->d_inode;
68095+ ssize_t error;
68096+
68097+ error = inode_permission(inode, MAY_EXEC);
68098+ if (error)
68099+ return error;
68100+
68101+ if (inode->i_op->getxattr)
68102+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
68103+ else
68104+ error = -EOPNOTSUPP;
68105+
68106+ return error;
68107+}
68108+EXPORT_SYMBOL(pax_getxattr);
68109+#endif
68110+
68111 ssize_t
68112 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
68113 {
68114@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
68115 * Extended attribute SET operations
68116 */
68117 static long
68118-setxattr(struct dentry *d, const char __user *name, const void __user *value,
68119+setxattr(struct path *path, const char __user *name, const void __user *value,
68120 size_t size, int flags)
68121 {
68122 int error;
68123@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
68124 posix_acl_fix_xattr_from_user(kvalue, size);
68125 }
68126
68127- error = vfs_setxattr(d, kname, kvalue, size, flags);
68128+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
68129+ error = -EACCES;
68130+ goto out;
68131+ }
68132+
68133+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
68134 out:
68135 if (vvalue)
68136 vfree(vvalue);
68137@@ -377,7 +403,7 @@ retry:
68138 return error;
68139 error = mnt_want_write(path.mnt);
68140 if (!error) {
68141- error = setxattr(path.dentry, name, value, size, flags);
68142+ error = setxattr(&path, name, value, size, flags);
68143 mnt_drop_write(path.mnt);
68144 }
68145 path_put(&path);
68146@@ -401,7 +427,7 @@ retry:
68147 return error;
68148 error = mnt_want_write(path.mnt);
68149 if (!error) {
68150- error = setxattr(path.dentry, name, value, size, flags);
68151+ error = setxattr(&path, name, value, size, flags);
68152 mnt_drop_write(path.mnt);
68153 }
68154 path_put(&path);
68155@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
68156 const void __user *,value, size_t, size, int, flags)
68157 {
68158 struct fd f = fdget(fd);
68159- struct dentry *dentry;
68160 int error = -EBADF;
68161
68162 if (!f.file)
68163 return error;
68164- dentry = f.file->f_path.dentry;
68165- audit_inode(NULL, dentry, 0);
68166+ audit_inode(NULL, f.file->f_path.dentry, 0);
68167 error = mnt_want_write_file(f.file);
68168 if (!error) {
68169- error = setxattr(dentry, name, value, size, flags);
68170+ error = setxattr(&f.file->f_path, name, value, size, flags);
68171 mnt_drop_write_file(f.file);
68172 }
68173 fdput(f);
68174@@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
68175 * Extended attribute REMOVE operations
68176 */
68177 static long
68178-removexattr(struct dentry *d, const char __user *name)
68179+removexattr(struct path *path, const char __user *name)
68180 {
68181 int error;
68182 char kname[XATTR_NAME_MAX + 1];
68183@@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
68184 if (error < 0)
68185 return error;
68186
68187- return vfs_removexattr(d, kname);
68188+ if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
68189+ return -EACCES;
68190+
68191+ return vfs_removexattr(path->dentry, kname);
68192 }
68193
68194 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
68195@@ -652,7 +679,7 @@ retry:
68196 return error;
68197 error = mnt_want_write(path.mnt);
68198 if (!error) {
68199- error = removexattr(path.dentry, name);
68200+ error = removexattr(&path, name);
68201 mnt_drop_write(path.mnt);
68202 }
68203 path_put(&path);
68204@@ -675,7 +702,7 @@ retry:
68205 return error;
68206 error = mnt_want_write(path.mnt);
68207 if (!error) {
68208- error = removexattr(path.dentry, name);
68209+ error = removexattr(&path, name);
68210 mnt_drop_write(path.mnt);
68211 }
68212 path_put(&path);
68213@@ -689,16 +716,16 @@ retry:
68214 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
68215 {
68216 struct fd f = fdget(fd);
68217- struct dentry *dentry;
68218+ struct path *path;
68219 int error = -EBADF;
68220
68221 if (!f.file)
68222 return error;
68223- dentry = f.file->f_path.dentry;
68224- audit_inode(NULL, dentry, 0);
68225+ path = &f.file->f_path;
68226+ audit_inode(NULL, path->dentry, 0);
68227 error = mnt_want_write_file(f.file);
68228 if (!error) {
68229- error = removexattr(dentry, name);
68230+ error = removexattr(path, name);
68231 mnt_drop_write_file(f.file);
68232 }
68233 fdput(f);
68234diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
68235index 86df952..ac430d6 100644
68236--- a/fs/xfs/libxfs/xfs_bmap.c
68237+++ b/fs/xfs/libxfs/xfs_bmap.c
68238@@ -583,7 +583,7 @@ xfs_bmap_validate_ret(
68239
68240 #else
68241 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
68242-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
68243+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
68244 #endif /* DEBUG */
68245
68246 /*
68247diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
68248index f1b69ed..3d0222f 100644
68249--- a/fs/xfs/xfs_dir2_readdir.c
68250+++ b/fs/xfs/xfs_dir2_readdir.c
68251@@ -159,7 +159,12 @@ xfs_dir2_sf_getdents(
68252 ino = dp->d_ops->sf_get_ino(sfp, sfep);
68253 filetype = dp->d_ops->sf_get_ftype(sfep);
68254 ctx->pos = off & 0x7fffffff;
68255- if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68256+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
68257+ char name[sfep->namelen];
68258+ memcpy(name, sfep->name, sfep->namelen);
68259+ if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(dp->i_mount, filetype)))
68260+ return 0;
68261+ } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
68262 xfs_dir3_get_dtype(dp->i_mount, filetype)))
68263 return 0;
68264 sfep = dp->d_ops->sf_nextentry(sfp, sfep);
68265diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
68266index 3799695..0ddc953 100644
68267--- a/fs/xfs/xfs_ioctl.c
68268+++ b/fs/xfs/xfs_ioctl.c
68269@@ -122,7 +122,7 @@ xfs_find_handle(
68270 }
68271
68272 error = -EFAULT;
68273- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
68274+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
68275 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
68276 goto out_put;
68277
68278diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
68279index d10dc8f..56b3430 100644
68280--- a/fs/xfs/xfs_linux.h
68281+++ b/fs/xfs/xfs_linux.h
68282@@ -230,7 +230,7 @@ static inline kgid_t xfs_gid_to_kgid(__uint32_t gid)
68283 * of the compiler which do not like us using do_div in the middle
68284 * of large functions.
68285 */
68286-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68287+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68288 {
68289 __u32 mod;
68290
68291@@ -286,7 +286,7 @@ static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
68292 return 0;
68293 }
68294 #else
68295-static inline __u32 xfs_do_div(void *a, __u32 b, int n)
68296+static inline __u32 __intentional_overflow(-1) xfs_do_div(void *a, __u32 b, int n)
68297 {
68298 __u32 mod;
68299
68300diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
68301new file mode 100644
68302index 0000000..f27264e
68303--- /dev/null
68304+++ b/grsecurity/Kconfig
68305@@ -0,0 +1,1166 @@
68306+#
68307+# grsecurity configuration
68308+#
68309+menu "Memory Protections"
68310+depends on GRKERNSEC
68311+
68312+config GRKERNSEC_KMEM
68313+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
68314+ default y if GRKERNSEC_CONFIG_AUTO
68315+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
68316+ help
68317+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
68318+ be written to or read from to modify or leak the contents of the running
68319+ kernel. /dev/port will also not be allowed to be opened, writing to
68320+ /dev/cpu/*/msr will be prevented, and support for kexec will be removed.
68321+ If you have module support disabled, enabling this will close up several
68322+ ways that are currently used to insert malicious code into the running
68323+ kernel.
68324+
68325+ Even with this feature enabled, we still highly recommend that
68326+ you use the RBAC system, as it is still possible for an attacker to
68327+ modify the running kernel through other more obscure methods.
68328+
68329+ It is highly recommended that you say Y here if you meet all the
68330+ conditions above.
68331+
68332+config GRKERNSEC_VM86
68333+ bool "Restrict VM86 mode"
68334+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68335+ depends on X86_32
68336+
68337+ help
68338+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
68339+ make use of a special execution mode on 32bit x86 processors called
68340+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
68341+ video cards and will still work with this option enabled. The purpose
68342+ of the option is to prevent exploitation of emulation errors in
68343+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
68344+ Nearly all users should be able to enable this option.
68345+
68346+config GRKERNSEC_IO
68347+ bool "Disable privileged I/O"
68348+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68349+ depends on X86
68350+ select RTC_CLASS
68351+ select RTC_INTF_DEV
68352+ select RTC_DRV_CMOS
68353+
68354+ help
68355+ If you say Y here, all ioperm and iopl calls will return an error.
68356+ Ioperm and iopl can be used to modify the running kernel.
68357+ Unfortunately, some programs need this access to operate properly,
68358+ the most notable of which are XFree86 and hwclock. hwclock can be
68359+ remedied by having RTC support in the kernel, so real-time
68360+ clock support is enabled if this option is enabled, to ensure
68361+ that hwclock operates correctly. If hwclock still does not work,
68362+ either update udev or symlink /dev/rtc to /dev/rtc0.
68363+
68364+ If you're using XFree86 or a version of Xorg from 2012 or earlier,
68365+ you may not be able to boot into a graphical environment with this
68366+ option enabled. In this case, you should use the RBAC system instead.
68367+
68368+config GRKERNSEC_BPF_HARDEN
68369+ bool "Harden BPF interpreter"
68370+ default y if GRKERNSEC_CONFIG_AUTO
68371+ help
68372+ Unlike previous versions of grsecurity that hardened both the BPF
68373+ interpreted code against corruption at rest as well as the JIT code
68374+ against JIT-spray attacks and attacker-controlled immediate values
68375+ for ROP, this feature will enforce disabling of the new eBPF JIT engine
68376+ and will ensure the interpreted code is read-only at rest. This feature
68377+ may be removed at a later time when eBPF stabilizes to entirely revert
68378+ back to the more secure pre-3.16 BPF interpreter/JIT.
68379+
68380+ If you're using KERNEXEC, it's recommended that you enable this option
68381+ to supplement the hardening of the kernel.
68382+
68383+config GRKERNSEC_PERF_HARDEN
68384+ bool "Disable unprivileged PERF_EVENTS usage by default"
68385+ default y if GRKERNSEC_CONFIG_AUTO
68386+ depends on PERF_EVENTS
68387+ help
68388+ If you say Y here, the range of acceptable values for the
68389+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
68390+ default to a new value: 3. When the sysctl is set to this value, no
68391+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
68392+
68393+ Though PERF_EVENTS can be used legitimately for performance monitoring
68394+ and low-level application profiling, it is forced on regardless of
68395+ configuration, has been at fault for several vulnerabilities, and
68396+ creates new opportunities for side channels and other information leaks.
68397+
68398+ This feature puts PERF_EVENTS into a secure default state and permits
68399+ the administrator to change out of it temporarily if unprivileged
68400+ application profiling is needed.
68401+
68402+config GRKERNSEC_RAND_THREADSTACK
68403+ bool "Insert random gaps between thread stacks"
68404+ default y if GRKERNSEC_CONFIG_AUTO
68405+ depends on PAX_RANDMMAP && !PPC
68406+ help
68407+ If you say Y here, a random-sized gap will be enforced between allocated
68408+ thread stacks. Glibc's NPTL and other threading libraries that
68409+ pass MAP_STACK to the kernel for thread stack allocation are supported.
68410+ The implementation currently provides 8 bits of entropy for the gap.
68411+
68412+ Many distributions do not compile threaded remote services with the
68413+ -fstack-check argument to GCC, causing the variable-sized stack-based
68414+ allocator, alloca(), to not probe the stack on allocation. This
68415+ permits an unbounded alloca() to skip over any guard page and potentially
68416+ modify another thread's stack reliably. An enforced random gap
68417+ reduces the reliability of such an attack and increases the chance
68418+ that such a read/write to another thread's stack instead lands in
68419+ an unmapped area, causing a crash and triggering grsecurity's
68420+ anti-bruteforcing logic.
68421+
68422+config GRKERNSEC_PROC_MEMMAP
68423+ bool "Harden ASLR against information leaks and entropy reduction"
68424+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
68425+ depends on PAX_NOEXEC || PAX_ASLR
68426+ help
68427+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
68428+ give no information about the addresses of its mappings if
68429+ PaX features that rely on random addresses are enabled on the task.
68430+ In addition to sanitizing this information and disabling other
68431+ dangerous sources of information, this option causes reads of sensitive
68432+ /proc/<pid> entries where the file descriptor was opened in a different
68433+ task than the one performing the read. Such attempts are logged.
68434+ This option also limits argv/env strings for suid/sgid binaries
68435+ to 512KB to prevent a complete exhaustion of the stack entropy provided
68436+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
68437+ binaries to prevent alternative mmap layouts from being abused.
68438+
68439+ If you use PaX it is essential that you say Y here as it closes up
68440+ several holes that make full ASLR useless locally.
68441+
68442+
68443+config GRKERNSEC_KSTACKOVERFLOW
68444+ bool "Prevent kernel stack overflows"
68445+ default y if GRKERNSEC_CONFIG_AUTO
68446+ depends on !IA64 && 64BIT
68447+ help
68448+ If you say Y here, the kernel's process stacks will be allocated
68449+ with vmalloc instead of the kernel's default allocator. This
68450+ introduces guard pages that in combination with the alloca checking
68451+ of the STACKLEAK feature prevents all forms of kernel process stack
68452+ overflow abuse. Note that this is different from kernel stack
68453+ buffer overflows.
68454+
68455+config GRKERNSEC_BRUTE
68456+ bool "Deter exploit bruteforcing"
68457+ default y if GRKERNSEC_CONFIG_AUTO
68458+ help
68459+ If you say Y here, attempts to bruteforce exploits against forking
68460+ daemons such as apache or sshd, as well as against suid/sgid binaries
68461+ will be deterred. When a child of a forking daemon is killed by PaX
68462+ or crashes due to an illegal instruction or other suspicious signal,
68463+ the parent process will be delayed 30 seconds upon every subsequent
68464+ fork until the administrator is able to assess the situation and
68465+ restart the daemon.
68466+ In the suid/sgid case, the attempt is logged, the user has all their
68467+ existing instances of the suid/sgid binary terminated and will
68468+ be unable to execute any suid/sgid binaries for 15 minutes.
68469+
68470+ It is recommended that you also enable signal logging in the auditing
68471+ section so that logs are generated when a process triggers a suspicious
68472+ signal.
68473+ If the sysctl option is enabled, a sysctl option with name
68474+ "deter_bruteforce" is created.
68475+
68476+config GRKERNSEC_MODHARDEN
68477+ bool "Harden module auto-loading"
68478+ default y if GRKERNSEC_CONFIG_AUTO
68479+ depends on MODULES
68480+ help
68481+ If you say Y here, module auto-loading in response to use of some
68482+ feature implemented by an unloaded module will be restricted to
68483+ root users. Enabling this option helps defend against attacks
68484+ by unprivileged users who abuse the auto-loading behavior to
68485+ cause a vulnerable module to load that is then exploited.
68486+
68487+ If this option prevents a legitimate use of auto-loading for a
68488+ non-root user, the administrator can execute modprobe manually
68489+ with the exact name of the module mentioned in the alert log.
68490+ Alternatively, the administrator can add the module to the list
68491+ of modules loaded at boot by modifying init scripts.
68492+
68493+ Modification of init scripts will most likely be needed on
68494+ Ubuntu servers with encrypted home directory support enabled,
68495+ as the first non-root user logging in will cause the ecb(aes),
68496+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
68497+
68498+config GRKERNSEC_HIDESYM
68499+ bool "Hide kernel symbols"
68500+ default y if GRKERNSEC_CONFIG_AUTO
68501+ select PAX_USERCOPY_SLABS
68502+ help
68503+ If you say Y here, getting information on loaded modules, and
68504+ displaying all kernel symbols through a syscall will be restricted
68505+ to users with CAP_SYS_MODULE. For software compatibility reasons,
68506+ /proc/kallsyms will be restricted to the root user. The RBAC
68507+ system can hide that entry even from root.
68508+
68509+ This option also prevents leaking of kernel addresses through
68510+ several /proc entries.
68511+
68512+ Note that this option is only effective provided the following
68513+ conditions are met:
68514+ 1) The kernel using grsecurity is not precompiled by some distribution
68515+ 2) You have also enabled GRKERNSEC_DMESG
68516+ 3) You are using the RBAC system and hiding other files such as your
68517+ kernel image and System.map. Alternatively, enabling this option
68518+ causes the permissions on /boot, /lib/modules, and the kernel
68519+ source directory to change at compile time to prevent
68520+ reading by non-root users.
68521+ If the above conditions are met, this option will aid in providing a
68522+ useful protection against local kernel exploitation of overflows
68523+ and arbitrary read/write vulnerabilities.
68524+
68525+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
68526+ in addition to this feature.
68527+
68528+config GRKERNSEC_RANDSTRUCT
68529+ bool "Randomize layout of sensitive kernel structures"
68530+ default y if GRKERNSEC_CONFIG_AUTO
68531+ select GRKERNSEC_HIDESYM
68532+ select MODVERSIONS if MODULES
68533+ help
68534+ If you say Y here, the layouts of a number of sensitive kernel
68535+ structures (task, fs, cred, etc) and all structures composed entirely
68536+ of function pointers (aka "ops" structs) will be randomized at compile-time.
68537+ This can introduce the requirement of an additional infoleak
68538+ vulnerability for exploits targeting these structure types.
68539+
68540+ Enabling this feature will introduce some performance impact, slightly
68541+ increase memory usage, and prevent the use of forensic tools like
68542+ Volatility against the system (unless the kernel source tree isn't
68543+ cleaned after kernel installation).
68544+
68545+ The seed used for compilation is located at tools/gcc/randomize_layout_seed.h.
68546+ It remains after a make clean to allow for external modules to be compiled
68547+ with the existing seed and will be removed by a make mrproper or
68548+ make distclean.
68549+
68550+	  Note that the implementation requires gcc 4.6.4 or newer. You may need
68551+ to install the supporting headers explicitly in addition to the normal
68552+ gcc package.
68553+
68554+config GRKERNSEC_RANDSTRUCT_PERFORMANCE
68555+ bool "Use cacheline-aware structure randomization"
68556+ depends on GRKERNSEC_RANDSTRUCT
68557+ default y if GRKERNSEC_CONFIG_PRIORITY_PERF
68558+ help
68559+ If you say Y here, the RANDSTRUCT randomization will make a best effort
68560+ at restricting randomization to cacheline-sized groups of elements. It
68561+ will further not randomize bitfields in structures. This reduces the
68562+ performance hit of RANDSTRUCT at the cost of weakened randomization.
68563+
68564+config GRKERNSEC_KERN_LOCKOUT
68565+ bool "Active kernel exploit response"
68566+ default y if GRKERNSEC_CONFIG_AUTO
68567+ depends on X86 || ARM || PPC || SPARC
68568+ help
68569+ If you say Y here, when a PaX alert is triggered due to suspicious
68570+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
68571+ or an OOPS occurs due to bad memory accesses, instead of just
68572+ terminating the offending process (and potentially allowing
68573+ a subsequent exploit from the same user), we will take one of two
68574+ actions:
68575+ If the user was root, we will panic the system
68576+ If the user was non-root, we will log the attempt, terminate
68577+ all processes owned by the user, then prevent them from creating
68578+ any new processes until the system is restarted
68579+ This deters repeated kernel exploitation/bruteforcing attempts
68580+ and is useful for later forensics.
68581+
68582+config GRKERNSEC_OLD_ARM_USERLAND
68583+ bool "Old ARM userland compatibility"
68584+ depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
68585+ help
68586+ If you say Y here, stubs of executable code to perform such operations
68587+ as "compare-exchange" will be placed at fixed locations in the ARM vector
68588+ table. This is unfortunately needed for old ARM userland meant to run
68589+ across a wide range of processors. Without this option enabled,
68590+ the get_tls and data memory barrier stubs will be emulated by the kernel,
68591+ which is enough for Linaro userlands or other userlands designed for v6
68592+ and newer ARM CPUs. It's recommended that you try without this option enabled
68593+ first, and only enable it if your userland does not boot (it will likely fail
68594+ at init time).
68595+
68596+endmenu
68597+menu "Role Based Access Control Options"
68598+depends on GRKERNSEC
68599+
68600+config GRKERNSEC_RBAC_DEBUG
68601+ bool
68602+
68603+config GRKERNSEC_NO_RBAC
68604+ bool "Disable RBAC system"
68605+ help
68606+ If you say Y here, the /dev/grsec device will be removed from the kernel,
68607+ preventing the RBAC system from being enabled. You should only say Y
68608+ here if you have no intention of using the RBAC system, so as to prevent
68609+ an attacker with root access from misusing the RBAC system to hide files
68610+ and processes when loadable module support and /dev/[k]mem have been
68611+ locked down.
68612+
68613+config GRKERNSEC_ACL_HIDEKERN
68614+ bool "Hide kernel processes"
68615+ help
68616+ If you say Y here, all kernel threads will be hidden to all
68617+ processes but those whose subject has the "view hidden processes"
68618+ flag.
68619+
68620+config GRKERNSEC_ACL_MAXTRIES
68621+ int "Maximum tries before password lockout"
68622+ default 3
68623+ help
68624+ This option enforces the maximum number of times a user can attempt
68625+ to authorize themselves with the grsecurity RBAC system before being
68626+ denied the ability to attempt authorization again for a specified time.
68627+ The lower the number, the harder it will be to brute-force a password.
68628+
68629+config GRKERNSEC_ACL_TIMEOUT
68630+ int "Time to wait after max password tries, in seconds"
68631+ default 30
68632+ help
68633+ This option specifies the time the user must wait after attempting to
68634+ authorize to the RBAC system with the maximum number of invalid
68635+ passwords. The higher the number, the harder it will be to brute-force
68636+ a password.
68637+
68638+endmenu
68639+menu "Filesystem Protections"
68640+depends on GRKERNSEC
68641+
68642+config GRKERNSEC_PROC
68643+ bool "Proc restrictions"
68644+ default y if GRKERNSEC_CONFIG_AUTO
68645+ help
68646+ If you say Y here, the permissions of the /proc filesystem
68647+ will be altered to enhance system security and privacy. You MUST
68648+ choose either a user only restriction or a user and group restriction.
68649+ Depending upon the option you choose, you can either restrict users to
68650+ see only the processes they themselves run, or choose a group that can
68651+ view all processes and files normally restricted to root if you choose
68652+ the "restrict to user only" option. NOTE: If you're running identd or
68653+ ntpd as a non-root user, you will have to run it as the group you
68654+ specify here.
68655+
68656+config GRKERNSEC_PROC_USER
68657+ bool "Restrict /proc to user only"
68658+ depends on GRKERNSEC_PROC
68659+ help
68660+ If you say Y here, non-root users will only be able to view their own
68661+ processes, and restricts them from viewing network-related information,
68662+ and viewing kernel symbol and module information.
68663+
68664+config GRKERNSEC_PROC_USERGROUP
68665+ bool "Allow special group"
68666+ default y if GRKERNSEC_CONFIG_AUTO
68667+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
68668+ help
68669+ If you say Y here, you will be able to select a group that will be
68670+ able to view all processes and network-related information. If you've
68671+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
68672+ remain hidden. This option is useful if you want to run identd as
68673+ a non-root user. The group you select may also be chosen at boot time
68674+ via "grsec_proc_gid=" on the kernel commandline.
68675+
68676+config GRKERNSEC_PROC_GID
68677+ int "GID for special group"
68678+ depends on GRKERNSEC_PROC_USERGROUP
68679+ default 1001
68680+
68681+config GRKERNSEC_PROC_ADD
68682+ bool "Additional restrictions"
68683+ default y if GRKERNSEC_CONFIG_AUTO
68684+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
68685+ help
68686+ If you say Y here, additional restrictions will be placed on
68687+ /proc that keep normal users from viewing device information and
68688+ slabinfo information that could be useful for exploits.
68689+
68690+config GRKERNSEC_LINK
68691+ bool "Linking restrictions"
68692+ default y if GRKERNSEC_CONFIG_AUTO
68693+ help
68694+ If you say Y here, /tmp race exploits will be prevented, since users
68695+ will no longer be able to follow symlinks owned by other users in
68696+ world-writable +t directories (e.g. /tmp), unless the owner of the
68697+	  symlink is the owner of the directory. Users will also not be
68698+ able to hardlink to files they do not own. If the sysctl option is
68699+ enabled, a sysctl option with name "linking_restrictions" is created.
68700+
68701+config GRKERNSEC_SYMLINKOWN
68702+ bool "Kernel-enforced SymlinksIfOwnerMatch"
68703+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
68704+ help
68705+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
68706+ that prevents it from being used as a security feature. As Apache
68707+ verifies the symlink by performing a stat() against the target of
68708+ the symlink before it is followed, an attacker can setup a symlink
68709+ to point to a same-owned file, then replace the symlink with one
68710+ that targets another user's file just after Apache "validates" the
68711+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
68712+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
68713+ will be in place for the group you specify. If the sysctl option
68714+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
68715+ created.
68716+
68717+config GRKERNSEC_SYMLINKOWN_GID
68718+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
68719+ depends on GRKERNSEC_SYMLINKOWN
68720+ default 1006
68721+ help
68722+ Setting this GID determines what group kernel-enforced
68723+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
68724+ is enabled, a sysctl option with name "symlinkown_gid" is created.
68725+
68726+config GRKERNSEC_FIFO
68727+ bool "FIFO restrictions"
68728+ default y if GRKERNSEC_CONFIG_AUTO
68729+ help
68730+ If you say Y here, users will not be able to write to FIFOs they don't
68731+ own in world-writable +t directories (e.g. /tmp), unless the owner of
68732+ the FIFO is the same owner of the directory it's held in. If the sysctl
68733+ option is enabled, a sysctl option with name "fifo_restrictions" is
68734+ created.
68735+
68736+config GRKERNSEC_SYSFS_RESTRICT
68737+ bool "Sysfs/debugfs restriction"
68738+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
68739+ depends on SYSFS
68740+ help
68741+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
68742+ any filesystem normally mounted under it (e.g. debugfs) will be
68743+ mostly accessible only by root. These filesystems generally provide access
68744+ to hardware and debug information that isn't appropriate for unprivileged
68745+ users of the system. Sysfs and debugfs have also become a large source
68746+ of new vulnerabilities, ranging from infoleaks to local compromise.
68747+ There has been very little oversight with an eye toward security involved
68748+ in adding new exporters of information to these filesystems, so their
68749+ use is discouraged.
68750+ For reasons of compatibility, a few directories have been whitelisted
68751+ for access by non-root users:
68752+ /sys/fs/selinux
68753+ /sys/fs/fuse
68754+ /sys/devices/system/cpu
68755+
68756+config GRKERNSEC_ROFS
68757+ bool "Runtime read-only mount protection"
68758+ depends on SYSCTL
68759+ help
68760+ If you say Y here, a sysctl option with name "romount_protect" will
68761+ be created. By setting this option to 1 at runtime, filesystems
68762+ will be protected in the following ways:
68763+ * No new writable mounts will be allowed
68764+ * Existing read-only mounts won't be able to be remounted read/write
68765+ * Write operations will be denied on all block devices
68766+ This option acts independently of grsec_lock: once it is set to 1,
68767+ it cannot be turned off. Therefore, please be mindful of the resulting
68768+ behavior if this option is enabled in an init script on a read-only
68769+ filesystem.
68770+ Also be aware that as with other root-focused features, GRKERNSEC_KMEM
68771+ and GRKERNSEC_IO should be enabled and module loading disabled via
68772+ config or at runtime.
68773+ This feature is mainly intended for secure embedded systems.
68774+
68775+
68776+config GRKERNSEC_DEVICE_SIDECHANNEL
68777+ bool "Eliminate stat/notify-based device sidechannels"
68778+ default y if GRKERNSEC_CONFIG_AUTO
68779+ help
68780+ If you say Y here, timing analyses on block or character
68781+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
68782+ will be thwarted for unprivileged users. If a process without
68783+ CAP_MKNOD stats such a device, the last access and last modify times
68784+ will match the device's create time. No access or modify events
68785+ will be triggered through inotify/dnotify/fanotify for such devices.
68786+ This feature will prevent attacks that may at a minimum
68787+ allow an attacker to determine the administrator's password length.
68788+
68789+config GRKERNSEC_CHROOT
68790+ bool "Chroot jail restrictions"
68791+ default y if GRKERNSEC_CONFIG_AUTO
68792+ help
68793+ If you say Y here, you will be able to choose several options that will
68794+ make breaking out of a chrooted jail much more difficult. If you
68795+ encounter no software incompatibilities with the following options, it
68796+ is recommended that you enable each one.
68797+
68798+ Note that the chroot restrictions are not intended to apply to "chroots"
68799+ to directories that are simple bind mounts of the global root filesystem.
68800+ For several other reasons, a user shouldn't expect any significant
68801+ security by performing such a chroot.
68802+
68803+config GRKERNSEC_CHROOT_MOUNT
68804+ bool "Deny mounts"
68805+ default y if GRKERNSEC_CONFIG_AUTO
68806+ depends on GRKERNSEC_CHROOT
68807+ help
68808+ If you say Y here, processes inside a chroot will not be able to
68809+ mount or remount filesystems. If the sysctl option is enabled, a
68810+ sysctl option with name "chroot_deny_mount" is created.
68811+
68812+config GRKERNSEC_CHROOT_DOUBLE
68813+ bool "Deny double-chroots"
68814+ default y if GRKERNSEC_CONFIG_AUTO
68815+ depends on GRKERNSEC_CHROOT
68816+ help
68817+ If you say Y here, processes inside a chroot will not be able to chroot
68818+ again outside the chroot. This is a widely used method of breaking
68819+ out of a chroot jail and should not be allowed. If the sysctl
68820+ option is enabled, a sysctl option with name
68821+ "chroot_deny_chroot" is created.
68822+
68823+config GRKERNSEC_CHROOT_PIVOT
68824+ bool "Deny pivot_root in chroot"
68825+ default y if GRKERNSEC_CONFIG_AUTO
68826+ depends on GRKERNSEC_CHROOT
68827+ help
68828+ If you say Y here, processes inside a chroot will not be able to use
68829+ a function called pivot_root() that was introduced in Linux 2.3.41. It
68830+ works similar to chroot in that it changes the root filesystem. This
68831+ function could be misused in a chrooted process to attempt to break out
68832+ of the chroot, and therefore should not be allowed. If the sysctl
68833+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
68834+ created.
68835+
68836+config GRKERNSEC_CHROOT_CHDIR
68837+ bool "Enforce chdir(\"/\") on all chroots"
68838+ default y if GRKERNSEC_CONFIG_AUTO
68839+ depends on GRKERNSEC_CHROOT
68840+ help
68841+ If you say Y here, the current working directory of all newly-chrooted
68842+	  applications will be set to the root directory of the chroot.
68843+ The man page on chroot(2) states:
68844+ Note that this call does not change the current working
68845+ directory, so that `.' can be outside the tree rooted at
68846+ `/'. In particular, the super-user can escape from a
68847+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
68848+
68849+ It is recommended that you say Y here, since it's not known to break
68850+ any software. If the sysctl option is enabled, a sysctl option with
68851+ name "chroot_enforce_chdir" is created.
68852+
68853+config GRKERNSEC_CHROOT_CHMOD
68854+ bool "Deny (f)chmod +s"
68855+ default y if GRKERNSEC_CONFIG_AUTO
68856+ depends on GRKERNSEC_CHROOT
68857+ help
68858+ If you say Y here, processes inside a chroot will not be able to chmod
68859+ or fchmod files to make them have suid or sgid bits. This protects
68860+ against another published method of breaking a chroot. If the sysctl
68861+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
68862+ created.
68863+
68864+config GRKERNSEC_CHROOT_FCHDIR
68865+ bool "Deny fchdir and fhandle out of chroot"
68866+ default y if GRKERNSEC_CONFIG_AUTO
68867+ depends on GRKERNSEC_CHROOT
68868+ help
68869+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
68870+ to a file descriptor of the chrooting process that points to a directory
68871+ outside the filesystem will be stopped. Additionally, this option prevents
68872+ use of the recently-created syscall for opening files by a guessable "file
68873+ handle" inside a chroot. If the sysctl option is enabled, a sysctl option
68874+ with name "chroot_deny_fchdir" is created.
68875+
68876+config GRKERNSEC_CHROOT_MKNOD
68877+ bool "Deny mknod"
68878+ default y if GRKERNSEC_CONFIG_AUTO
68879+ depends on GRKERNSEC_CHROOT
68880+ help
68881+ If you say Y here, processes inside a chroot will not be allowed to
68882+ mknod. The problem with using mknod inside a chroot is that it
68883+ would allow an attacker to create a device entry that is the same
68884+ as one on the physical root of your system, which could range from
68885+ anything from the console device to a device for your harddrive (which
68886+ they could then use to wipe the drive or steal data). It is recommended
68887+ that you say Y here, unless you run into software incompatibilities.
68888+ If the sysctl option is enabled, a sysctl option with name
68889+ "chroot_deny_mknod" is created.
68890+
68891+config GRKERNSEC_CHROOT_SHMAT
68892+ bool "Deny shmat() out of chroot"
68893+ default y if GRKERNSEC_CONFIG_AUTO
68894+ depends on GRKERNSEC_CHROOT
68895+ help
68896+ If you say Y here, processes inside a chroot will not be able to attach
68897+ to shared memory segments that were created outside of the chroot jail.
68898+ It is recommended that you say Y here. If the sysctl option is enabled,
68899+ a sysctl option with name "chroot_deny_shmat" is created.
68900+
68901+config GRKERNSEC_CHROOT_UNIX
68902+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
68903+ default y if GRKERNSEC_CONFIG_AUTO
68904+ depends on GRKERNSEC_CHROOT
68905+ help
68906+ If you say Y here, processes inside a chroot will not be able to
68907+ connect to abstract (meaning not belonging to a filesystem) Unix
68908+ domain sockets that were bound outside of a chroot. It is recommended
68909+ that you say Y here. If the sysctl option is enabled, a sysctl option
68910+ with name "chroot_deny_unix" is created.
68911+
68912+config GRKERNSEC_CHROOT_FINDTASK
68913+ bool "Protect outside processes"
68914+ default y if GRKERNSEC_CONFIG_AUTO
68915+ depends on GRKERNSEC_CHROOT
68916+ help
68917+ If you say Y here, processes inside a chroot will not be able to
68918+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
68919+ getsid, or view any process outside of the chroot. If the sysctl
68920+ option is enabled, a sysctl option with name "chroot_findtask" is
68921+ created.
68922+
68923+config GRKERNSEC_CHROOT_NICE
68924+ bool "Restrict priority changes"
68925+ default y if GRKERNSEC_CONFIG_AUTO
68926+ depends on GRKERNSEC_CHROOT
68927+ help
68928+ If you say Y here, processes inside a chroot will not be able to raise
68929+ the priority of processes in the chroot, or alter the priority of
68930+ processes outside the chroot. This provides more security than simply
68931+ removing CAP_SYS_NICE from the process' capability set. If the
68932+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
68933+ is created.
68934+
68935+config GRKERNSEC_CHROOT_SYSCTL
68936+ bool "Deny sysctl writes"
68937+ default y if GRKERNSEC_CONFIG_AUTO
68938+ depends on GRKERNSEC_CHROOT
68939+ help
68940+ If you say Y here, an attacker in a chroot will not be able to
68941+ write to sysctl entries, either by sysctl(2) or through a /proc
68942+ interface. It is strongly recommended that you say Y here. If the
68943+ sysctl option is enabled, a sysctl option with name
68944+ "chroot_deny_sysctl" is created.
68945+
68946+config GRKERNSEC_CHROOT_CAPS
68947+ bool "Capability restrictions"
68948+ default y if GRKERNSEC_CONFIG_AUTO
68949+ depends on GRKERNSEC_CHROOT
68950+ help
68951+ If you say Y here, the capabilities on all processes within a
68952+ chroot jail will be lowered to stop module insertion, raw i/o,
68953+ system and net admin tasks, rebooting the system, modifying immutable
68954+ files, modifying IPC owned by another, and changing the system time.
68955+ This is left an option because it can break some apps. Disable this
68956+ if your chrooted apps are having problems performing those kinds of
68957+ tasks. If the sysctl option is enabled, a sysctl option with
68958+ name "chroot_caps" is created.
68959+
68960+config GRKERNSEC_CHROOT_INITRD
68961+ bool "Exempt initrd tasks from restrictions"
68962+ default y if GRKERNSEC_CONFIG_AUTO
68963+ depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
68964+ help
68965+ If you say Y here, tasks started prior to init will be exempted from
68966+ grsecurity's chroot restrictions. This option is mainly meant to
68967+ resolve Plymouth's performing privileged operations unnecessarily
68968+ in a chroot.
68969+
68970+endmenu
68971+menu "Kernel Auditing"
68972+depends on GRKERNSEC
68973+
68974+config GRKERNSEC_AUDIT_GROUP
68975+ bool "Single group for auditing"
68976+ help
68977+ If you say Y here, the exec and chdir logging features will only operate
68978+ on a group you specify. This option is recommended if you only want to
68979+ watch certain users instead of having a large amount of logs from the
68980+ entire system. If the sysctl option is enabled, a sysctl option with
68981+ name "audit_group" is created.
68982+
68983+config GRKERNSEC_AUDIT_GID
68984+ int "GID for auditing"
68985+ depends on GRKERNSEC_AUDIT_GROUP
68986+ default 1007
68987+
68988+config GRKERNSEC_EXECLOG
68989+ bool "Exec logging"
68990+ help
68991+ If you say Y here, all execve() calls will be logged (since the
68992+ other exec*() calls are frontends to execve(), all execution
68993+ will be logged). Useful for shell-servers that like to keep track
68994+ of their users. If the sysctl option is enabled, a sysctl option with
68995+ name "exec_logging" is created.
68996+ WARNING: This option when enabled will produce a LOT of logs, especially
68997+ on an active system.
68998+
68999+config GRKERNSEC_RESLOG
69000+ bool "Resource logging"
69001+ default y if GRKERNSEC_CONFIG_AUTO
69002+ help
69003+ If you say Y here, all attempts to overstep resource limits will
69004+ be logged with the resource name, the requested size, and the current
69005+ limit. It is highly recommended that you say Y here. If the sysctl
69006+ option is enabled, a sysctl option with name "resource_logging" is
69007+ created. If the RBAC system is enabled, the sysctl value is ignored.
69008+
69009+config GRKERNSEC_CHROOT_EXECLOG
69010+ bool "Log execs within chroot"
69011+ help
69012+ If you say Y here, all executions inside a chroot jail will be logged
69013+ to syslog. This can cause a large amount of logs if certain
69014+ applications (eg. djb's daemontools) are installed on the system, and
69015+ is therefore left as an option. If the sysctl option is enabled, a
69016+ sysctl option with name "chroot_execlog" is created.
69017+
69018+config GRKERNSEC_AUDIT_PTRACE
69019+ bool "Ptrace logging"
69020+ help
69021+ If you say Y here, all attempts to attach to a process via ptrace
69022+ will be logged. If the sysctl option is enabled, a sysctl option
69023+ with name "audit_ptrace" is created.
69024+
69025+config GRKERNSEC_AUDIT_CHDIR
69026+ bool "Chdir logging"
69027+ help
69028+ If you say Y here, all chdir() calls will be logged. If the sysctl
69029+ option is enabled, a sysctl option with name "audit_chdir" is created.
69030+
69031+config GRKERNSEC_AUDIT_MOUNT
69032+ bool "(Un)Mount logging"
69033+ help
69034+ If you say Y here, all mounts and unmounts will be logged. If the
69035+ sysctl option is enabled, a sysctl option with name "audit_mount" is
69036+ created.
69037+
69038+config GRKERNSEC_SIGNAL
69039+ bool "Signal logging"
69040+ default y if GRKERNSEC_CONFIG_AUTO
69041+ help
69042+ If you say Y here, certain important signals will be logged, such as
69043+ SIGSEGV, which will as a result inform you of when a error in a program
69044+ occurred, which in some cases could mean a possible exploit attempt.
69045+ If the sysctl option is enabled, a sysctl option with name
69046+ "signal_logging" is created.
69047+
69048+config GRKERNSEC_FORKFAIL
69049+ bool "Fork failure logging"
69050+ help
69051+ If you say Y here, all failed fork() attempts will be logged.
69052+ This could suggest a fork bomb, or someone attempting to overstep
69053+ their process limit. If the sysctl option is enabled, a sysctl option
69054+ with name "forkfail_logging" is created.
69055+
69056+config GRKERNSEC_TIME
69057+ bool "Time change logging"
69058+ default y if GRKERNSEC_CONFIG_AUTO
69059+ help
69060+ If you say Y here, any changes of the system clock will be logged.
69061+ If the sysctl option is enabled, a sysctl option with name
69062+ "timechange_logging" is created.
69063+
69064+config GRKERNSEC_PROC_IPADDR
69065+ bool "/proc/<pid>/ipaddr support"
69066+ default y if GRKERNSEC_CONFIG_AUTO
69067+ help
69068+ If you say Y here, a new entry will be added to each /proc/<pid>
69069+ directory that contains the IP address of the person using the task.
69070+ The IP is carried across local TCP and AF_UNIX stream sockets.
69071+ This information can be useful for IDS/IPSes to perform remote response
69072+ to a local attack. The entry is readable by only the owner of the
69073+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
69074+ the RBAC system), and thus does not create privacy concerns.
69075+
69076+config GRKERNSEC_RWXMAP_LOG
69077+ bool 'Denied RWX mmap/mprotect logging'
69078+ default y if GRKERNSEC_CONFIG_AUTO
69079+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
69080+ help
69081+ If you say Y here, calls to mmap() and mprotect() with explicit
69082+ usage of PROT_WRITE and PROT_EXEC together will be logged when
69083+ denied by the PAX_MPROTECT feature. This feature will also
69084+ log other problematic scenarios that can occur when PAX_MPROTECT
69085+ is enabled on a binary, like textrels and PT_GNU_STACK. If the
69086+ sysctl option is enabled, a sysctl option with name "rwxmap_logging"
69087+ is created.
69088+
69089+endmenu
69090+
69091+menu "Executable Protections"
69092+depends on GRKERNSEC
69093+
69094+config GRKERNSEC_DMESG
69095+ bool "Dmesg(8) restriction"
69096+ default y if GRKERNSEC_CONFIG_AUTO
69097+ help
69098+ If you say Y here, non-root users will not be able to use dmesg(8)
69099+ to view the contents of the kernel's circular log buffer.
69100+ The kernel's log buffer often contains kernel addresses and other
69101+ identifying information useful to an attacker in fingerprinting a
69102+ system for a targeted exploit.
69103+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
69104+ created.
69105+
69106+config GRKERNSEC_HARDEN_PTRACE
69107+ bool "Deter ptrace-based process snooping"
69108+ default y if GRKERNSEC_CONFIG_AUTO
69109+ help
69110+ If you say Y here, TTY sniffers and other malicious monitoring
69111+ programs implemented through ptrace will be defeated. If you
69112+ have been using the RBAC system, this option has already been
69113+ enabled for several years for all users, with the ability to make
69114+ fine-grained exceptions.
69115+
69116+ This option only affects the ability of non-root users to ptrace
69117+ processes that are not a descendent of the ptracing process.
69118+ This means that strace ./binary and gdb ./binary will still work,
69119+ but attaching to arbitrary processes will not. If the sysctl
69120+ option is enabled, a sysctl option with name "harden_ptrace" is
69121+ created.
69122+
69123+config GRKERNSEC_PTRACE_READEXEC
69124+ bool "Require read access to ptrace sensitive binaries"
69125+ default y if GRKERNSEC_CONFIG_AUTO
69126+ help
69127+ If you say Y here, unprivileged users will not be able to ptrace unreadable
69128+ binaries. This option is useful in environments that
69129+ remove the read bits (e.g. file mode 4711) from suid binaries to
69130+ prevent infoleaking of their contents. This option adds
69131+ consistency to the use of that file mode, as the binary could normally
69132+ be read out when run without privileges while ptracing.
69133+
69134+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
69135+ is created.
69136+
69137+config GRKERNSEC_SETXID
69138+ bool "Enforce consistent multithreaded privileges"
69139+ default y if GRKERNSEC_CONFIG_AUTO
69140+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
69141+ help
69142+ If you say Y here, a change from a root uid to a non-root uid
69143+ in a multithreaded application will cause the resulting uids,
69144+ gids, supplementary groups, and capabilities in that thread
69145+ to be propagated to the other threads of the process. In most
69146+ cases this is unnecessary, as glibc will emulate this behavior
69147+ on behalf of the application. Other libcs do not act in the
69148+ same way, allowing the other threads of the process to continue
69149+ running with root privileges. If the sysctl option is enabled,
69150+ a sysctl option with name "consistent_setxid" is created.
69151+
69152+config GRKERNSEC_HARDEN_IPC
69153+ bool "Disallow access to overly-permissive IPC objects"
69154+ default y if GRKERNSEC_CONFIG_AUTO
69155+ depends on SYSVIPC
69156+ help
69157+ If you say Y here, access to overly-permissive IPC objects (shared
69158+ memory, message queues, and semaphores) will be denied for processes
69159+ given the following criteria beyond normal permission checks:
69160+ 1) If the IPC object is world-accessible and the euid doesn't match
69161+ that of the creator or current uid for the IPC object
69162+ 2) If the IPC object is group-accessible and the egid doesn't
69163+ match that of the creator or current gid for the IPC object
69164+ It's a common error to grant too much permission to these objects,
69165+ with impact ranging from denial of service and information leaking to
69166+ privilege escalation. This feature was developed in response to
69167+ research by Tim Brown:
69168+ http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
69169+ who found hundreds of such insecure usages. Processes with
69170+ CAP_IPC_OWNER are still permitted to access these IPC objects.
69171+ If the sysctl option is enabled, a sysctl option with name
69172+ "harden_ipc" is created.
69173+
69174+config GRKERNSEC_TPE
69175+ bool "Trusted Path Execution (TPE)"
69176+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
69177+ help
69178+ If you say Y here, you will be able to choose a gid to add to the
69179+ supplementary groups of users you want to mark as "untrusted."
69180+ These users will not be able to execute any files that are not in
69181+ root-owned directories writable only by root. If the sysctl option
69182+ is enabled, a sysctl option with name "tpe" is created.
69183+
69184+config GRKERNSEC_TPE_ALL
69185+ bool "Partially restrict all non-root users"
69186+ depends on GRKERNSEC_TPE
69187+ help
69188+ If you say Y here, all non-root users will be covered under
69189+ a weaker TPE restriction. This is separate from, and in addition to,
69190+ the main TPE options that you have selected elsewhere. Thus, if a
69191+ "trusted" GID is chosen, this restriction applies to even that GID.
69192+ Under this restriction, all non-root users will only be allowed to
69193+ execute files in directories they own that are not group or
69194+ world-writable, or in directories owned by root and writable only by
69195+ root. If the sysctl option is enabled, a sysctl option with name
69196+ "tpe_restrict_all" is created.
69197+
69198+config GRKERNSEC_TPE_INVERT
69199+ bool "Invert GID option"
69200+ depends on GRKERNSEC_TPE
69201+ help
69202+ If you say Y here, the group you specify in the TPE configuration will
69203+ decide what group TPE restrictions will be *disabled* for. This
69204+ option is useful if you want TPE restrictions to be applied to most
69205+ users on the system. If the sysctl option is enabled, a sysctl option
69206+ with name "tpe_invert" is created. Unlike other sysctl options, this
69207+ entry will default to on for backward-compatibility.
69208+
69209+config GRKERNSEC_TPE_GID
69210+ int
69211+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
69212+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
69213+
69214+config GRKERNSEC_TPE_UNTRUSTED_GID
69215+ int "GID for TPE-untrusted users"
69216+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
69217+ default 1005
69218+ help
69219+ Setting this GID determines what group TPE restrictions will be
69220+ *enabled* for. If the sysctl option is enabled, a sysctl option
69221+ with name "tpe_gid" is created.
69222+
69223+config GRKERNSEC_TPE_TRUSTED_GID
69224+ int "GID for TPE-trusted users"
69225+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
69226+ default 1005
69227+ help
69228+ Setting this GID determines what group TPE restrictions will be
69229+ *disabled* for. If the sysctl option is enabled, a sysctl option
69230+ with name "tpe_gid" is created.
69231+
69232+endmenu
69233+menu "Network Protections"
69234+depends on GRKERNSEC
69235+
69236+config GRKERNSEC_BLACKHOLE
69237+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
69238+ default y if GRKERNSEC_CONFIG_AUTO
69239+ depends on NET
69240+ help
69241+ If you say Y here, neither TCP resets nor ICMP
69242+ destination-unreachable packets will be sent in response to packets
69243+ sent to ports for which no associated listening process exists.
69244+ It will also prevent the sending of ICMP protocol unreachable packets
69245+ in response to packets with unknown protocols.
69246+ This feature supports both IPV4 and IPV6 and exempts the
69247+ loopback interface from blackholing. Enabling this feature
69248+ makes a host more resilient to DoS attacks and reduces network
69249+ visibility against scanners.
69250+
69251+ The blackhole feature as-implemented is equivalent to the FreeBSD
69252+ blackhole feature, as it prevents RST responses to all packets, not
69253+ just SYNs. Under most application behavior this causes no
69254+ problems, but applications (like haproxy) may not close certain
69255+ connections in a way that cleanly terminates them on the remote
69256+ end, leaving the remote host in LAST_ACK state. Because of this
69257+ side-effect and to prevent intentional LAST_ACK DoSes, this
69258+ feature also adds automatic mitigation against such attacks.
69259+ The mitigation drastically reduces the amount of time a socket
69260+ can spend in LAST_ACK state. If you're using haproxy and not
69261+ all servers it connects to have this option enabled, consider
69262+ disabling this feature on the haproxy host.
69263+
69264+ If the sysctl option is enabled, two sysctl options with names
69265+ "ip_blackhole" and "lastack_retries" will be created.
69266+ While "ip_blackhole" takes the standard zero/non-zero on/off
69267+ toggle, "lastack_retries" uses the same kinds of values as
69268+ "tcp_retries1" and "tcp_retries2". The default value of 4
69269+ prevents a socket from lasting more than 45 seconds in LAST_ACK
69270+ state.
69271+
69272+config GRKERNSEC_NO_SIMULT_CONNECT
69273+ bool "Disable TCP Simultaneous Connect"
69274+ default y if GRKERNSEC_CONFIG_AUTO
69275+ depends on NET
69276+ help
69277+ If you say Y here, a feature by Willy Tarreau will be enabled that
69278+ removes a weakness in Linux's strict implementation of TCP that
69279+ allows two clients to connect to each other without either entering
69280+ a listening state. The weakness allows an attacker to easily prevent
69281+ a client from connecting to a known server provided the source port
69282+ for the connection is guessed correctly.
69283+
69284+ As the weakness could be used to prevent an antivirus or IPS from
69285+ fetching updates, or prevent an SSL gateway from fetching a CRL,
69286+ it should be eliminated by enabling this option. Though Linux is
69287+ one of few operating systems supporting simultaneous connect, it
69288+ has no legitimate use in practice and is rarely supported by firewalls.
69289+
69290+config GRKERNSEC_SOCKET
69291+ bool "Socket restrictions"
69292+ depends on NET
69293+ help
69294+ If you say Y here, you will be able to choose from several options.
69295+ If you assign a GID on your system and add it to the supplementary
69296+ groups of users you want to restrict socket access to, this patch
69297+ will perform up to three things, based on the option(s) you choose.
69298+
69299+config GRKERNSEC_SOCKET_ALL
69300+ bool "Deny any sockets to group"
69301+ depends on GRKERNSEC_SOCKET
69302+ help
69303+ If you say Y here, you will be able to choose a GID of whose users will
69304+ be unable to connect to other hosts from your machine or run server
69305+ applications from your machine. If the sysctl option is enabled, a
69306+ sysctl option with name "socket_all" is created.
69307+
69308+config GRKERNSEC_SOCKET_ALL_GID
69309+ int "GID to deny all sockets for"
69310+ depends on GRKERNSEC_SOCKET_ALL
69311+ default 1004
69312+ help
69313+ Here you can choose the GID to disable socket access for. Remember to
69314+ add the users you want socket access disabled for to the GID
69315+ specified here. If the sysctl option is enabled, a sysctl option
69316+ with name "socket_all_gid" is created.
69317+
69318+config GRKERNSEC_SOCKET_CLIENT
69319+ bool "Deny client sockets to group"
69320+ depends on GRKERNSEC_SOCKET
69321+ help
69322+ If you say Y here, you will be able to choose a GID of whose users will
69323+ be unable to connect to other hosts from your machine, but will be
69324+ able to run servers. If this option is enabled, all users in the group
69325+ you specify will have to use passive mode when initiating ftp transfers
69326+ from the shell on your machine. If the sysctl option is enabled, a
69327+ sysctl option with name "socket_client" is created.
69328+
69329+config GRKERNSEC_SOCKET_CLIENT_GID
69330+ int "GID to deny client sockets for"
69331+ depends on GRKERNSEC_SOCKET_CLIENT
69332+ default 1003
69333+ help
69334+ Here you can choose the GID to disable client socket access for.
69335+ Remember to add the users you want client socket access disabled for to
69336+ the GID specified here. If the sysctl option is enabled, a sysctl
69337+ option with name "socket_client_gid" is created.
69338+
69339+config GRKERNSEC_SOCKET_SERVER
69340+ bool "Deny server sockets to group"
69341+ depends on GRKERNSEC_SOCKET
69342+ help
69343+ If you say Y here, you will be able to choose a GID of whose users will
69344+ be unable to run server applications from your machine. If the sysctl
69345+ option is enabled, a sysctl option with name "socket_server" is created.
69346+
69347+config GRKERNSEC_SOCKET_SERVER_GID
69348+ int "GID to deny server sockets for"
69349+ depends on GRKERNSEC_SOCKET_SERVER
69350+ default 1002
69351+ help
69352+ Here you can choose the GID to disable server socket access for.
69353+ Remember to add the users you want server socket access disabled for to
69354+ the GID specified here. If the sysctl option is enabled, a sysctl
69355+ option with name "socket_server_gid" is created.
69356+
69357+endmenu
69358+
69359+menu "Physical Protections"
69360+depends on GRKERNSEC
69361+
69362+config GRKERNSEC_DENYUSB
69363+ bool "Deny new USB connections after toggle"
69364+ default y if GRKERNSEC_CONFIG_AUTO
69365+ depends on SYSCTL && USB_SUPPORT
69366+ help
69367+ If you say Y here, a new sysctl option with name "deny_new_usb"
69368+ will be created. Setting its value to 1 will prevent any new
69369+ USB devices from being recognized by the OS. Any attempted USB
69370+ device insertion will be logged. This option is intended to be
69371+ used against custom USB devices designed to exploit vulnerabilities
69372+ in various USB device drivers.
69373+
69374+ For greatest effectiveness, this sysctl should be set after any
69375+ relevant init scripts. This option is safe to enable in distros
69376+ as each user can choose whether or not to toggle the sysctl.
69377+
69378+config GRKERNSEC_DENYUSB_FORCE
69379+ bool "Reject all USB devices not connected at boot"
69380+ select USB
69381+ depends on GRKERNSEC_DENYUSB
69382+ help
69383+ If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
69384+ that doesn't involve a sysctl entry. This option should only be
69385+ enabled if you're sure you want to deny all new USB connections
69386+ at runtime and don't want to modify init scripts. This should not
69387+ be enabled by distros. It forces the core USB code to be built
69388+ into the kernel image so that all devices connected at boot time
69389+ can be recognized and new USB device connections can be prevented
69390+ prior to init running.
69391+
69392+endmenu
69393+
69394+menu "Sysctl Support"
69395+depends on GRKERNSEC && SYSCTL
69396+
69397+config GRKERNSEC_SYSCTL
69398+ bool "Sysctl support"
69399+ default y if GRKERNSEC_CONFIG_AUTO
69400+ help
69401+ If you say Y here, you will be able to change the options that
69402+ grsecurity runs with at bootup, without having to recompile your
69403+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
69404+ to enable (1) or disable (0) various features. All the sysctl entries
69405+ are mutable until the "grsec_lock" entry is set to a non-zero value.
69406+ All features enabled in the kernel configuration are disabled at boot
69407+ if you do not say Y to the "Turn on features by default" option.
69408+ All options should be set at startup, and the grsec_lock entry should
69409+ be set to a non-zero value after all the options are set.
69410+ *THIS IS EXTREMELY IMPORTANT*
69411+
69412+config GRKERNSEC_SYSCTL_DISTRO
69413+ bool "Extra sysctl support for distro makers (READ HELP)"
69414+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
69415+ help
69416+ If you say Y here, additional sysctl options will be created
69417+ for features that affect processes running as root. Therefore,
69418+ it is critical when using this option that the grsec_lock entry be
69419+ enabled after boot. Only distros with prebuilt kernel packages
69420+ with this option enabled that can ensure grsec_lock is enabled
69421+ after boot should use this option.
69422+ *Failure to set grsec_lock after boot makes all grsec features
69423+ this option covers useless*
69424+
69425+ Currently this option creates the following sysctl entries:
69426+ "Disable Privileged I/O": "disable_priv_io"
69427+
69428+config GRKERNSEC_SYSCTL_ON
69429+ bool "Turn on features by default"
69430+ default y if GRKERNSEC_CONFIG_AUTO
69431+ depends on GRKERNSEC_SYSCTL
69432+ help
69433+ If you say Y here, instead of having all features enabled in the
69434+ kernel configuration disabled at boot time, the features will be
69435+ enabled at boot time. It is recommended you say Y here unless
69436+ there is some reason you would want all sysctl-tunable features to
69437+ be disabled by default. As mentioned elsewhere, it is important
69438+ to enable the grsec_lock entry once you have finished modifying
69439+ the sysctl entries.
69440+
69441+endmenu
69442+menu "Logging Options"
69443+depends on GRKERNSEC
69444+
69445+config GRKERNSEC_FLOODTIME
69446+ int "Seconds in between log messages (minimum)"
69447+ default 10
69448+ help
69449+ This option allows you to enforce the number of seconds between
69450+ grsecurity log messages. The default should be suitable for most
69451+ people, however, if you choose to change it, choose a value small enough
69452+ to allow informative logs to be produced, but large enough to
69453+ prevent flooding.
69454+
69455+ Setting both this value and GRKERNSEC_FLOODBURST to 0 will disable
69456+ any rate limiting on grsecurity log messages.
69457+
69458+config GRKERNSEC_FLOODBURST
69459+ int "Number of messages in a burst (maximum)"
69460+ default 6
69461+ help
69462+ This option allows you to choose the maximum number of messages allowed
69463+ within the flood time interval you chose in a separate option. The
69464+ default should be suitable for most people, however if you find that
69465+ many of your logs are being interpreted as flooding, you may want to
69466+ raise this value.
69467+
69468+ Setting both this value and GRKERNSEC_FLOODTIME to 0 will disable
69469+ any rate limiting on grsecurity log messages.
69470+
69471+endmenu
69472diff --git a/grsecurity/Makefile b/grsecurity/Makefile
69473new file mode 100644
69474index 0000000..30ababb
69475--- /dev/null
69476+++ b/grsecurity/Makefile
69477@@ -0,0 +1,54 @@
69478+# grsecurity – access control and security hardening for Linux
69479+# All code in this directory and various hooks located throughout the Linux kernel are
69480+# Copyright (C) 2001-2014 Bradley Spengler, Open Source Security, Inc.
69481+# http://www.grsecurity.net spender@grsecurity.net
69482+#
69483+# This program is free software; you can redistribute it and/or
69484+# modify it under the terms of the GNU General Public License version 2
69485+# as published by the Free Software Foundation.
69486+#
69487+# This program is distributed in the hope that it will be useful,
69488+# but WITHOUT ANY WARRANTY; without even the implied warranty of
69489+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69490+# GNU General Public License for more details.
69491+#
69492+# You should have received a copy of the GNU General Public License
69493+# along with this program; if not, write to the Free Software
69494+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
69495+
69496+KBUILD_CFLAGS += -Werror
69497+
69498+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
69499+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
69500+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
69501+ grsec_usb.o grsec_ipc.o grsec_proc.o
69502+
69503+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
69504+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
69505+ gracl_learn.o grsec_log.o gracl_policy.o
69506+ifdef CONFIG_COMPAT
69507+obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
69508+endif
69509+
69510+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
69511+
69512+ifdef CONFIG_NET
69513+obj-y += grsec_sock.o
69514+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
69515+endif
69516+
69517+ifndef CONFIG_GRKERNSEC
69518+obj-y += grsec_disabled.o
69519+endif
69520+
69521+ifdef CONFIG_GRKERNSEC_HIDESYM
69522+extra-y := grsec_hidesym.o
69523+$(obj)/grsec_hidesym.o:
69524+ @-chmod -f 500 /boot
69525+ @-chmod -f 500 /lib/modules
69526+ @-chmod -f 500 /lib64/modules
69527+ @-chmod -f 500 /lib32/modules
69528+ @-chmod -f 700 .
69529+ @-chmod -f 700 $(objtree)
69530+ @echo ' grsec: protected kernel image paths'
69531+endif
69532diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
69533new file mode 100644
69534index 0000000..6ae3aa0
69535--- /dev/null
69536+++ b/grsecurity/gracl.c
69537@@ -0,0 +1,2703 @@
69538+#include <linux/kernel.h>
69539+#include <linux/module.h>
69540+#include <linux/sched.h>
69541+#include <linux/mm.h>
69542+#include <linux/file.h>
69543+#include <linux/fs.h>
69544+#include <linux/namei.h>
69545+#include <linux/mount.h>
69546+#include <linux/tty.h>
69547+#include <linux/proc_fs.h>
69548+#include <linux/lglock.h>
69549+#include <linux/slab.h>
69550+#include <linux/vmalloc.h>
69551+#include <linux/types.h>
69552+#include <linux/sysctl.h>
69553+#include <linux/netdevice.h>
69554+#include <linux/ptrace.h>
69555+#include <linux/gracl.h>
69556+#include <linux/gralloc.h>
69557+#include <linux/security.h>
69558+#include <linux/grinternal.h>
69559+#include <linux/pid_namespace.h>
69560+#include <linux/stop_machine.h>
69561+#include <linux/fdtable.h>
69562+#include <linux/percpu.h>
69563+#include <linux/lglock.h>
69564+#include <linux/hugetlb.h>
69565+#include <linux/posix-timers.h>
69566+#include <linux/prefetch.h>
69567+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69568+#include <linux/magic.h>
69569+#include <linux/pagemap.h>
69570+#include "../fs/btrfs/async-thread.h"
69571+#include "../fs/btrfs/ctree.h"
69572+#include "../fs/btrfs/btrfs_inode.h"
69573+#endif
69574+#include "../fs/mount.h"
69575+
69576+#include <asm/uaccess.h>
69577+#include <asm/errno.h>
69578+#include <asm/mman.h>
69579+
69580+#define FOR_EACH_ROLE_START(role) \
69581+ role = running_polstate.role_list; \
69582+ while (role) {
69583+
69584+#define FOR_EACH_ROLE_END(role) \
69585+ role = role->prev; \
69586+ }
69587+
69588+extern struct path gr_real_root;
69589+
69590+static struct gr_policy_state running_polstate;
69591+struct gr_policy_state *polstate = &running_polstate;
69592+extern struct gr_alloc_state *current_alloc_state;
69593+
69594+extern char *gr_shared_page[4];
69595+DEFINE_RWLOCK(gr_inode_lock);
69596+
69597+static unsigned int gr_status __read_only = GR_STATUS_INIT;
69598+
69599+#ifdef CONFIG_NET
69600+extern struct vfsmount *sock_mnt;
69601+#endif
69602+
69603+extern struct vfsmount *pipe_mnt;
69604+extern struct vfsmount *shm_mnt;
69605+
69606+#ifdef CONFIG_HUGETLBFS
69607+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
69608+#endif
69609+
69610+extern u16 acl_sp_role_value;
69611+extern struct acl_object_label *fakefs_obj_rw;
69612+extern struct acl_object_label *fakefs_obj_rwx;
69613+
69614+int gr_acl_is_enabled(void)
69615+{
69616+ return (gr_status & GR_READY);
69617+}
69618+
69619+void gr_enable_rbac_system(void)
69620+{
69621+ pax_open_kernel();
69622+ gr_status |= GR_READY;
69623+ pax_close_kernel();
69624+}
69625+
69626+int gr_rbac_disable(void *unused)
69627+{
69628+ pax_open_kernel();
69629+ gr_status &= ~GR_READY;
69630+ pax_close_kernel();
69631+
69632+ return 0;
69633+}
69634+
69635+static inline dev_t __get_dev(const struct dentry *dentry)
69636+{
69637+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69638+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69639+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
69640+ else
69641+#endif
69642+ return dentry->d_sb->s_dev;
69643+}
69644+
69645+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
69646+{
69647+ return __get_dev(dentry);
69648+}
69649+
69650+static char gr_task_roletype_to_char(struct task_struct *task)
69651+{
69652+ switch (task->role->roletype &
69653+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
69654+ GR_ROLE_SPECIAL)) {
69655+ case GR_ROLE_DEFAULT:
69656+ return 'D';
69657+ case GR_ROLE_USER:
69658+ return 'U';
69659+ case GR_ROLE_GROUP:
69660+ return 'G';
69661+ case GR_ROLE_SPECIAL:
69662+ return 'S';
69663+ }
69664+
69665+ return 'X';
69666+}
69667+
69668+char gr_roletype_to_char(void)
69669+{
69670+ return gr_task_roletype_to_char(current);
69671+}
69672+
69673+__inline__ int
69674+gr_acl_tpe_check(void)
69675+{
69676+ if (unlikely(!(gr_status & GR_READY)))
69677+ return 0;
69678+ if (current->role->roletype & GR_ROLE_TPE)
69679+ return 1;
69680+ else
69681+ return 0;
69682+}
69683+
69684+int
69685+gr_handle_rawio(const struct inode *inode)
69686+{
69687+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
69688+ if (inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR)) &&
69689+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
69690+ !capable(CAP_SYS_RAWIO))
69691+ return 1;
69692+#endif
69693+ return 0;
69694+}
69695+
69696+int
69697+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
69698+{
69699+ if (likely(lena != lenb))
69700+ return 0;
69701+
69702+ return !memcmp(a, b, lena);
69703+}
69704+
69705+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
69706+{
69707+ *buflen -= namelen;
69708+ if (*buflen < 0)
69709+ return -ENAMETOOLONG;
69710+ *buffer -= namelen;
69711+ memcpy(*buffer, str, namelen);
69712+ return 0;
69713+}
69714+
69715+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
69716+{
69717+ return prepend(buffer, buflen, name->name, name->len);
69718+}
69719+
69720+static int prepend_path(const struct path *path, struct path *root,
69721+ char **buffer, int *buflen)
69722+{
69723+ struct dentry *dentry = path->dentry;
69724+ struct vfsmount *vfsmnt = path->mnt;
69725+ struct mount *mnt = real_mount(vfsmnt);
69726+ bool slash = false;
69727+ int error = 0;
69728+
69729+ while (dentry != root->dentry || vfsmnt != root->mnt) {
69730+ struct dentry * parent;
69731+
69732+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
69733+ /* Global root? */
69734+ if (!mnt_has_parent(mnt)) {
69735+ goto out;
69736+ }
69737+ dentry = mnt->mnt_mountpoint;
69738+ mnt = mnt->mnt_parent;
69739+ vfsmnt = &mnt->mnt;
69740+ continue;
69741+ }
69742+ parent = dentry->d_parent;
69743+ prefetch(parent);
69744+ spin_lock(&dentry->d_lock);
69745+ error = prepend_name(buffer, buflen, &dentry->d_name);
69746+ spin_unlock(&dentry->d_lock);
69747+ if (!error)
69748+ error = prepend(buffer, buflen, "/", 1);
69749+ if (error)
69750+ break;
69751+
69752+ slash = true;
69753+ dentry = parent;
69754+ }
69755+
69756+out:
69757+ if (!error && !slash)
69758+ error = prepend(buffer, buflen, "/", 1);
69759+
69760+ return error;
69761+}
69762+
69763+/* this must be called with mount_lock and rename_lock held */
69764+
69765+static char *__our_d_path(const struct path *path, struct path *root,
69766+ char *buf, int buflen)
69767+{
69768+ char *res = buf + buflen;
69769+ int error;
69770+
69771+ prepend(&res, &buflen, "\0", 1);
69772+ error = prepend_path(path, root, &res, &buflen);
69773+ if (error)
69774+ return ERR_PTR(error);
69775+
69776+ return res;
69777+}
69778+
69779+static char *
69780+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
69781+{
69782+ char *retval;
69783+
69784+ retval = __our_d_path(path, root, buf, buflen);
69785+ if (unlikely(IS_ERR(retval)))
69786+ retval = strcpy(buf, "<path too long>");
69787+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
69788+ retval[1] = '\0';
69789+
69790+ return retval;
69791+}
69792+
69793+static char *
69794+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69795+ char *buf, int buflen)
69796+{
69797+ struct path path;
69798+ char *res;
69799+
69800+ path.dentry = (struct dentry *)dentry;
69801+ path.mnt = (struct vfsmount *)vfsmnt;
69802+
69803+ /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
69804+ by the RBAC system */
69805+ res = gen_full_path(&path, &gr_real_root, buf, buflen);
69806+
69807+ return res;
69808+}
69809+
69810+static char *
69811+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
69812+ char *buf, int buflen)
69813+{
69814+ char *res;
69815+ struct path path;
69816+ struct path root;
69817+ struct task_struct *reaper = init_pid_ns.child_reaper;
69818+
69819+ path.dentry = (struct dentry *)dentry;
69820+ path.mnt = (struct vfsmount *)vfsmnt;
69821+
69822+ /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
69823+ get_fs_root(reaper->fs, &root);
69824+
69825+ read_seqlock_excl(&mount_lock);
69826+ write_seqlock(&rename_lock);
69827+ res = gen_full_path(&path, &root, buf, buflen);
69828+ write_sequnlock(&rename_lock);
69829+ read_sequnlock_excl(&mount_lock);
69830+
69831+ path_put(&root);
69832+ return res;
69833+}
69834+
69835+char *
69836+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69837+{
69838+ char *ret;
69839+ read_seqlock_excl(&mount_lock);
69840+ write_seqlock(&rename_lock);
69841+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69842+ PAGE_SIZE);
69843+ write_sequnlock(&rename_lock);
69844+ read_sequnlock_excl(&mount_lock);
69845+ return ret;
69846+}
69847+
69848+static char *
69849+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
69850+{
69851+ char *ret;
69852+ char *buf;
69853+ int buflen;
69854+
69855+ read_seqlock_excl(&mount_lock);
69856+ write_seqlock(&rename_lock);
69857+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
69858+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
69859+ buflen = (int)(ret - buf);
69860+ if (buflen >= 5)
69861+ prepend(&ret, &buflen, "/proc", 5);
69862+ else
69863+ ret = strcpy(buf, "<path too long>");
69864+ write_sequnlock(&rename_lock);
69865+ read_sequnlock_excl(&mount_lock);
69866+ return ret;
69867+}
69868+
69869+char *
69870+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
69871+{
69872+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
69873+ PAGE_SIZE);
69874+}
69875+
69876+char *
69877+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
69878+{
69879+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
69880+ PAGE_SIZE);
69881+}
69882+
69883+char *
69884+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
69885+{
69886+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
69887+ PAGE_SIZE);
69888+}
69889+
69890+char *
69891+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
69892+{
69893+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
69894+ PAGE_SIZE);
69895+}
69896+
69897+char *
69898+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
69899+{
69900+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
69901+ PAGE_SIZE);
69902+}
69903+
69904+__inline__ __u32
69905+to_gr_audit(const __u32 reqmode)
69906+{
69907+ /* masks off auditable permission flags, then shifts them to create
69908+ auditing flags, and adds the special case of append auditing if
69909+ we're requesting write */
69910+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
69911+}
69912+
69913+struct acl_role_label *
69914+__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
69915+ const gid_t gid)
69916+{
69917+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
69918+ struct acl_role_label *match;
69919+ struct role_allowed_ip *ipp;
69920+ unsigned int x;
69921+ u32 curr_ip = task->signal->saved_ip;
69922+
69923+ match = state->acl_role_set.r_hash[index];
69924+
69925+ while (match) {
69926+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
69927+ for (x = 0; x < match->domain_child_num; x++) {
69928+ if (match->domain_children[x] == uid)
69929+ goto found;
69930+ }
69931+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
69932+ break;
69933+ match = match->next;
69934+ }
69935+found:
69936+ if (match == NULL) {
69937+ try_group:
69938+ index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
69939+ match = state->acl_role_set.r_hash[index];
69940+
69941+ while (match) {
69942+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
69943+ for (x = 0; x < match->domain_child_num; x++) {
69944+ if (match->domain_children[x] == gid)
69945+ goto found2;
69946+ }
69947+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
69948+ break;
69949+ match = match->next;
69950+ }
69951+found2:
69952+ if (match == NULL)
69953+ match = state->default_role;
69954+ if (match->allowed_ips == NULL)
69955+ return match;
69956+ else {
69957+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69958+ if (likely
69959+ ((ntohl(curr_ip) & ipp->netmask) ==
69960+ (ntohl(ipp->addr) & ipp->netmask)))
69961+ return match;
69962+ }
69963+ match = state->default_role;
69964+ }
69965+ } else if (match->allowed_ips == NULL) {
69966+ return match;
69967+ } else {
69968+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
69969+ if (likely
69970+ ((ntohl(curr_ip) & ipp->netmask) ==
69971+ (ntohl(ipp->addr) & ipp->netmask)))
69972+ return match;
69973+ }
69974+ goto try_group;
69975+ }
69976+
69977+ return match;
69978+}
69979+
69980+static struct acl_role_label *
69981+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
69982+ const gid_t gid)
69983+{
69984+ return __lookup_acl_role_label(&running_polstate, task, uid, gid);
69985+}
69986+
69987+struct acl_subject_label *
69988+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
69989+ const struct acl_role_label *role)
69990+{
69991+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
69992+ struct acl_subject_label *match;
69993+
69994+ match = role->subj_hash[index];
69995+
69996+ while (match && (match->inode != ino || match->device != dev ||
69997+ (match->mode & GR_DELETED))) {
69998+ match = match->next;
69999+ }
70000+
70001+ if (match && !(match->mode & GR_DELETED))
70002+ return match;
70003+ else
70004+ return NULL;
70005+}
70006+
70007+struct acl_subject_label *
70008+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
70009+ const struct acl_role_label *role)
70010+{
70011+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
70012+ struct acl_subject_label *match;
70013+
70014+ match = role->subj_hash[index];
70015+
70016+ while (match && (match->inode != ino || match->device != dev ||
70017+ !(match->mode & GR_DELETED))) {
70018+ match = match->next;
70019+ }
70020+
70021+ if (match && (match->mode & GR_DELETED))
70022+ return match;
70023+ else
70024+ return NULL;
70025+}
70026+
70027+static struct acl_object_label *
70028+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
70029+ const struct acl_subject_label *subj)
70030+{
70031+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70032+ struct acl_object_label *match;
70033+
70034+ match = subj->obj_hash[index];
70035+
70036+ while (match && (match->inode != ino || match->device != dev ||
70037+ (match->mode & GR_DELETED))) {
70038+ match = match->next;
70039+ }
70040+
70041+ if (match && !(match->mode & GR_DELETED))
70042+ return match;
70043+ else
70044+ return NULL;
70045+}
70046+
70047+static struct acl_object_label *
70048+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
70049+ const struct acl_subject_label *subj)
70050+{
70051+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
70052+ struct acl_object_label *match;
70053+
70054+ match = subj->obj_hash[index];
70055+
70056+ while (match && (match->inode != ino || match->device != dev ||
70057+ !(match->mode & GR_DELETED))) {
70058+ match = match->next;
70059+ }
70060+
70061+ if (match && (match->mode & GR_DELETED))
70062+ return match;
70063+
70064+ match = subj->obj_hash[index];
70065+
70066+ while (match && (match->inode != ino || match->device != dev ||
70067+ (match->mode & GR_DELETED))) {
70068+ match = match->next;
70069+ }
70070+
70071+ if (match && !(match->mode & GR_DELETED))
70072+ return match;
70073+ else
70074+ return NULL;
70075+}
70076+
70077+struct name_entry *
70078+__lookup_name_entry(const struct gr_policy_state *state, const char *name)
70079+{
70080+ unsigned int len = strlen(name);
70081+ unsigned int key = full_name_hash(name, len);
70082+ unsigned int index = key % state->name_set.n_size;
70083+ struct name_entry *match;
70084+
70085+ match = state->name_set.n_hash[index];
70086+
70087+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
70088+ match = match->next;
70089+
70090+ return match;
70091+}
70092+
70093+static struct name_entry *
70094+lookup_name_entry(const char *name)
70095+{
70096+ return __lookup_name_entry(&running_polstate, name);
70097+}
70098+
70099+static struct name_entry *
70100+lookup_name_entry_create(const char *name)
70101+{
70102+ unsigned int len = strlen(name);
70103+ unsigned int key = full_name_hash(name, len);
70104+ unsigned int index = key % running_polstate.name_set.n_size;
70105+ struct name_entry *match;
70106+
70107+ match = running_polstate.name_set.n_hash[index];
70108+
70109+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70110+ !match->deleted))
70111+ match = match->next;
70112+
70113+ if (match && match->deleted)
70114+ return match;
70115+
70116+ match = running_polstate.name_set.n_hash[index];
70117+
70118+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
70119+ match->deleted))
70120+ match = match->next;
70121+
70122+ if (match && !match->deleted)
70123+ return match;
70124+ else
70125+ return NULL;
70126+}
70127+
70128+static struct inodev_entry *
70129+lookup_inodev_entry(const ino_t ino, const dev_t dev)
70130+{
70131+ unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
70132+ struct inodev_entry *match;
70133+
70134+ match = running_polstate.inodev_set.i_hash[index];
70135+
70136+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
70137+ match = match->next;
70138+
70139+ return match;
70140+}
70141+
70142+void
70143+__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
70144+{
70145+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
70146+ state->inodev_set.i_size);
70147+ struct inodev_entry **curr;
70148+
70149+ entry->prev = NULL;
70150+
70151+ curr = &state->inodev_set.i_hash[index];
70152+ if (*curr != NULL)
70153+ (*curr)->prev = entry;
70154+
70155+ entry->next = *curr;
70156+ *curr = entry;
70157+
70158+ return;
70159+}
70160+
70161+static void
70162+insert_inodev_entry(struct inodev_entry *entry)
70163+{
70164+ __insert_inodev_entry(&running_polstate, entry);
70165+}
70166+
70167+void
70168+insert_acl_obj_label(struct acl_object_label *obj,
70169+ struct acl_subject_label *subj)
70170+{
70171+ unsigned int index =
70172+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
70173+ struct acl_object_label **curr;
70174+
70175+ obj->prev = NULL;
70176+
70177+ curr = &subj->obj_hash[index];
70178+ if (*curr != NULL)
70179+ (*curr)->prev = obj;
70180+
70181+ obj->next = *curr;
70182+ *curr = obj;
70183+
70184+ return;
70185+}
70186+
70187+void
70188+insert_acl_subj_label(struct acl_subject_label *obj,
70189+ struct acl_role_label *role)
70190+{
70191+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
70192+ struct acl_subject_label **curr;
70193+
70194+ obj->prev = NULL;
70195+
70196+ curr = &role->subj_hash[index];
70197+ if (*curr != NULL)
70198+ (*curr)->prev = obj;
70199+
70200+ obj->next = *curr;
70201+ *curr = obj;
70202+
70203+ return;
70204+}
70205+
70206+/* derived from glibc fnmatch() 0: match, 1: no match*/
70207+
70208+static int
70209+glob_match(const char *p, const char *n)
70210+{
70211+ char c;
70212+
70213+ while ((c = *p++) != '\0') {
70214+ switch (c) {
70215+ case '?':
70216+ if (*n == '\0')
70217+ return 1;
70218+ else if (*n == '/')
70219+ return 1;
70220+ break;
70221+ case '\\':
70222+ if (*n != c)
70223+ return 1;
70224+ break;
70225+ case '*':
70226+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
70227+ if (*n == '/')
70228+ return 1;
70229+ else if (c == '?') {
70230+ if (*n == '\0')
70231+ return 1;
70232+ else
70233+ ++n;
70234+ }
70235+ }
70236+ if (c == '\0') {
70237+ return 0;
70238+ } else {
70239+ const char *endp;
70240+
70241+ if ((endp = strchr(n, '/')) == NULL)
70242+ endp = n + strlen(n);
70243+
70244+ if (c == '[') {
70245+ for (--p; n < endp; ++n)
70246+ if (!glob_match(p, n))
70247+ return 0;
70248+ } else if (c == '/') {
70249+ while (*n != '\0' && *n != '/')
70250+ ++n;
70251+ if (*n == '/' && !glob_match(p, n + 1))
70252+ return 0;
70253+ } else {
70254+ for (--p; n < endp; ++n)
70255+ if (*n == c && !glob_match(p, n))
70256+ return 0;
70257+ }
70258+
70259+ return 1;
70260+ }
70261+ case '[':
70262+ {
70263+ int not;
70264+ char cold;
70265+
70266+ if (*n == '\0' || *n == '/')
70267+ return 1;
70268+
70269+ not = (*p == '!' || *p == '^');
70270+ if (not)
70271+ ++p;
70272+
70273+ c = *p++;
70274+ for (;;) {
70275+ unsigned char fn = (unsigned char)*n;
70276+
70277+ if (c == '\0')
70278+ return 1;
70279+ else {
70280+ if (c == fn)
70281+ goto matched;
70282+ cold = c;
70283+ c = *p++;
70284+
70285+ if (c == '-' && *p != ']') {
70286+ unsigned char cend = *p++;
70287+
70288+ if (cend == '\0')
70289+ return 1;
70290+
70291+ if (cold <= fn && fn <= cend)
70292+ goto matched;
70293+
70294+ c = *p++;
70295+ }
70296+ }
70297+
70298+ if (c == ']')
70299+ break;
70300+ }
70301+ if (!not)
70302+ return 1;
70303+ break;
70304+ matched:
70305+ while (c != ']') {
70306+ if (c == '\0')
70307+ return 1;
70308+
70309+ c = *p++;
70310+ }
70311+ if (not)
70312+ return 1;
70313+ }
70314+ break;
70315+ default:
70316+ if (c != *n)
70317+ return 1;
70318+ }
70319+
70320+ ++n;
70321+ }
70322+
70323+ if (*n == '\0')
70324+ return 0;
70325+
70326+ if (*n == '/')
70327+ return 0;
70328+
70329+ return 1;
70330+}
70331+
70332+static struct acl_object_label *
70333+chk_glob_label(struct acl_object_label *globbed,
70334+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
70335+{
70336+ struct acl_object_label *tmp;
70337+
70338+ if (*path == NULL)
70339+ *path = gr_to_filename_nolock(dentry, mnt);
70340+
70341+ tmp = globbed;
70342+
70343+ while (tmp) {
70344+ if (!glob_match(tmp->filename, *path))
70345+ return tmp;
70346+ tmp = tmp->next;
70347+ }
70348+
70349+ return NULL;
70350+}
70351+
70352+static struct acl_object_label *
70353+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70354+ const ino_t curr_ino, const dev_t curr_dev,
70355+ const struct acl_subject_label *subj, char **path, const int checkglob)
70356+{
70357+ struct acl_subject_label *tmpsubj;
70358+ struct acl_object_label *retval;
70359+ struct acl_object_label *retval2;
70360+
70361+ tmpsubj = (struct acl_subject_label *) subj;
70362+ read_lock(&gr_inode_lock);
70363+ do {
70364+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
70365+ if (retval) {
70366+ if (checkglob && retval->globbed) {
70367+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
70368+ if (retval2)
70369+ retval = retval2;
70370+ }
70371+ break;
70372+ }
70373+ } while ((tmpsubj = tmpsubj->parent_subject));
70374+ read_unlock(&gr_inode_lock);
70375+
70376+ return retval;
70377+}
70378+
70379+static __inline__ struct acl_object_label *
70380+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
70381+ struct dentry *curr_dentry,
70382+ const struct acl_subject_label *subj, char **path, const int checkglob)
70383+{
70384+ int newglob = checkglob;
70385+ ino_t inode;
70386+ dev_t device;
70387+
70388+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
70389+ as we don't want a / * rule to match instead of the / object
70390+ don't do this for create lookups that call this function though, since they're looking up
70391+ on the parent and thus need globbing checks on all paths
70392+ */
70393+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
70394+ newglob = GR_NO_GLOB;
70395+
70396+ spin_lock(&curr_dentry->d_lock);
70397+ inode = curr_dentry->d_inode->i_ino;
70398+ device = __get_dev(curr_dentry);
70399+ spin_unlock(&curr_dentry->d_lock);
70400+
70401+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
70402+}
70403+
70404+#ifdef CONFIG_HUGETLBFS
70405+static inline bool
70406+is_hugetlbfs_mnt(const struct vfsmount *mnt)
70407+{
70408+ int i;
70409+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
70410+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
70411+ return true;
70412+ }
70413+
70414+ return false;
70415+}
70416+#endif
70417+
70418+static struct acl_object_label *
70419+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70420+ const struct acl_subject_label *subj, char *path, const int checkglob)
70421+{
70422+ struct dentry *dentry = (struct dentry *) l_dentry;
70423+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70424+ struct mount *real_mnt = real_mount(mnt);
70425+ struct acl_object_label *retval;
70426+ struct dentry *parent;
70427+
70428+ read_seqlock_excl(&mount_lock);
70429+ write_seqlock(&rename_lock);
70430+
70431+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
70432+#ifdef CONFIG_NET
70433+ mnt == sock_mnt ||
70434+#endif
70435+#ifdef CONFIG_HUGETLBFS
70436+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
70437+#endif
70438+ /* ignore Eric Biederman */
70439+ IS_PRIVATE(l_dentry->d_inode))) {
70440+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
70441+ goto out;
70442+ }
70443+
70444+ for (;;) {
70445+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70446+ break;
70447+
70448+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70449+ if (!mnt_has_parent(real_mnt))
70450+ break;
70451+
70452+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70453+ if (retval != NULL)
70454+ goto out;
70455+
70456+ dentry = real_mnt->mnt_mountpoint;
70457+ real_mnt = real_mnt->mnt_parent;
70458+ mnt = &real_mnt->mnt;
70459+ continue;
70460+ }
70461+
70462+ parent = dentry->d_parent;
70463+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70464+ if (retval != NULL)
70465+ goto out;
70466+
70467+ dentry = parent;
70468+ }
70469+
70470+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
70471+
70472+ /* gr_real_root is pinned so we don't have to hold a reference */
70473+ if (retval == NULL)
70474+ retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
70475+out:
70476+ write_sequnlock(&rename_lock);
70477+ read_sequnlock_excl(&mount_lock);
70478+
70479+ BUG_ON(retval == NULL);
70480+
70481+ return retval;
70482+}
70483+
70484+static __inline__ struct acl_object_label *
70485+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70486+ const struct acl_subject_label *subj)
70487+{
70488+ char *path = NULL;
70489+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
70490+}
70491+
70492+static __inline__ struct acl_object_label *
70493+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70494+ const struct acl_subject_label *subj)
70495+{
70496+ char *path = NULL;
70497+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
70498+}
70499+
70500+static __inline__ struct acl_object_label *
70501+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70502+ const struct acl_subject_label *subj, char *path)
70503+{
70504+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
70505+}
70506+
70507+struct acl_subject_label *
70508+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
70509+ const struct acl_role_label *role)
70510+{
70511+ struct dentry *dentry = (struct dentry *) l_dentry;
70512+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
70513+ struct mount *real_mnt = real_mount(mnt);
70514+ struct acl_subject_label *retval;
70515+ struct dentry *parent;
70516+
70517+ read_seqlock_excl(&mount_lock);
70518+ write_seqlock(&rename_lock);
70519+
70520+ for (;;) {
70521+ if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
70522+ break;
70523+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
70524+ if (!mnt_has_parent(real_mnt))
70525+ break;
70526+
70527+ spin_lock(&dentry->d_lock);
70528+ read_lock(&gr_inode_lock);
70529+ retval =
70530+ lookup_acl_subj_label(dentry->d_inode->i_ino,
70531+ __get_dev(dentry), role);
70532+ read_unlock(&gr_inode_lock);
70533+ spin_unlock(&dentry->d_lock);
70534+ if (retval != NULL)
70535+ goto out;
70536+
70537+ dentry = real_mnt->mnt_mountpoint;
70538+ real_mnt = real_mnt->mnt_parent;
70539+ mnt = &real_mnt->mnt;
70540+ continue;
70541+ }
70542+
70543+ spin_lock(&dentry->d_lock);
70544+ read_lock(&gr_inode_lock);
70545+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
70546+ __get_dev(dentry), role);
70547+ read_unlock(&gr_inode_lock);
70548+ parent = dentry->d_parent;
70549+ spin_unlock(&dentry->d_lock);
70550+
70551+ if (retval != NULL)
70552+ goto out;
70553+
70554+ dentry = parent;
70555+ }
70556+
70557+ spin_lock(&dentry->d_lock);
70558+ read_lock(&gr_inode_lock);
70559+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
70560+ __get_dev(dentry), role);
70561+ read_unlock(&gr_inode_lock);
70562+ spin_unlock(&dentry->d_lock);
70563+
70564+ if (unlikely(retval == NULL)) {
70565+ /* gr_real_root is pinned, we don't need to hold a reference */
70566+ read_lock(&gr_inode_lock);
70567+ retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
70568+ __get_dev(gr_real_root.dentry), role);
70569+ read_unlock(&gr_inode_lock);
70570+ }
70571+out:
70572+ write_sequnlock(&rename_lock);
70573+ read_sequnlock_excl(&mount_lock);
70574+
70575+ BUG_ON(retval == NULL);
70576+
70577+ return retval;
70578+}
70579+
70580+void
70581+assign_special_role(const char *rolename)
70582+{
70583+ struct acl_object_label *obj;
70584+ struct acl_role_label *r;
70585+ struct acl_role_label *assigned = NULL;
70586+ struct task_struct *tsk;
70587+ struct file *filp;
70588+
70589+ FOR_EACH_ROLE_START(r)
70590+ if (!strcmp(rolename, r->rolename) &&
70591+ (r->roletype & GR_ROLE_SPECIAL)) {
70592+ assigned = r;
70593+ break;
70594+ }
70595+ FOR_EACH_ROLE_END(r)
70596+
70597+ if (!assigned)
70598+ return;
70599+
70600+ read_lock(&tasklist_lock);
70601+ read_lock(&grsec_exec_file_lock);
70602+
70603+ tsk = current->real_parent;
70604+ if (tsk == NULL)
70605+ goto out_unlock;
70606+
70607+ filp = tsk->exec_file;
70608+ if (filp == NULL)
70609+ goto out_unlock;
70610+
70611+ tsk->is_writable = 0;
70612+ tsk->inherited = 0;
70613+
70614+ tsk->acl_sp_role = 1;
70615+ tsk->acl_role_id = ++acl_sp_role_value;
70616+ tsk->role = assigned;
70617+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
70618+
70619+ /* ignore additional mmap checks for processes that are writable
70620+ by the default ACL */
70621+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
70622+ if (unlikely(obj->mode & GR_WRITE))
70623+ tsk->is_writable = 1;
70624+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
70625+ if (unlikely(obj->mode & GR_WRITE))
70626+ tsk->is_writable = 1;
70627+
70628+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70629+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
70630+ tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
70631+#endif
70632+
70633+out_unlock:
70634+ read_unlock(&grsec_exec_file_lock);
70635+ read_unlock(&tasklist_lock);
70636+ return;
70637+}
70638+
70639+
70640+static void
70641+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
70642+{
70643+ struct task_struct *task = current;
70644+ const struct cred *cred = current_cred();
70645+
70646+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
70647+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70648+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70649+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
70650+
70651+ return;
70652+}
70653+
70654+static void
70655+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
70656+{
70657+ struct task_struct *task = current;
70658+ const struct cred *cred = current_cred();
70659+
70660+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70661+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70662+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70663+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
70664+
70665+ return;
70666+}
70667+
70668+static void
70669+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
70670+{
70671+ struct task_struct *task = current;
70672+ const struct cred *cred = current_cred();
70673+
70674+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
70675+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
70676+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
70677+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
70678+
70679+ return;
70680+}
70681+
70682+static void
70683+gr_set_proc_res(struct task_struct *task)
70684+{
70685+ struct acl_subject_label *proc;
70686+ unsigned short i;
70687+
70688+ proc = task->acl;
70689+
70690+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
70691+ return;
70692+
70693+ for (i = 0; i < RLIM_NLIMITS; i++) {
70694+ if (!(proc->resmask & (1U << i)))
70695+ continue;
70696+
70697+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
70698+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
70699+
70700+ if (i == RLIMIT_CPU)
70701+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
70702+ }
70703+
70704+ return;
70705+}
70706+
70707+/* both of the below must be called with
70708+ rcu_read_lock();
70709+ read_lock(&tasklist_lock);
70710+ read_lock(&grsec_exec_file_lock);
70711+*/
70712+
70713+struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
70714+{
70715+ char *tmpname;
70716+ struct acl_subject_label *tmpsubj;
70717+ struct file *filp;
70718+ struct name_entry *nmatch;
70719+
70720+ filp = task->exec_file;
70721+ if (filp == NULL)
70722+ return NULL;
70723+
70724+ /* the following is to apply the correct subject
70725+ on binaries running when the RBAC system
70726+ is enabled, when the binaries have been
70727+ replaced or deleted since their execution
70728+ -----
70729+ when the RBAC system starts, the inode/dev
70730+ from exec_file will be one the RBAC system
70731+ is unaware of. It only knows the inode/dev
70732+ of the present file on disk, or the absence
70733+ of it.
70734+ */
70735+
70736+ if (filename)
70737+ nmatch = __lookup_name_entry(state, filename);
70738+ else {
70739+ preempt_disable();
70740+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
70741+
70742+ nmatch = __lookup_name_entry(state, tmpname);
70743+ preempt_enable();
70744+ }
70745+ tmpsubj = NULL;
70746+ if (nmatch) {
70747+ if (nmatch->deleted)
70748+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
70749+ else
70750+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
70751+ }
70752+ /* this also works for the reload case -- if we don't match a potentially inherited subject
70753+ then we fall back to a normal lookup based on the binary's ino/dev
70754+ */
70755+ if (tmpsubj == NULL)
70756+ tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
70757+
70758+ return tmpsubj;
70759+}
70760+
70761+static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
70762+{
70763+ return __gr_get_subject_for_task(&running_polstate, task, filename);
70764+}
70765+
70766+void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
70767+{
70768+ struct acl_object_label *obj;
70769+ struct file *filp;
70770+
70771+ filp = task->exec_file;
70772+
70773+ task->acl = subj;
70774+ task->is_writable = 0;
70775+ /* ignore additional mmap checks for processes that are writable
70776+ by the default ACL */
70777+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
70778+ if (unlikely(obj->mode & GR_WRITE))
70779+ task->is_writable = 1;
70780+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
70781+ if (unlikely(obj->mode & GR_WRITE))
70782+ task->is_writable = 1;
70783+
70784+ gr_set_proc_res(task);
70785+
70786+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
70787+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
70788+#endif
70789+}
70790+
70791+static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
70792+{
70793+ __gr_apply_subject_to_task(&running_polstate, task, subj);
70794+}
70795+
70796+__u32
70797+gr_search_file(const struct dentry * dentry, const __u32 mode,
70798+ const struct vfsmount * mnt)
70799+{
70800+ __u32 retval = mode;
70801+ struct acl_subject_label *curracl;
70802+ struct acl_object_label *currobj;
70803+
70804+ if (unlikely(!(gr_status & GR_READY)))
70805+ return (mode & ~GR_AUDITS);
70806+
70807+ curracl = current->acl;
70808+
70809+ currobj = chk_obj_label(dentry, mnt, curracl);
70810+ retval = currobj->mode & mode;
70811+
70812+ /* if we're opening a specified transfer file for writing
70813+ (e.g. /dev/initctl), then transfer our role to init
70814+ */
70815+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
70816+ current->role->roletype & GR_ROLE_PERSIST)) {
70817+ struct task_struct *task = init_pid_ns.child_reaper;
70818+
70819+ if (task->role != current->role) {
70820+ struct acl_subject_label *subj;
70821+
70822+ task->acl_sp_role = 0;
70823+ task->acl_role_id = current->acl_role_id;
70824+ task->role = current->role;
70825+ rcu_read_lock();
70826+ read_lock(&grsec_exec_file_lock);
70827+ subj = gr_get_subject_for_task(task, NULL);
70828+ gr_apply_subject_to_task(task, subj);
70829+ read_unlock(&grsec_exec_file_lock);
70830+ rcu_read_unlock();
70831+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
70832+ }
70833+ }
70834+
70835+ if (unlikely
70836+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
70837+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
70838+ __u32 new_mode = mode;
70839+
70840+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70841+
70842+ retval = new_mode;
70843+
70844+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
70845+ new_mode |= GR_INHERIT;
70846+
70847+ if (!(mode & GR_NOLEARN))
70848+ gr_log_learn(dentry, mnt, new_mode);
70849+ }
70850+
70851+ return retval;
70852+}
70853+
70854+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
70855+ const struct dentry *parent,
70856+ const struct vfsmount *mnt)
70857+{
70858+ struct name_entry *match;
70859+ struct acl_object_label *matchpo;
70860+ struct acl_subject_label *curracl;
70861+ char *path;
70862+
70863+ if (unlikely(!(gr_status & GR_READY)))
70864+ return NULL;
70865+
70866+ preempt_disable();
70867+ path = gr_to_filename_rbac(new_dentry, mnt);
70868+ match = lookup_name_entry_create(path);
70869+
70870+ curracl = current->acl;
70871+
70872+ if (match) {
70873+ read_lock(&gr_inode_lock);
70874+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
70875+ read_unlock(&gr_inode_lock);
70876+
70877+ if (matchpo) {
70878+ preempt_enable();
70879+ return matchpo;
70880+ }
70881+ }
70882+
70883+ // lookup parent
70884+
70885+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
70886+
70887+ preempt_enable();
70888+ return matchpo;
70889+}
70890+
70891+__u32
70892+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
70893+ const struct vfsmount * mnt, const __u32 mode)
70894+{
70895+ struct acl_object_label *matchpo;
70896+ __u32 retval;
70897+
70898+ if (unlikely(!(gr_status & GR_READY)))
70899+ return (mode & ~GR_AUDITS);
70900+
70901+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
70902+
70903+ retval = matchpo->mode & mode;
70904+
70905+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
70906+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
70907+ __u32 new_mode = mode;
70908+
70909+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
70910+
70911+ gr_log_learn(new_dentry, mnt, new_mode);
70912+ return new_mode;
70913+ }
70914+
70915+ return retval;
70916+}
70917+
70918+__u32
70919+gr_check_link(const struct dentry * new_dentry,
70920+ const struct dentry * parent_dentry,
70921+ const struct vfsmount * parent_mnt,
70922+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
70923+{
70924+ struct acl_object_label *obj;
70925+ __u32 oldmode, newmode;
70926+ __u32 needmode;
70927+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
70928+ GR_DELETE | GR_INHERIT;
70929+
70930+ if (unlikely(!(gr_status & GR_READY)))
70931+ return (GR_CREATE | GR_LINK);
70932+
70933+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
70934+ oldmode = obj->mode;
70935+
70936+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
70937+ newmode = obj->mode;
70938+
70939+ needmode = newmode & checkmodes;
70940+
70941+ // old name for hardlink must have at least the permissions of the new name
70942+ if ((oldmode & needmode) != needmode)
70943+ goto bad;
70944+
70945+ // if old name had restrictions/auditing, make sure the new name does as well
70946+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
70947+
70948+ // don't allow hardlinking of suid/sgid/fcapped files without permission
70949+ if (is_privileged_binary(old_dentry))
70950+ needmode |= GR_SETID;
70951+
70952+ if ((newmode & needmode) != needmode)
70953+ goto bad;
70954+
70955+ // enforce minimum permissions
70956+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
70957+ return newmode;
70958+bad:
70959+ needmode = oldmode;
70960+ if (is_privileged_binary(old_dentry))
70961+ needmode |= GR_SETID;
70962+
70963+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
70964+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
70965+ return (GR_CREATE | GR_LINK);
70966+ } else if (newmode & GR_SUPPRESS)
70967+ return GR_SUPPRESS;
70968+ else
70969+ return 0;
70970+}
70971+
70972+int
70973+gr_check_hidden_task(const struct task_struct *task)
70974+{
70975+ if (unlikely(!(gr_status & GR_READY)))
70976+ return 0;
70977+
70978+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
70979+ return 1;
70980+
70981+ return 0;
70982+}
70983+
70984+int
70985+gr_check_protected_task(const struct task_struct *task)
70986+{
70987+ if (unlikely(!(gr_status & GR_READY) || !task))
70988+ return 0;
70989+
70990+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
70991+ task->acl != current->acl)
70992+ return 1;
70993+
70994+ return 0;
70995+}
70996+
70997+int
70998+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70999+{
71000+ struct task_struct *p;
71001+ int ret = 0;
71002+
71003+ if (unlikely(!(gr_status & GR_READY) || !pid))
71004+ return ret;
71005+
71006+ read_lock(&tasklist_lock);
71007+ do_each_pid_task(pid, type, p) {
71008+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
71009+ p->acl != current->acl) {
71010+ ret = 1;
71011+ goto out;
71012+ }
71013+ } while_each_pid_task(pid, type, p);
71014+out:
71015+ read_unlock(&tasklist_lock);
71016+
71017+ return ret;
71018+}
71019+
71020+void
71021+gr_copy_label(struct task_struct *tsk)
71022+{
71023+ struct task_struct *p = current;
71024+
71025+ tsk->inherited = p->inherited;
71026+ tsk->acl_sp_role = 0;
71027+ tsk->acl_role_id = p->acl_role_id;
71028+ tsk->acl = p->acl;
71029+ tsk->role = p->role;
71030+ tsk->signal->used_accept = 0;
71031+ tsk->signal->curr_ip = p->signal->curr_ip;
71032+ tsk->signal->saved_ip = p->signal->saved_ip;
71033+ if (p->exec_file)
71034+ get_file(p->exec_file);
71035+ tsk->exec_file = p->exec_file;
71036+ tsk->is_writable = p->is_writable;
71037+ if (unlikely(p->signal->used_accept)) {
71038+ p->signal->curr_ip = 0;
71039+ p->signal->saved_ip = 0;
71040+ }
71041+
71042+ return;
71043+}
71044+
71045+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
71046+
71047+int
71048+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
71049+{
71050+ unsigned int i;
71051+ __u16 num;
71052+ uid_t *uidlist;
71053+ uid_t curuid;
71054+ int realok = 0;
71055+ int effectiveok = 0;
71056+ int fsok = 0;
71057+ uid_t globalreal, globaleffective, globalfs;
71058+
71059+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
71060+ struct user_struct *user;
71061+
71062+ if (!uid_valid(real))
71063+ goto skipit;
71064+
71065+ /* find user based on global namespace */
71066+
71067+ globalreal = GR_GLOBAL_UID(real);
71068+
71069+ user = find_user(make_kuid(&init_user_ns, globalreal));
71070+ if (user == NULL)
71071+ goto skipit;
71072+
71073+ if (gr_process_kernel_setuid_ban(user)) {
71074+ /* for find_user */
71075+ free_uid(user);
71076+ return 1;
71077+ }
71078+
71079+ /* for find_user */
71080+ free_uid(user);
71081+
71082+skipit:
71083+#endif
71084+
71085+ if (unlikely(!(gr_status & GR_READY)))
71086+ return 0;
71087+
71088+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71089+ gr_log_learn_uid_change(real, effective, fs);
71090+
71091+ num = current->acl->user_trans_num;
71092+ uidlist = current->acl->user_transitions;
71093+
71094+ if (uidlist == NULL)
71095+ return 0;
71096+
71097+ if (!uid_valid(real)) {
71098+ realok = 1;
71099+ globalreal = (uid_t)-1;
71100+ } else {
71101+ globalreal = GR_GLOBAL_UID(real);
71102+ }
71103+ if (!uid_valid(effective)) {
71104+ effectiveok = 1;
71105+ globaleffective = (uid_t)-1;
71106+ } else {
71107+ globaleffective = GR_GLOBAL_UID(effective);
71108+ }
71109+ if (!uid_valid(fs)) {
71110+ fsok = 1;
71111+ globalfs = (uid_t)-1;
71112+ } else {
71113+ globalfs = GR_GLOBAL_UID(fs);
71114+ }
71115+
71116+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
71117+ for (i = 0; i < num; i++) {
71118+ curuid = uidlist[i];
71119+ if (globalreal == curuid)
71120+ realok = 1;
71121+ if (globaleffective == curuid)
71122+ effectiveok = 1;
71123+ if (globalfs == curuid)
71124+ fsok = 1;
71125+ }
71126+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
71127+ for (i = 0; i < num; i++) {
71128+ curuid = uidlist[i];
71129+ if (globalreal == curuid)
71130+ break;
71131+ if (globaleffective == curuid)
71132+ break;
71133+ if (globalfs == curuid)
71134+ break;
71135+ }
71136+ /* not in deny list */
71137+ if (i == num) {
71138+ realok = 1;
71139+ effectiveok = 1;
71140+ fsok = 1;
71141+ }
71142+ }
71143+
71144+ if (realok && effectiveok && fsok)
71145+ return 0;
71146+ else {
71147+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71148+ return 1;
71149+ }
71150+}
71151+
71152+int
71153+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
71154+{
71155+ unsigned int i;
71156+ __u16 num;
71157+ gid_t *gidlist;
71158+ gid_t curgid;
71159+ int realok = 0;
71160+ int effectiveok = 0;
71161+ int fsok = 0;
71162+ gid_t globalreal, globaleffective, globalfs;
71163+
71164+ if (unlikely(!(gr_status & GR_READY)))
71165+ return 0;
71166+
71167+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
71168+ gr_log_learn_gid_change(real, effective, fs);
71169+
71170+ num = current->acl->group_trans_num;
71171+ gidlist = current->acl->group_transitions;
71172+
71173+ if (gidlist == NULL)
71174+ return 0;
71175+
71176+ if (!gid_valid(real)) {
71177+ realok = 1;
71178+ globalreal = (gid_t)-1;
71179+ } else {
71180+ globalreal = GR_GLOBAL_GID(real);
71181+ }
71182+ if (!gid_valid(effective)) {
71183+ effectiveok = 1;
71184+ globaleffective = (gid_t)-1;
71185+ } else {
71186+ globaleffective = GR_GLOBAL_GID(effective);
71187+ }
71188+ if (!gid_valid(fs)) {
71189+ fsok = 1;
71190+ globalfs = (gid_t)-1;
71191+ } else {
71192+ globalfs = GR_GLOBAL_GID(fs);
71193+ }
71194+
71195+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
71196+ for (i = 0; i < num; i++) {
71197+ curgid = gidlist[i];
71198+ if (globalreal == curgid)
71199+ realok = 1;
71200+ if (globaleffective == curgid)
71201+ effectiveok = 1;
71202+ if (globalfs == curgid)
71203+ fsok = 1;
71204+ }
71205+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
71206+ for (i = 0; i < num; i++) {
71207+ curgid = gidlist[i];
71208+ if (globalreal == curgid)
71209+ break;
71210+ if (globaleffective == curgid)
71211+ break;
71212+ if (globalfs == curgid)
71213+ break;
71214+ }
71215+ /* not in deny list */
71216+ if (i == num) {
71217+ realok = 1;
71218+ effectiveok = 1;
71219+ fsok = 1;
71220+ }
71221+ }
71222+
71223+ if (realok && effectiveok && fsok)
71224+ return 0;
71225+ else {
71226+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
71227+ return 1;
71228+ }
71229+}
71230+
71231+extern int gr_acl_is_capable(const int cap);
71232+
71233+void
71234+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
71235+{
71236+ struct acl_role_label *role = task->role;
71237+ struct acl_subject_label *subj = NULL;
71238+ struct acl_object_label *obj;
71239+ struct file *filp;
71240+ uid_t uid;
71241+ gid_t gid;
71242+
71243+ if (unlikely(!(gr_status & GR_READY)))
71244+ return;
71245+
71246+ uid = GR_GLOBAL_UID(kuid);
71247+ gid = GR_GLOBAL_GID(kgid);
71248+
71249+ filp = task->exec_file;
71250+
71251+ /* kernel process, we'll give them the kernel role */
71252+ if (unlikely(!filp)) {
71253+ task->role = running_polstate.kernel_role;
71254+ task->acl = running_polstate.kernel_role->root_label;
71255+ return;
71256+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
71257+ /* save the current ip at time of role lookup so that the proper
71258+ IP will be learned for role_allowed_ip */
71259+ task->signal->saved_ip = task->signal->curr_ip;
71260+ role = lookup_acl_role_label(task, uid, gid);
71261+ }
71262+
71263+ /* don't change the role if we're not a privileged process */
71264+ if (role && task->role != role &&
71265+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
71266+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
71267+ return;
71268+
71269+ /* perform subject lookup in possibly new role
71270+ we can use this result below in the case where role == task->role
71271+ */
71272+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
71273+
71274+ /* if we changed uid/gid, but result in the same role
71275+ and are using inheritance, don't lose the inherited subject
71276+ if current subject is other than what normal lookup
71277+ would result in, we arrived via inheritance, don't
71278+ lose subject
71279+ */
71280+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
71281+ (subj == task->acl)))
71282+ task->acl = subj;
71283+
71284+ /* leave task->inherited unaffected */
71285+
71286+ task->role = role;
71287+
71288+ task->is_writable = 0;
71289+
71290+ /* ignore additional mmap checks for processes that are writable
71291+ by the default ACL */
71292+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71293+ if (unlikely(obj->mode & GR_WRITE))
71294+ task->is_writable = 1;
71295+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
71296+ if (unlikely(obj->mode & GR_WRITE))
71297+ task->is_writable = 1;
71298+
71299+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71300+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71301+#endif
71302+
71303+ gr_set_proc_res(task);
71304+
71305+ return;
71306+}
71307+
71308+int
71309+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
71310+ const int unsafe_flags)
71311+{
71312+ struct task_struct *task = current;
71313+ struct acl_subject_label *newacl;
71314+ struct acl_object_label *obj;
71315+ __u32 retmode;
71316+
71317+ if (unlikely(!(gr_status & GR_READY)))
71318+ return 0;
71319+
71320+ newacl = chk_subj_label(dentry, mnt, task->role);
71321+
71322+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
71323+ did an exec
71324+ */
71325+ rcu_read_lock();
71326+ read_lock(&tasklist_lock);
71327+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
71328+ (task->parent->acl->mode & GR_POVERRIDE))) {
71329+ read_unlock(&tasklist_lock);
71330+ rcu_read_unlock();
71331+ goto skip_check;
71332+ }
71333+ read_unlock(&tasklist_lock);
71334+ rcu_read_unlock();
71335+
71336+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
71337+ !(task->role->roletype & GR_ROLE_GOD) &&
71338+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
71339+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
71340+ if (unsafe_flags & LSM_UNSAFE_SHARE)
71341+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
71342+ else
71343+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
71344+ return -EACCES;
71345+ }
71346+
71347+skip_check:
71348+
71349+ obj = chk_obj_label(dentry, mnt, task->acl);
71350+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
71351+
71352+ if (!(task->acl->mode & GR_INHERITLEARN) &&
71353+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
71354+ if (obj->nested)
71355+ task->acl = obj->nested;
71356+ else
71357+ task->acl = newacl;
71358+ task->inherited = 0;
71359+ } else {
71360+ task->inherited = 1;
71361+ if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
71362+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
71363+ }
71364+
71365+ task->is_writable = 0;
71366+
71367+ /* ignore additional mmap checks for processes that are writable
71368+ by the default ACL */
71369+ obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
71370+ if (unlikely(obj->mode & GR_WRITE))
71371+ task->is_writable = 1;
71372+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
71373+ if (unlikely(obj->mode & GR_WRITE))
71374+ task->is_writable = 1;
71375+
71376+ gr_set_proc_res(task);
71377+
71378+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
71379+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
71380+#endif
71381+ return 0;
71382+}
71383+
71384+/* always called with valid inodev ptr */
71385+static void
71386+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
71387+{
71388+ struct acl_object_label *matchpo;
71389+ struct acl_subject_label *matchps;
71390+ struct acl_subject_label *subj;
71391+ struct acl_role_label *role;
71392+ unsigned int x;
71393+
71394+ FOR_EACH_ROLE_START(role)
71395+ FOR_EACH_SUBJECT_START(role, subj, x)
71396+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71397+ matchpo->mode |= GR_DELETED;
71398+ FOR_EACH_SUBJECT_END(subj,x)
71399+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71400+ /* nested subjects aren't in the role's subj_hash table */
71401+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
71402+ matchpo->mode |= GR_DELETED;
71403+ FOR_EACH_NESTED_SUBJECT_END(subj)
71404+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
71405+ matchps->mode |= GR_DELETED;
71406+ FOR_EACH_ROLE_END(role)
71407+
71408+ inodev->nentry->deleted = 1;
71409+
71410+ return;
71411+}
71412+
71413+void
71414+gr_handle_delete(const ino_t ino, const dev_t dev)
71415+{
71416+ struct inodev_entry *inodev;
71417+
71418+ if (unlikely(!(gr_status & GR_READY)))
71419+ return;
71420+
71421+ write_lock(&gr_inode_lock);
71422+ inodev = lookup_inodev_entry(ino, dev);
71423+ if (inodev != NULL)
71424+ do_handle_delete(inodev, ino, dev);
71425+ write_unlock(&gr_inode_lock);
71426+
71427+ return;
71428+}
71429+
71430+static void
71431+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
71432+ const ino_t newinode, const dev_t newdevice,
71433+ struct acl_subject_label *subj)
71434+{
71435+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
71436+ struct acl_object_label *match;
71437+
71438+ match = subj->obj_hash[index];
71439+
71440+ while (match && (match->inode != oldinode ||
71441+ match->device != olddevice ||
71442+ !(match->mode & GR_DELETED)))
71443+ match = match->next;
71444+
71445+ if (match && (match->inode == oldinode)
71446+ && (match->device == olddevice)
71447+ && (match->mode & GR_DELETED)) {
71448+ if (match->prev == NULL) {
71449+ subj->obj_hash[index] = match->next;
71450+ if (match->next != NULL)
71451+ match->next->prev = NULL;
71452+ } else {
71453+ match->prev->next = match->next;
71454+ if (match->next != NULL)
71455+ match->next->prev = match->prev;
71456+ }
71457+ match->prev = NULL;
71458+ match->next = NULL;
71459+ match->inode = newinode;
71460+ match->device = newdevice;
71461+ match->mode &= ~GR_DELETED;
71462+
71463+ insert_acl_obj_label(match, subj);
71464+ }
71465+
71466+ return;
71467+}
71468+
71469+static void
71470+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
71471+ const ino_t newinode, const dev_t newdevice,
71472+ struct acl_role_label *role)
71473+{
71474+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
71475+ struct acl_subject_label *match;
71476+
71477+ match = role->subj_hash[index];
71478+
71479+ while (match && (match->inode != oldinode ||
71480+ match->device != olddevice ||
71481+ !(match->mode & GR_DELETED)))
71482+ match = match->next;
71483+
71484+ if (match && (match->inode == oldinode)
71485+ && (match->device == olddevice)
71486+ && (match->mode & GR_DELETED)) {
71487+ if (match->prev == NULL) {
71488+ role->subj_hash[index] = match->next;
71489+ if (match->next != NULL)
71490+ match->next->prev = NULL;
71491+ } else {
71492+ match->prev->next = match->next;
71493+ if (match->next != NULL)
71494+ match->next->prev = match->prev;
71495+ }
71496+ match->prev = NULL;
71497+ match->next = NULL;
71498+ match->inode = newinode;
71499+ match->device = newdevice;
71500+ match->mode &= ~GR_DELETED;
71501+
71502+ insert_acl_subj_label(match, role);
71503+ }
71504+
71505+ return;
71506+}
71507+
71508+static void
71509+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
71510+ const ino_t newinode, const dev_t newdevice)
71511+{
71512+ unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
71513+ struct inodev_entry *match;
71514+
71515+ match = running_polstate.inodev_set.i_hash[index];
71516+
71517+ while (match && (match->nentry->inode != oldinode ||
71518+ match->nentry->device != olddevice || !match->nentry->deleted))
71519+ match = match->next;
71520+
71521+ if (match && (match->nentry->inode == oldinode)
71522+ && (match->nentry->device == olddevice) &&
71523+ match->nentry->deleted) {
71524+ if (match->prev == NULL) {
71525+ running_polstate.inodev_set.i_hash[index] = match->next;
71526+ if (match->next != NULL)
71527+ match->next->prev = NULL;
71528+ } else {
71529+ match->prev->next = match->next;
71530+ if (match->next != NULL)
71531+ match->next->prev = match->prev;
71532+ }
71533+ match->prev = NULL;
71534+ match->next = NULL;
71535+ match->nentry->inode = newinode;
71536+ match->nentry->device = newdevice;
71537+ match->nentry->deleted = 0;
71538+
71539+ insert_inodev_entry(match);
71540+ }
71541+
71542+ return;
71543+}
71544+
71545+static void
71546+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
71547+{
71548+ struct acl_subject_label *subj;
71549+ struct acl_role_label *role;
71550+ unsigned int x;
71551+
71552+ FOR_EACH_ROLE_START(role)
71553+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
71554+
71555+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
71556+ if ((subj->inode == ino) && (subj->device == dev)) {
71557+ subj->inode = ino;
71558+ subj->device = dev;
71559+ }
71560+ /* nested subjects aren't in the role's subj_hash table */
71561+ update_acl_obj_label(matchn->inode, matchn->device,
71562+ ino, dev, subj);
71563+ FOR_EACH_NESTED_SUBJECT_END(subj)
71564+ FOR_EACH_SUBJECT_START(role, subj, x)
71565+ update_acl_obj_label(matchn->inode, matchn->device,
71566+ ino, dev, subj);
71567+ FOR_EACH_SUBJECT_END(subj,x)
71568+ FOR_EACH_ROLE_END(role)
71569+
71570+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
71571+
71572+ return;
71573+}
71574+
71575+static void
71576+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
71577+ const struct vfsmount *mnt)
71578+{
71579+ ino_t ino = dentry->d_inode->i_ino;
71580+ dev_t dev = __get_dev(dentry);
71581+
71582+ __do_handle_create(matchn, ino, dev);
71583+
71584+ return;
71585+}
71586+
71587+void
71588+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
71589+{
71590+ struct name_entry *matchn;
71591+
71592+ if (unlikely(!(gr_status & GR_READY)))
71593+ return;
71594+
71595+ preempt_disable();
71596+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
71597+
71598+ if (unlikely((unsigned long)matchn)) {
71599+ write_lock(&gr_inode_lock);
71600+ do_handle_create(matchn, dentry, mnt);
71601+ write_unlock(&gr_inode_lock);
71602+ }
71603+ preempt_enable();
71604+
71605+ return;
71606+}
71607+
71608+void
71609+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
71610+{
71611+ struct name_entry *matchn;
71612+
71613+ if (unlikely(!(gr_status & GR_READY)))
71614+ return;
71615+
71616+ preempt_disable();
71617+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
71618+
71619+ if (unlikely((unsigned long)matchn)) {
71620+ write_lock(&gr_inode_lock);
71621+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
71622+ write_unlock(&gr_inode_lock);
71623+ }
71624+ preempt_enable();
71625+
71626+ return;
71627+}
71628+
71629+void
71630+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
71631+ struct dentry *old_dentry,
71632+ struct dentry *new_dentry,
71633+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
71634+{
71635+ struct name_entry *matchn;
71636+ struct name_entry *matchn2 = NULL;
71637+ struct inodev_entry *inodev;
71638+ struct inode *inode = new_dentry->d_inode;
71639+ ino_t old_ino = old_dentry->d_inode->i_ino;
71640+ dev_t old_dev = __get_dev(old_dentry);
71641+ unsigned int exchange = flags & RENAME_EXCHANGE;
71642+
71643+ /* vfs_rename swaps the name and parent link for old_dentry and
71644+ new_dentry
71645+ at this point, old_dentry has the new name, parent link, and inode
71646+ for the renamed file
71647+ if a file is being replaced by a rename, new_dentry has the inode
71648+ and name for the replaced file
71649+ */
71650+
71651+ if (unlikely(!(gr_status & GR_READY)))
71652+ return;
71653+
71654+ preempt_disable();
71655+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
71656+
71657+ /* exchange cases:
71658+ a filename exists for the source, but not dest
71659+ do a recreate on source
71660+ a filename exists for the dest, but not source
71661+ do a recreate on dest
71662+ a filename exists for both source and dest
71663+ delete source and dest, then create source and dest
71664+ a filename exists for neither source nor dest
71665+ no updates needed
71666+
71667+ the name entry lookups get us the old inode/dev associated with
71668+ each name, so do the deletes first (if possible) so that when
71669+ we do the create, we pick up on the right entries
71670+ */
71671+
71672+ if (exchange)
71673+ matchn2 = lookup_name_entry(gr_to_filename_rbac(new_dentry, mnt));
71674+
71675+ /* we wouldn't have to check d_inode if it weren't for
71676+ NFS silly-renaming
71677+ */
71678+
71679+ write_lock(&gr_inode_lock);
71680+ if (unlikely((replace || exchange) && inode)) {
71681+ ino_t new_ino = inode->i_ino;
71682+ dev_t new_dev = __get_dev(new_dentry);
71683+
71684+ inodev = lookup_inodev_entry(new_ino, new_dev);
71685+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
71686+ do_handle_delete(inodev, new_ino, new_dev);
71687+ }
71688+
71689+ inodev = lookup_inodev_entry(old_ino, old_dev);
71690+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
71691+ do_handle_delete(inodev, old_ino, old_dev);
71692+
71693+ if (unlikely(matchn != NULL))
71694+ do_handle_create(matchn, old_dentry, mnt);
71695+
71696+ if (unlikely(matchn2 != NULL))
71697+ do_handle_create(matchn2, new_dentry, mnt);
71698+
71699+ write_unlock(&gr_inode_lock);
71700+ preempt_enable();
71701+
71702+ return;
71703+}
71704+
71705+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
71706+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
71707+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
71708+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
71709+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
71710+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
71711+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
71712+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
71713+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
71714+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
71715+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
71716+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
71717+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
71718+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
71719+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
71720+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
71721+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
71722+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
71723+};
71724+
71725+void
71726+gr_learn_resource(const struct task_struct *task,
71727+ const int res, const unsigned long wanted, const int gt)
71728+{
71729+ struct acl_subject_label *acl;
71730+ const struct cred *cred;
71731+
71732+ if (unlikely((gr_status & GR_READY) &&
71733+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
71734+ goto skip_reslog;
71735+
71736+ gr_log_resource(task, res, wanted, gt);
71737+skip_reslog:
71738+
71739+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
71740+ return;
71741+
71742+ acl = task->acl;
71743+
71744+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
71745+ !(acl->resmask & (1U << (unsigned short) res))))
71746+ return;
71747+
71748+ if (wanted >= acl->res[res].rlim_cur) {
71749+ unsigned long res_add;
71750+
71751+ res_add = wanted + res_learn_bumps[res];
71752+
71753+ acl->res[res].rlim_cur = res_add;
71754+
71755+ if (wanted > acl->res[res].rlim_max)
71756+ acl->res[res].rlim_max = res_add;
71757+
71758+ /* only log the subject filename, since resource logging is supported for
71759+ single-subject learning only */
71760+ rcu_read_lock();
71761+ cred = __task_cred(task);
71762+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
71763+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
71764+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
71765+ "", (unsigned long) res, &task->signal->saved_ip);
71766+ rcu_read_unlock();
71767+ }
71768+
71769+ return;
71770+}
71771+EXPORT_SYMBOL_GPL(gr_learn_resource);
71772+#endif
71773+
71774+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
71775+void
71776+pax_set_initial_flags(struct linux_binprm *bprm)
71777+{
71778+ struct task_struct *task = current;
71779+ struct acl_subject_label *proc;
71780+ unsigned long flags;
71781+
71782+ if (unlikely(!(gr_status & GR_READY)))
71783+ return;
71784+
71785+ flags = pax_get_flags(task);
71786+
71787+ proc = task->acl;
71788+
71789+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
71790+ flags &= ~MF_PAX_PAGEEXEC;
71791+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
71792+ flags &= ~MF_PAX_SEGMEXEC;
71793+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
71794+ flags &= ~MF_PAX_RANDMMAP;
71795+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
71796+ flags &= ~MF_PAX_EMUTRAMP;
71797+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
71798+ flags &= ~MF_PAX_MPROTECT;
71799+
71800+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
71801+ flags |= MF_PAX_PAGEEXEC;
71802+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
71803+ flags |= MF_PAX_SEGMEXEC;
71804+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
71805+ flags |= MF_PAX_RANDMMAP;
71806+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
71807+ flags |= MF_PAX_EMUTRAMP;
71808+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
71809+ flags |= MF_PAX_MPROTECT;
71810+
71811+ pax_set_flags(task, flags);
71812+
71813+ return;
71814+}
71815+#endif
71816+
71817+int
71818+gr_handle_proc_ptrace(struct task_struct *task)
71819+{
71820+ struct file *filp;
71821+ struct task_struct *tmp = task;
71822+ struct task_struct *curtemp = current;
71823+ __u32 retmode;
71824+
71825+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71826+ if (unlikely(!(gr_status & GR_READY)))
71827+ return 0;
71828+#endif
71829+
71830+ read_lock(&tasklist_lock);
71831+ read_lock(&grsec_exec_file_lock);
71832+ filp = task->exec_file;
71833+
71834+ while (task_pid_nr(tmp) > 0) {
71835+ if (tmp == curtemp)
71836+ break;
71837+ tmp = tmp->real_parent;
71838+ }
71839+
71840+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71841+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
71842+ read_unlock(&grsec_exec_file_lock);
71843+ read_unlock(&tasklist_lock);
71844+ return 1;
71845+ }
71846+
71847+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71848+ if (!(gr_status & GR_READY)) {
71849+ read_unlock(&grsec_exec_file_lock);
71850+ read_unlock(&tasklist_lock);
71851+ return 0;
71852+ }
71853+#endif
71854+
71855+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
71856+ read_unlock(&grsec_exec_file_lock);
71857+ read_unlock(&tasklist_lock);
71858+
71859+ if (retmode & GR_NOPTRACE)
71860+ return 1;
71861+
71862+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
71863+ && (current->acl != task->acl || (current->acl != current->role->root_label
71864+ && task_pid_nr(current) != task_pid_nr(task))))
71865+ return 1;
71866+
71867+ return 0;
71868+}
71869+
71870+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
71871+{
71872+ if (unlikely(!(gr_status & GR_READY)))
71873+ return;
71874+
71875+ if (!(current->role->roletype & GR_ROLE_GOD))
71876+ return;
71877+
71878+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
71879+ p->role->rolename, gr_task_roletype_to_char(p),
71880+ p->acl->filename);
71881+}
71882+
71883+int
71884+gr_handle_ptrace(struct task_struct *task, const long request)
71885+{
71886+ struct task_struct *tmp = task;
71887+ struct task_struct *curtemp = current;
71888+ __u32 retmode;
71889+
71890+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
71891+ if (unlikely(!(gr_status & GR_READY)))
71892+ return 0;
71893+#endif
71894+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
71895+ read_lock(&tasklist_lock);
71896+ while (task_pid_nr(tmp) > 0) {
71897+ if (tmp == curtemp)
71898+ break;
71899+ tmp = tmp->real_parent;
71900+ }
71901+
71902+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
71903+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
71904+ read_unlock(&tasklist_lock);
71905+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71906+ return 1;
71907+ }
71908+ read_unlock(&tasklist_lock);
71909+ }
71910+
71911+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71912+ if (!(gr_status & GR_READY))
71913+ return 0;
71914+#endif
71915+
71916+ read_lock(&grsec_exec_file_lock);
71917+ if (unlikely(!task->exec_file)) {
71918+ read_unlock(&grsec_exec_file_lock);
71919+ return 0;
71920+ }
71921+
71922+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
71923+ read_unlock(&grsec_exec_file_lock);
71924+
71925+ if (retmode & GR_NOPTRACE) {
71926+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71927+ return 1;
71928+ }
71929+
71930+ if (retmode & GR_PTRACERD) {
71931+ switch (request) {
71932+ case PTRACE_SEIZE:
71933+ case PTRACE_POKETEXT:
71934+ case PTRACE_POKEDATA:
71935+ case PTRACE_POKEUSR:
71936+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
71937+ case PTRACE_SETREGS:
71938+ case PTRACE_SETFPREGS:
71939+#endif
71940+#ifdef CONFIG_X86
71941+ case PTRACE_SETFPXREGS:
71942+#endif
71943+#ifdef CONFIG_ALTIVEC
71944+ case PTRACE_SETVRREGS:
71945+#endif
71946+ return 1;
71947+ default:
71948+ return 0;
71949+ }
71950+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
71951+ !(current->role->roletype & GR_ROLE_GOD) &&
71952+ (current->acl != task->acl)) {
71953+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
71954+ return 1;
71955+ }
71956+
71957+ return 0;
71958+}
71959+
71960+static int is_writable_mmap(const struct file *filp)
71961+{
71962+ struct task_struct *task = current;
71963+ struct acl_object_label *obj, *obj2;
71964+
71965+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71966+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
71967+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
71968+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
71969+ task->role->root_label);
71970+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
71971+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
71972+ return 1;
71973+ }
71974+ }
71975+ return 0;
71976+}
71977+
71978+int
71979+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
71980+{
71981+ __u32 mode;
71982+
71983+ if (unlikely(!file || !(prot & PROT_EXEC)))
71984+ return 1;
71985+
71986+ if (is_writable_mmap(file))
71987+ return 0;
71988+
71989+ mode =
71990+ gr_search_file(file->f_path.dentry,
71991+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
71992+ file->f_path.mnt);
71993+
71994+ if (!gr_tpe_allow(file))
71995+ return 0;
71996+
71997+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
71998+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
71999+ return 0;
72000+ } else if (unlikely(!(mode & GR_EXEC))) {
72001+ return 0;
72002+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72003+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72004+ return 1;
72005+ }
72006+
72007+ return 1;
72008+}
72009+
72010+int
72011+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
72012+{
72013+ __u32 mode;
72014+
72015+ if (unlikely(!file || !(prot & PROT_EXEC)))
72016+ return 1;
72017+
72018+ if (is_writable_mmap(file))
72019+ return 0;
72020+
72021+ mode =
72022+ gr_search_file(file->f_path.dentry,
72023+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
72024+ file->f_path.mnt);
72025+
72026+ if (!gr_tpe_allow(file))
72027+ return 0;
72028+
72029+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
72030+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72031+ return 0;
72032+ } else if (unlikely(!(mode & GR_EXEC))) {
72033+ return 0;
72034+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
72035+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
72036+ return 1;
72037+ }
72038+
72039+ return 1;
72040+}
72041+
72042+void
72043+gr_acl_handle_psacct(struct task_struct *task, const long code)
72044+{
72045+ unsigned long runtime, cputime;
72046+ cputime_t utime, stime;
72047+ unsigned int wday, cday;
72048+ __u8 whr, chr;
72049+ __u8 wmin, cmin;
72050+ __u8 wsec, csec;
72051+ struct timespec curtime, starttime;
72052+
72053+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
72054+ !(task->acl->mode & GR_PROCACCT)))
72055+ return;
72056+
72057+ curtime = ns_to_timespec(ktime_get_ns());
72058+ starttime = ns_to_timespec(task->start_time);
72059+ runtime = curtime.tv_sec - starttime.tv_sec;
72060+ wday = runtime / (60 * 60 * 24);
72061+ runtime -= wday * (60 * 60 * 24);
72062+ whr = runtime / (60 * 60);
72063+ runtime -= whr * (60 * 60);
72064+ wmin = runtime / 60;
72065+ runtime -= wmin * 60;
72066+ wsec = runtime;
72067+
72068+ task_cputime(task, &utime, &stime);
72069+ cputime = cputime_to_secs(utime + stime);
72070+ cday = cputime / (60 * 60 * 24);
72071+ cputime -= cday * (60 * 60 * 24);
72072+ chr = cputime / (60 * 60);
72073+ cputime -= chr * (60 * 60);
72074+ cmin = cputime / 60;
72075+ cputime -= cmin * 60;
72076+ csec = cputime;
72077+
72078+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
72079+
72080+ return;
72081+}
72082+
72083+#ifdef CONFIG_TASKSTATS
72084+int gr_is_taskstats_denied(int pid)
72085+{
72086+ struct task_struct *task;
72087+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72088+ const struct cred *cred;
72089+#endif
72090+ int ret = 0;
72091+
72092+ /* restrict taskstats viewing to un-chrooted root users
72093+ who have the 'view' subject flag if the RBAC system is enabled
72094+ */
72095+
72096+ rcu_read_lock();
72097+ read_lock(&tasklist_lock);
72098+ task = find_task_by_vpid(pid);
72099+ if (task) {
72100+#ifdef CONFIG_GRKERNSEC_CHROOT
72101+ if (proc_is_chrooted(task))
72102+ ret = -EACCES;
72103+#endif
72104+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72105+ cred = __task_cred(task);
72106+#ifdef CONFIG_GRKERNSEC_PROC_USER
72107+ if (gr_is_global_nonroot(cred->uid))
72108+ ret = -EACCES;
72109+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72110+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
72111+ ret = -EACCES;
72112+#endif
72113+#endif
72114+ if (gr_status & GR_READY) {
72115+ if (!(task->acl->mode & GR_VIEW))
72116+ ret = -EACCES;
72117+ }
72118+ } else
72119+ ret = -ENOENT;
72120+
72121+ read_unlock(&tasklist_lock);
72122+ rcu_read_unlock();
72123+
72124+ return ret;
72125+}
72126+#endif
72127+
72128+/* AUXV entries are filled via a descendant of search_binary_handler
72129+ after we've already applied the subject for the target
72130+*/
72131+int gr_acl_enable_at_secure(void)
72132+{
72133+ if (unlikely(!(gr_status & GR_READY)))
72134+ return 0;
72135+
72136+ if (current->acl->mode & GR_ATSECURE)
72137+ return 1;
72138+
72139+ return 0;
72140+}
72141+
72142+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
72143+{
72144+ struct task_struct *task = current;
72145+ struct dentry *dentry = file->f_path.dentry;
72146+ struct vfsmount *mnt = file->f_path.mnt;
72147+ struct acl_object_label *obj, *tmp;
72148+ struct acl_subject_label *subj;
72149+ unsigned int bufsize;
72150+ int is_not_root;
72151+ char *path;
72152+ dev_t dev = __get_dev(dentry);
72153+
72154+ if (unlikely(!(gr_status & GR_READY)))
72155+ return 1;
72156+
72157+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
72158+ return 1;
72159+
72160+ /* ignore Eric Biederman */
72161+ if (IS_PRIVATE(dentry->d_inode))
72162+ return 1;
72163+
72164+ subj = task->acl;
72165+ read_lock(&gr_inode_lock);
72166+ do {
72167+ obj = lookup_acl_obj_label(ino, dev, subj);
72168+ if (obj != NULL) {
72169+ read_unlock(&gr_inode_lock);
72170+ return (obj->mode & GR_FIND) ? 1 : 0;
72171+ }
72172+ } while ((subj = subj->parent_subject));
72173+ read_unlock(&gr_inode_lock);
72174+
72175+ /* this is purely an optimization since we're looking for an object
72176+ for the directory we're doing a readdir on
72177+ if it's possible for any globbed object to match the entry we're
72178+ filling into the directory, then the object we find here will be
72179+ an anchor point with attached globbed objects
72180+ */
72181+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
72182+ if (obj->globbed == NULL)
72183+ return (obj->mode & GR_FIND) ? 1 : 0;
72184+
72185+ is_not_root = ((obj->filename[0] == '/') &&
72186+ (obj->filename[1] == '\0')) ? 0 : 1;
72187+ bufsize = PAGE_SIZE - namelen - is_not_root;
72188+
72189+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
72190+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
72191+ return 1;
72192+
72193+ preempt_disable();
72194+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
72195+ bufsize);
72196+
72197+ bufsize = strlen(path);
72198+
72199+ /* if base is "/", don't append an additional slash */
72200+ if (is_not_root)
72201+ *(path + bufsize) = '/';
72202+ memcpy(path + bufsize + is_not_root, name, namelen);
72203+ *(path + bufsize + namelen + is_not_root) = '\0';
72204+
72205+ tmp = obj->globbed;
72206+ while (tmp) {
72207+ if (!glob_match(tmp->filename, path)) {
72208+ preempt_enable();
72209+ return (tmp->mode & GR_FIND) ? 1 : 0;
72210+ }
72211+ tmp = tmp->next;
72212+ }
72213+ preempt_enable();
72214+ return (obj->mode & GR_FIND) ? 1 : 0;
72215+}
72216+
72217+void gr_put_exec_file(struct task_struct *task)
72218+{
72219+ struct file *filp;
72220+
72221+ write_lock(&grsec_exec_file_lock);
72222+ filp = task->exec_file;
72223+ task->exec_file = NULL;
72224+ write_unlock(&grsec_exec_file_lock);
72225+
72226+ if (filp)
72227+ fput(filp);
72228+
72229+ return;
72230+}
72231+
72232+
72233+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
72234+EXPORT_SYMBOL_GPL(gr_acl_is_enabled);
72235+#endif
72236+#ifdef CONFIG_SECURITY
72237+EXPORT_SYMBOL_GPL(gr_check_user_change);
72238+EXPORT_SYMBOL_GPL(gr_check_group_change);
72239+#endif
72240+
72241diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
72242new file mode 100644
72243index 0000000..18ffbbd
72244--- /dev/null
72245+++ b/grsecurity/gracl_alloc.c
72246@@ -0,0 +1,105 @@
72247+#include <linux/kernel.h>
72248+#include <linux/mm.h>
72249+#include <linux/slab.h>
72250+#include <linux/vmalloc.h>
72251+#include <linux/gracl.h>
72252+#include <linux/grsecurity.h>
72253+
72254+static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
72255+struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
72256+
72257+static __inline__ int
72258+alloc_pop(void)
72259+{
72260+ if (current_alloc_state->alloc_stack_next == 1)
72261+ return 0;
72262+
72263+ kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
72264+
72265+ current_alloc_state->alloc_stack_next--;
72266+
72267+ return 1;
72268+}
72269+
72270+static __inline__ int
72271+alloc_push(void *buf)
72272+{
72273+ if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
72274+ return 1;
72275+
72276+ current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
72277+
72278+ current_alloc_state->alloc_stack_next++;
72279+
72280+ return 0;
72281+}
72282+
72283+void *
72284+acl_alloc(unsigned long len)
72285+{
72286+ void *ret = NULL;
72287+
72288+ if (!len || len > PAGE_SIZE)
72289+ goto out;
72290+
72291+ ret = kmalloc(len, GFP_KERNEL);
72292+
72293+ if (ret) {
72294+ if (alloc_push(ret)) {
72295+ kfree(ret);
72296+ ret = NULL;
72297+ }
72298+ }
72299+
72300+out:
72301+ return ret;
72302+}
72303+
72304+void *
72305+acl_alloc_num(unsigned long num, unsigned long len)
72306+{
72307+ if (!len || (num > (PAGE_SIZE / len)))
72308+ return NULL;
72309+
72310+ return acl_alloc(num * len);
72311+}
72312+
72313+void
72314+acl_free_all(void)
72315+{
72316+ if (!current_alloc_state->alloc_stack)
72317+ return;
72318+
72319+ while (alloc_pop()) ;
72320+
72321+ if (current_alloc_state->alloc_stack) {
72322+ if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
72323+ kfree(current_alloc_state->alloc_stack);
72324+ else
72325+ vfree(current_alloc_state->alloc_stack);
72326+ }
72327+
72328+ current_alloc_state->alloc_stack = NULL;
72329+ current_alloc_state->alloc_stack_size = 1;
72330+ current_alloc_state->alloc_stack_next = 1;
72331+
72332+ return;
72333+}
72334+
72335+int
72336+acl_alloc_stack_init(unsigned long size)
72337+{
72338+ if ((size * sizeof (void *)) <= PAGE_SIZE)
72339+ current_alloc_state->alloc_stack =
72340+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
72341+ else
72342+ current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
72343+
72344+ current_alloc_state->alloc_stack_size = size;
72345+ current_alloc_state->alloc_stack_next = 1;
72346+
72347+ if (!current_alloc_state->alloc_stack)
72348+ return 0;
72349+ else
72350+ return 1;
72351+}
72352diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
72353new file mode 100644
72354index 0000000..1a94c11
72355--- /dev/null
72356+++ b/grsecurity/gracl_cap.c
72357@@ -0,0 +1,127 @@
72358+#include <linux/kernel.h>
72359+#include <linux/module.h>
72360+#include <linux/sched.h>
72361+#include <linux/gracl.h>
72362+#include <linux/grsecurity.h>
72363+#include <linux/grinternal.h>
72364+
72365+extern const char *captab_log[];
72366+extern int captab_log_entries;
72367+
72368+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
72369+{
72370+ struct acl_subject_label *curracl;
72371+
72372+ if (!gr_acl_is_enabled())
72373+ return 1;
72374+
72375+ curracl = task->acl;
72376+
72377+ if (curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
72378+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
72379+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
72380+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
72381+ gr_to_filename(task->exec_file->f_path.dentry,
72382+ task->exec_file->f_path.mnt) : curracl->filename,
72383+ curracl->filename, 0UL,
72384+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
72385+ return 1;
72386+ }
72387+
72388+ return 0;
72389+}
72390+
72391+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
72392+{
72393+ struct acl_subject_label *curracl;
72394+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72395+ kernel_cap_t cap_audit = __cap_empty_set;
72396+
72397+ if (!gr_acl_is_enabled())
72398+ return 1;
72399+
72400+ curracl = task->acl;
72401+
72402+ cap_drop = curracl->cap_lower;
72403+ cap_mask = curracl->cap_mask;
72404+ cap_audit = curracl->cap_invert_audit;
72405+
72406+ while ((curracl = curracl->parent_subject)) {
72407+ /* if the cap isn't specified in the current computed mask but is specified in the
72408+ current level subject, and is lowered in the current level subject, then add
72409+ it to the set of dropped capabilities
72410+ otherwise, add the current level subject's mask to the current computed mask
72411+ */
72412+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72413+ cap_raise(cap_mask, cap);
72414+ if (cap_raised(curracl->cap_lower, cap))
72415+ cap_raise(cap_drop, cap);
72416+ if (cap_raised(curracl->cap_invert_audit, cap))
72417+ cap_raise(cap_audit, cap);
72418+ }
72419+ }
72420+
72421+ if (!cap_raised(cap_drop, cap)) {
72422+ if (cap_raised(cap_audit, cap))
72423+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
72424+ return 1;
72425+ }
72426+
72427+ /* only learn the capability use if the process has the capability in the
72428+ general case, the two uses in sys.c of gr_learn_cap are an exception
72429+ to this rule to ensure any role transition involves what the full-learned
72430+ policy believes in a privileged process
72431+ */
72432+ if (cap_raised(cred->cap_effective, cap) && gr_learn_cap(task, cred, cap))
72433+ return 1;
72434+
72435+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
72436+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
72437+
72438+ return 0;
72439+}
72440+
72441+int
72442+gr_acl_is_capable(const int cap)
72443+{
72444+ return gr_task_acl_is_capable(current, current_cred(), cap);
72445+}
72446+
72447+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
72448+{
72449+ struct acl_subject_label *curracl;
72450+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
72451+
72452+ if (!gr_acl_is_enabled())
72453+ return 1;
72454+
72455+ curracl = task->acl;
72456+
72457+ cap_drop = curracl->cap_lower;
72458+ cap_mask = curracl->cap_mask;
72459+
72460+ while ((curracl = curracl->parent_subject)) {
72461+ /* if the cap isn't specified in the current computed mask but is specified in the
72462+ current level subject, and is lowered in the current level subject, then add
72463+ it to the set of dropped capabilities
72464+ otherwise, add the current level subject's mask to the current computed mask
72465+ */
72466+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
72467+ cap_raise(cap_mask, cap);
72468+ if (cap_raised(curracl->cap_lower, cap))
72469+ cap_raise(cap_drop, cap);
72470+ }
72471+ }
72472+
72473+ if (!cap_raised(cap_drop, cap))
72474+ return 1;
72475+
72476+ return 0;
72477+}
72478+
72479+int
72480+gr_acl_is_capable_nolog(const int cap)
72481+{
72482+ return gr_task_acl_is_capable_nolog(current, cap);
72483+}
72484+
72485diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
72486new file mode 100644
72487index 0000000..ca25605
72488--- /dev/null
72489+++ b/grsecurity/gracl_compat.c
72490@@ -0,0 +1,270 @@
72491+#include <linux/kernel.h>
72492+#include <linux/gracl.h>
72493+#include <linux/compat.h>
72494+#include <linux/gracl_compat.h>
72495+
72496+#include <asm/uaccess.h>
72497+
72498+int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
72499+{
72500+ struct gr_arg_wrapper_compat uwrapcompat;
72501+
72502+ if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
72503+ return -EFAULT;
72504+
72505+ if (((uwrapcompat.version != GRSECURITY_VERSION) &&
72506+ (uwrapcompat.version != 0x2901)) ||
72507+ (uwrapcompat.size != sizeof(struct gr_arg_compat)))
72508+ return -EINVAL;
72509+
72510+ uwrap->arg = compat_ptr(uwrapcompat.arg);
72511+ uwrap->version = uwrapcompat.version;
72512+ uwrap->size = sizeof(struct gr_arg);
72513+
72514+ return 0;
72515+}
72516+
72517+int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
72518+{
72519+ struct gr_arg_compat argcompat;
72520+
72521+ if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
72522+ return -EFAULT;
72523+
72524+ arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
72525+ arg->role_db.num_pointers = argcompat.role_db.num_pointers;
72526+ arg->role_db.num_roles = argcompat.role_db.num_roles;
72527+ arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
72528+ arg->role_db.num_subjects = argcompat.role_db.num_subjects;
72529+ arg->role_db.num_objects = argcompat.role_db.num_objects;
72530+
72531+ memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
72532+ memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
72533+ memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
72534+ memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
72535+ arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
72536+ arg->segv_device = argcompat.segv_device;
72537+ arg->segv_inode = argcompat.segv_inode;
72538+ arg->segv_uid = argcompat.segv_uid;
72539+ arg->num_sprole_pws = argcompat.num_sprole_pws;
72540+ arg->mode = argcompat.mode;
72541+
72542+ return 0;
72543+}
72544+
72545+int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
72546+{
72547+ struct acl_object_label_compat objcompat;
72548+
72549+ if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
72550+ return -EFAULT;
72551+
72552+ obj->filename = compat_ptr(objcompat.filename);
72553+ obj->inode = objcompat.inode;
72554+ obj->device = objcompat.device;
72555+ obj->mode = objcompat.mode;
72556+
72557+ obj->nested = compat_ptr(objcompat.nested);
72558+ obj->globbed = compat_ptr(objcompat.globbed);
72559+
72560+ obj->prev = compat_ptr(objcompat.prev);
72561+ obj->next = compat_ptr(objcompat.next);
72562+
72563+ return 0;
72564+}
72565+
72566+int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
72567+{
72568+ unsigned int i;
72569+ struct acl_subject_label_compat subjcompat;
72570+
72571+ if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
72572+ return -EFAULT;
72573+
72574+ subj->filename = compat_ptr(subjcompat.filename);
72575+ subj->inode = subjcompat.inode;
72576+ subj->device = subjcompat.device;
72577+ subj->mode = subjcompat.mode;
72578+ subj->cap_mask = subjcompat.cap_mask;
72579+ subj->cap_lower = subjcompat.cap_lower;
72580+ subj->cap_invert_audit = subjcompat.cap_invert_audit;
72581+
72582+ for (i = 0; i < GR_NLIMITS; i++) {
72583+ if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
72584+ subj->res[i].rlim_cur = RLIM_INFINITY;
72585+ else
72586+ subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
72587+ if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
72588+ subj->res[i].rlim_max = RLIM_INFINITY;
72589+ else
72590+ subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
72591+ }
72592+ subj->resmask = subjcompat.resmask;
72593+
72594+ subj->user_trans_type = subjcompat.user_trans_type;
72595+ subj->group_trans_type = subjcompat.group_trans_type;
72596+ subj->user_transitions = compat_ptr(subjcompat.user_transitions);
72597+ subj->group_transitions = compat_ptr(subjcompat.group_transitions);
72598+ subj->user_trans_num = subjcompat.user_trans_num;
72599+ subj->group_trans_num = subjcompat.group_trans_num;
72600+
72601+ memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
72602+ memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
72603+ subj->ip_type = subjcompat.ip_type;
72604+ subj->ips = compat_ptr(subjcompat.ips);
72605+ subj->ip_num = subjcompat.ip_num;
72606+ subj->inaddr_any_override = subjcompat.inaddr_any_override;
72607+
72608+ subj->crashes = subjcompat.crashes;
72609+ subj->expires = subjcompat.expires;
72610+
72611+ subj->parent_subject = compat_ptr(subjcompat.parent_subject);
72612+ subj->hash = compat_ptr(subjcompat.hash);
72613+ subj->prev = compat_ptr(subjcompat.prev);
72614+ subj->next = compat_ptr(subjcompat.next);
72615+
72616+ subj->obj_hash = compat_ptr(subjcompat.obj_hash);
72617+ subj->obj_hash_size = subjcompat.obj_hash_size;
72618+ subj->pax_flags = subjcompat.pax_flags;
72619+
72620+ return 0;
72621+}
72622+
72623+int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
72624+{
72625+ struct acl_role_label_compat rolecompat;
72626+
72627+ if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
72628+ return -EFAULT;
72629+
72630+ role->rolename = compat_ptr(rolecompat.rolename);
72631+ role->uidgid = rolecompat.uidgid;
72632+ role->roletype = rolecompat.roletype;
72633+
72634+ role->auth_attempts = rolecompat.auth_attempts;
72635+ role->expires = rolecompat.expires;
72636+
72637+ role->root_label = compat_ptr(rolecompat.root_label);
72638+ role->hash = compat_ptr(rolecompat.hash);
72639+
72640+ role->prev = compat_ptr(rolecompat.prev);
72641+ role->next = compat_ptr(rolecompat.next);
72642+
72643+ role->transitions = compat_ptr(rolecompat.transitions);
72644+ role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
72645+ role->domain_children = compat_ptr(rolecompat.domain_children);
72646+ role->domain_child_num = rolecompat.domain_child_num;
72647+
72648+ role->umask = rolecompat.umask;
72649+
72650+ role->subj_hash = compat_ptr(rolecompat.subj_hash);
72651+ role->subj_hash_size = rolecompat.subj_hash_size;
72652+
72653+ return 0;
72654+}
72655+
72656+int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
72657+{
72658+ struct role_allowed_ip_compat roleip_compat;
72659+
72660+ if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
72661+ return -EFAULT;
72662+
72663+ roleip->addr = roleip_compat.addr;
72664+ roleip->netmask = roleip_compat.netmask;
72665+
72666+ roleip->prev = compat_ptr(roleip_compat.prev);
72667+ roleip->next = compat_ptr(roleip_compat.next);
72668+
72669+ return 0;
72670+}
72671+
72672+int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
72673+{
72674+ struct role_transition_compat trans_compat;
72675+
72676+ if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
72677+ return -EFAULT;
72678+
72679+ trans->rolename = compat_ptr(trans_compat.rolename);
72680+
72681+ trans->prev = compat_ptr(trans_compat.prev);
72682+ trans->next = compat_ptr(trans_compat.next);
72683+
72684+ return 0;
72685+
72686+}
72687+
72688+int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
72689+{
72690+ struct gr_hash_struct_compat hash_compat;
72691+
72692+ if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
72693+ return -EFAULT;
72694+
72695+ hash->table = compat_ptr(hash_compat.table);
72696+ hash->nametable = compat_ptr(hash_compat.nametable);
72697+ hash->first = compat_ptr(hash_compat.first);
72698+
72699+ hash->table_size = hash_compat.table_size;
72700+ hash->used_size = hash_compat.used_size;
72701+
72702+ hash->type = hash_compat.type;
72703+
72704+ return 0;
72705+}
72706+
72707+int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
72708+{
72709+ compat_uptr_t ptrcompat;
72710+
72711+ if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
72712+ return -EFAULT;
72713+
72714+ *(void **)ptr = compat_ptr(ptrcompat);
72715+
72716+ return 0;
72717+}
72718+
72719+int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
72720+{
72721+ struct acl_ip_label_compat ip_compat;
72722+
72723+ if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
72724+ return -EFAULT;
72725+
72726+ ip->iface = compat_ptr(ip_compat.iface);
72727+ ip->addr = ip_compat.addr;
72728+ ip->netmask = ip_compat.netmask;
72729+ ip->low = ip_compat.low;
72730+ ip->high = ip_compat.high;
72731+ ip->mode = ip_compat.mode;
72732+ ip->type = ip_compat.type;
72733+
72734+ memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
72735+
72736+ ip->prev = compat_ptr(ip_compat.prev);
72737+ ip->next = compat_ptr(ip_compat.next);
72738+
72739+ return 0;
72740+}
72741+
72742+int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
72743+{
72744+ struct sprole_pw_compat pw_compat;
72745+
72746+ if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
72747+ return -EFAULT;
72748+
72749+ pw->rolename = compat_ptr(pw_compat.rolename);
72750+ memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
72751+ memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
72752+
72753+ return 0;
72754+}
72755+
72756+size_t get_gr_arg_wrapper_size_compat(void)
72757+{
72758+ return sizeof(struct gr_arg_wrapper_compat);
72759+}
72760+
72761diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
72762new file mode 100644
72763index 0000000..4008fdc
72764--- /dev/null
72765+++ b/grsecurity/gracl_fs.c
72766@@ -0,0 +1,445 @@
72767+#include <linux/kernel.h>
72768+#include <linux/sched.h>
72769+#include <linux/types.h>
72770+#include <linux/fs.h>
72771+#include <linux/file.h>
72772+#include <linux/stat.h>
72773+#include <linux/grsecurity.h>
72774+#include <linux/grinternal.h>
72775+#include <linux/gracl.h>
72776+
72777+umode_t
72778+gr_acl_umask(void)
72779+{
72780+ if (unlikely(!gr_acl_is_enabled()))
72781+ return 0;
72782+
72783+ return current->role->umask;
72784+}
72785+
72786+__u32
72787+gr_acl_handle_hidden_file(const struct dentry * dentry,
72788+ const struct vfsmount * mnt)
72789+{
72790+ __u32 mode;
72791+
72792+ if (unlikely(d_is_negative(dentry)))
72793+ return GR_FIND;
72794+
72795+ mode =
72796+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
72797+
72798+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
72799+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72800+ return mode;
72801+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
72802+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
72803+ return 0;
72804+ } else if (unlikely(!(mode & GR_FIND)))
72805+ return 0;
72806+
72807+ return GR_FIND;
72808+}
72809+
72810+__u32
72811+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
72812+ int acc_mode)
72813+{
72814+ __u32 reqmode = GR_FIND;
72815+ __u32 mode;
72816+
72817+ if (unlikely(d_is_negative(dentry)))
72818+ return reqmode;
72819+
72820+ if (acc_mode & MAY_APPEND)
72821+ reqmode |= GR_APPEND;
72822+ else if (acc_mode & MAY_WRITE)
72823+ reqmode |= GR_WRITE;
72824+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
72825+ reqmode |= GR_READ;
72826+
72827+ mode =
72828+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72829+ mnt);
72830+
72831+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72832+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72833+ reqmode & GR_READ ? " reading" : "",
72834+ reqmode & GR_WRITE ? " writing" : reqmode &
72835+ GR_APPEND ? " appending" : "");
72836+ return reqmode;
72837+ } else
72838+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72839+ {
72840+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
72841+ reqmode & GR_READ ? " reading" : "",
72842+ reqmode & GR_WRITE ? " writing" : reqmode &
72843+ GR_APPEND ? " appending" : "");
72844+ return 0;
72845+ } else if (unlikely((mode & reqmode) != reqmode))
72846+ return 0;
72847+
72848+ return reqmode;
72849+}
72850+
72851+__u32
72852+gr_acl_handle_creat(const struct dentry * dentry,
72853+ const struct dentry * p_dentry,
72854+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
72855+ const int imode)
72856+{
72857+ __u32 reqmode = GR_WRITE | GR_CREATE;
72858+ __u32 mode;
72859+
72860+ if (acc_mode & MAY_APPEND)
72861+ reqmode |= GR_APPEND;
72862+ // if a directory was required or the directory already exists, then
72863+ // don't count this open as a read
72864+ if ((acc_mode & MAY_READ) &&
72865+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
72866+ reqmode |= GR_READ;
72867+ if ((open_flags & O_CREAT) &&
72868+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
72869+ reqmode |= GR_SETID;
72870+
72871+ mode =
72872+ gr_check_create(dentry, p_dentry, p_mnt,
72873+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
72874+
72875+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72876+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72877+ reqmode & GR_READ ? " reading" : "",
72878+ reqmode & GR_WRITE ? " writing" : reqmode &
72879+ GR_APPEND ? " appending" : "");
72880+ return reqmode;
72881+ } else
72882+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72883+ {
72884+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
72885+ reqmode & GR_READ ? " reading" : "",
72886+ reqmode & GR_WRITE ? " writing" : reqmode &
72887+ GR_APPEND ? " appending" : "");
72888+ return 0;
72889+ } else if (unlikely((mode & reqmode) != reqmode))
72890+ return 0;
72891+
72892+ return reqmode;
72893+}
72894+
72895+__u32
72896+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
72897+ const int fmode)
72898+{
72899+ __u32 mode, reqmode = GR_FIND;
72900+
72901+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
72902+ reqmode |= GR_EXEC;
72903+ if (fmode & S_IWOTH)
72904+ reqmode |= GR_WRITE;
72905+ if (fmode & S_IROTH)
72906+ reqmode |= GR_READ;
72907+
72908+ mode =
72909+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
72910+ mnt);
72911+
72912+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
72913+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72914+ reqmode & GR_READ ? " reading" : "",
72915+ reqmode & GR_WRITE ? " writing" : "",
72916+ reqmode & GR_EXEC ? " executing" : "");
72917+ return reqmode;
72918+ } else
72919+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
72920+ {
72921+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
72922+ reqmode & GR_READ ? " reading" : "",
72923+ reqmode & GR_WRITE ? " writing" : "",
72924+ reqmode & GR_EXEC ? " executing" : "");
72925+ return 0;
72926+ } else if (unlikely((mode & reqmode) != reqmode))
72927+ return 0;
72928+
72929+ return reqmode;
72930+}
72931+
72932+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
72933+{
72934+ __u32 mode;
72935+
72936+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
72937+
72938+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
72939+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
72940+ return mode;
72941+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
72942+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
72943+ return 0;
72944+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
72945+ return 0;
72946+
72947+ return (reqmode);
72948+}
72949+
72950+__u32
72951+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
72952+{
72953+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
72954+}
72955+
72956+__u32
72957+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
72958+{
72959+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
72960+}
72961+
72962+__u32
72963+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
72964+{
72965+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
72966+}
72967+
72968+__u32
72969+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
72970+{
72971+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
72972+}
72973+
72974+__u32
72975+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
72976+ umode_t *modeptr)
72977+{
72978+ umode_t mode;
72979+
72980+ *modeptr &= ~gr_acl_umask();
72981+ mode = *modeptr;
72982+
72983+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
72984+ return 1;
72985+
72986+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
72987+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
72988+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
72989+ GR_CHMOD_ACL_MSG);
72990+ } else {
72991+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
72992+ }
72993+}
72994+
72995+__u32
72996+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
72997+{
72998+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
72999+}
73000+
73001+__u32
73002+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
73003+{
73004+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
73005+}
73006+
73007+__u32
73008+gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
73009+{
73010+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
73011+}
73012+
73013+__u32
73014+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
73015+{
73016+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
73017+}
73018+
73019+__u32
73020+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
73021+{
73022+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
73023+ GR_UNIXCONNECT_ACL_MSG);
73024+}
73025+
73026+/* hardlinks require at minimum create and link permission,
73027+ any additional privilege required is based on the
73028+ privilege of the file being linked to
73029+*/
73030+__u32
73031+gr_acl_handle_link(const struct dentry * new_dentry,
73032+ const struct dentry * parent_dentry,
73033+ const struct vfsmount * parent_mnt,
73034+ const struct dentry * old_dentry,
73035+ const struct vfsmount * old_mnt, const struct filename *to)
73036+{
73037+ __u32 mode;
73038+ __u32 needmode = GR_CREATE | GR_LINK;
73039+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
73040+
73041+ mode =
73042+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
73043+ old_mnt);
73044+
73045+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
73046+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73047+ return mode;
73048+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73049+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
73050+ return 0;
73051+ } else if (unlikely((mode & needmode) != needmode))
73052+ return 0;
73053+
73054+ return 1;
73055+}
73056+
73057+__u32
73058+gr_acl_handle_symlink(const struct dentry * new_dentry,
73059+ const struct dentry * parent_dentry,
73060+ const struct vfsmount * parent_mnt, const struct filename *from)
73061+{
73062+ __u32 needmode = GR_WRITE | GR_CREATE;
73063+ __u32 mode;
73064+
73065+ mode =
73066+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
73067+ GR_CREATE | GR_AUDIT_CREATE |
73068+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
73069+
73070+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
73071+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73072+ return mode;
73073+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
73074+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
73075+ return 0;
73076+ } else if (unlikely((mode & needmode) != needmode))
73077+ return 0;
73078+
73079+ return (GR_WRITE | GR_CREATE);
73080+}
73081+
73082+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
73083+{
73084+ __u32 mode;
73085+
73086+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
73087+
73088+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
73089+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
73090+ return mode;
73091+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
73092+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
73093+ return 0;
73094+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
73095+ return 0;
73096+
73097+ return (reqmode);
73098+}
73099+
73100+__u32
73101+gr_acl_handle_mknod(const struct dentry * new_dentry,
73102+ const struct dentry * parent_dentry,
73103+ const struct vfsmount * parent_mnt,
73104+ const int mode)
73105+{
73106+ __u32 reqmode = GR_WRITE | GR_CREATE;
73107+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
73108+ reqmode |= GR_SETID;
73109+
73110+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73111+ reqmode, GR_MKNOD_ACL_MSG);
73112+}
73113+
73114+__u32
73115+gr_acl_handle_mkdir(const struct dentry *new_dentry,
73116+ const struct dentry *parent_dentry,
73117+ const struct vfsmount *parent_mnt)
73118+{
73119+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
73120+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
73121+}
73122+
73123+#define RENAME_CHECK_SUCCESS(old, new) \
73124+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
73125+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
73126+
73127+int
73128+gr_acl_handle_rename(struct dentry *new_dentry,
73129+ struct dentry *parent_dentry,
73130+ const struct vfsmount *parent_mnt,
73131+ struct dentry *old_dentry,
73132+ struct inode *old_parent_inode,
73133+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags)
73134+{
73135+ __u32 comp1, comp2;
73136+ int error = 0;
73137+
73138+ if (unlikely(!gr_acl_is_enabled()))
73139+ return 0;
73140+
73141+ if (flags & RENAME_EXCHANGE) {
73142+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73143+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73144+ GR_SUPPRESS, parent_mnt);
73145+ comp2 =
73146+ gr_search_file(old_dentry,
73147+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73148+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73149+ } else if (d_is_negative(new_dentry)) {
73150+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
73151+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
73152+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
73153+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
73154+ GR_DELETE | GR_AUDIT_DELETE |
73155+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73156+ GR_SUPPRESS, old_mnt);
73157+ } else {
73158+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
73159+ GR_CREATE | GR_DELETE |
73160+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
73161+ GR_AUDIT_READ | GR_AUDIT_WRITE |
73162+ GR_SUPPRESS, parent_mnt);
73163+ comp2 =
73164+ gr_search_file(old_dentry,
73165+ GR_READ | GR_WRITE | GR_AUDIT_READ |
73166+ GR_DELETE | GR_AUDIT_DELETE |
73167+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
73168+ }
73169+
73170+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
73171+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
73172+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73173+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
73174+ && !(comp2 & GR_SUPPRESS)) {
73175+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
73176+ error = -EACCES;
73177+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
73178+ error = -EACCES;
73179+
73180+ return error;
73181+}
73182+
73183+void
73184+gr_acl_handle_exit(void)
73185+{
73186+ u16 id;
73187+ char *rolename;
73188+
73189+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
73190+ !(current->role->roletype & GR_ROLE_PERSIST))) {
73191+ id = current->acl_role_id;
73192+ rolename = current->role->rolename;
73193+ gr_set_acls(1);
73194+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
73195+ }
73196+
73197+ gr_put_exec_file(current);
73198+ return;
73199+}
73200+
73201+int
73202+gr_acl_handle_procpidmem(const struct task_struct *task)
73203+{
73204+ if (unlikely(!gr_acl_is_enabled()))
73205+ return 0;
73206+
73207+ if (task != current && task->acl->mode & GR_PROTPROCFD)
73208+ return -EACCES;
73209+
73210+ return 0;
73211+}
73212diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
73213new file mode 100644
73214index 0000000..f056b81
73215--- /dev/null
73216+++ b/grsecurity/gracl_ip.c
73217@@ -0,0 +1,386 @@
73218+#include <linux/kernel.h>
73219+#include <asm/uaccess.h>
73220+#include <asm/errno.h>
73221+#include <net/sock.h>
73222+#include <linux/file.h>
73223+#include <linux/fs.h>
73224+#include <linux/net.h>
73225+#include <linux/in.h>
73226+#include <linux/skbuff.h>
73227+#include <linux/ip.h>
73228+#include <linux/udp.h>
73229+#include <linux/types.h>
73230+#include <linux/sched.h>
73231+#include <linux/netdevice.h>
73232+#include <linux/inetdevice.h>
73233+#include <linux/gracl.h>
73234+#include <linux/grsecurity.h>
73235+#include <linux/grinternal.h>
73236+
73237+#define GR_BIND 0x01
73238+#define GR_CONNECT 0x02
73239+#define GR_INVERT 0x04
73240+#define GR_BINDOVERRIDE 0x08
73241+#define GR_CONNECTOVERRIDE 0x10
73242+#define GR_SOCK_FAMILY 0x20
73243+
73244+static const char * gr_protocols[IPPROTO_MAX] = {
73245+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
73246+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
73247+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
73248+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
73249+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
73250+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
73251+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
73252+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
73253+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
73254+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
73255+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
73256+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
73257+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
73258+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
73259+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
73260+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
73261+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
73262+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
73263+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
73264+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
73265+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
73266+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
73267+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
73268+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
73269+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
73270+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
73271+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
73272+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
73273+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
73274+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
73275+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
73276+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
73277+ };
73278+
73279+static const char * gr_socktypes[SOCK_MAX] = {
73280+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
73281+ "unknown:7", "unknown:8", "unknown:9", "packet"
73282+ };
73283+
73284+static const char * gr_sockfamilies[AF_MAX+1] = {
73285+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
73286+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
73287+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
73288+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
73289+ };
73290+
73291+const char *
73292+gr_proto_to_name(unsigned char proto)
73293+{
73294+ return gr_protocols[proto];
73295+}
73296+
73297+const char *
73298+gr_socktype_to_name(unsigned char type)
73299+{
73300+ return gr_socktypes[type];
73301+}
73302+
73303+const char *
73304+gr_sockfamily_to_name(unsigned char family)
73305+{
73306+ return gr_sockfamilies[family];
73307+}
73308+
73309+extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
73310+
73311+int
73312+gr_search_socket(const int domain, const int type, const int protocol)
73313+{
73314+ struct acl_subject_label *curr;
73315+ const struct cred *cred = current_cred();
73316+
73317+ if (unlikely(!gr_acl_is_enabled()))
73318+ goto exit;
73319+
73320+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
73321+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
73322+ goto exit; // let the kernel handle it
73323+
73324+ curr = current->acl;
73325+
73326+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
73327+ /* the family is allowed, if this is PF_INET allow it only if
73328+ the extra sock type/protocol checks pass */
73329+ if (domain == PF_INET)
73330+ goto inet_check;
73331+ goto exit;
73332+ } else {
73333+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73334+ __u32 fakeip = 0;
73335+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73336+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73337+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73338+ gr_to_filename(current->exec_file->f_path.dentry,
73339+ current->exec_file->f_path.mnt) :
73340+ curr->filename, curr->filename,
73341+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
73342+ &current->signal->saved_ip);
73343+ goto exit;
73344+ }
73345+ goto exit_fail;
73346+ }
73347+
73348+inet_check:
73349+ /* the rest of this checking is for IPv4 only */
73350+ if (!curr->ips)
73351+ goto exit;
73352+
73353+ if ((curr->ip_type & (1U << type)) &&
73354+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
73355+ goto exit;
73356+
73357+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73358+ /* we don't place acls on raw sockets , and sometimes
73359+ dgram/ip sockets are opened for ioctl and not
73360+ bind/connect, so we'll fake a bind learn log */
73361+ if (type == SOCK_RAW || type == SOCK_PACKET) {
73362+ __u32 fakeip = 0;
73363+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73364+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73365+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73366+ gr_to_filename(current->exec_file->f_path.dentry,
73367+ current->exec_file->f_path.mnt) :
73368+ curr->filename, curr->filename,
73369+ &fakeip, 0, type,
73370+ protocol, GR_CONNECT, &current->signal->saved_ip);
73371+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
73372+ __u32 fakeip = 0;
73373+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73374+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73375+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73376+ gr_to_filename(current->exec_file->f_path.dentry,
73377+ current->exec_file->f_path.mnt) :
73378+ curr->filename, curr->filename,
73379+ &fakeip, 0, type,
73380+ protocol, GR_BIND, &current->signal->saved_ip);
73381+ }
73382+ /* we'll log when they use connect or bind */
73383+ goto exit;
73384+ }
73385+
73386+exit_fail:
73387+ if (domain == PF_INET)
73388+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
73389+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
73390+ else if (rcu_access_pointer(net_families[domain]) != NULL)
73391+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
73392+ gr_socktype_to_name(type), protocol);
73393+
73394+ return 0;
73395+exit:
73396+ return 1;
73397+}
73398+
73399+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
73400+{
73401+ if ((ip->mode & mode) &&
73402+ (ip_port >= ip->low) &&
73403+ (ip_port <= ip->high) &&
73404+ ((ntohl(ip_addr) & our_netmask) ==
73405+ (ntohl(our_addr) & our_netmask))
73406+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
73407+ && (ip->type & (1U << type))) {
73408+ if (ip->mode & GR_INVERT)
73409+ return 2; // specifically denied
73410+ else
73411+ return 1; // allowed
73412+ }
73413+
73414+ return 0; // not specifically allowed, may continue parsing
73415+}
73416+
73417+static int
73418+gr_search_connectbind(const int full_mode, struct sock *sk,
73419+ struct sockaddr_in *addr, const int type)
73420+{
73421+ char iface[IFNAMSIZ] = {0};
73422+ struct acl_subject_label *curr;
73423+ struct acl_ip_label *ip;
73424+ struct inet_sock *isk;
73425+ struct net_device *dev;
73426+ struct in_device *idev;
73427+ unsigned long i;
73428+ int ret;
73429+ int mode = full_mode & (GR_BIND | GR_CONNECT);
73430+ __u32 ip_addr = 0;
73431+ __u32 our_addr;
73432+ __u32 our_netmask;
73433+ char *p;
73434+ __u16 ip_port = 0;
73435+ const struct cred *cred = current_cred();
73436+
73437+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
73438+ return 0;
73439+
73440+ curr = current->acl;
73441+ isk = inet_sk(sk);
73442+
73443+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
73444+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
73445+ addr->sin_addr.s_addr = curr->inaddr_any_override;
73446+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
73447+ struct sockaddr_in saddr;
73448+ int err;
73449+
73450+ saddr.sin_family = AF_INET;
73451+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
73452+ saddr.sin_port = isk->inet_sport;
73453+
73454+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73455+ if (err)
73456+ return err;
73457+
73458+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
73459+ if (err)
73460+ return err;
73461+ }
73462+
73463+ if (!curr->ips)
73464+ return 0;
73465+
73466+ ip_addr = addr->sin_addr.s_addr;
73467+ ip_port = ntohs(addr->sin_port);
73468+
73469+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
73470+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
73471+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
73472+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
73473+ gr_to_filename(current->exec_file->f_path.dentry,
73474+ current->exec_file->f_path.mnt) :
73475+ curr->filename, curr->filename,
73476+ &ip_addr, ip_port, type,
73477+ sk->sk_protocol, mode, &current->signal->saved_ip);
73478+ return 0;
73479+ }
73480+
73481+ for (i = 0; i < curr->ip_num; i++) {
73482+ ip = *(curr->ips + i);
73483+ if (ip->iface != NULL) {
73484+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
73485+ p = strchr(iface, ':');
73486+ if (p != NULL)
73487+ *p = '\0';
73488+ dev = dev_get_by_name(sock_net(sk), iface);
73489+ if (dev == NULL)
73490+ continue;
73491+ idev = in_dev_get(dev);
73492+ if (idev == NULL) {
73493+ dev_put(dev);
73494+ continue;
73495+ }
73496+ rcu_read_lock();
73497+ for_ifa(idev) {
73498+ if (!strcmp(ip->iface, ifa->ifa_label)) {
73499+ our_addr = ifa->ifa_address;
73500+ our_netmask = 0xffffffff;
73501+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73502+ if (ret == 1) {
73503+ rcu_read_unlock();
73504+ in_dev_put(idev);
73505+ dev_put(dev);
73506+ return 0;
73507+ } else if (ret == 2) {
73508+ rcu_read_unlock();
73509+ in_dev_put(idev);
73510+ dev_put(dev);
73511+ goto denied;
73512+ }
73513+ }
73514+ } endfor_ifa(idev);
73515+ rcu_read_unlock();
73516+ in_dev_put(idev);
73517+ dev_put(dev);
73518+ } else {
73519+ our_addr = ip->addr;
73520+ our_netmask = ip->netmask;
73521+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
73522+ if (ret == 1)
73523+ return 0;
73524+ else if (ret == 2)
73525+ goto denied;
73526+ }
73527+ }
73528+
73529+denied:
73530+ if (mode == GR_BIND)
73531+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73532+ else if (mode == GR_CONNECT)
73533+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
73534+
73535+ return -EACCES;
73536+}
73537+
73538+int
73539+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
73540+{
73541+ /* always allow disconnection of dgram sockets with connect */
73542+ if (addr->sin_family == AF_UNSPEC)
73543+ return 0;
73544+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
73545+}
73546+
73547+int
73548+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
73549+{
73550+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
73551+}
73552+
73553+int gr_search_listen(struct socket *sock)
73554+{
73555+ struct sock *sk = sock->sk;
73556+ struct sockaddr_in addr;
73557+
73558+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73559+ addr.sin_port = inet_sk(sk)->inet_sport;
73560+
73561+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73562+}
73563+
73564+int gr_search_accept(struct socket *sock)
73565+{
73566+ struct sock *sk = sock->sk;
73567+ struct sockaddr_in addr;
73568+
73569+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
73570+ addr.sin_port = inet_sk(sk)->inet_sport;
73571+
73572+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
73573+}
73574+
73575+int
73576+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
73577+{
73578+ if (addr)
73579+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
73580+ else {
73581+ struct sockaddr_in sin;
73582+ const struct inet_sock *inet = inet_sk(sk);
73583+
73584+ sin.sin_addr.s_addr = inet->inet_daddr;
73585+ sin.sin_port = inet->inet_dport;
73586+
73587+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73588+ }
73589+}
73590+
73591+int
73592+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
73593+{
73594+ struct sockaddr_in sin;
73595+
73596+ if (unlikely(skb->len < sizeof (struct udphdr)))
73597+ return 0; // skip this packet
73598+
73599+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
73600+ sin.sin_port = udp_hdr(skb)->source;
73601+
73602+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
73603+}
73604diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
73605new file mode 100644
73606index 0000000..25f54ef
73607--- /dev/null
73608+++ b/grsecurity/gracl_learn.c
73609@@ -0,0 +1,207 @@
73610+#include <linux/kernel.h>
73611+#include <linux/mm.h>
73612+#include <linux/sched.h>
73613+#include <linux/poll.h>
73614+#include <linux/string.h>
73615+#include <linux/file.h>
73616+#include <linux/types.h>
73617+#include <linux/vmalloc.h>
73618+#include <linux/grinternal.h>
73619+
73620+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
73621+ size_t count, loff_t *ppos);
73622+extern int gr_acl_is_enabled(void);
73623+
73624+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
73625+static int gr_learn_attached;
73626+
73627+/* use a 512k buffer */
73628+#define LEARN_BUFFER_SIZE (512 * 1024)
73629+
73630+static DEFINE_SPINLOCK(gr_learn_lock);
73631+static DEFINE_MUTEX(gr_learn_user_mutex);
73632+
73633+/* we need to maintain two buffers, so that the kernel context of grlearn
73634+ uses a semaphore around the userspace copying, and the other kernel contexts
73635+ use a spinlock when copying into the buffer, since they cannot sleep
73636+*/
73637+static char *learn_buffer;
73638+static char *learn_buffer_user;
73639+static int learn_buffer_len;
73640+static int learn_buffer_user_len;
73641+
73642+static ssize_t
73643+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
73644+{
73645+ DECLARE_WAITQUEUE(wait, current);
73646+ ssize_t retval = 0;
73647+
73648+ add_wait_queue(&learn_wait, &wait);
73649+ set_current_state(TASK_INTERRUPTIBLE);
73650+ do {
73651+ mutex_lock(&gr_learn_user_mutex);
73652+ spin_lock(&gr_learn_lock);
73653+ if (learn_buffer_len)
73654+ break;
73655+ spin_unlock(&gr_learn_lock);
73656+ mutex_unlock(&gr_learn_user_mutex);
73657+ if (file->f_flags & O_NONBLOCK) {
73658+ retval = -EAGAIN;
73659+ goto out;
73660+ }
73661+ if (signal_pending(current)) {
73662+ retval = -ERESTARTSYS;
73663+ goto out;
73664+ }
73665+
73666+ schedule();
73667+ } while (1);
73668+
73669+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
73670+ learn_buffer_user_len = learn_buffer_len;
73671+ retval = learn_buffer_len;
73672+ learn_buffer_len = 0;
73673+
73674+ spin_unlock(&gr_learn_lock);
73675+
73676+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
73677+ retval = -EFAULT;
73678+
73679+ mutex_unlock(&gr_learn_user_mutex);
73680+out:
73681+ set_current_state(TASK_RUNNING);
73682+ remove_wait_queue(&learn_wait, &wait);
73683+ return retval;
73684+}
73685+
73686+static unsigned int
73687+poll_learn(struct file * file, poll_table * wait)
73688+{
73689+ poll_wait(file, &learn_wait, wait);
73690+
73691+ if (learn_buffer_len)
73692+ return (POLLIN | POLLRDNORM);
73693+
73694+ return 0;
73695+}
73696+
73697+void
73698+gr_clear_learn_entries(void)
73699+{
73700+ char *tmp;
73701+
73702+ mutex_lock(&gr_learn_user_mutex);
73703+ spin_lock(&gr_learn_lock);
73704+ tmp = learn_buffer;
73705+ learn_buffer = NULL;
73706+ spin_unlock(&gr_learn_lock);
73707+ if (tmp)
73708+ vfree(tmp);
73709+ if (learn_buffer_user != NULL) {
73710+ vfree(learn_buffer_user);
73711+ learn_buffer_user = NULL;
73712+ }
73713+ learn_buffer_len = 0;
73714+ mutex_unlock(&gr_learn_user_mutex);
73715+
73716+ return;
73717+}
73718+
73719+void
73720+gr_add_learn_entry(const char *fmt, ...)
73721+{
73722+ va_list args;
73723+ unsigned int len;
73724+
73725+ if (!gr_learn_attached)
73726+ return;
73727+
73728+ spin_lock(&gr_learn_lock);
73729+
73730+ /* leave a gap at the end so we know when it's "full" but don't have to
73731+ compute the exact length of the string we're trying to append
73732+ */
73733+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
73734+ spin_unlock(&gr_learn_lock);
73735+ wake_up_interruptible(&learn_wait);
73736+ return;
73737+ }
73738+ if (learn_buffer == NULL) {
73739+ spin_unlock(&gr_learn_lock);
73740+ return;
73741+ }
73742+
73743+ va_start(args, fmt);
73744+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
73745+ va_end(args);
73746+
73747+ learn_buffer_len += len + 1;
73748+
73749+ spin_unlock(&gr_learn_lock);
73750+ wake_up_interruptible(&learn_wait);
73751+
73752+ return;
73753+}
73754+
73755+static int
73756+open_learn(struct inode *inode, struct file *file)
73757+{
73758+ if (file->f_mode & FMODE_READ && gr_learn_attached)
73759+ return -EBUSY;
73760+ if (file->f_mode & FMODE_READ) {
73761+ int retval = 0;
73762+ mutex_lock(&gr_learn_user_mutex);
73763+ if (learn_buffer == NULL)
73764+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
73765+ if (learn_buffer_user == NULL)
73766+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
73767+ if (learn_buffer == NULL) {
73768+ retval = -ENOMEM;
73769+ goto out_error;
73770+ }
73771+ if (learn_buffer_user == NULL) {
73772+ retval = -ENOMEM;
73773+ goto out_error;
73774+ }
73775+ learn_buffer_len = 0;
73776+ learn_buffer_user_len = 0;
73777+ gr_learn_attached = 1;
73778+out_error:
73779+ mutex_unlock(&gr_learn_user_mutex);
73780+ return retval;
73781+ }
73782+ return 0;
73783+}
73784+
73785+static int
73786+close_learn(struct inode *inode, struct file *file)
73787+{
73788+ if (file->f_mode & FMODE_READ) {
73789+ char *tmp = NULL;
73790+ mutex_lock(&gr_learn_user_mutex);
73791+ spin_lock(&gr_learn_lock);
73792+ tmp = learn_buffer;
73793+ learn_buffer = NULL;
73794+ spin_unlock(&gr_learn_lock);
73795+ if (tmp)
73796+ vfree(tmp);
73797+ if (learn_buffer_user != NULL) {
73798+ vfree(learn_buffer_user);
73799+ learn_buffer_user = NULL;
73800+ }
73801+ learn_buffer_len = 0;
73802+ learn_buffer_user_len = 0;
73803+ gr_learn_attached = 0;
73804+ mutex_unlock(&gr_learn_user_mutex);
73805+ }
73806+
73807+ return 0;
73808+}
73809+
73810+const struct file_operations grsec_fops = {
73811+ .read = read_learn,
73812+ .write = write_grsec_handler,
73813+ .open = open_learn,
73814+ .release = close_learn,
73815+ .poll = poll_learn,
73816+};
73817diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
73818new file mode 100644
73819index 0000000..3f8ade0
73820--- /dev/null
73821+++ b/grsecurity/gracl_policy.c
73822@@ -0,0 +1,1782 @@
73823+#include <linux/kernel.h>
73824+#include <linux/module.h>
73825+#include <linux/sched.h>
73826+#include <linux/mm.h>
73827+#include <linux/file.h>
73828+#include <linux/fs.h>
73829+#include <linux/namei.h>
73830+#include <linux/mount.h>
73831+#include <linux/tty.h>
73832+#include <linux/proc_fs.h>
73833+#include <linux/lglock.h>
73834+#include <linux/slab.h>
73835+#include <linux/vmalloc.h>
73836+#include <linux/types.h>
73837+#include <linux/sysctl.h>
73838+#include <linux/netdevice.h>
73839+#include <linux/ptrace.h>
73840+#include <linux/gracl.h>
73841+#include <linux/gralloc.h>
73842+#include <linux/security.h>
73843+#include <linux/grinternal.h>
73844+#include <linux/pid_namespace.h>
73845+#include <linux/stop_machine.h>
73846+#include <linux/fdtable.h>
73847+#include <linux/percpu.h>
73848+#include <linux/lglock.h>
73849+#include <linux/hugetlb.h>
73850+#include <linux/posix-timers.h>
73851+#include "../fs/mount.h"
73852+
73853+#include <asm/uaccess.h>
73854+#include <asm/errno.h>
73855+#include <asm/mman.h>
73856+
73857+extern struct gr_policy_state *polstate;
73858+
73859+#define FOR_EACH_ROLE_START(role) \
73860+ role = polstate->role_list; \
73861+ while (role) {
73862+
73863+#define FOR_EACH_ROLE_END(role) \
73864+ role = role->prev; \
73865+ }
73866+
73867+struct path gr_real_root;
73868+
73869+extern struct gr_alloc_state *current_alloc_state;
73870+
73871+u16 acl_sp_role_value;
73872+
73873+static DEFINE_MUTEX(gr_dev_mutex);
73874+
73875+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
73876+extern void gr_clear_learn_entries(void);
73877+
73878+struct gr_arg *gr_usermode __read_only;
73879+unsigned char *gr_system_salt __read_only;
73880+unsigned char *gr_system_sum __read_only;
73881+
73882+static unsigned int gr_auth_attempts = 0;
73883+static unsigned long gr_auth_expires = 0UL;
73884+
73885+struct acl_object_label *fakefs_obj_rw;
73886+struct acl_object_label *fakefs_obj_rwx;
73887+
73888+extern int gr_init_uidset(void);
73889+extern void gr_free_uidset(void);
73890+extern void gr_remove_uid(uid_t uid);
73891+extern int gr_find_uid(uid_t uid);
73892+
73893+extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
73894+extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
73895+extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
73896+extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
73897+extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
73898+extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
73899+extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
73900+extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
73901+extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
73902+extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
73903+extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
73904+extern void assign_special_role(const char *rolename);
73905+extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
73906+extern int gr_rbac_disable(void *unused);
73907+extern void gr_enable_rbac_system(void);
73908+
73909+static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
73910+{
73911+ if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
73912+ return -EFAULT;
73913+
73914+ return 0;
73915+}
73916+
73917+static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
73918+{
73919+ if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
73920+ return -EFAULT;
73921+
73922+ return 0;
73923+}
73924+
73925+static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
73926+{
73927+ if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
73928+ return -EFAULT;
73929+
73930+ return 0;
73931+}
73932+
73933+static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
73934+{
73935+ if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
73936+ return -EFAULT;
73937+
73938+ return 0;
73939+}
73940+
73941+static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
73942+{
73943+ if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
73944+ return -EFAULT;
73945+
73946+ return 0;
73947+}
73948+
73949+static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
73950+{
73951+ if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
73952+ return -EFAULT;
73953+
73954+ return 0;
73955+}
73956+
73957+static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
73958+{
73959+ if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
73960+ return -EFAULT;
73961+
73962+ return 0;
73963+}
73964+
73965+static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
73966+{
73967+ if (copy_from_user(trans, userp, sizeof(struct role_transition)))
73968+ return -EFAULT;
73969+
73970+ return 0;
73971+}
73972+
73973+int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
73974+{
73975+ if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
73976+ return -EFAULT;
73977+
73978+ return 0;
73979+}
73980+
73981+static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
73982+{
73983+ if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
73984+ return -EFAULT;
73985+
73986+ if (((uwrap->version != GRSECURITY_VERSION) &&
73987+ (uwrap->version != 0x2901)) ||
73988+ (uwrap->size != sizeof(struct gr_arg)))
73989+ return -EINVAL;
73990+
73991+ return 0;
73992+}
73993+
73994+static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
73995+{
73996+ if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
73997+ return -EFAULT;
73998+
73999+ return 0;
74000+}
74001+
74002+static size_t get_gr_arg_wrapper_size_normal(void)
74003+{
74004+ return sizeof(struct gr_arg_wrapper);
74005+}
74006+
74007+#ifdef CONFIG_COMPAT
74008+extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
74009+extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
74010+extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
74011+extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
74012+extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
74013+extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
74014+extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
74015+extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
74016+extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
74017+extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
74018+extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
74019+extern size_t get_gr_arg_wrapper_size_compat(void);
74020+
74021+int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
74022+int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
74023+int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
74024+int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
74025+int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
74026+int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
74027+int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
74028+int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
74029+int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
74030+int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
74031+int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
74032+size_t (* get_gr_arg_wrapper_size)(void) __read_only;
74033+
74034+#else
74035+#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
74036+#define copy_gr_arg copy_gr_arg_normal
74037+#define copy_gr_hash_struct copy_gr_hash_struct_normal
74038+#define copy_acl_object_label copy_acl_object_label_normal
74039+#define copy_acl_subject_label copy_acl_subject_label_normal
74040+#define copy_acl_role_label copy_acl_role_label_normal
74041+#define copy_acl_ip_label copy_acl_ip_label_normal
74042+#define copy_pointer_from_array copy_pointer_from_array_normal
74043+#define copy_sprole_pw copy_sprole_pw_normal
74044+#define copy_role_transition copy_role_transition_normal
74045+#define copy_role_allowed_ip copy_role_allowed_ip_normal
74046+#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
74047+#endif
74048+
74049+static struct acl_subject_label *
74050+lookup_subject_map(const struct acl_subject_label *userp)
74051+{
74052+ unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
74053+ struct subject_map *match;
74054+
74055+ match = polstate->subj_map_set.s_hash[index];
74056+
74057+ while (match && match->user != userp)
74058+ match = match->next;
74059+
74060+ if (match != NULL)
74061+ return match->kernel;
74062+ else
74063+ return NULL;
74064+}
74065+
74066+static void
74067+insert_subj_map_entry(struct subject_map *subjmap)
74068+{
74069+ unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
74070+ struct subject_map **curr;
74071+
74072+ subjmap->prev = NULL;
74073+
74074+ curr = &polstate->subj_map_set.s_hash[index];
74075+ if (*curr != NULL)
74076+ (*curr)->prev = subjmap;
74077+
74078+ subjmap->next = *curr;
74079+ *curr = subjmap;
74080+
74081+ return;
74082+}
74083+
74084+static void
74085+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
74086+{
74087+ unsigned int index =
74088+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
74089+ struct acl_role_label **curr;
74090+ struct acl_role_label *tmp, *tmp2;
74091+
74092+ curr = &polstate->acl_role_set.r_hash[index];
74093+
74094+ /* simple case, slot is empty, just set it to our role */
74095+ if (*curr == NULL) {
74096+ *curr = role;
74097+ } else {
74098+ /* example:
74099+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
74100+ 2 -> 3
74101+ */
74102+ /* first check to see if we can already be reached via this slot */
74103+ tmp = *curr;
74104+ while (tmp && tmp != role)
74105+ tmp = tmp->next;
74106+ if (tmp == role) {
74107+ /* we don't need to add ourselves to this slot's chain */
74108+ return;
74109+ }
74110+ /* we need to add ourselves to this chain, two cases */
74111+ if (role->next == NULL) {
74112+ /* simple case, append the current chain to our role */
74113+ role->next = *curr;
74114+ *curr = role;
74115+ } else {
74116+ /* 1 -> 2 -> 3 -> 4
74117+ 2 -> 3 -> 4
74118+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
74119+ */
74120+ /* trickier case: walk our role's chain until we find
74121+ the role for the start of the current slot's chain */
74122+ tmp = role;
74123+ tmp2 = *curr;
74124+ while (tmp->next && tmp->next != tmp2)
74125+ tmp = tmp->next;
74126+ if (tmp->next == tmp2) {
74127+ /* from example above, we found 3, so just
74128+ replace this slot's chain with ours */
74129+ *curr = role;
74130+ } else {
74131+ /* we didn't find a subset of our role's chain
74132+ in the current slot's chain, so append their
74133+ chain to ours, and set us as the first role in
74134+ the slot's chain
74135+
74136+ we could fold this case with the case above,
74137+ but making it explicit for clarity
74138+ */
74139+ tmp->next = tmp2;
74140+ *curr = role;
74141+ }
74142+ }
74143+ }
74144+
74145+ return;
74146+}
74147+
74148+static void
74149+insert_acl_role_label(struct acl_role_label *role)
74150+{
74151+ int i;
74152+
74153+ if (polstate->role_list == NULL) {
74154+ polstate->role_list = role;
74155+ role->prev = NULL;
74156+ } else {
74157+ role->prev = polstate->role_list;
74158+ polstate->role_list = role;
74159+ }
74160+
74161+ /* used for hash chains */
74162+ role->next = NULL;
74163+
74164+ if (role->roletype & GR_ROLE_DOMAIN) {
74165+ for (i = 0; i < role->domain_child_num; i++)
74166+ __insert_acl_role_label(role, role->domain_children[i]);
74167+ } else
74168+ __insert_acl_role_label(role, role->uidgid);
74169+}
74170+
74171+static int
74172+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
74173+{
74174+ struct name_entry **curr, *nentry;
74175+ struct inodev_entry *ientry;
74176+ unsigned int len = strlen(name);
74177+ unsigned int key = full_name_hash(name, len);
74178+ unsigned int index = key % polstate->name_set.n_size;
74179+
74180+ curr = &polstate->name_set.n_hash[index];
74181+
74182+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
74183+ curr = &((*curr)->next);
74184+
74185+ if (*curr != NULL)
74186+ return 1;
74187+
74188+ nentry = acl_alloc(sizeof (struct name_entry));
74189+ if (nentry == NULL)
74190+ return 0;
74191+ ientry = acl_alloc(sizeof (struct inodev_entry));
74192+ if (ientry == NULL)
74193+ return 0;
74194+ ientry->nentry = nentry;
74195+
74196+ nentry->key = key;
74197+ nentry->name = name;
74198+ nentry->inode = inode;
74199+ nentry->device = device;
74200+ nentry->len = len;
74201+ nentry->deleted = deleted;
74202+
74203+ nentry->prev = NULL;
74204+ curr = &polstate->name_set.n_hash[index];
74205+ if (*curr != NULL)
74206+ (*curr)->prev = nentry;
74207+ nentry->next = *curr;
74208+ *curr = nentry;
74209+
74210+ /* insert us into the table searchable by inode/dev */
74211+ __insert_inodev_entry(polstate, ientry);
74212+
74213+ return 1;
74214+}
74215+
74216+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
74217+
74218+static void *
74219+create_table(__u32 * len, int elementsize)
74220+{
74221+ unsigned int table_sizes[] = {
74222+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
74223+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
74224+ 4194301, 8388593, 16777213, 33554393, 67108859
74225+ };
74226+ void *newtable = NULL;
74227+ unsigned int pwr = 0;
74228+
74229+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
74230+ table_sizes[pwr] <= *len)
74231+ pwr++;
74232+
74233+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
74234+ return newtable;
74235+
74236+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
74237+ newtable =
74238+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
74239+ else
74240+ newtable = vmalloc(table_sizes[pwr] * elementsize);
74241+
74242+ *len = table_sizes[pwr];
74243+
74244+ return newtable;
74245+}
74246+
74247+static int
74248+init_variables(const struct gr_arg *arg, bool reload)
74249+{
74250+ struct task_struct *reaper = init_pid_ns.child_reaper;
74251+ unsigned int stacksize;
74252+
74253+ polstate->subj_map_set.s_size = arg->role_db.num_subjects;
74254+ polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
74255+ polstate->name_set.n_size = arg->role_db.num_objects;
74256+ polstate->inodev_set.i_size = arg->role_db.num_objects;
74257+
74258+ if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
74259+ !polstate->name_set.n_size || !polstate->inodev_set.i_size)
74260+ return 1;
74261+
74262+ if (!reload) {
74263+ if (!gr_init_uidset())
74264+ return 1;
74265+ }
74266+
74267+ /* set up the stack that holds allocation info */
74268+
74269+ stacksize = arg->role_db.num_pointers + 5;
74270+
74271+ if (!acl_alloc_stack_init(stacksize))
74272+ return 1;
74273+
74274+ if (!reload) {
74275+ /* grab reference for the real root dentry and vfsmount */
74276+ get_fs_root(reaper->fs, &gr_real_root);
74277+
74278+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74279+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
74280+#endif
74281+
74282+ fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74283+ if (fakefs_obj_rw == NULL)
74284+ return 1;
74285+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
74286+
74287+ fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
74288+ if (fakefs_obj_rwx == NULL)
74289+ return 1;
74290+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
74291+ }
74292+
74293+ polstate->subj_map_set.s_hash =
74294+ (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
74295+ polstate->acl_role_set.r_hash =
74296+ (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
74297+ polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
74298+ polstate->inodev_set.i_hash =
74299+ (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
74300+
74301+ if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
74302+ !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
74303+ return 1;
74304+
74305+ memset(polstate->subj_map_set.s_hash, 0,
74306+ sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
74307+ memset(polstate->acl_role_set.r_hash, 0,
74308+ sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
74309+ memset(polstate->name_set.n_hash, 0,
74310+ sizeof (struct name_entry *) * polstate->name_set.n_size);
74311+ memset(polstate->inodev_set.i_hash, 0,
74312+ sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
74313+
74314+ return 0;
74315+}
74316+
74317+/* free information not needed after startup
74318+ currently contains user->kernel pointer mappings for subjects
74319+*/
74320+
74321+static void
74322+free_init_variables(void)
74323+{
74324+ __u32 i;
74325+
74326+ if (polstate->subj_map_set.s_hash) {
74327+ for (i = 0; i < polstate->subj_map_set.s_size; i++) {
74328+ if (polstate->subj_map_set.s_hash[i]) {
74329+ kfree(polstate->subj_map_set.s_hash[i]);
74330+ polstate->subj_map_set.s_hash[i] = NULL;
74331+ }
74332+ }
74333+
74334+ if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
74335+ PAGE_SIZE)
74336+ kfree(polstate->subj_map_set.s_hash);
74337+ else
74338+ vfree(polstate->subj_map_set.s_hash);
74339+ }
74340+
74341+ return;
74342+}
74343+
74344+static void
74345+free_variables(bool reload)
74346+{
74347+ struct acl_subject_label *s;
74348+ struct acl_role_label *r;
74349+ struct task_struct *task, *task2;
74350+ unsigned int x;
74351+
74352+ if (!reload) {
74353+ gr_clear_learn_entries();
74354+
74355+ read_lock(&tasklist_lock);
74356+ do_each_thread(task2, task) {
74357+ task->acl_sp_role = 0;
74358+ task->acl_role_id = 0;
74359+ task->inherited = 0;
74360+ task->acl = NULL;
74361+ task->role = NULL;
74362+ } while_each_thread(task2, task);
74363+ read_unlock(&tasklist_lock);
74364+
74365+ kfree(fakefs_obj_rw);
74366+ fakefs_obj_rw = NULL;
74367+ kfree(fakefs_obj_rwx);
74368+ fakefs_obj_rwx = NULL;
74369+
74370+ /* release the reference to the real root dentry and vfsmount */
74371+ path_put(&gr_real_root);
74372+ memset(&gr_real_root, 0, sizeof(gr_real_root));
74373+ }
74374+
74375+ /* free all object hash tables */
74376+
74377+ FOR_EACH_ROLE_START(r)
74378+ if (r->subj_hash == NULL)
74379+ goto next_role;
74380+ FOR_EACH_SUBJECT_START(r, s, x)
74381+ if (s->obj_hash == NULL)
74382+ break;
74383+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74384+ kfree(s->obj_hash);
74385+ else
74386+ vfree(s->obj_hash);
74387+ FOR_EACH_SUBJECT_END(s, x)
74388+ FOR_EACH_NESTED_SUBJECT_START(r, s)
74389+ if (s->obj_hash == NULL)
74390+ break;
74391+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
74392+ kfree(s->obj_hash);
74393+ else
74394+ vfree(s->obj_hash);
74395+ FOR_EACH_NESTED_SUBJECT_END(s)
74396+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
74397+ kfree(r->subj_hash);
74398+ else
74399+ vfree(r->subj_hash);
74400+ r->subj_hash = NULL;
74401+next_role:
74402+ FOR_EACH_ROLE_END(r)
74403+
74404+ acl_free_all();
74405+
74406+ if (polstate->acl_role_set.r_hash) {
74407+ if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
74408+ PAGE_SIZE)
74409+ kfree(polstate->acl_role_set.r_hash);
74410+ else
74411+ vfree(polstate->acl_role_set.r_hash);
74412+ }
74413+ if (polstate->name_set.n_hash) {
74414+ if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
74415+ PAGE_SIZE)
74416+ kfree(polstate->name_set.n_hash);
74417+ else
74418+ vfree(polstate->name_set.n_hash);
74419+ }
74420+
74421+ if (polstate->inodev_set.i_hash) {
74422+ if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
74423+ PAGE_SIZE)
74424+ kfree(polstate->inodev_set.i_hash);
74425+ else
74426+ vfree(polstate->inodev_set.i_hash);
74427+ }
74428+
74429+ if (!reload)
74430+ gr_free_uidset();
74431+
74432+ memset(&polstate->name_set, 0, sizeof (struct name_db));
74433+ memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
74434+ memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
74435+ memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
74436+
74437+ polstate->default_role = NULL;
74438+ polstate->kernel_role = NULL;
74439+ polstate->role_list = NULL;
74440+
74441+ return;
74442+}
74443+
74444+static struct acl_subject_label *
74445+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
74446+
74447+static int alloc_and_copy_string(char **name, unsigned int maxlen)
74448+{
74449+ unsigned int len = strnlen_user(*name, maxlen);
74450+ char *tmp;
74451+
74452+ if (!len || len >= maxlen)
74453+ return -EINVAL;
74454+
74455+ if ((tmp = (char *) acl_alloc(len)) == NULL)
74456+ return -ENOMEM;
74457+
74458+ if (copy_from_user(tmp, *name, len))
74459+ return -EFAULT;
74460+
74461+ tmp[len-1] = '\0';
74462+ *name = tmp;
74463+
74464+ return 0;
74465+}
74466+
74467+static int
74468+copy_user_glob(struct acl_object_label *obj)
74469+{
74470+ struct acl_object_label *g_tmp, **guser;
74471+ int error;
74472+
74473+ if (obj->globbed == NULL)
74474+ return 0;
74475+
74476+ guser = &obj->globbed;
74477+ while (*guser) {
74478+ g_tmp = (struct acl_object_label *)
74479+ acl_alloc(sizeof (struct acl_object_label));
74480+ if (g_tmp == NULL)
74481+ return -ENOMEM;
74482+
74483+ if (copy_acl_object_label(g_tmp, *guser))
74484+ return -EFAULT;
74485+
74486+ error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
74487+ if (error)
74488+ return error;
74489+
74490+ *guser = g_tmp;
74491+ guser = &(g_tmp->next);
74492+ }
74493+
74494+ return 0;
74495+}
74496+
74497+static int
74498+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
74499+ struct acl_role_label *role)
74500+{
74501+ struct acl_object_label *o_tmp;
74502+ int ret;
74503+
74504+ while (userp) {
74505+ if ((o_tmp = (struct acl_object_label *)
74506+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
74507+ return -ENOMEM;
74508+
74509+ if (copy_acl_object_label(o_tmp, userp))
74510+ return -EFAULT;
74511+
74512+ userp = o_tmp->prev;
74513+
74514+ ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
74515+ if (ret)
74516+ return ret;
74517+
74518+ insert_acl_obj_label(o_tmp, subj);
74519+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
74520+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
74521+ return -ENOMEM;
74522+
74523+ ret = copy_user_glob(o_tmp);
74524+ if (ret)
74525+ return ret;
74526+
74527+ if (o_tmp->nested) {
74528+ int already_copied;
74529+
74530+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
74531+ if (IS_ERR(o_tmp->nested))
74532+ return PTR_ERR(o_tmp->nested);
74533+
74534+ /* insert into nested subject list if we haven't copied this one yet
74535+ to prevent duplicate entries */
74536+ if (!already_copied) {
74537+ o_tmp->nested->next = role->hash->first;
74538+ role->hash->first = o_tmp->nested;
74539+ }
74540+ }
74541+ }
74542+
74543+ return 0;
74544+}
74545+
74546+static __u32
74547+count_user_subjs(struct acl_subject_label *userp)
74548+{
74549+ struct acl_subject_label s_tmp;
74550+ __u32 num = 0;
74551+
74552+ while (userp) {
74553+ if (copy_acl_subject_label(&s_tmp, userp))
74554+ break;
74555+
74556+ userp = s_tmp.prev;
74557+ }
74558+
74559+ return num;
74560+}
74561+
74562+static int
74563+copy_user_allowedips(struct acl_role_label *rolep)
74564+{
74565+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
74566+
74567+ ruserip = rolep->allowed_ips;
74568+
74569+ while (ruserip) {
74570+ rlast = rtmp;
74571+
74572+ if ((rtmp = (struct role_allowed_ip *)
74573+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
74574+ return -ENOMEM;
74575+
74576+ if (copy_role_allowed_ip(rtmp, ruserip))
74577+ return -EFAULT;
74578+
74579+ ruserip = rtmp->prev;
74580+
74581+ if (!rlast) {
74582+ rtmp->prev = NULL;
74583+ rolep->allowed_ips = rtmp;
74584+ } else {
74585+ rlast->next = rtmp;
74586+ rtmp->prev = rlast;
74587+ }
74588+
74589+ if (!ruserip)
74590+ rtmp->next = NULL;
74591+ }
74592+
74593+ return 0;
74594+}
74595+
74596+static int
74597+copy_user_transitions(struct acl_role_label *rolep)
74598+{
74599+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
74600+ int error;
74601+
74602+ rusertp = rolep->transitions;
74603+
74604+ while (rusertp) {
74605+ rlast = rtmp;
74606+
74607+ if ((rtmp = (struct role_transition *)
74608+ acl_alloc(sizeof (struct role_transition))) == NULL)
74609+ return -ENOMEM;
74610+
74611+ if (copy_role_transition(rtmp, rusertp))
74612+ return -EFAULT;
74613+
74614+ rusertp = rtmp->prev;
74615+
74616+ error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
74617+ if (error)
74618+ return error;
74619+
74620+ if (!rlast) {
74621+ rtmp->prev = NULL;
74622+ rolep->transitions = rtmp;
74623+ } else {
74624+ rlast->next = rtmp;
74625+ rtmp->prev = rlast;
74626+ }
74627+
74628+ if (!rusertp)
74629+ rtmp->next = NULL;
74630+ }
74631+
74632+ return 0;
74633+}
74634+
74635+static __u32 count_user_objs(const struct acl_object_label __user *userp)
74636+{
74637+ struct acl_object_label o_tmp;
74638+ __u32 num = 0;
74639+
74640+ while (userp) {
74641+ if (copy_acl_object_label(&o_tmp, userp))
74642+ break;
74643+
74644+ userp = o_tmp.prev;
74645+ num++;
74646+ }
74647+
74648+ return num;
74649+}
74650+
74651+static struct acl_subject_label *
74652+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
74653+{
74654+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
74655+ __u32 num_objs;
74656+ struct acl_ip_label **i_tmp, *i_utmp2;
74657+ struct gr_hash_struct ghash;
74658+ struct subject_map *subjmap;
74659+ unsigned int i_num;
74660+ int err;
74661+
74662+ if (already_copied != NULL)
74663+ *already_copied = 0;
74664+
74665+ s_tmp = lookup_subject_map(userp);
74666+
74667+ /* we've already copied this subject into the kernel, just return
74668+ the reference to it, and don't copy it over again
74669+ */
74670+ if (s_tmp) {
74671+ if (already_copied != NULL)
74672+ *already_copied = 1;
74673+ return(s_tmp);
74674+ }
74675+
74676+ if ((s_tmp = (struct acl_subject_label *)
74677+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
74678+ return ERR_PTR(-ENOMEM);
74679+
74680+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
74681+ if (subjmap == NULL)
74682+ return ERR_PTR(-ENOMEM);
74683+
74684+ subjmap->user = userp;
74685+ subjmap->kernel = s_tmp;
74686+ insert_subj_map_entry(subjmap);
74687+
74688+ if (copy_acl_subject_label(s_tmp, userp))
74689+ return ERR_PTR(-EFAULT);
74690+
74691+ err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
74692+ if (err)
74693+ return ERR_PTR(err);
74694+
74695+ if (!strcmp(s_tmp->filename, "/"))
74696+ role->root_label = s_tmp;
74697+
74698+ if (copy_gr_hash_struct(&ghash, s_tmp->hash))
74699+ return ERR_PTR(-EFAULT);
74700+
74701+ /* copy user and group transition tables */
74702+
74703+ if (s_tmp->user_trans_num) {
74704+ uid_t *uidlist;
74705+
74706+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
74707+ if (uidlist == NULL)
74708+ return ERR_PTR(-ENOMEM);
74709+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
74710+ return ERR_PTR(-EFAULT);
74711+
74712+ s_tmp->user_transitions = uidlist;
74713+ }
74714+
74715+ if (s_tmp->group_trans_num) {
74716+ gid_t *gidlist;
74717+
74718+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
74719+ if (gidlist == NULL)
74720+ return ERR_PTR(-ENOMEM);
74721+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
74722+ return ERR_PTR(-EFAULT);
74723+
74724+ s_tmp->group_transitions = gidlist;
74725+ }
74726+
74727+ /* set up object hash table */
74728+ num_objs = count_user_objs(ghash.first);
74729+
74730+ s_tmp->obj_hash_size = num_objs;
74731+ s_tmp->obj_hash =
74732+ (struct acl_object_label **)
74733+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
74734+
74735+ if (!s_tmp->obj_hash)
74736+ return ERR_PTR(-ENOMEM);
74737+
74738+ memset(s_tmp->obj_hash, 0,
74739+ s_tmp->obj_hash_size *
74740+ sizeof (struct acl_object_label *));
74741+
74742+ /* add in objects */
74743+ err = copy_user_objs(ghash.first, s_tmp, role);
74744+
74745+ if (err)
74746+ return ERR_PTR(err);
74747+
74748+ /* set pointer for parent subject */
74749+ if (s_tmp->parent_subject) {
74750+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
74751+
74752+ if (IS_ERR(s_tmp2))
74753+ return s_tmp2;
74754+
74755+ s_tmp->parent_subject = s_tmp2;
74756+ }
74757+
74758+ /* add in ip acls */
74759+
74760+ if (!s_tmp->ip_num) {
74761+ s_tmp->ips = NULL;
74762+ goto insert;
74763+ }
74764+
74765+ i_tmp =
74766+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
74767+ sizeof (struct acl_ip_label *));
74768+
74769+ if (!i_tmp)
74770+ return ERR_PTR(-ENOMEM);
74771+
74772+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
74773+ *(i_tmp + i_num) =
74774+ (struct acl_ip_label *)
74775+ acl_alloc(sizeof (struct acl_ip_label));
74776+ if (!*(i_tmp + i_num))
74777+ return ERR_PTR(-ENOMEM);
74778+
74779+ if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
74780+ return ERR_PTR(-EFAULT);
74781+
74782+ if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
74783+ return ERR_PTR(-EFAULT);
74784+
74785+ if ((*(i_tmp + i_num))->iface == NULL)
74786+ continue;
74787+
74788+ err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
74789+ if (err)
74790+ return ERR_PTR(err);
74791+ }
74792+
74793+ s_tmp->ips = i_tmp;
74794+
74795+insert:
74796+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
74797+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
74798+ return ERR_PTR(-ENOMEM);
74799+
74800+ return s_tmp;
74801+}
74802+
74803+static int
74804+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
74805+{
74806+ struct acl_subject_label s_pre;
74807+ struct acl_subject_label * ret;
74808+ int err;
74809+
74810+ while (userp) {
74811+ if (copy_acl_subject_label(&s_pre, userp))
74812+ return -EFAULT;
74813+
74814+ ret = do_copy_user_subj(userp, role, NULL);
74815+
74816+ err = PTR_ERR(ret);
74817+ if (IS_ERR(ret))
74818+ return err;
74819+
74820+ insert_acl_subj_label(ret, role);
74821+
74822+ userp = s_pre.prev;
74823+ }
74824+
74825+ return 0;
74826+}
74827+
74828+static int
74829+copy_user_acl(struct gr_arg *arg)
74830+{
74831+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
74832+ struct acl_subject_label *subj_list;
74833+ struct sprole_pw *sptmp;
74834+ struct gr_hash_struct *ghash;
74835+ uid_t *domainlist;
74836+ unsigned int r_num;
74837+ int err = 0;
74838+ __u16 i;
74839+ __u32 num_subjs;
74840+
74841+ /* we need a default and kernel role */
74842+ if (arg->role_db.num_roles < 2)
74843+ return -EINVAL;
74844+
74845+ /* copy special role authentication info from userspace */
74846+
74847+ polstate->num_sprole_pws = arg->num_sprole_pws;
74848+ polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
74849+
74850+ if (!polstate->acl_special_roles && polstate->num_sprole_pws)
74851+ return -ENOMEM;
74852+
74853+ for (i = 0; i < polstate->num_sprole_pws; i++) {
74854+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
74855+ if (!sptmp)
74856+ return -ENOMEM;
74857+ if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
74858+ return -EFAULT;
74859+
74860+ err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
74861+ if (err)
74862+ return err;
74863+
74864+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
74865+ printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
74866+#endif
74867+
74868+ polstate->acl_special_roles[i] = sptmp;
74869+ }
74870+
74871+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
74872+
74873+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
74874+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
74875+
74876+ if (!r_tmp)
74877+ return -ENOMEM;
74878+
74879+ if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
74880+ return -EFAULT;
74881+
74882+ if (copy_acl_role_label(r_tmp, r_utmp2))
74883+ return -EFAULT;
74884+
74885+ err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
74886+ if (err)
74887+ return err;
74888+
74889+ if (!strcmp(r_tmp->rolename, "default")
74890+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
74891+ polstate->default_role = r_tmp;
74892+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
74893+ polstate->kernel_role = r_tmp;
74894+ }
74895+
74896+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
74897+ return -ENOMEM;
74898+
74899+ if (copy_gr_hash_struct(ghash, r_tmp->hash))
74900+ return -EFAULT;
74901+
74902+ r_tmp->hash = ghash;
74903+
74904+ num_subjs = count_user_subjs(r_tmp->hash->first);
74905+
74906+ r_tmp->subj_hash_size = num_subjs;
74907+ r_tmp->subj_hash =
74908+ (struct acl_subject_label **)
74909+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
74910+
74911+ if (!r_tmp->subj_hash)
74912+ return -ENOMEM;
74913+
74914+ err = copy_user_allowedips(r_tmp);
74915+ if (err)
74916+ return err;
74917+
74918+ /* copy domain info */
74919+ if (r_tmp->domain_children != NULL) {
74920+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
74921+ if (domainlist == NULL)
74922+ return -ENOMEM;
74923+
74924+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
74925+ return -EFAULT;
74926+
74927+ r_tmp->domain_children = domainlist;
74928+ }
74929+
74930+ err = copy_user_transitions(r_tmp);
74931+ if (err)
74932+ return err;
74933+
74934+ memset(r_tmp->subj_hash, 0,
74935+ r_tmp->subj_hash_size *
74936+ sizeof (struct acl_subject_label *));
74937+
74938+ /* acquire the list of subjects, then NULL out
74939+ the list prior to parsing the subjects for this role,
74940+ as during this parsing the list is replaced with a list
74941+ of *nested* subjects for the role
74942+ */
74943+ subj_list = r_tmp->hash->first;
74944+
74945+ /* set nested subject list to null */
74946+ r_tmp->hash->first = NULL;
74947+
74948+ err = copy_user_subjs(subj_list, r_tmp);
74949+
74950+ if (err)
74951+ return err;
74952+
74953+ insert_acl_role_label(r_tmp);
74954+ }
74955+
74956+ if (polstate->default_role == NULL || polstate->kernel_role == NULL)
74957+ return -EINVAL;
74958+
74959+ return err;
74960+}
74961+
74962+static int gracl_reload_apply_policies(void *reload)
74963+{
74964+ struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
74965+ struct task_struct *task, *task2;
74966+ struct acl_role_label *role, *rtmp;
74967+ struct acl_subject_label *subj;
74968+ const struct cred *cred;
74969+ int role_applied;
74970+ int ret = 0;
74971+
74972+ memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
74973+ memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
74974+
74975+ /* first make sure we'll be able to apply the new policy cleanly */
74976+ do_each_thread(task2, task) {
74977+ if (task->exec_file == NULL)
74978+ continue;
74979+ role_applied = 0;
74980+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
74981+ /* preserve special roles */
74982+ FOR_EACH_ROLE_START(role)
74983+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
74984+ rtmp = task->role;
74985+ task->role = role;
74986+ role_applied = 1;
74987+ break;
74988+ }
74989+ FOR_EACH_ROLE_END(role)
74990+ }
74991+ if (!role_applied) {
74992+ cred = __task_cred(task);
74993+ rtmp = task->role;
74994+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
74995+ }
74996+ /* this handles non-nested inherited subjects, nested subjects will still
74997+ be dropped currently */
74998+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
74999+ task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
75000+ /* change the role back so that we've made no modifications to the policy */
75001+ task->role = rtmp;
75002+
75003+ if (subj == NULL || task->tmpacl == NULL) {
75004+ ret = -EINVAL;
75005+ goto out;
75006+ }
75007+ } while_each_thread(task2, task);
75008+
75009+ /* now actually apply the policy */
75010+
75011+ do_each_thread(task2, task) {
75012+ if (task->exec_file) {
75013+ role_applied = 0;
75014+ if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
75015+ /* preserve special roles */
75016+ FOR_EACH_ROLE_START(role)
75017+ if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
75018+ task->role = role;
75019+ role_applied = 1;
75020+ break;
75021+ }
75022+ FOR_EACH_ROLE_END(role)
75023+ }
75024+ if (!role_applied) {
75025+ cred = __task_cred(task);
75026+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75027+ }
75028+ /* this handles non-nested inherited subjects, nested subjects will still
75029+ be dropped currently */
75030+ if (!reload_state->oldmode && task->inherited)
75031+ subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
75032+ else {
75033+ /* looked up and tagged to the task previously */
75034+ subj = task->tmpacl;
75035+ }
75036+ /* subj will be non-null */
75037+ __gr_apply_subject_to_task(polstate, task, subj);
75038+ if (reload_state->oldmode) {
75039+ task->acl_role_id = 0;
75040+ task->acl_sp_role = 0;
75041+ task->inherited = 0;
75042+ }
75043+ } else {
75044+ // it's a kernel process
75045+ task->role = polstate->kernel_role;
75046+ task->acl = polstate->kernel_role->root_label;
75047+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75048+ task->acl->mode &= ~GR_PROCFIND;
75049+#endif
75050+ }
75051+ } while_each_thread(task2, task);
75052+
75053+ memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
75054+ memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
75055+
75056+out:
75057+
75058+ return ret;
75059+}
75060+
75061+static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
75062+{
75063+ struct gr_reload_state new_reload_state = { };
75064+ int err;
75065+
75066+ new_reload_state.oldpolicy_ptr = polstate;
75067+ new_reload_state.oldalloc_ptr = current_alloc_state;
75068+ new_reload_state.oldmode = oldmode;
75069+
75070+ current_alloc_state = &new_reload_state.newalloc;
75071+ polstate = &new_reload_state.newpolicy;
75072+
75073+ /* everything relevant is now saved off, copy in the new policy */
75074+ if (init_variables(args, true)) {
75075+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75076+ err = -ENOMEM;
75077+ goto error;
75078+ }
75079+
75080+ err = copy_user_acl(args);
75081+ free_init_variables();
75082+ if (err)
75083+ goto error;
75084+ /* the new policy is copied in, with the old policy available via saved_state
75085+ first go through applying roles, making sure to preserve special roles
75086+ then apply new subjects, making sure to preserve inherited and nested subjects,
75087+ though currently only inherited subjects will be preserved
75088+ */
75089+ err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
75090+ if (err)
75091+ goto error;
75092+
75093+ /* we've now applied the new policy, so restore the old policy state to free it */
75094+ polstate = &new_reload_state.oldpolicy;
75095+ current_alloc_state = &new_reload_state.oldalloc;
75096+ free_variables(true);
75097+
75098+ /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
75099+ to running_polstate/current_alloc_state inside stop_machine
75100+ */
75101+ err = 0;
75102+ goto out;
75103+error:
75104+ /* on error of loading the new policy, we'll just keep the previous
75105+ policy set around
75106+ */
75107+ free_variables(true);
75108+
75109+ /* doesn't affect runtime, but maintains consistent state */
75110+out:
75111+ polstate = new_reload_state.oldpolicy_ptr;
75112+ current_alloc_state = new_reload_state.oldalloc_ptr;
75113+
75114+ return err;
75115+}
75116+
75117+static int
75118+gracl_init(struct gr_arg *args)
75119+{
75120+ int error = 0;
75121+
75122+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
75123+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
75124+
75125+ if (init_variables(args, false)) {
75126+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
75127+ error = -ENOMEM;
75128+ goto out;
75129+ }
75130+
75131+ error = copy_user_acl(args);
75132+ free_init_variables();
75133+ if (error)
75134+ goto out;
75135+
75136+ error = gr_set_acls(0);
75137+ if (error)
75138+ goto out;
75139+
75140+ gr_enable_rbac_system();
75141+
75142+ return 0;
75143+
75144+out:
75145+ free_variables(false);
75146+ return error;
75147+}
75148+
75149+static int
75150+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
75151+ unsigned char **sum)
75152+{
75153+ struct acl_role_label *r;
75154+ struct role_allowed_ip *ipp;
75155+ struct role_transition *trans;
75156+ unsigned int i;
75157+ int found = 0;
75158+ u32 curr_ip = current->signal->curr_ip;
75159+
75160+ current->signal->saved_ip = curr_ip;
75161+
75162+ /* check transition table */
75163+
75164+ for (trans = current->role->transitions; trans; trans = trans->next) {
75165+ if (!strcmp(rolename, trans->rolename)) {
75166+ found = 1;
75167+ break;
75168+ }
75169+ }
75170+
75171+ if (!found)
75172+ return 0;
75173+
75174+ /* handle special roles that do not require authentication
75175+ and check ip */
75176+
75177+ FOR_EACH_ROLE_START(r)
75178+ if (!strcmp(rolename, r->rolename) &&
75179+ (r->roletype & GR_ROLE_SPECIAL)) {
75180+ found = 0;
75181+ if (r->allowed_ips != NULL) {
75182+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
75183+ if ((ntohl(curr_ip) & ipp->netmask) ==
75184+ (ntohl(ipp->addr) & ipp->netmask))
75185+ found = 1;
75186+ }
75187+ } else
75188+ found = 2;
75189+ if (!found)
75190+ return 0;
75191+
75192+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
75193+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
75194+ *salt = NULL;
75195+ *sum = NULL;
75196+ return 1;
75197+ }
75198+ }
75199+ FOR_EACH_ROLE_END(r)
75200+
75201+ for (i = 0; i < polstate->num_sprole_pws; i++) {
75202+ if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
75203+ *salt = polstate->acl_special_roles[i]->salt;
75204+ *sum = polstate->acl_special_roles[i]->sum;
75205+ return 1;
75206+ }
75207+ }
75208+
75209+ return 0;
75210+}
75211+
75212+int gr_check_secure_terminal(struct task_struct *task)
75213+{
75214+ struct task_struct *p, *p2, *p3;
75215+ struct files_struct *files;
75216+ struct fdtable *fdt;
75217+ struct file *our_file = NULL, *file;
75218+ int i;
75219+
75220+ if (task->signal->tty == NULL)
75221+ return 1;
75222+
75223+ files = get_files_struct(task);
75224+ if (files != NULL) {
75225+ rcu_read_lock();
75226+ fdt = files_fdtable(files);
75227+ for (i=0; i < fdt->max_fds; i++) {
75228+ file = fcheck_files(files, i);
75229+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
75230+ get_file(file);
75231+ our_file = file;
75232+ }
75233+ }
75234+ rcu_read_unlock();
75235+ put_files_struct(files);
75236+ }
75237+
75238+ if (our_file == NULL)
75239+ return 1;
75240+
75241+ read_lock(&tasklist_lock);
75242+ do_each_thread(p2, p) {
75243+ files = get_files_struct(p);
75244+ if (files == NULL ||
75245+ (p->signal && p->signal->tty == task->signal->tty)) {
75246+ if (files != NULL)
75247+ put_files_struct(files);
75248+ continue;
75249+ }
75250+ rcu_read_lock();
75251+ fdt = files_fdtable(files);
75252+ for (i=0; i < fdt->max_fds; i++) {
75253+ file = fcheck_files(files, i);
75254+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
75255+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
75256+ p3 = task;
75257+ while (task_pid_nr(p3) > 0) {
75258+ if (p3 == p)
75259+ break;
75260+ p3 = p3->real_parent;
75261+ }
75262+ if (p3 == p)
75263+ break;
75264+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
75265+ gr_handle_alertkill(p);
75266+ rcu_read_unlock();
75267+ put_files_struct(files);
75268+ read_unlock(&tasklist_lock);
75269+ fput(our_file);
75270+ return 0;
75271+ }
75272+ }
75273+ rcu_read_unlock();
75274+ put_files_struct(files);
75275+ } while_each_thread(p2, p);
75276+ read_unlock(&tasklist_lock);
75277+
75278+ fput(our_file);
75279+ return 1;
75280+}
75281+
75282+ssize_t
75283+write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
75284+{
75285+ struct gr_arg_wrapper uwrap;
75286+ unsigned char *sprole_salt = NULL;
75287+ unsigned char *sprole_sum = NULL;
75288+ int error = 0;
75289+ int error2 = 0;
75290+ size_t req_count = 0;
75291+ unsigned char oldmode = 0;
75292+
75293+ mutex_lock(&gr_dev_mutex);
75294+
75295+ if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
75296+ error = -EPERM;
75297+ goto out;
75298+ }
75299+
75300+#ifdef CONFIG_COMPAT
75301+ pax_open_kernel();
75302+ if (is_compat_task()) {
75303+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
75304+ copy_gr_arg = &copy_gr_arg_compat;
75305+ copy_acl_object_label = &copy_acl_object_label_compat;
75306+ copy_acl_subject_label = &copy_acl_subject_label_compat;
75307+ copy_acl_role_label = &copy_acl_role_label_compat;
75308+ copy_acl_ip_label = &copy_acl_ip_label_compat;
75309+ copy_role_allowed_ip = &copy_role_allowed_ip_compat;
75310+ copy_role_transition = &copy_role_transition_compat;
75311+ copy_sprole_pw = &copy_sprole_pw_compat;
75312+ copy_gr_hash_struct = &copy_gr_hash_struct_compat;
75313+ copy_pointer_from_array = &copy_pointer_from_array_compat;
75314+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
75315+ } else {
75316+ copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
75317+ copy_gr_arg = &copy_gr_arg_normal;
75318+ copy_acl_object_label = &copy_acl_object_label_normal;
75319+ copy_acl_subject_label = &copy_acl_subject_label_normal;
75320+ copy_acl_role_label = &copy_acl_role_label_normal;
75321+ copy_acl_ip_label = &copy_acl_ip_label_normal;
75322+ copy_role_allowed_ip = &copy_role_allowed_ip_normal;
75323+ copy_role_transition = &copy_role_transition_normal;
75324+ copy_sprole_pw = &copy_sprole_pw_normal;
75325+ copy_gr_hash_struct = &copy_gr_hash_struct_normal;
75326+ copy_pointer_from_array = &copy_pointer_from_array_normal;
75327+ get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
75328+ }
75329+ pax_close_kernel();
75330+#endif
75331+
75332+ req_count = get_gr_arg_wrapper_size();
75333+
75334+ if (count != req_count) {
75335+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
75336+ error = -EINVAL;
75337+ goto out;
75338+ }
75339+
75340+
75341+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
75342+ gr_auth_expires = 0;
75343+ gr_auth_attempts = 0;
75344+ }
75345+
75346+ error = copy_gr_arg_wrapper(buf, &uwrap);
75347+ if (error)
75348+ goto out;
75349+
75350+ error = copy_gr_arg(uwrap.arg, gr_usermode);
75351+ if (error)
75352+ goto out;
75353+
75354+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75355+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75356+ time_after(gr_auth_expires, get_seconds())) {
75357+ error = -EBUSY;
75358+ goto out;
75359+ }
75360+
75361+ /* if non-root trying to do anything other than use a special role,
75362+ do not attempt authentication, do not count towards authentication
75363+ locking
75364+ */
75365+
75366+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
75367+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
75368+ gr_is_global_nonroot(current_uid())) {
75369+ error = -EPERM;
75370+ goto out;
75371+ }
75372+
75373+ /* ensure pw and special role name are null terminated */
75374+
75375+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
75376+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
75377+
75378+ /* Okay.
75379+ * We have our enough of the argument structure..(we have yet
75380+ * to copy_from_user the tables themselves) . Copy the tables
75381+ * only if we need them, i.e. for loading operations. */
75382+
75383+ switch (gr_usermode->mode) {
75384+ case GR_STATUS:
75385+ if (gr_acl_is_enabled()) {
75386+ error = 1;
75387+ if (!gr_check_secure_terminal(current))
75388+ error = 3;
75389+ } else
75390+ error = 2;
75391+ goto out;
75392+ case GR_SHUTDOWN:
75393+ if (gr_acl_is_enabled() && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75394+ stop_machine(gr_rbac_disable, NULL, NULL);
75395+ free_variables(false);
75396+ memset(gr_usermode, 0, sizeof(struct gr_arg));
75397+ memset(gr_system_salt, 0, GR_SALT_LEN);
75398+ memset(gr_system_sum, 0, GR_SHA_LEN);
75399+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
75400+ } else if (gr_acl_is_enabled()) {
75401+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
75402+ error = -EPERM;
75403+ } else {
75404+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
75405+ error = -EAGAIN;
75406+ }
75407+ break;
75408+ case GR_ENABLE:
75409+ if (!gr_acl_is_enabled() && !(error2 = gracl_init(gr_usermode)))
75410+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
75411+ else {
75412+ if (gr_acl_is_enabled())
75413+ error = -EAGAIN;
75414+ else
75415+ error = error2;
75416+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
75417+ }
75418+ break;
75419+ case GR_OLDRELOAD:
75420+ oldmode = 1;
75421+ case GR_RELOAD:
75422+ if (!gr_acl_is_enabled()) {
75423+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
75424+ error = -EAGAIN;
75425+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75426+ error2 = gracl_reload(gr_usermode, oldmode);
75427+ if (!error2)
75428+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
75429+ else {
75430+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75431+ error = error2;
75432+ }
75433+ } else {
75434+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
75435+ error = -EPERM;
75436+ }
75437+ break;
75438+ case GR_SEGVMOD:
75439+ if (unlikely(!gr_acl_is_enabled())) {
75440+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
75441+ error = -EAGAIN;
75442+ break;
75443+ }
75444+
75445+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
75446+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
75447+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
75448+ struct acl_subject_label *segvacl;
75449+ segvacl =
75450+ lookup_acl_subj_label(gr_usermode->segv_inode,
75451+ gr_usermode->segv_device,
75452+ current->role);
75453+ if (segvacl) {
75454+ segvacl->crashes = 0;
75455+ segvacl->expires = 0;
75456+ }
75457+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
75458+ gr_remove_uid(gr_usermode->segv_uid);
75459+ }
75460+ } else {
75461+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
75462+ error = -EPERM;
75463+ }
75464+ break;
75465+ case GR_SPROLE:
75466+ case GR_SPROLEPAM:
75467+ if (unlikely(!gr_acl_is_enabled())) {
75468+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
75469+ error = -EAGAIN;
75470+ break;
75471+ }
75472+
75473+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
75474+ current->role->expires = 0;
75475+ current->role->auth_attempts = 0;
75476+ }
75477+
75478+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
75479+ time_after(current->role->expires, get_seconds())) {
75480+ error = -EBUSY;
75481+ goto out;
75482+ }
75483+
75484+ if (lookup_special_role_auth
75485+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
75486+ && ((!sprole_salt && !sprole_sum)
75487+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
75488+ char *p = "";
75489+ assign_special_role(gr_usermode->sp_role);
75490+ read_lock(&tasklist_lock);
75491+ if (current->real_parent)
75492+ p = current->real_parent->role->rolename;
75493+ read_unlock(&tasklist_lock);
75494+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
75495+ p, acl_sp_role_value);
75496+ } else {
75497+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
75498+ error = -EPERM;
75499+ if(!(current->role->auth_attempts++))
75500+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75501+
75502+ goto out;
75503+ }
75504+ break;
75505+ case GR_UNSPROLE:
75506+ if (unlikely(!gr_acl_is_enabled())) {
75507+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
75508+ error = -EAGAIN;
75509+ break;
75510+ }
75511+
75512+ if (current->role->roletype & GR_ROLE_SPECIAL) {
75513+ char *p = "";
75514+ int i = 0;
75515+
75516+ read_lock(&tasklist_lock);
75517+ if (current->real_parent) {
75518+ p = current->real_parent->role->rolename;
75519+ i = current->real_parent->acl_role_id;
75520+ }
75521+ read_unlock(&tasklist_lock);
75522+
75523+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
75524+ gr_set_acls(1);
75525+ } else {
75526+ error = -EPERM;
75527+ goto out;
75528+ }
75529+ break;
75530+ default:
75531+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
75532+ error = -EINVAL;
75533+ break;
75534+ }
75535+
75536+ if (error != -EPERM)
75537+ goto out;
75538+
75539+ if(!(gr_auth_attempts++))
75540+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
75541+
75542+ out:
75543+ mutex_unlock(&gr_dev_mutex);
75544+
75545+ if (!error)
75546+ error = req_count;
75547+
75548+ return error;
75549+}
75550+
75551+int
75552+gr_set_acls(const int type)
75553+{
75554+ struct task_struct *task, *task2;
75555+ struct acl_role_label *role = current->role;
75556+ struct acl_subject_label *subj;
75557+ __u16 acl_role_id = current->acl_role_id;
75558+ const struct cred *cred;
75559+ int ret;
75560+
75561+ rcu_read_lock();
75562+ read_lock(&tasklist_lock);
75563+ read_lock(&grsec_exec_file_lock);
75564+ do_each_thread(task2, task) {
75565+ /* check to see if we're called from the exit handler,
75566+ if so, only replace ACLs that have inherited the admin
75567+ ACL */
75568+
75569+ if (type && (task->role != role ||
75570+ task->acl_role_id != acl_role_id))
75571+ continue;
75572+
75573+ task->acl_role_id = 0;
75574+ task->acl_sp_role = 0;
75575+ task->inherited = 0;
75576+
75577+ if (task->exec_file) {
75578+ cred = __task_cred(task);
75579+ task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
75580+ subj = __gr_get_subject_for_task(polstate, task, NULL);
75581+ if (subj == NULL) {
75582+ ret = -EINVAL;
75583+ read_unlock(&grsec_exec_file_lock);
75584+ read_unlock(&tasklist_lock);
75585+ rcu_read_unlock();
75586+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
75587+ return ret;
75588+ }
75589+ __gr_apply_subject_to_task(polstate, task, subj);
75590+ } else {
75591+ // it's a kernel process
75592+ task->role = polstate->kernel_role;
75593+ task->acl = polstate->kernel_role->root_label;
75594+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
75595+ task->acl->mode &= ~GR_PROCFIND;
75596+#endif
75597+ }
75598+ } while_each_thread(task2, task);
75599+ read_unlock(&grsec_exec_file_lock);
75600+ read_unlock(&tasklist_lock);
75601+ rcu_read_unlock();
75602+
75603+ return 0;
75604+}
75605diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
75606new file mode 100644
75607index 0000000..39645c9
75608--- /dev/null
75609+++ b/grsecurity/gracl_res.c
75610@@ -0,0 +1,68 @@
75611+#include <linux/kernel.h>
75612+#include <linux/sched.h>
75613+#include <linux/gracl.h>
75614+#include <linux/grinternal.h>
75615+
75616+static const char *restab_log[] = {
75617+ [RLIMIT_CPU] = "RLIMIT_CPU",
75618+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
75619+ [RLIMIT_DATA] = "RLIMIT_DATA",
75620+ [RLIMIT_STACK] = "RLIMIT_STACK",
75621+ [RLIMIT_CORE] = "RLIMIT_CORE",
75622+ [RLIMIT_RSS] = "RLIMIT_RSS",
75623+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
75624+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
75625+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
75626+ [RLIMIT_AS] = "RLIMIT_AS",
75627+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
75628+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
75629+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
75630+ [RLIMIT_NICE] = "RLIMIT_NICE",
75631+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
75632+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
75633+ [GR_CRASH_RES] = "RLIMIT_CRASH"
75634+};
75635+
75636+void
75637+gr_log_resource(const struct task_struct *task,
75638+ const int res, const unsigned long wanted, const int gt)
75639+{
75640+ const struct cred *cred;
75641+ unsigned long rlim;
75642+
75643+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
75644+ return;
75645+
75646+ // not yet supported resource
75647+ if (unlikely(!restab_log[res]))
75648+ return;
75649+
75650+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
75651+ rlim = task_rlimit_max(task, res);
75652+ else
75653+ rlim = task_rlimit(task, res);
75654+
75655+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
75656+ return;
75657+
75658+ rcu_read_lock();
75659+ cred = __task_cred(task);
75660+
75661+ if (res == RLIMIT_NPROC &&
75662+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
75663+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
75664+ goto out_rcu_unlock;
75665+ else if (res == RLIMIT_MEMLOCK &&
75666+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
75667+ goto out_rcu_unlock;
75668+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
75669+ goto out_rcu_unlock;
75670+ rcu_read_unlock();
75671+
75672+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
75673+
75674+ return;
75675+out_rcu_unlock:
75676+ rcu_read_unlock();
75677+ return;
75678+}
75679diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
75680new file mode 100644
75681index 0000000..2040e61
75682--- /dev/null
75683+++ b/grsecurity/gracl_segv.c
75684@@ -0,0 +1,313 @@
75685+#include <linux/kernel.h>
75686+#include <linux/mm.h>
75687+#include <asm/uaccess.h>
75688+#include <asm/errno.h>
75689+#include <asm/mman.h>
75690+#include <net/sock.h>
75691+#include <linux/file.h>
75692+#include <linux/fs.h>
75693+#include <linux/net.h>
75694+#include <linux/in.h>
75695+#include <linux/slab.h>
75696+#include <linux/types.h>
75697+#include <linux/sched.h>
75698+#include <linux/timer.h>
75699+#include <linux/gracl.h>
75700+#include <linux/grsecurity.h>
75701+#include <linux/grinternal.h>
75702+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75703+#include <linux/magic.h>
75704+#include <linux/pagemap.h>
75705+#include "../fs/btrfs/async-thread.h"
75706+#include "../fs/btrfs/ctree.h"
75707+#include "../fs/btrfs/btrfs_inode.h"
75708+#endif
75709+
75710+static struct crash_uid *uid_set;
75711+static unsigned short uid_used;
75712+static DEFINE_SPINLOCK(gr_uid_lock);
75713+extern rwlock_t gr_inode_lock;
75714+extern struct acl_subject_label *
75715+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
75716+ struct acl_role_label *role);
75717+
75718+static inline dev_t __get_dev(const struct dentry *dentry)
75719+{
75720+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
75721+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
75722+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
75723+ else
75724+#endif
75725+ return dentry->d_sb->s_dev;
75726+}
75727+
75728+int
75729+gr_init_uidset(void)
75730+{
75731+ uid_set =
75732+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
75733+ uid_used = 0;
75734+
75735+ return uid_set ? 1 : 0;
75736+}
75737+
75738+void
75739+gr_free_uidset(void)
75740+{
75741+ if (uid_set) {
75742+ struct crash_uid *tmpset;
75743+ spin_lock(&gr_uid_lock);
75744+ tmpset = uid_set;
75745+ uid_set = NULL;
75746+ uid_used = 0;
75747+ spin_unlock(&gr_uid_lock);
75748+ if (tmpset)
75749+ kfree(tmpset);
75750+ }
75751+
75752+ return;
75753+}
75754+
75755+int
75756+gr_find_uid(const uid_t uid)
75757+{
75758+ struct crash_uid *tmp = uid_set;
75759+ uid_t buid;
75760+ int low = 0, high = uid_used - 1, mid;
75761+
75762+ while (high >= low) {
75763+ mid = (low + high) >> 1;
75764+ buid = tmp[mid].uid;
75765+ if (buid == uid)
75766+ return mid;
75767+ if (buid > uid)
75768+ high = mid - 1;
75769+ if (buid < uid)
75770+ low = mid + 1;
75771+ }
75772+
75773+ return -1;
75774+}
75775+
75776+static __inline__ void
75777+gr_insertsort(void)
75778+{
75779+ unsigned short i, j;
75780+ struct crash_uid index;
75781+
75782+ for (i = 1; i < uid_used; i++) {
75783+ index = uid_set[i];
75784+ j = i;
75785+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
75786+ uid_set[j] = uid_set[j - 1];
75787+ j--;
75788+ }
75789+ uid_set[j] = index;
75790+ }
75791+
75792+ return;
75793+}
75794+
75795+static __inline__ void
75796+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
75797+{
75798+ int loc;
75799+ uid_t uid = GR_GLOBAL_UID(kuid);
75800+
75801+ if (uid_used == GR_UIDTABLE_MAX)
75802+ return;
75803+
75804+ loc = gr_find_uid(uid);
75805+
75806+ if (loc >= 0) {
75807+ uid_set[loc].expires = expires;
75808+ return;
75809+ }
75810+
75811+ uid_set[uid_used].uid = uid;
75812+ uid_set[uid_used].expires = expires;
75813+ uid_used++;
75814+
75815+ gr_insertsort();
75816+
75817+ return;
75818+}
75819+
75820+void
75821+gr_remove_uid(const unsigned short loc)
75822+{
75823+ unsigned short i;
75824+
75825+ for (i = loc + 1; i < uid_used; i++)
75826+ uid_set[i - 1] = uid_set[i];
75827+
75828+ uid_used--;
75829+
75830+ return;
75831+}
75832+
75833+int
75834+gr_check_crash_uid(const kuid_t kuid)
75835+{
75836+ int loc;
75837+ int ret = 0;
75838+ uid_t uid;
75839+
75840+ if (unlikely(!gr_acl_is_enabled()))
75841+ return 0;
75842+
75843+ uid = GR_GLOBAL_UID(kuid);
75844+
75845+ spin_lock(&gr_uid_lock);
75846+ loc = gr_find_uid(uid);
75847+
75848+ if (loc < 0)
75849+ goto out_unlock;
75850+
75851+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
75852+ gr_remove_uid(loc);
75853+ else
75854+ ret = 1;
75855+
75856+out_unlock:
75857+ spin_unlock(&gr_uid_lock);
75858+ return ret;
75859+}
75860+
75861+static __inline__ int
75862+proc_is_setxid(const struct cred *cred)
75863+{
75864+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
75865+ !uid_eq(cred->uid, cred->fsuid))
75866+ return 1;
75867+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
75868+ !gid_eq(cred->gid, cred->fsgid))
75869+ return 1;
75870+
75871+ return 0;
75872+}
75873+
75874+extern int gr_fake_force_sig(int sig, struct task_struct *t);
75875+
75876+void
75877+gr_handle_crash(struct task_struct *task, const int sig)
75878+{
75879+ struct acl_subject_label *curr;
75880+ struct task_struct *tsk, *tsk2;
75881+ const struct cred *cred;
75882+ const struct cred *cred2;
75883+
75884+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
75885+ return;
75886+
75887+ if (unlikely(!gr_acl_is_enabled()))
75888+ return;
75889+
75890+ curr = task->acl;
75891+
75892+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
75893+ return;
75894+
75895+ if (time_before_eq(curr->expires, get_seconds())) {
75896+ curr->expires = 0;
75897+ curr->crashes = 0;
75898+ }
75899+
75900+ curr->crashes++;
75901+
75902+ if (!curr->expires)
75903+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
75904+
75905+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75906+ time_after(curr->expires, get_seconds())) {
75907+ rcu_read_lock();
75908+ cred = __task_cred(task);
75909+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
75910+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75911+ spin_lock(&gr_uid_lock);
75912+ gr_insert_uid(cred->uid, curr->expires);
75913+ spin_unlock(&gr_uid_lock);
75914+ curr->expires = 0;
75915+ curr->crashes = 0;
75916+ read_lock(&tasklist_lock);
75917+ do_each_thread(tsk2, tsk) {
75918+ cred2 = __task_cred(tsk);
75919+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
75920+ gr_fake_force_sig(SIGKILL, tsk);
75921+ } while_each_thread(tsk2, tsk);
75922+ read_unlock(&tasklist_lock);
75923+ } else {
75924+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
75925+ read_lock(&tasklist_lock);
75926+ read_lock(&grsec_exec_file_lock);
75927+ do_each_thread(tsk2, tsk) {
75928+ if (likely(tsk != task)) {
75929+ // if this thread has the same subject as the one that triggered
75930+ // RES_CRASH and it's the same binary, kill it
75931+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
75932+ gr_fake_force_sig(SIGKILL, tsk);
75933+ }
75934+ } while_each_thread(tsk2, tsk);
75935+ read_unlock(&grsec_exec_file_lock);
75936+ read_unlock(&tasklist_lock);
75937+ }
75938+ rcu_read_unlock();
75939+ }
75940+
75941+ return;
75942+}
75943+
75944+int
75945+gr_check_crash_exec(const struct file *filp)
75946+{
75947+ struct acl_subject_label *curr;
75948+
75949+ if (unlikely(!gr_acl_is_enabled()))
75950+ return 0;
75951+
75952+ read_lock(&gr_inode_lock);
75953+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
75954+ __get_dev(filp->f_path.dentry),
75955+ current->role);
75956+ read_unlock(&gr_inode_lock);
75957+
75958+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
75959+ (!curr->crashes && !curr->expires))
75960+ return 0;
75961+
75962+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
75963+ time_after(curr->expires, get_seconds()))
75964+ return 1;
75965+ else if (time_before_eq(curr->expires, get_seconds())) {
75966+ curr->crashes = 0;
75967+ curr->expires = 0;
75968+ }
75969+
75970+ return 0;
75971+}
75972+
75973+void
75974+gr_handle_alertkill(struct task_struct *task)
75975+{
75976+ struct acl_subject_label *curracl;
75977+ __u32 curr_ip;
75978+ struct task_struct *p, *p2;
75979+
75980+ if (unlikely(!gr_acl_is_enabled()))
75981+ return;
75982+
75983+ curracl = task->acl;
75984+ curr_ip = task->signal->curr_ip;
75985+
75986+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
75987+ read_lock(&tasklist_lock);
75988+ do_each_thread(p2, p) {
75989+ if (p->signal->curr_ip == curr_ip)
75990+ gr_fake_force_sig(SIGKILL, p);
75991+ } while_each_thread(p2, p);
75992+ read_unlock(&tasklist_lock);
75993+ } else if (curracl->mode & GR_KILLPROC)
75994+ gr_fake_force_sig(SIGKILL, task);
75995+
75996+ return;
75997+}
75998diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
75999new file mode 100644
76000index 0000000..6b0c9cc
76001--- /dev/null
76002+++ b/grsecurity/gracl_shm.c
76003@@ -0,0 +1,40 @@
76004+#include <linux/kernel.h>
76005+#include <linux/mm.h>
76006+#include <linux/sched.h>
76007+#include <linux/file.h>
76008+#include <linux/ipc.h>
76009+#include <linux/gracl.h>
76010+#include <linux/grsecurity.h>
76011+#include <linux/grinternal.h>
76012+
76013+int
76014+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76015+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76016+{
76017+ struct task_struct *task;
76018+
76019+ if (!gr_acl_is_enabled())
76020+ return 1;
76021+
76022+ rcu_read_lock();
76023+ read_lock(&tasklist_lock);
76024+
76025+ task = find_task_by_vpid(shm_cprid);
76026+
76027+ if (unlikely(!task))
76028+ task = find_task_by_vpid(shm_lapid);
76029+
76030+ if (unlikely(task && (time_before_eq64(task->start_time, shm_createtime) ||
76031+ (task_pid_nr(task) == shm_lapid)) &&
76032+ (task->acl->mode & GR_PROTSHM) &&
76033+ (task->acl != current->acl))) {
76034+ read_unlock(&tasklist_lock);
76035+ rcu_read_unlock();
76036+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
76037+ return 0;
76038+ }
76039+ read_unlock(&tasklist_lock);
76040+ rcu_read_unlock();
76041+
76042+ return 1;
76043+}
76044diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
76045new file mode 100644
76046index 0000000..bc0be01
76047--- /dev/null
76048+++ b/grsecurity/grsec_chdir.c
76049@@ -0,0 +1,19 @@
76050+#include <linux/kernel.h>
76051+#include <linux/sched.h>
76052+#include <linux/fs.h>
76053+#include <linux/file.h>
76054+#include <linux/grsecurity.h>
76055+#include <linux/grinternal.h>
76056+
76057+void
76058+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
76059+{
76060+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
76061+ if ((grsec_enable_chdir && grsec_enable_group &&
76062+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
76063+ !grsec_enable_group)) {
76064+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
76065+ }
76066+#endif
76067+ return;
76068+}
76069diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
76070new file mode 100644
76071index 0000000..6d99cec
76072--- /dev/null
76073+++ b/grsecurity/grsec_chroot.c
76074@@ -0,0 +1,385 @@
76075+#include <linux/kernel.h>
76076+#include <linux/module.h>
76077+#include <linux/sched.h>
76078+#include <linux/file.h>
76079+#include <linux/fs.h>
76080+#include <linux/mount.h>
76081+#include <linux/types.h>
76082+#include "../fs/mount.h"
76083+#include <linux/grsecurity.h>
76084+#include <linux/grinternal.h>
76085+
76086+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76087+int gr_init_ran;
76088+#endif
76089+
76090+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
76091+{
76092+#ifdef CONFIG_GRKERNSEC
76093+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
76094+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
76095+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76096+ && gr_init_ran
76097+#endif
76098+ )
76099+ task->gr_is_chrooted = 1;
76100+ else {
76101+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
76102+ if (task_pid_nr(task) == 1 && !gr_init_ran)
76103+ gr_init_ran = 1;
76104+#endif
76105+ task->gr_is_chrooted = 0;
76106+ }
76107+
76108+ task->gr_chroot_dentry = path->dentry;
76109+#endif
76110+ return;
76111+}
76112+
76113+void gr_clear_chroot_entries(struct task_struct *task)
76114+{
76115+#ifdef CONFIG_GRKERNSEC
76116+ task->gr_is_chrooted = 0;
76117+ task->gr_chroot_dentry = NULL;
76118+#endif
76119+ return;
76120+}
76121+
76122+int
76123+gr_handle_chroot_unix(const pid_t pid)
76124+{
76125+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
76126+ struct task_struct *p;
76127+
76128+ if (unlikely(!grsec_enable_chroot_unix))
76129+ return 1;
76130+
76131+ if (likely(!proc_is_chrooted(current)))
76132+ return 1;
76133+
76134+ rcu_read_lock();
76135+ read_lock(&tasklist_lock);
76136+ p = find_task_by_vpid_unrestricted(pid);
76137+ if (unlikely(p && !have_same_root(current, p))) {
76138+ read_unlock(&tasklist_lock);
76139+ rcu_read_unlock();
76140+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
76141+ return 0;
76142+ }
76143+ read_unlock(&tasklist_lock);
76144+ rcu_read_unlock();
76145+#endif
76146+ return 1;
76147+}
76148+
76149+int
76150+gr_handle_chroot_nice(void)
76151+{
76152+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76153+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
76154+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
76155+ return -EPERM;
76156+ }
76157+#endif
76158+ return 0;
76159+}
76160+
76161+int
76162+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
76163+{
76164+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
76165+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
76166+ && proc_is_chrooted(current)) {
76167+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
76168+ return -EACCES;
76169+ }
76170+#endif
76171+ return 0;
76172+}
76173+
76174+int
76175+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
76176+{
76177+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76178+ struct task_struct *p;
76179+ int ret = 0;
76180+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
76181+ return ret;
76182+
76183+ read_lock(&tasklist_lock);
76184+ do_each_pid_task(pid, type, p) {
76185+ if (!have_same_root(current, p)) {
76186+ ret = 1;
76187+ goto out;
76188+ }
76189+ } while_each_pid_task(pid, type, p);
76190+out:
76191+ read_unlock(&tasklist_lock);
76192+ return ret;
76193+#endif
76194+ return 0;
76195+}
76196+
76197+int
76198+gr_pid_is_chrooted(struct task_struct *p)
76199+{
76200+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76201+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
76202+ return 0;
76203+
76204+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
76205+ !have_same_root(current, p)) {
76206+ return 1;
76207+ }
76208+#endif
76209+ return 0;
76210+}
76211+
76212+EXPORT_SYMBOL_GPL(gr_pid_is_chrooted);
76213+
76214+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
76215+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
76216+{
76217+ struct path path, currentroot;
76218+ int ret = 0;
76219+
76220+ path.dentry = (struct dentry *)u_dentry;
76221+ path.mnt = (struct vfsmount *)u_mnt;
76222+ get_fs_root(current->fs, &currentroot);
76223+ if (path_is_under(&path, &currentroot))
76224+ ret = 1;
76225+ path_put(&currentroot);
76226+
76227+ return ret;
76228+}
76229+#endif
76230+
76231+int
76232+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
76233+{
76234+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76235+ if (!grsec_enable_chroot_fchdir)
76236+ return 1;
76237+
76238+ if (!proc_is_chrooted(current))
76239+ return 1;
76240+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
76241+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
76242+ return 0;
76243+ }
76244+#endif
76245+ return 1;
76246+}
76247+
76248+int
76249+gr_chroot_fhandle(void)
76250+{
76251+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
76252+ if (!grsec_enable_chroot_fchdir)
76253+ return 1;
76254+
76255+ if (!proc_is_chrooted(current))
76256+ return 1;
76257+ else {
76258+ gr_log_noargs(GR_DONT_AUDIT, GR_CHROOT_FHANDLE_MSG);
76259+ return 0;
76260+ }
76261+#endif
76262+ return 1;
76263+}
76264+
76265+int
76266+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76267+ const u64 shm_createtime)
76268+{
76269+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
76270+ struct task_struct *p;
76271+
76272+ if (unlikely(!grsec_enable_chroot_shmat))
76273+ return 1;
76274+
76275+ if (likely(!proc_is_chrooted(current)))
76276+ return 1;
76277+
76278+ rcu_read_lock();
76279+ read_lock(&tasklist_lock);
76280+
76281+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
76282+ if (time_before_eq64(p->start_time, shm_createtime)) {
76283+ if (have_same_root(current, p)) {
76284+ goto allow;
76285+ } else {
76286+ read_unlock(&tasklist_lock);
76287+ rcu_read_unlock();
76288+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76289+ return 0;
76290+ }
76291+ }
76292+ /* creator exited, pid reuse, fall through to next check */
76293+ }
76294+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
76295+ if (unlikely(!have_same_root(current, p))) {
76296+ read_unlock(&tasklist_lock);
76297+ rcu_read_unlock();
76298+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
76299+ return 0;
76300+ }
76301+ }
76302+
76303+allow:
76304+ read_unlock(&tasklist_lock);
76305+ rcu_read_unlock();
76306+#endif
76307+ return 1;
76308+}
76309+
76310+void
76311+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
76312+{
76313+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
76314+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
76315+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
76316+#endif
76317+ return;
76318+}
76319+
76320+int
76321+gr_handle_chroot_mknod(const struct dentry *dentry,
76322+ const struct vfsmount *mnt, const int mode)
76323+{
76324+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
76325+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
76326+ proc_is_chrooted(current)) {
76327+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
76328+ return -EPERM;
76329+ }
76330+#endif
76331+ return 0;
76332+}
76333+
76334+int
76335+gr_handle_chroot_mount(const struct dentry *dentry,
76336+ const struct vfsmount *mnt, const char *dev_name)
76337+{
76338+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
76339+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
76340+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
76341+ return -EPERM;
76342+ }
76343+#endif
76344+ return 0;
76345+}
76346+
76347+int
76348+gr_handle_chroot_pivot(void)
76349+{
76350+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
76351+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
76352+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
76353+ return -EPERM;
76354+ }
76355+#endif
76356+ return 0;
76357+}
76358+
76359+int
76360+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
76361+{
76362+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
76363+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
76364+ !gr_is_outside_chroot(dentry, mnt)) {
76365+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
76366+ return -EPERM;
76367+ }
76368+#endif
76369+ return 0;
76370+}
76371+
76372+extern const char *captab_log[];
76373+extern int captab_log_entries;
76374+
76375+int
76376+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
76377+{
76378+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76379+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76380+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76381+ if (cap_raised(chroot_caps, cap)) {
76382+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
76383+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
76384+ }
76385+ return 0;
76386+ }
76387+ }
76388+#endif
76389+ return 1;
76390+}
76391+
76392+int
76393+gr_chroot_is_capable(const int cap)
76394+{
76395+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76396+ return gr_task_chroot_is_capable(current, current_cred(), cap);
76397+#endif
76398+ return 1;
76399+}
76400+
76401+int
76402+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
76403+{
76404+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76405+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
76406+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
76407+ if (cap_raised(chroot_caps, cap)) {
76408+ return 0;
76409+ }
76410+ }
76411+#endif
76412+ return 1;
76413+}
76414+
76415+int
76416+gr_chroot_is_capable_nolog(const int cap)
76417+{
76418+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
76419+ return gr_task_chroot_is_capable_nolog(current, cap);
76420+#endif
76421+ return 1;
76422+}
76423+
76424+int
76425+gr_handle_chroot_sysctl(const int op)
76426+{
76427+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
76428+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
76429+ proc_is_chrooted(current))
76430+ return -EACCES;
76431+#endif
76432+ return 0;
76433+}
76434+
76435+void
76436+gr_handle_chroot_chdir(const struct path *path)
76437+{
76438+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
76439+ if (grsec_enable_chroot_chdir)
76440+ set_fs_pwd(current->fs, path);
76441+#endif
76442+ return;
76443+}
76444+
76445+int
76446+gr_handle_chroot_chmod(const struct dentry *dentry,
76447+ const struct vfsmount *mnt, const int mode)
76448+{
76449+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
76450+ /* allow chmod +s on directories, but not files */
76451+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
76452+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
76453+ proc_is_chrooted(current)) {
76454+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
76455+ return -EPERM;
76456+ }
76457+#endif
76458+ return 0;
76459+}
76460diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
76461new file mode 100644
76462index 0000000..0f9ac91
76463--- /dev/null
76464+++ b/grsecurity/grsec_disabled.c
76465@@ -0,0 +1,440 @@
76466+#include <linux/kernel.h>
76467+#include <linux/module.h>
76468+#include <linux/sched.h>
76469+#include <linux/file.h>
76470+#include <linux/fs.h>
76471+#include <linux/kdev_t.h>
76472+#include <linux/net.h>
76473+#include <linux/in.h>
76474+#include <linux/ip.h>
76475+#include <linux/skbuff.h>
76476+#include <linux/sysctl.h>
76477+
76478+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
76479+void
76480+pax_set_initial_flags(struct linux_binprm *bprm)
76481+{
76482+ return;
76483+}
76484+#endif
76485+
76486+#ifdef CONFIG_SYSCTL
76487+__u32
76488+gr_handle_sysctl(const struct ctl_table * table, const int op)
76489+{
76490+ return 0;
76491+}
76492+#endif
76493+
76494+#ifdef CONFIG_TASKSTATS
76495+int gr_is_taskstats_denied(int pid)
76496+{
76497+ return 0;
76498+}
76499+#endif
76500+
76501+int
76502+gr_acl_is_enabled(void)
76503+{
76504+ return 0;
76505+}
76506+
76507+int
76508+gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap)
76509+{
76510+ return 0;
76511+}
76512+
76513+void
76514+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
76515+{
76516+ return;
76517+}
76518+
76519+int
76520+gr_handle_rawio(const struct inode *inode)
76521+{
76522+ return 0;
76523+}
76524+
76525+void
76526+gr_acl_handle_psacct(struct task_struct *task, const long code)
76527+{
76528+ return;
76529+}
76530+
76531+int
76532+gr_handle_ptrace(struct task_struct *task, const long request)
76533+{
76534+ return 0;
76535+}
76536+
76537+int
76538+gr_handle_proc_ptrace(struct task_struct *task)
76539+{
76540+ return 0;
76541+}
76542+
76543+int
76544+gr_set_acls(const int type)
76545+{
76546+ return 0;
76547+}
76548+
76549+int
76550+gr_check_hidden_task(const struct task_struct *tsk)
76551+{
76552+ return 0;
76553+}
76554+
76555+int
76556+gr_check_protected_task(const struct task_struct *task)
76557+{
76558+ return 0;
76559+}
76560+
76561+int
76562+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76563+{
76564+ return 0;
76565+}
76566+
76567+void
76568+gr_copy_label(struct task_struct *tsk)
76569+{
76570+ return;
76571+}
76572+
76573+void
76574+gr_set_pax_flags(struct task_struct *task)
76575+{
76576+ return;
76577+}
76578+
76579+int
76580+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76581+ const int unsafe_share)
76582+{
76583+ return 0;
76584+}
76585+
76586+void
76587+gr_handle_delete(const ino_t ino, const dev_t dev)
76588+{
76589+ return;
76590+}
76591+
76592+void
76593+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
76594+{
76595+ return;
76596+}
76597+
76598+void
76599+gr_handle_crash(struct task_struct *task, const int sig)
76600+{
76601+ return;
76602+}
76603+
76604+int
76605+gr_check_crash_exec(const struct file *filp)
76606+{
76607+ return 0;
76608+}
76609+
76610+int
76611+gr_check_crash_uid(const kuid_t uid)
76612+{
76613+ return 0;
76614+}
76615+
76616+void
76617+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76618+ struct dentry *old_dentry,
76619+ struct dentry *new_dentry,
76620+ struct vfsmount *mnt, const __u8 replace, unsigned int flags)
76621+{
76622+ return;
76623+}
76624+
76625+int
76626+gr_search_socket(const int family, const int type, const int protocol)
76627+{
76628+ return 1;
76629+}
76630+
76631+int
76632+gr_search_connectbind(const int mode, const struct socket *sock,
76633+ const struct sockaddr_in *addr)
76634+{
76635+ return 0;
76636+}
76637+
76638+void
76639+gr_handle_alertkill(struct task_struct *task)
76640+{
76641+ return;
76642+}
76643+
76644+__u32
76645+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
76646+{
76647+ return 1;
76648+}
76649+
76650+__u32
76651+gr_acl_handle_hidden_file(const struct dentry * dentry,
76652+ const struct vfsmount * mnt)
76653+{
76654+ return 1;
76655+}
76656+
76657+__u32
76658+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
76659+ int acc_mode)
76660+{
76661+ return 1;
76662+}
76663+
76664+__u32
76665+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
76666+{
76667+ return 1;
76668+}
76669+
76670+__u32
76671+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
76672+{
76673+ return 1;
76674+}
76675+
76676+int
76677+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
76678+ unsigned int *vm_flags)
76679+{
76680+ return 1;
76681+}
76682+
76683+__u32
76684+gr_acl_handle_truncate(const struct dentry * dentry,
76685+ const struct vfsmount * mnt)
76686+{
76687+ return 1;
76688+}
76689+
76690+__u32
76691+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
76692+{
76693+ return 1;
76694+}
76695+
76696+__u32
76697+gr_acl_handle_access(const struct dentry * dentry,
76698+ const struct vfsmount * mnt, const int fmode)
76699+{
76700+ return 1;
76701+}
76702+
76703+__u32
76704+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
76705+ umode_t *mode)
76706+{
76707+ return 1;
76708+}
76709+
76710+__u32
76711+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
76712+{
76713+ return 1;
76714+}
76715+
76716+__u32
76717+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
76718+{
76719+ return 1;
76720+}
76721+
76722+__u32
76723+gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
76724+{
76725+ return 1;
76726+}
76727+
76728+void
76729+grsecurity_init(void)
76730+{
76731+ return;
76732+}
76733+
76734+umode_t gr_acl_umask(void)
76735+{
76736+ return 0;
76737+}
76738+
76739+__u32
76740+gr_acl_handle_mknod(const struct dentry * new_dentry,
76741+ const struct dentry * parent_dentry,
76742+ const struct vfsmount * parent_mnt,
76743+ const int mode)
76744+{
76745+ return 1;
76746+}
76747+
76748+__u32
76749+gr_acl_handle_mkdir(const struct dentry * new_dentry,
76750+ const struct dentry * parent_dentry,
76751+ const struct vfsmount * parent_mnt)
76752+{
76753+ return 1;
76754+}
76755+
76756+__u32
76757+gr_acl_handle_symlink(const struct dentry * new_dentry,
76758+ const struct dentry * parent_dentry,
76759+ const struct vfsmount * parent_mnt, const struct filename *from)
76760+{
76761+ return 1;
76762+}
76763+
76764+__u32
76765+gr_acl_handle_link(const struct dentry * new_dentry,
76766+ const struct dentry * parent_dentry,
76767+ const struct vfsmount * parent_mnt,
76768+ const struct dentry * old_dentry,
76769+ const struct vfsmount * old_mnt, const struct filename *to)
76770+{
76771+ return 1;
76772+}
76773+
76774+int
76775+gr_acl_handle_rename(const struct dentry *new_dentry,
76776+ const struct dentry *parent_dentry,
76777+ const struct vfsmount *parent_mnt,
76778+ const struct dentry *old_dentry,
76779+ const struct inode *old_parent_inode,
76780+ const struct vfsmount *old_mnt, const struct filename *newname,
76781+ unsigned int flags)
76782+{
76783+ return 0;
76784+}
76785+
76786+int
76787+gr_acl_handle_filldir(const struct file *file, const char *name,
76788+ const int namelen, const ino_t ino)
76789+{
76790+ return 1;
76791+}
76792+
76793+int
76794+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
76795+ const u64 shm_createtime, const kuid_t cuid, const int shmid)
76796+{
76797+ return 1;
76798+}
76799+
76800+int
76801+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
76802+{
76803+ return 0;
76804+}
76805+
76806+int
76807+gr_search_accept(const struct socket *sock)
76808+{
76809+ return 0;
76810+}
76811+
76812+int
76813+gr_search_listen(const struct socket *sock)
76814+{
76815+ return 0;
76816+}
76817+
76818+int
76819+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
76820+{
76821+ return 0;
76822+}
76823+
76824+__u32
76825+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
76826+{
76827+ return 1;
76828+}
76829+
76830+__u32
76831+gr_acl_handle_creat(const struct dentry * dentry,
76832+ const struct dentry * p_dentry,
76833+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
76834+ const int imode)
76835+{
76836+ return 1;
76837+}
76838+
76839+void
76840+gr_acl_handle_exit(void)
76841+{
76842+ return;
76843+}
76844+
76845+int
76846+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
76847+{
76848+ return 1;
76849+}
76850+
76851+void
76852+gr_set_role_label(const kuid_t uid, const kgid_t gid)
76853+{
76854+ return;
76855+}
76856+
76857+int
76858+gr_acl_handle_procpidmem(const struct task_struct *task)
76859+{
76860+ return 0;
76861+}
76862+
76863+int
76864+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
76865+{
76866+ return 0;
76867+}
76868+
76869+int
76870+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
76871+{
76872+ return 0;
76873+}
76874+
76875+int
76876+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
76877+{
76878+ return 0;
76879+}
76880+
76881+int
76882+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
76883+{
76884+ return 0;
76885+}
76886+
76887+int gr_acl_enable_at_secure(void)
76888+{
76889+ return 0;
76890+}
76891+
76892+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
76893+{
76894+ return dentry->d_sb->s_dev;
76895+}
76896+
76897+void gr_put_exec_file(struct task_struct *task)
76898+{
76899+ return;
76900+}
76901+
76902+#ifdef CONFIG_SECURITY
76903+EXPORT_SYMBOL_GPL(gr_check_user_change);
76904+EXPORT_SYMBOL_GPL(gr_check_group_change);
76905+#endif
76906diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
76907new file mode 100644
76908index 0000000..14638ff
76909--- /dev/null
76910+++ b/grsecurity/grsec_exec.c
76911@@ -0,0 +1,188 @@
76912+#include <linux/kernel.h>
76913+#include <linux/sched.h>
76914+#include <linux/file.h>
76915+#include <linux/binfmts.h>
76916+#include <linux/fs.h>
76917+#include <linux/types.h>
76918+#include <linux/grdefs.h>
76919+#include <linux/grsecurity.h>
76920+#include <linux/grinternal.h>
76921+#include <linux/capability.h>
76922+#include <linux/module.h>
76923+#include <linux/compat.h>
76924+
76925+#include <asm/uaccess.h>
76926+
76927+#ifdef CONFIG_GRKERNSEC_EXECLOG
76928+static char gr_exec_arg_buf[132];
76929+static DEFINE_MUTEX(gr_exec_arg_mutex);
76930+#endif
76931+
76932+struct user_arg_ptr {
76933+#ifdef CONFIG_COMPAT
76934+ bool is_compat;
76935+#endif
76936+ union {
76937+ const char __user *const __user *native;
76938+#ifdef CONFIG_COMPAT
76939+ const compat_uptr_t __user *compat;
76940+#endif
76941+ } ptr;
76942+};
76943+
76944+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
76945+
76946+void
76947+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
76948+{
76949+#ifdef CONFIG_GRKERNSEC_EXECLOG
76950+ char *grarg = gr_exec_arg_buf;
76951+ unsigned int i, x, execlen = 0;
76952+ char c;
76953+
76954+ if (!((grsec_enable_execlog && grsec_enable_group &&
76955+ in_group_p(grsec_audit_gid))
76956+ || (grsec_enable_execlog && !grsec_enable_group)))
76957+ return;
76958+
76959+ mutex_lock(&gr_exec_arg_mutex);
76960+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
76961+
76962+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
76963+ const char __user *p;
76964+ unsigned int len;
76965+
76966+ p = get_user_arg_ptr(argv, i);
76967+ if (IS_ERR(p))
76968+ goto log;
76969+
76970+ len = strnlen_user(p, 128 - execlen);
76971+ if (len > 128 - execlen)
76972+ len = 128 - execlen;
76973+ else if (len > 0)
76974+ len--;
76975+ if (copy_from_user(grarg + execlen, p, len))
76976+ goto log;
76977+
76978+ /* rewrite unprintable characters */
76979+ for (x = 0; x < len; x++) {
76980+ c = *(grarg + execlen + x);
76981+ if (c < 32 || c > 126)
76982+ *(grarg + execlen + x) = ' ';
76983+ }
76984+
76985+ execlen += len;
76986+ *(grarg + execlen) = ' ';
76987+ *(grarg + execlen + 1) = '\0';
76988+ execlen++;
76989+ }
76990+
76991+ log:
76992+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
76993+ bprm->file->f_path.mnt, grarg);
76994+ mutex_unlock(&gr_exec_arg_mutex);
76995+#endif
76996+ return;
76997+}
76998+
76999+#ifdef CONFIG_GRKERNSEC
77000+extern int gr_acl_is_capable(const int cap);
77001+extern int gr_acl_is_capable_nolog(const int cap);
77002+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77003+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
77004+extern int gr_chroot_is_capable(const int cap);
77005+extern int gr_chroot_is_capable_nolog(const int cap);
77006+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
77007+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
77008+#endif
77009+
77010+const char *captab_log[] = {
77011+ "CAP_CHOWN",
77012+ "CAP_DAC_OVERRIDE",
77013+ "CAP_DAC_READ_SEARCH",
77014+ "CAP_FOWNER",
77015+ "CAP_FSETID",
77016+ "CAP_KILL",
77017+ "CAP_SETGID",
77018+ "CAP_SETUID",
77019+ "CAP_SETPCAP",
77020+ "CAP_LINUX_IMMUTABLE",
77021+ "CAP_NET_BIND_SERVICE",
77022+ "CAP_NET_BROADCAST",
77023+ "CAP_NET_ADMIN",
77024+ "CAP_NET_RAW",
77025+ "CAP_IPC_LOCK",
77026+ "CAP_IPC_OWNER",
77027+ "CAP_SYS_MODULE",
77028+ "CAP_SYS_RAWIO",
77029+ "CAP_SYS_CHROOT",
77030+ "CAP_SYS_PTRACE",
77031+ "CAP_SYS_PACCT",
77032+ "CAP_SYS_ADMIN",
77033+ "CAP_SYS_BOOT",
77034+ "CAP_SYS_NICE",
77035+ "CAP_SYS_RESOURCE",
77036+ "CAP_SYS_TIME",
77037+ "CAP_SYS_TTY_CONFIG",
77038+ "CAP_MKNOD",
77039+ "CAP_LEASE",
77040+ "CAP_AUDIT_WRITE",
77041+ "CAP_AUDIT_CONTROL",
77042+ "CAP_SETFCAP",
77043+ "CAP_MAC_OVERRIDE",
77044+ "CAP_MAC_ADMIN",
77045+ "CAP_SYSLOG",
77046+ "CAP_WAKE_ALARM",
77047+ "CAP_BLOCK_SUSPEND"
77048+};
77049+
77050+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
77051+
77052+int gr_is_capable(const int cap)
77053+{
77054+#ifdef CONFIG_GRKERNSEC
77055+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
77056+ return 1;
77057+ return 0;
77058+#else
77059+ return 1;
77060+#endif
77061+}
77062+
77063+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
77064+{
77065+#ifdef CONFIG_GRKERNSEC
77066+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
77067+ return 1;
77068+ return 0;
77069+#else
77070+ return 1;
77071+#endif
77072+}
77073+
77074+int gr_is_capable_nolog(const int cap)
77075+{
77076+#ifdef CONFIG_GRKERNSEC
77077+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
77078+ return 1;
77079+ return 0;
77080+#else
77081+ return 1;
77082+#endif
77083+}
77084+
77085+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
77086+{
77087+#ifdef CONFIG_GRKERNSEC
77088+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
77089+ return 1;
77090+ return 0;
77091+#else
77092+ return 1;
77093+#endif
77094+}
77095+
77096+EXPORT_SYMBOL_GPL(gr_is_capable);
77097+EXPORT_SYMBOL_GPL(gr_is_capable_nolog);
77098+EXPORT_SYMBOL_GPL(gr_task_is_capable);
77099+EXPORT_SYMBOL_GPL(gr_task_is_capable_nolog);
77100diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
77101new file mode 100644
77102index 0000000..06cc6ea
77103--- /dev/null
77104+++ b/grsecurity/grsec_fifo.c
77105@@ -0,0 +1,24 @@
77106+#include <linux/kernel.h>
77107+#include <linux/sched.h>
77108+#include <linux/fs.h>
77109+#include <linux/file.h>
77110+#include <linux/grinternal.h>
77111+
77112+int
77113+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
77114+ const struct dentry *dir, const int flag, const int acc_mode)
77115+{
77116+#ifdef CONFIG_GRKERNSEC_FIFO
77117+ const struct cred *cred = current_cred();
77118+
77119+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
77120+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
77121+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
77122+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
77123+ if (!inode_permission(dentry->d_inode, acc_mode))
77124+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
77125+ return -EACCES;
77126+ }
77127+#endif
77128+ return 0;
77129+}
77130diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
77131new file mode 100644
77132index 0000000..8ca18bf
77133--- /dev/null
77134+++ b/grsecurity/grsec_fork.c
77135@@ -0,0 +1,23 @@
77136+#include <linux/kernel.h>
77137+#include <linux/sched.h>
77138+#include <linux/grsecurity.h>
77139+#include <linux/grinternal.h>
77140+#include <linux/errno.h>
77141+
77142+void
77143+gr_log_forkfail(const int retval)
77144+{
77145+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77146+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
77147+ switch (retval) {
77148+ case -EAGAIN:
77149+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
77150+ break;
77151+ case -ENOMEM:
77152+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
77153+ break;
77154+ }
77155+ }
77156+#endif
77157+ return;
77158+}
77159diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
77160new file mode 100644
77161index 0000000..b7cb191
77162--- /dev/null
77163+++ b/grsecurity/grsec_init.c
77164@@ -0,0 +1,286 @@
77165+#include <linux/kernel.h>
77166+#include <linux/sched.h>
77167+#include <linux/mm.h>
77168+#include <linux/gracl.h>
77169+#include <linux/slab.h>
77170+#include <linux/vmalloc.h>
77171+#include <linux/percpu.h>
77172+#include <linux/module.h>
77173+
77174+int grsec_enable_ptrace_readexec;
77175+int grsec_enable_setxid;
77176+int grsec_enable_symlinkown;
77177+kgid_t grsec_symlinkown_gid;
77178+int grsec_enable_brute;
77179+int grsec_enable_link;
77180+int grsec_enable_dmesg;
77181+int grsec_enable_harden_ptrace;
77182+int grsec_enable_harden_ipc;
77183+int grsec_enable_fifo;
77184+int grsec_enable_execlog;
77185+int grsec_enable_signal;
77186+int grsec_enable_forkfail;
77187+int grsec_enable_audit_ptrace;
77188+int grsec_enable_time;
77189+int grsec_enable_group;
77190+kgid_t grsec_audit_gid;
77191+int grsec_enable_chdir;
77192+int grsec_enable_mount;
77193+int grsec_enable_rofs;
77194+int grsec_deny_new_usb;
77195+int grsec_enable_chroot_findtask;
77196+int grsec_enable_chroot_mount;
77197+int grsec_enable_chroot_shmat;
77198+int grsec_enable_chroot_fchdir;
77199+int grsec_enable_chroot_double;
77200+int grsec_enable_chroot_pivot;
77201+int grsec_enable_chroot_chdir;
77202+int grsec_enable_chroot_chmod;
77203+int grsec_enable_chroot_mknod;
77204+int grsec_enable_chroot_nice;
77205+int grsec_enable_chroot_execlog;
77206+int grsec_enable_chroot_caps;
77207+int grsec_enable_chroot_sysctl;
77208+int grsec_enable_chroot_unix;
77209+int grsec_enable_tpe;
77210+kgid_t grsec_tpe_gid;
77211+int grsec_enable_blackhole;
77212+#ifdef CONFIG_IPV6_MODULE
77213+EXPORT_SYMBOL_GPL(grsec_enable_blackhole);
77214+#endif
77215+int grsec_lastack_retries;
77216+int grsec_enable_tpe_all;
77217+int grsec_enable_tpe_invert;
77218+int grsec_enable_socket_all;
77219+kgid_t grsec_socket_all_gid;
77220+int grsec_enable_socket_client;
77221+kgid_t grsec_socket_client_gid;
77222+int grsec_enable_socket_server;
77223+kgid_t grsec_socket_server_gid;
77224+int grsec_resource_logging;
77225+int grsec_disable_privio;
77226+int grsec_enable_log_rwxmaps;
77227+int grsec_lock;
77228+
77229+DEFINE_SPINLOCK(grsec_alert_lock);
77230+unsigned long grsec_alert_wtime = 0;
77231+unsigned long grsec_alert_fyet = 0;
77232+
77233+DEFINE_SPINLOCK(grsec_audit_lock);
77234+
77235+DEFINE_RWLOCK(grsec_exec_file_lock);
77236+
77237+char *gr_shared_page[4];
77238+
77239+char *gr_alert_log_fmt;
77240+char *gr_audit_log_fmt;
77241+char *gr_alert_log_buf;
77242+char *gr_audit_log_buf;
77243+
77244+extern struct gr_arg *gr_usermode;
77245+extern unsigned char *gr_system_salt;
77246+extern unsigned char *gr_system_sum;
77247+
77248+void __init
77249+grsecurity_init(void)
77250+{
77251+ int j;
77252+ /* create the per-cpu shared pages */
77253+
77254+#ifdef CONFIG_X86
77255+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
77256+#endif
77257+
77258+ for (j = 0; j < 4; j++) {
77259+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
77260+ if (gr_shared_page[j] == NULL) {
77261+ panic("Unable to allocate grsecurity shared page");
77262+ return;
77263+ }
77264+ }
77265+
77266+ /* allocate log buffers */
77267+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
77268+ if (!gr_alert_log_fmt) {
77269+ panic("Unable to allocate grsecurity alert log format buffer");
77270+ return;
77271+ }
77272+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
77273+ if (!gr_audit_log_fmt) {
77274+ panic("Unable to allocate grsecurity audit log format buffer");
77275+ return;
77276+ }
77277+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77278+ if (!gr_alert_log_buf) {
77279+ panic("Unable to allocate grsecurity alert log buffer");
77280+ return;
77281+ }
77282+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
77283+ if (!gr_audit_log_buf) {
77284+ panic("Unable to allocate grsecurity audit log buffer");
77285+ return;
77286+ }
77287+
77288+ /* allocate memory for authentication structure */
77289+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
77290+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
77291+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
77292+
77293+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
77294+ panic("Unable to allocate grsecurity authentication structure");
77295+ return;
77296+ }
77297+
77298+#ifdef CONFIG_GRKERNSEC_IO
77299+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
77300+ grsec_disable_privio = 1;
77301+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77302+ grsec_disable_privio = 1;
77303+#else
77304+ grsec_disable_privio = 0;
77305+#endif
77306+#endif
77307+
77308+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
77309+ /* for backward compatibility, tpe_invert always defaults to on if
77310+ enabled in the kernel
77311+ */
77312+ grsec_enable_tpe_invert = 1;
77313+#endif
77314+
77315+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
77316+#ifndef CONFIG_GRKERNSEC_SYSCTL
77317+ grsec_lock = 1;
77318+#endif
77319+
77320+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
77321+ grsec_enable_log_rwxmaps = 1;
77322+#endif
77323+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
77324+ grsec_enable_group = 1;
77325+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
77326+#endif
77327+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
77328+ grsec_enable_ptrace_readexec = 1;
77329+#endif
77330+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
77331+ grsec_enable_chdir = 1;
77332+#endif
77333+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
77334+ grsec_enable_harden_ptrace = 1;
77335+#endif
77336+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77337+ grsec_enable_harden_ipc = 1;
77338+#endif
77339+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77340+ grsec_enable_mount = 1;
77341+#endif
77342+#ifdef CONFIG_GRKERNSEC_LINK
77343+ grsec_enable_link = 1;
77344+#endif
77345+#ifdef CONFIG_GRKERNSEC_BRUTE
77346+ grsec_enable_brute = 1;
77347+#endif
77348+#ifdef CONFIG_GRKERNSEC_DMESG
77349+ grsec_enable_dmesg = 1;
77350+#endif
77351+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
77352+ grsec_enable_blackhole = 1;
77353+ grsec_lastack_retries = 4;
77354+#endif
77355+#ifdef CONFIG_GRKERNSEC_FIFO
77356+ grsec_enable_fifo = 1;
77357+#endif
77358+#ifdef CONFIG_GRKERNSEC_EXECLOG
77359+ grsec_enable_execlog = 1;
77360+#endif
77361+#ifdef CONFIG_GRKERNSEC_SETXID
77362+ grsec_enable_setxid = 1;
77363+#endif
77364+#ifdef CONFIG_GRKERNSEC_SIGNAL
77365+ grsec_enable_signal = 1;
77366+#endif
77367+#ifdef CONFIG_GRKERNSEC_FORKFAIL
77368+ grsec_enable_forkfail = 1;
77369+#endif
77370+#ifdef CONFIG_GRKERNSEC_TIME
77371+ grsec_enable_time = 1;
77372+#endif
77373+#ifdef CONFIG_GRKERNSEC_RESLOG
77374+ grsec_resource_logging = 1;
77375+#endif
77376+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
77377+ grsec_enable_chroot_findtask = 1;
77378+#endif
77379+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
77380+ grsec_enable_chroot_unix = 1;
77381+#endif
77382+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
77383+ grsec_enable_chroot_mount = 1;
77384+#endif
77385+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
77386+ grsec_enable_chroot_fchdir = 1;
77387+#endif
77388+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
77389+ grsec_enable_chroot_shmat = 1;
77390+#endif
77391+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
77392+ grsec_enable_audit_ptrace = 1;
77393+#endif
77394+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
77395+ grsec_enable_chroot_double = 1;
77396+#endif
77397+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
77398+ grsec_enable_chroot_pivot = 1;
77399+#endif
77400+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
77401+ grsec_enable_chroot_chdir = 1;
77402+#endif
77403+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
77404+ grsec_enable_chroot_chmod = 1;
77405+#endif
77406+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
77407+ grsec_enable_chroot_mknod = 1;
77408+#endif
77409+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
77410+ grsec_enable_chroot_nice = 1;
77411+#endif
77412+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
77413+ grsec_enable_chroot_execlog = 1;
77414+#endif
77415+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
77416+ grsec_enable_chroot_caps = 1;
77417+#endif
77418+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
77419+ grsec_enable_chroot_sysctl = 1;
77420+#endif
77421+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77422+ grsec_enable_symlinkown = 1;
77423+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
77424+#endif
77425+#ifdef CONFIG_GRKERNSEC_TPE
77426+ grsec_enable_tpe = 1;
77427+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
77428+#ifdef CONFIG_GRKERNSEC_TPE_ALL
77429+ grsec_enable_tpe_all = 1;
77430+#endif
77431+#endif
77432+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
77433+ grsec_enable_socket_all = 1;
77434+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
77435+#endif
77436+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
77437+ grsec_enable_socket_client = 1;
77438+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
77439+#endif
77440+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
77441+ grsec_enable_socket_server = 1;
77442+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
77443+#endif
77444+#endif
77445+#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
77446+ grsec_deny_new_usb = 1;
77447+#endif
77448+
77449+ return;
77450+}
77451diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
77452new file mode 100644
77453index 0000000..1773300
77454--- /dev/null
77455+++ b/grsecurity/grsec_ipc.c
77456@@ -0,0 +1,48 @@
77457+#include <linux/kernel.h>
77458+#include <linux/mm.h>
77459+#include <linux/sched.h>
77460+#include <linux/file.h>
77461+#include <linux/ipc.h>
77462+#include <linux/ipc_namespace.h>
77463+#include <linux/grsecurity.h>
77464+#include <linux/grinternal.h>
77465+
77466+int
77467+gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
77468+{
77469+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
77470+ int write;
77471+ int orig_granted_mode;
77472+ kuid_t euid;
77473+ kgid_t egid;
77474+
77475+ if (!grsec_enable_harden_ipc)
77476+ return 1;
77477+
77478+ euid = current_euid();
77479+ egid = current_egid();
77480+
77481+ write = requested_mode & 00002;
77482+ orig_granted_mode = ipcp->mode;
77483+
77484+ if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid))
77485+ orig_granted_mode >>= 6;
77486+ else {
77487+ /* if likely wrong permissions, lock to user */
77488+ if (orig_granted_mode & 0007)
77489+ orig_granted_mode = 0;
77490+ /* otherwise do a egid-only check */
77491+ else if (gid_eq(egid, ipcp->cgid) || gid_eq(egid, ipcp->gid))
77492+ orig_granted_mode >>= 3;
77493+ /* otherwise, no access */
77494+ else
77495+ orig_granted_mode = 0;
77496+ }
77497+ if (!(requested_mode & ~granted_mode & 0007) && (requested_mode & ~orig_granted_mode & 0007) &&
77498+ !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
77499+ gr_log_str_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", GR_GLOBAL_UID(ipcp->cuid));
77500+ return 0;
77501+ }
77502+#endif
77503+ return 1;
77504+}
77505diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
77506new file mode 100644
77507index 0000000..5e05e20
77508--- /dev/null
77509+++ b/grsecurity/grsec_link.c
77510@@ -0,0 +1,58 @@
77511+#include <linux/kernel.h>
77512+#include <linux/sched.h>
77513+#include <linux/fs.h>
77514+#include <linux/file.h>
77515+#include <linux/grinternal.h>
77516+
77517+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
77518+{
77519+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
77520+ const struct inode *link_inode = link->dentry->d_inode;
77521+
77522+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
77523+ /* ignore root-owned links, e.g. /proc/self */
77524+ gr_is_global_nonroot(link_inode->i_uid) && target &&
77525+ !uid_eq(link_inode->i_uid, target->i_uid)) {
77526+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
77527+ return 1;
77528+ }
77529+#endif
77530+ return 0;
77531+}
77532+
77533+int
77534+gr_handle_follow_link(const struct inode *parent,
77535+ const struct inode *inode,
77536+ const struct dentry *dentry, const struct vfsmount *mnt)
77537+{
77538+#ifdef CONFIG_GRKERNSEC_LINK
77539+ const struct cred *cred = current_cred();
77540+
77541+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
77542+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
77543+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
77544+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
77545+ return -EACCES;
77546+ }
77547+#endif
77548+ return 0;
77549+}
77550+
77551+int
77552+gr_handle_hardlink(const struct dentry *dentry,
77553+ const struct vfsmount *mnt,
77554+ struct inode *inode, const int mode, const struct filename *to)
77555+{
77556+#ifdef CONFIG_GRKERNSEC_LINK
77557+ const struct cred *cred = current_cred();
77558+
77559+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
77560+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
77561+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
77562+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
77563+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
77564+ return -EPERM;
77565+ }
77566+#endif
77567+ return 0;
77568+}
77569diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
77570new file mode 100644
77571index 0000000..dbe0a6b
77572--- /dev/null
77573+++ b/grsecurity/grsec_log.c
77574@@ -0,0 +1,341 @@
77575+#include <linux/kernel.h>
77576+#include <linux/sched.h>
77577+#include <linux/file.h>
77578+#include <linux/tty.h>
77579+#include <linux/fs.h>
77580+#include <linux/mm.h>
77581+#include <linux/grinternal.h>
77582+
77583+#ifdef CONFIG_TREE_PREEMPT_RCU
77584+#define DISABLE_PREEMPT() preempt_disable()
77585+#define ENABLE_PREEMPT() preempt_enable()
77586+#else
77587+#define DISABLE_PREEMPT()
77588+#define ENABLE_PREEMPT()
77589+#endif
77590+
77591+#define BEGIN_LOCKS(x) \
77592+ DISABLE_PREEMPT(); \
77593+ rcu_read_lock(); \
77594+ read_lock(&tasklist_lock); \
77595+ read_lock(&grsec_exec_file_lock); \
77596+ if (x != GR_DO_AUDIT) \
77597+ spin_lock(&grsec_alert_lock); \
77598+ else \
77599+ spin_lock(&grsec_audit_lock)
77600+
77601+#define END_LOCKS(x) \
77602+ if (x != GR_DO_AUDIT) \
77603+ spin_unlock(&grsec_alert_lock); \
77604+ else \
77605+ spin_unlock(&grsec_audit_lock); \
77606+ read_unlock(&grsec_exec_file_lock); \
77607+ read_unlock(&tasklist_lock); \
77608+ rcu_read_unlock(); \
77609+ ENABLE_PREEMPT(); \
77610+ if (x == GR_DONT_AUDIT) \
77611+ gr_handle_alertkill(current)
77612+
77613+enum {
77614+ FLOODING,
77615+ NO_FLOODING
77616+};
77617+
77618+extern char *gr_alert_log_fmt;
77619+extern char *gr_audit_log_fmt;
77620+extern char *gr_alert_log_buf;
77621+extern char *gr_audit_log_buf;
77622+
77623+static int gr_log_start(int audit)
77624+{
77625+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
77626+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
77627+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77628+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
77629+ unsigned long curr_secs = get_seconds();
77630+
77631+ if (audit == GR_DO_AUDIT)
77632+ goto set_fmt;
77633+
77634+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
77635+ grsec_alert_wtime = curr_secs;
77636+ grsec_alert_fyet = 0;
77637+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
77638+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
77639+ grsec_alert_fyet++;
77640+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
77641+ grsec_alert_wtime = curr_secs;
77642+ grsec_alert_fyet++;
77643+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
77644+ return FLOODING;
77645+ }
77646+ else return FLOODING;
77647+
77648+set_fmt:
77649+#endif
77650+ memset(buf, 0, PAGE_SIZE);
77651+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
77652+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
77653+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77654+ } else if (current->signal->curr_ip) {
77655+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
77656+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
77657+ } else if (gr_acl_is_enabled()) {
77658+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
77659+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
77660+ } else {
77661+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
77662+ strcpy(buf, fmt);
77663+ }
77664+
77665+ return NO_FLOODING;
77666+}
77667+
77668+static void gr_log_middle(int audit, const char *msg, va_list ap)
77669+ __attribute__ ((format (printf, 2, 0)));
77670+
77671+static void gr_log_middle(int audit, const char *msg, va_list ap)
77672+{
77673+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77674+ unsigned int len = strlen(buf);
77675+
77676+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77677+
77678+ return;
77679+}
77680+
77681+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77682+ __attribute__ ((format (printf, 2, 3)));
77683+
77684+static void gr_log_middle_varargs(int audit, const char *msg, ...)
77685+{
77686+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77687+ unsigned int len = strlen(buf);
77688+ va_list ap;
77689+
77690+ va_start(ap, msg);
77691+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
77692+ va_end(ap);
77693+
77694+ return;
77695+}
77696+
77697+static void gr_log_end(int audit, int append_default)
77698+{
77699+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
77700+ if (append_default) {
77701+ struct task_struct *task = current;
77702+ struct task_struct *parent = task->real_parent;
77703+ const struct cred *cred = __task_cred(task);
77704+ const struct cred *pcred = __task_cred(parent);
77705+ unsigned int len = strlen(buf);
77706+
77707+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77708+ }
77709+
77710+ printk("%s\n", buf);
77711+
77712+ return;
77713+}
77714+
77715+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
77716+{
77717+ int logtype;
77718+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
77719+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
77720+ void *voidptr = NULL;
77721+ int num1 = 0, num2 = 0;
77722+ unsigned long ulong1 = 0, ulong2 = 0;
77723+ struct dentry *dentry = NULL;
77724+ struct vfsmount *mnt = NULL;
77725+ struct file *file = NULL;
77726+ struct task_struct *task = NULL;
77727+ struct vm_area_struct *vma = NULL;
77728+ const struct cred *cred, *pcred;
77729+ va_list ap;
77730+
77731+ BEGIN_LOCKS(audit);
77732+ logtype = gr_log_start(audit);
77733+ if (logtype == FLOODING) {
77734+ END_LOCKS(audit);
77735+ return;
77736+ }
77737+ va_start(ap, argtypes);
77738+ switch (argtypes) {
77739+ case GR_TTYSNIFF:
77740+ task = va_arg(ap, struct task_struct *);
77741+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
77742+ break;
77743+ case GR_SYSCTL_HIDDEN:
77744+ str1 = va_arg(ap, char *);
77745+ gr_log_middle_varargs(audit, msg, result, str1);
77746+ break;
77747+ case GR_RBAC:
77748+ dentry = va_arg(ap, struct dentry *);
77749+ mnt = va_arg(ap, struct vfsmount *);
77750+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
77751+ break;
77752+ case GR_RBAC_STR:
77753+ dentry = va_arg(ap, struct dentry *);
77754+ mnt = va_arg(ap, struct vfsmount *);
77755+ str1 = va_arg(ap, char *);
77756+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
77757+ break;
77758+ case GR_STR_RBAC:
77759+ str1 = va_arg(ap, char *);
77760+ dentry = va_arg(ap, struct dentry *);
77761+ mnt = va_arg(ap, struct vfsmount *);
77762+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
77763+ break;
77764+ case GR_RBAC_MODE2:
77765+ dentry = va_arg(ap, struct dentry *);
77766+ mnt = va_arg(ap, struct vfsmount *);
77767+ str1 = va_arg(ap, char *);
77768+ str2 = va_arg(ap, char *);
77769+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
77770+ break;
77771+ case GR_RBAC_MODE3:
77772+ dentry = va_arg(ap, struct dentry *);
77773+ mnt = va_arg(ap, struct vfsmount *);
77774+ str1 = va_arg(ap, char *);
77775+ str2 = va_arg(ap, char *);
77776+ str3 = va_arg(ap, char *);
77777+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
77778+ break;
77779+ case GR_FILENAME:
77780+ dentry = va_arg(ap, struct dentry *);
77781+ mnt = va_arg(ap, struct vfsmount *);
77782+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
77783+ break;
77784+ case GR_STR_FILENAME:
77785+ str1 = va_arg(ap, char *);
77786+ dentry = va_arg(ap, struct dentry *);
77787+ mnt = va_arg(ap, struct vfsmount *);
77788+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
77789+ break;
77790+ case GR_FILENAME_STR:
77791+ dentry = va_arg(ap, struct dentry *);
77792+ mnt = va_arg(ap, struct vfsmount *);
77793+ str1 = va_arg(ap, char *);
77794+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
77795+ break;
77796+ case GR_FILENAME_TWO_INT:
77797+ dentry = va_arg(ap, struct dentry *);
77798+ mnt = va_arg(ap, struct vfsmount *);
77799+ num1 = va_arg(ap, int);
77800+ num2 = va_arg(ap, int);
77801+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
77802+ break;
77803+ case GR_FILENAME_TWO_INT_STR:
77804+ dentry = va_arg(ap, struct dentry *);
77805+ mnt = va_arg(ap, struct vfsmount *);
77806+ num1 = va_arg(ap, int);
77807+ num2 = va_arg(ap, int);
77808+ str1 = va_arg(ap, char *);
77809+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
77810+ break;
77811+ case GR_TEXTREL:
77812+ file = va_arg(ap, struct file *);
77813+ ulong1 = va_arg(ap, unsigned long);
77814+ ulong2 = va_arg(ap, unsigned long);
77815+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
77816+ break;
77817+ case GR_PTRACE:
77818+ task = va_arg(ap, struct task_struct *);
77819+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
77820+ break;
77821+ case GR_RESOURCE:
77822+ task = va_arg(ap, struct task_struct *);
77823+ cred = __task_cred(task);
77824+ pcred = __task_cred(task->real_parent);
77825+ ulong1 = va_arg(ap, unsigned long);
77826+ str1 = va_arg(ap, char *);
77827+ ulong2 = va_arg(ap, unsigned long);
77828+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77829+ break;
77830+ case GR_CAP:
77831+ task = va_arg(ap, struct task_struct *);
77832+ cred = __task_cred(task);
77833+ pcred = __task_cred(task->real_parent);
77834+ str1 = va_arg(ap, char *);
77835+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77836+ break;
77837+ case GR_SIG:
77838+ str1 = va_arg(ap, char *);
77839+ voidptr = va_arg(ap, void *);
77840+ gr_log_middle_varargs(audit, msg, str1, voidptr);
77841+ break;
77842+ case GR_SIG2:
77843+ task = va_arg(ap, struct task_struct *);
77844+ cred = __task_cred(task);
77845+ pcred = __task_cred(task->real_parent);
77846+ num1 = va_arg(ap, int);
77847+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77848+ break;
77849+ case GR_CRASH1:
77850+ task = va_arg(ap, struct task_struct *);
77851+ cred = __task_cred(task);
77852+ pcred = __task_cred(task->real_parent);
77853+ ulong1 = va_arg(ap, unsigned long);
77854+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
77855+ break;
77856+ case GR_CRASH2:
77857+ task = va_arg(ap, struct task_struct *);
77858+ cred = __task_cred(task);
77859+ pcred = __task_cred(task->real_parent);
77860+ ulong1 = va_arg(ap, unsigned long);
77861+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
77862+ break;
77863+ case GR_RWXMAP:
77864+ file = va_arg(ap, struct file *);
77865+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
77866+ break;
77867+ case GR_RWXMAPVMA:
77868+ vma = va_arg(ap, struct vm_area_struct *);
77869+ if (vma->vm_file)
77870+ str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
77871+ else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
77872+ str1 = "<stack>";
77873+ else if (vma->vm_start <= current->mm->brk &&
77874+ vma->vm_end >= current->mm->start_brk)
77875+ str1 = "<heap>";
77876+ else
77877+ str1 = "<anonymous mapping>";
77878+ gr_log_middle_varargs(audit, msg, str1);
77879+ break;
77880+ case GR_PSACCT:
77881+ {
77882+ unsigned int wday, cday;
77883+ __u8 whr, chr;
77884+ __u8 wmin, cmin;
77885+ __u8 wsec, csec;
77886+ char cur_tty[64] = { 0 };
77887+ char parent_tty[64] = { 0 };
77888+
77889+ task = va_arg(ap, struct task_struct *);
77890+ wday = va_arg(ap, unsigned int);
77891+ cday = va_arg(ap, unsigned int);
77892+ whr = va_arg(ap, int);
77893+ chr = va_arg(ap, int);
77894+ wmin = va_arg(ap, int);
77895+ cmin = va_arg(ap, int);
77896+ wsec = va_arg(ap, int);
77897+ csec = va_arg(ap, int);
77898+ ulong1 = va_arg(ap, unsigned long);
77899+ cred = __task_cred(task);
77900+ pcred = __task_cred(task->real_parent);
77901+
77902+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
77903+ }
77904+ break;
77905+ default:
77906+ gr_log_middle(audit, msg, ap);
77907+ }
77908+ va_end(ap);
77909+ // these don't need DEFAULTSECARGS printed on the end
77910+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
77911+ gr_log_end(audit, 0);
77912+ else
77913+ gr_log_end(audit, 1);
77914+ END_LOCKS(audit);
77915+}
77916diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
77917new file mode 100644
77918index 0000000..0e39d8c
77919--- /dev/null
77920+++ b/grsecurity/grsec_mem.c
77921@@ -0,0 +1,48 @@
77922+#include <linux/kernel.h>
77923+#include <linux/sched.h>
77924+#include <linux/mm.h>
77925+#include <linux/mman.h>
77926+#include <linux/module.h>
77927+#include <linux/grinternal.h>
77928+
77929+void gr_handle_msr_write(void)
77930+{
77931+ gr_log_noargs(GR_DONT_AUDIT, GR_MSRWRITE_MSG);
77932+ return;
77933+}
77934+EXPORT_SYMBOL_GPL(gr_handle_msr_write);
77935+
77936+void
77937+gr_handle_ioperm(void)
77938+{
77939+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
77940+ return;
77941+}
77942+
77943+void
77944+gr_handle_iopl(void)
77945+{
77946+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
77947+ return;
77948+}
77949+
77950+void
77951+gr_handle_mem_readwrite(u64 from, u64 to)
77952+{
77953+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
77954+ return;
77955+}
77956+
77957+void
77958+gr_handle_vm86(void)
77959+{
77960+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
77961+ return;
77962+}
77963+
77964+void
77965+gr_log_badprocpid(const char *entry)
77966+{
77967+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
77968+ return;
77969+}
77970diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
77971new file mode 100644
77972index 0000000..cd9e124
77973--- /dev/null
77974+++ b/grsecurity/grsec_mount.c
77975@@ -0,0 +1,65 @@
77976+#include <linux/kernel.h>
77977+#include <linux/sched.h>
77978+#include <linux/mount.h>
77979+#include <linux/major.h>
77980+#include <linux/grsecurity.h>
77981+#include <linux/grinternal.h>
77982+
77983+void
77984+gr_log_remount(const char *devname, const int retval)
77985+{
77986+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77987+ if (grsec_enable_mount && (retval >= 0))
77988+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
77989+#endif
77990+ return;
77991+}
77992+
77993+void
77994+gr_log_unmount(const char *devname, const int retval)
77995+{
77996+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
77997+ if (grsec_enable_mount && (retval >= 0))
77998+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
77999+#endif
78000+ return;
78001+}
78002+
78003+void
78004+gr_log_mount(const char *from, const char *to, const int retval)
78005+{
78006+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
78007+ if (grsec_enable_mount && (retval >= 0))
78008+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
78009+#endif
78010+ return;
78011+}
78012+
78013+int
78014+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
78015+{
78016+#ifdef CONFIG_GRKERNSEC_ROFS
78017+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
78018+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
78019+ return -EPERM;
78020+ } else
78021+ return 0;
78022+#endif
78023+ return 0;
78024+}
78025+
78026+int
78027+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
78028+{
78029+#ifdef CONFIG_GRKERNSEC_ROFS
78030+ struct inode *inode = dentry->d_inode;
78031+
78032+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
78033+ inode && (S_ISBLK(inode->i_mode) || (S_ISCHR(inode->i_mode) && imajor(inode) == RAW_MAJOR))) {
78034+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
78035+ return -EPERM;
78036+ } else
78037+ return 0;
78038+#endif
78039+ return 0;
78040+}
78041diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
78042new file mode 100644
78043index 0000000..6ee9d50
78044--- /dev/null
78045+++ b/grsecurity/grsec_pax.c
78046@@ -0,0 +1,45 @@
78047+#include <linux/kernel.h>
78048+#include <linux/sched.h>
78049+#include <linux/mm.h>
78050+#include <linux/file.h>
78051+#include <linux/grinternal.h>
78052+#include <linux/grsecurity.h>
78053+
78054+void
78055+gr_log_textrel(struct vm_area_struct * vma)
78056+{
78057+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78058+ if (grsec_enable_log_rwxmaps)
78059+ gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
78060+#endif
78061+ return;
78062+}
78063+
78064+void gr_log_ptgnustack(struct file *file)
78065+{
78066+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78067+ if (grsec_enable_log_rwxmaps)
78068+ gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
78069+#endif
78070+ return;
78071+}
78072+
78073+void
78074+gr_log_rwxmmap(struct file *file)
78075+{
78076+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78077+ if (grsec_enable_log_rwxmaps)
78078+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
78079+#endif
78080+ return;
78081+}
78082+
78083+void
78084+gr_log_rwxmprotect(struct vm_area_struct *vma)
78085+{
78086+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78087+ if (grsec_enable_log_rwxmaps)
78088+ gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
78089+#endif
78090+ return;
78091+}
78092diff --git a/grsecurity/grsec_proc.c b/grsecurity/grsec_proc.c
78093new file mode 100644
78094index 0000000..2005a3a
78095--- /dev/null
78096+++ b/grsecurity/grsec_proc.c
78097@@ -0,0 +1,20 @@
78098+#include <linux/kernel.h>
78099+#include <linux/sched.h>
78100+#include <linux/grsecurity.h>
78101+#include <linux/grinternal.h>
78102+
78103+int gr_proc_is_restricted(void)
78104+{
78105+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78106+ const struct cred *cred = current_cred();
78107+#endif
78108+
78109+#ifdef CONFIG_GRKERNSEC_PROC_USER
78110+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
78111+ return -EACCES;
78112+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78113+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
78114+ return -EACCES;
78115+#endif
78116+ return 0;
78117+}
78118diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
78119new file mode 100644
78120index 0000000..f7f29aa
78121--- /dev/null
78122+++ b/grsecurity/grsec_ptrace.c
78123@@ -0,0 +1,30 @@
78124+#include <linux/kernel.h>
78125+#include <linux/sched.h>
78126+#include <linux/grinternal.h>
78127+#include <linux/security.h>
78128+
78129+void
78130+gr_audit_ptrace(struct task_struct *task)
78131+{
78132+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
78133+ if (grsec_enable_audit_ptrace)
78134+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
78135+#endif
78136+ return;
78137+}
78138+
78139+int
78140+gr_ptrace_readexec(struct file *file, int unsafe_flags)
78141+{
78142+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78143+ const struct dentry *dentry = file->f_path.dentry;
78144+ const struct vfsmount *mnt = file->f_path.mnt;
78145+
78146+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
78147+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
78148+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
78149+ return -EACCES;
78150+ }
78151+#endif
78152+ return 0;
78153+}
78154diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
78155new file mode 100644
78156index 0000000..3860c7e
78157--- /dev/null
78158+++ b/grsecurity/grsec_sig.c
78159@@ -0,0 +1,236 @@
78160+#include <linux/kernel.h>
78161+#include <linux/sched.h>
78162+#include <linux/fs.h>
78163+#include <linux/delay.h>
78164+#include <linux/grsecurity.h>
78165+#include <linux/grinternal.h>
78166+#include <linux/hardirq.h>
78167+
78168+char *signames[] = {
78169+ [SIGSEGV] = "Segmentation fault",
78170+ [SIGILL] = "Illegal instruction",
78171+ [SIGABRT] = "Abort",
78172+ [SIGBUS] = "Invalid alignment/Bus error"
78173+};
78174+
78175+void
78176+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
78177+{
78178+#ifdef CONFIG_GRKERNSEC_SIGNAL
78179+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
78180+ (sig == SIGABRT) || (sig == SIGBUS))) {
78181+ if (task_pid_nr(t) == task_pid_nr(current)) {
78182+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
78183+ } else {
78184+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
78185+ }
78186+ }
78187+#endif
78188+ return;
78189+}
78190+
78191+int
78192+gr_handle_signal(const struct task_struct *p, const int sig)
78193+{
78194+#ifdef CONFIG_GRKERNSEC
78195+ /* ignore the 0 signal for protected task checks */
78196+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
78197+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
78198+ return -EPERM;
78199+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
78200+ return -EPERM;
78201+ }
78202+#endif
78203+ return 0;
78204+}
78205+
78206+#ifdef CONFIG_GRKERNSEC
78207+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
78208+
78209+int gr_fake_force_sig(int sig, struct task_struct *t)
78210+{
78211+ unsigned long int flags;
78212+ int ret, blocked, ignored;
78213+ struct k_sigaction *action;
78214+
78215+ spin_lock_irqsave(&t->sighand->siglock, flags);
78216+ action = &t->sighand->action[sig-1];
78217+ ignored = action->sa.sa_handler == SIG_IGN;
78218+ blocked = sigismember(&t->blocked, sig);
78219+ if (blocked || ignored) {
78220+ action->sa.sa_handler = SIG_DFL;
78221+ if (blocked) {
78222+ sigdelset(&t->blocked, sig);
78223+ recalc_sigpending_and_wake(t);
78224+ }
78225+ }
78226+ if (action->sa.sa_handler == SIG_DFL)
78227+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
78228+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
78229+
78230+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
78231+
78232+ return ret;
78233+}
78234+#endif
78235+
78236+#define GR_USER_BAN_TIME (15 * 60)
78237+#define GR_DAEMON_BRUTE_TIME (30 * 60)
78238+
78239+void gr_handle_brute_attach(int dumpable)
78240+{
78241+#ifdef CONFIG_GRKERNSEC_BRUTE
78242+ struct task_struct *p = current;
78243+ kuid_t uid = GLOBAL_ROOT_UID;
78244+ int daemon = 0;
78245+
78246+ if (!grsec_enable_brute)
78247+ return;
78248+
78249+ rcu_read_lock();
78250+ read_lock(&tasklist_lock);
78251+ read_lock(&grsec_exec_file_lock);
78252+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
78253+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
78254+ p->real_parent->brute = 1;
78255+ daemon = 1;
78256+ } else {
78257+ const struct cred *cred = __task_cred(p), *cred2;
78258+ struct task_struct *tsk, *tsk2;
78259+
78260+ if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
78261+ struct user_struct *user;
78262+
78263+ uid = cred->uid;
78264+
78265+ /* this is put upon execution past expiration */
78266+ user = find_user(uid);
78267+ if (user == NULL)
78268+ goto unlock;
78269+ user->suid_banned = 1;
78270+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
78271+ if (user->suid_ban_expires == ~0UL)
78272+ user->suid_ban_expires--;
78273+
78274+ /* only kill other threads of the same binary, from the same user */
78275+ do_each_thread(tsk2, tsk) {
78276+ cred2 = __task_cred(tsk);
78277+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
78278+ gr_fake_force_sig(SIGKILL, tsk);
78279+ } while_each_thread(tsk2, tsk);
78280+ }
78281+ }
78282+unlock:
78283+ read_unlock(&grsec_exec_file_lock);
78284+ read_unlock(&tasklist_lock);
78285+ rcu_read_unlock();
78286+
78287+ if (gr_is_global_nonroot(uid))
78288+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
78289+ else if (daemon)
78290+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
78291+
78292+#endif
78293+ return;
78294+}
78295+
78296+void gr_handle_brute_check(void)
78297+{
78298+#ifdef CONFIG_GRKERNSEC_BRUTE
78299+ struct task_struct *p = current;
78300+
78301+ if (unlikely(p->brute)) {
78302+ if (!grsec_enable_brute)
78303+ p->brute = 0;
78304+ else if (time_before(get_seconds(), p->brute_expires))
78305+ msleep(30 * 1000);
78306+ }
78307+#endif
78308+ return;
78309+}
78310+
78311+void gr_handle_kernel_exploit(void)
78312+{
78313+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78314+ const struct cred *cred;
78315+ struct task_struct *tsk, *tsk2;
78316+ struct user_struct *user;
78317+ kuid_t uid;
78318+
78319+ if (in_irq() || in_serving_softirq() || in_nmi())
78320+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
78321+
78322+ uid = current_uid();
78323+
78324+ if (gr_is_global_root(uid))
78325+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
78326+ else {
78327+ /* kill all the processes of this user, hold a reference
78328+ to their creds struct, and prevent them from creating
78329+ another process until system reset
78330+ */
78331+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
78332+ GR_GLOBAL_UID(uid));
78333+ /* we intentionally leak this ref */
78334+ user = get_uid(current->cred->user);
78335+ if (user)
78336+ user->kernel_banned = 1;
78337+
78338+ /* kill all processes of this user */
78339+ read_lock(&tasklist_lock);
78340+ do_each_thread(tsk2, tsk) {
78341+ cred = __task_cred(tsk);
78342+ if (uid_eq(cred->uid, uid))
78343+ gr_fake_force_sig(SIGKILL, tsk);
78344+ } while_each_thread(tsk2, tsk);
78345+ read_unlock(&tasklist_lock);
78346+ }
78347+#endif
78348+}
78349+
78350+#ifdef CONFIG_GRKERNSEC_BRUTE
78351+static bool suid_ban_expired(struct user_struct *user)
78352+{
78353+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
78354+ user->suid_banned = 0;
78355+ user->suid_ban_expires = 0;
78356+ free_uid(user);
78357+ return true;
78358+ }
78359+
78360+ return false;
78361+}
78362+#endif
78363+
78364+int gr_process_kernel_exec_ban(void)
78365+{
78366+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78367+ if (unlikely(current->cred->user->kernel_banned))
78368+ return -EPERM;
78369+#endif
78370+ return 0;
78371+}
78372+
78373+int gr_process_kernel_setuid_ban(struct user_struct *user)
78374+{
78375+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78376+ if (unlikely(user->kernel_banned))
78377+ gr_fake_force_sig(SIGKILL, current);
78378+#endif
78379+ return 0;
78380+}
78381+
78382+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
78383+{
78384+#ifdef CONFIG_GRKERNSEC_BRUTE
78385+ struct user_struct *user = current->cred->user;
78386+ if (unlikely(user->suid_banned)) {
78387+ if (suid_ban_expired(user))
78388+ return 0;
78389+ /* disallow execution of suid binaries only */
78390+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
78391+ return -EPERM;
78392+ }
78393+#endif
78394+ return 0;
78395+}
78396diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
78397new file mode 100644
78398index 0000000..e3650b6
78399--- /dev/null
78400+++ b/grsecurity/grsec_sock.c
78401@@ -0,0 +1,244 @@
78402+#include <linux/kernel.h>
78403+#include <linux/module.h>
78404+#include <linux/sched.h>
78405+#include <linux/file.h>
78406+#include <linux/net.h>
78407+#include <linux/in.h>
78408+#include <linux/ip.h>
78409+#include <net/sock.h>
78410+#include <net/inet_sock.h>
78411+#include <linux/grsecurity.h>
78412+#include <linux/grinternal.h>
78413+#include <linux/gracl.h>
78414+
78415+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
78416+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
78417+
78418+EXPORT_SYMBOL_GPL(gr_search_udp_recvmsg);
78419+EXPORT_SYMBOL_GPL(gr_search_udp_sendmsg);
78420+
78421+#ifdef CONFIG_UNIX_MODULE
78422+EXPORT_SYMBOL_GPL(gr_acl_handle_unix);
78423+EXPORT_SYMBOL_GPL(gr_acl_handle_mknod);
78424+EXPORT_SYMBOL_GPL(gr_handle_chroot_unix);
78425+EXPORT_SYMBOL_GPL(gr_handle_create);
78426+#endif
78427+
78428+#ifdef CONFIG_GRKERNSEC
78429+#define gr_conn_table_size 32749
78430+struct conn_table_entry {
78431+ struct conn_table_entry *next;
78432+ struct signal_struct *sig;
78433+};
78434+
78435+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
78436+DEFINE_SPINLOCK(gr_conn_table_lock);
78437+
78438+extern const char * gr_socktype_to_name(unsigned char type);
78439+extern const char * gr_proto_to_name(unsigned char proto);
78440+extern const char * gr_sockfamily_to_name(unsigned char family);
78441+
78442+static __inline__ int
78443+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
78444+{
78445+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
78446+}
78447+
78448+static __inline__ int
78449+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
78450+ __u16 sport, __u16 dport)
78451+{
78452+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
78453+ sig->gr_sport == sport && sig->gr_dport == dport))
78454+ return 1;
78455+ else
78456+ return 0;
78457+}
78458+
78459+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
78460+{
78461+ struct conn_table_entry **match;
78462+ unsigned int index;
78463+
78464+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78465+ sig->gr_sport, sig->gr_dport,
78466+ gr_conn_table_size);
78467+
78468+ newent->sig = sig;
78469+
78470+ match = &gr_conn_table[index];
78471+ newent->next = *match;
78472+ *match = newent;
78473+
78474+ return;
78475+}
78476+
78477+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
78478+{
78479+ struct conn_table_entry *match, *last = NULL;
78480+ unsigned int index;
78481+
78482+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
78483+ sig->gr_sport, sig->gr_dport,
78484+ gr_conn_table_size);
78485+
78486+ match = gr_conn_table[index];
78487+ while (match && !conn_match(match->sig,
78488+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
78489+ sig->gr_dport)) {
78490+ last = match;
78491+ match = match->next;
78492+ }
78493+
78494+ if (match) {
78495+ if (last)
78496+ last->next = match->next;
78497+ else
78498+ gr_conn_table[index] = NULL;
78499+ kfree(match);
78500+ }
78501+
78502+ return;
78503+}
78504+
78505+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
78506+ __u16 sport, __u16 dport)
78507+{
78508+ struct conn_table_entry *match;
78509+ unsigned int index;
78510+
78511+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
78512+
78513+ match = gr_conn_table[index];
78514+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
78515+ match = match->next;
78516+
78517+ if (match)
78518+ return match->sig;
78519+ else
78520+ return NULL;
78521+}
78522+
78523+#endif
78524+
78525+void gr_update_task_in_ip_table(const struct inet_sock *inet)
78526+{
78527+#ifdef CONFIG_GRKERNSEC
78528+ struct signal_struct *sig = current->signal;
78529+ struct conn_table_entry *newent;
78530+
78531+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
78532+ if (newent == NULL)
78533+ return;
78534+ /* no bh lock needed since we are called with bh disabled */
78535+ spin_lock(&gr_conn_table_lock);
78536+ gr_del_task_from_ip_table_nolock(sig);
78537+ sig->gr_saddr = inet->inet_rcv_saddr;
78538+ sig->gr_daddr = inet->inet_daddr;
78539+ sig->gr_sport = inet->inet_sport;
78540+ sig->gr_dport = inet->inet_dport;
78541+ gr_add_to_task_ip_table_nolock(sig, newent);
78542+ spin_unlock(&gr_conn_table_lock);
78543+#endif
78544+ return;
78545+}
78546+
78547+void gr_del_task_from_ip_table(struct task_struct *task)
78548+{
78549+#ifdef CONFIG_GRKERNSEC
78550+ spin_lock_bh(&gr_conn_table_lock);
78551+ gr_del_task_from_ip_table_nolock(task->signal);
78552+ spin_unlock_bh(&gr_conn_table_lock);
78553+#endif
78554+ return;
78555+}
78556+
78557+void
78558+gr_attach_curr_ip(const struct sock *sk)
78559+{
78560+#ifdef CONFIG_GRKERNSEC
78561+ struct signal_struct *p, *set;
78562+ const struct inet_sock *inet = inet_sk(sk);
78563+
78564+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
78565+ return;
78566+
78567+ set = current->signal;
78568+
78569+ spin_lock_bh(&gr_conn_table_lock);
78570+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
78571+ inet->inet_dport, inet->inet_sport);
78572+ if (unlikely(p != NULL)) {
78573+ set->curr_ip = p->curr_ip;
78574+ set->used_accept = 1;
78575+ gr_del_task_from_ip_table_nolock(p);
78576+ spin_unlock_bh(&gr_conn_table_lock);
78577+ return;
78578+ }
78579+ spin_unlock_bh(&gr_conn_table_lock);
78580+
78581+ set->curr_ip = inet->inet_daddr;
78582+ set->used_accept = 1;
78583+#endif
78584+ return;
78585+}
78586+
78587+int
78588+gr_handle_sock_all(const int family, const int type, const int protocol)
78589+{
78590+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78591+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
78592+ (family != AF_UNIX)) {
78593+ if (family == AF_INET)
78594+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
78595+ else
78596+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
78597+ return -EACCES;
78598+ }
78599+#endif
78600+ return 0;
78601+}
78602+
78603+int
78604+gr_handle_sock_server(const struct sockaddr *sck)
78605+{
78606+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78607+ if (grsec_enable_socket_server &&
78608+ in_group_p(grsec_socket_server_gid) &&
78609+ sck && (sck->sa_family != AF_UNIX) &&
78610+ (sck->sa_family != AF_LOCAL)) {
78611+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78612+ return -EACCES;
78613+ }
78614+#endif
78615+ return 0;
78616+}
78617+
78618+int
78619+gr_handle_sock_server_other(const struct sock *sck)
78620+{
78621+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78622+ if (grsec_enable_socket_server &&
78623+ in_group_p(grsec_socket_server_gid) &&
78624+ sck && (sck->sk_family != AF_UNIX) &&
78625+ (sck->sk_family != AF_LOCAL)) {
78626+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
78627+ return -EACCES;
78628+ }
78629+#endif
78630+ return 0;
78631+}
78632+
78633+int
78634+gr_handle_sock_client(const struct sockaddr *sck)
78635+{
78636+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78637+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
78638+ sck && (sck->sa_family != AF_UNIX) &&
78639+ (sck->sa_family != AF_LOCAL)) {
78640+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
78641+ return -EACCES;
78642+ }
78643+#endif
78644+ return 0;
78645+}
78646diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
78647new file mode 100644
78648index 0000000..8159888
78649--- /dev/null
78650+++ b/grsecurity/grsec_sysctl.c
78651@@ -0,0 +1,479 @@
78652+#include <linux/kernel.h>
78653+#include <linux/sched.h>
78654+#include <linux/sysctl.h>
78655+#include <linux/grsecurity.h>
78656+#include <linux/grinternal.h>
78657+
78658+int
78659+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
78660+{
78661+#ifdef CONFIG_GRKERNSEC_SYSCTL
78662+ if (dirname == NULL || name == NULL)
78663+ return 0;
78664+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
78665+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
78666+ return -EACCES;
78667+ }
78668+#endif
78669+ return 0;
78670+}
78671+
78672+#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
78673+static int __maybe_unused __read_only one = 1;
78674+#endif
78675+
78676+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
78677+ defined(CONFIG_GRKERNSEC_DENYUSB)
78678+struct ctl_table grsecurity_table[] = {
78679+#ifdef CONFIG_GRKERNSEC_SYSCTL
78680+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
78681+#ifdef CONFIG_GRKERNSEC_IO
78682+ {
78683+ .procname = "disable_priv_io",
78684+ .data = &grsec_disable_privio,
78685+ .maxlen = sizeof(int),
78686+ .mode = 0600,
78687+ .proc_handler = &proc_dointvec,
78688+ },
78689+#endif
78690+#endif
78691+#ifdef CONFIG_GRKERNSEC_LINK
78692+ {
78693+ .procname = "linking_restrictions",
78694+ .data = &grsec_enable_link,
78695+ .maxlen = sizeof(int),
78696+ .mode = 0600,
78697+ .proc_handler = &proc_dointvec,
78698+ },
78699+#endif
78700+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
78701+ {
78702+ .procname = "enforce_symlinksifowner",
78703+ .data = &grsec_enable_symlinkown,
78704+ .maxlen = sizeof(int),
78705+ .mode = 0600,
78706+ .proc_handler = &proc_dointvec,
78707+ },
78708+ {
78709+ .procname = "symlinkown_gid",
78710+ .data = &grsec_symlinkown_gid,
78711+ .maxlen = sizeof(int),
78712+ .mode = 0600,
78713+ .proc_handler = &proc_dointvec,
78714+ },
78715+#endif
78716+#ifdef CONFIG_GRKERNSEC_BRUTE
78717+ {
78718+ .procname = "deter_bruteforce",
78719+ .data = &grsec_enable_brute,
78720+ .maxlen = sizeof(int),
78721+ .mode = 0600,
78722+ .proc_handler = &proc_dointvec,
78723+ },
78724+#endif
78725+#ifdef CONFIG_GRKERNSEC_FIFO
78726+ {
78727+ .procname = "fifo_restrictions",
78728+ .data = &grsec_enable_fifo,
78729+ .maxlen = sizeof(int),
78730+ .mode = 0600,
78731+ .proc_handler = &proc_dointvec,
78732+ },
78733+#endif
78734+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
78735+ {
78736+ .procname = "ptrace_readexec",
78737+ .data = &grsec_enable_ptrace_readexec,
78738+ .maxlen = sizeof(int),
78739+ .mode = 0600,
78740+ .proc_handler = &proc_dointvec,
78741+ },
78742+#endif
78743+#ifdef CONFIG_GRKERNSEC_SETXID
78744+ {
78745+ .procname = "consistent_setxid",
78746+ .data = &grsec_enable_setxid,
78747+ .maxlen = sizeof(int),
78748+ .mode = 0600,
78749+ .proc_handler = &proc_dointvec,
78750+ },
78751+#endif
78752+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
78753+ {
78754+ .procname = "ip_blackhole",
78755+ .data = &grsec_enable_blackhole,
78756+ .maxlen = sizeof(int),
78757+ .mode = 0600,
78758+ .proc_handler = &proc_dointvec,
78759+ },
78760+ {
78761+ .procname = "lastack_retries",
78762+ .data = &grsec_lastack_retries,
78763+ .maxlen = sizeof(int),
78764+ .mode = 0600,
78765+ .proc_handler = &proc_dointvec,
78766+ },
78767+#endif
78768+#ifdef CONFIG_GRKERNSEC_EXECLOG
78769+ {
78770+ .procname = "exec_logging",
78771+ .data = &grsec_enable_execlog,
78772+ .maxlen = sizeof(int),
78773+ .mode = 0600,
78774+ .proc_handler = &proc_dointvec,
78775+ },
78776+#endif
78777+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
78778+ {
78779+ .procname = "rwxmap_logging",
78780+ .data = &grsec_enable_log_rwxmaps,
78781+ .maxlen = sizeof(int),
78782+ .mode = 0600,
78783+ .proc_handler = &proc_dointvec,
78784+ },
78785+#endif
78786+#ifdef CONFIG_GRKERNSEC_SIGNAL
78787+ {
78788+ .procname = "signal_logging",
78789+ .data = &grsec_enable_signal,
78790+ .maxlen = sizeof(int),
78791+ .mode = 0600,
78792+ .proc_handler = &proc_dointvec,
78793+ },
78794+#endif
78795+#ifdef CONFIG_GRKERNSEC_FORKFAIL
78796+ {
78797+ .procname = "forkfail_logging",
78798+ .data = &grsec_enable_forkfail,
78799+ .maxlen = sizeof(int),
78800+ .mode = 0600,
78801+ .proc_handler = &proc_dointvec,
78802+ },
78803+#endif
78804+#ifdef CONFIG_GRKERNSEC_TIME
78805+ {
78806+ .procname = "timechange_logging",
78807+ .data = &grsec_enable_time,
78808+ .maxlen = sizeof(int),
78809+ .mode = 0600,
78810+ .proc_handler = &proc_dointvec,
78811+ },
78812+#endif
78813+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
78814+ {
78815+ .procname = "chroot_deny_shmat",
78816+ .data = &grsec_enable_chroot_shmat,
78817+ .maxlen = sizeof(int),
78818+ .mode = 0600,
78819+ .proc_handler = &proc_dointvec,
78820+ },
78821+#endif
78822+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
78823+ {
78824+ .procname = "chroot_deny_unix",
78825+ .data = &grsec_enable_chroot_unix,
78826+ .maxlen = sizeof(int),
78827+ .mode = 0600,
78828+ .proc_handler = &proc_dointvec,
78829+ },
78830+#endif
78831+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
78832+ {
78833+ .procname = "chroot_deny_mount",
78834+ .data = &grsec_enable_chroot_mount,
78835+ .maxlen = sizeof(int),
78836+ .mode = 0600,
78837+ .proc_handler = &proc_dointvec,
78838+ },
78839+#endif
78840+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
78841+ {
78842+ .procname = "chroot_deny_fchdir",
78843+ .data = &grsec_enable_chroot_fchdir,
78844+ .maxlen = sizeof(int),
78845+ .mode = 0600,
78846+ .proc_handler = &proc_dointvec,
78847+ },
78848+#endif
78849+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
78850+ {
78851+ .procname = "chroot_deny_chroot",
78852+ .data = &grsec_enable_chroot_double,
78853+ .maxlen = sizeof(int),
78854+ .mode = 0600,
78855+ .proc_handler = &proc_dointvec,
78856+ },
78857+#endif
78858+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
78859+ {
78860+ .procname = "chroot_deny_pivot",
78861+ .data = &grsec_enable_chroot_pivot,
78862+ .maxlen = sizeof(int),
78863+ .mode = 0600,
78864+ .proc_handler = &proc_dointvec,
78865+ },
78866+#endif
78867+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
78868+ {
78869+ .procname = "chroot_enforce_chdir",
78870+ .data = &grsec_enable_chroot_chdir,
78871+ .maxlen = sizeof(int),
78872+ .mode = 0600,
78873+ .proc_handler = &proc_dointvec,
78874+ },
78875+#endif
78876+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
78877+ {
78878+ .procname = "chroot_deny_chmod",
78879+ .data = &grsec_enable_chroot_chmod,
78880+ .maxlen = sizeof(int),
78881+ .mode = 0600,
78882+ .proc_handler = &proc_dointvec,
78883+ },
78884+#endif
78885+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
78886+ {
78887+ .procname = "chroot_deny_mknod",
78888+ .data = &grsec_enable_chroot_mknod,
78889+ .maxlen = sizeof(int),
78890+ .mode = 0600,
78891+ .proc_handler = &proc_dointvec,
78892+ },
78893+#endif
78894+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
78895+ {
78896+ .procname = "chroot_restrict_nice",
78897+ .data = &grsec_enable_chroot_nice,
78898+ .maxlen = sizeof(int),
78899+ .mode = 0600,
78900+ .proc_handler = &proc_dointvec,
78901+ },
78902+#endif
78903+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
78904+ {
78905+ .procname = "chroot_execlog",
78906+ .data = &grsec_enable_chroot_execlog,
78907+ .maxlen = sizeof(int),
78908+ .mode = 0600,
78909+ .proc_handler = &proc_dointvec,
78910+ },
78911+#endif
78912+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
78913+ {
78914+ .procname = "chroot_caps",
78915+ .data = &grsec_enable_chroot_caps,
78916+ .maxlen = sizeof(int),
78917+ .mode = 0600,
78918+ .proc_handler = &proc_dointvec,
78919+ },
78920+#endif
78921+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
78922+ {
78923+ .procname = "chroot_deny_sysctl",
78924+ .data = &grsec_enable_chroot_sysctl,
78925+ .maxlen = sizeof(int),
78926+ .mode = 0600,
78927+ .proc_handler = &proc_dointvec,
78928+ },
78929+#endif
78930+#ifdef CONFIG_GRKERNSEC_TPE
78931+ {
78932+ .procname = "tpe",
78933+ .data = &grsec_enable_tpe,
78934+ .maxlen = sizeof(int),
78935+ .mode = 0600,
78936+ .proc_handler = &proc_dointvec,
78937+ },
78938+ {
78939+ .procname = "tpe_gid",
78940+ .data = &grsec_tpe_gid,
78941+ .maxlen = sizeof(int),
78942+ .mode = 0600,
78943+ .proc_handler = &proc_dointvec,
78944+ },
78945+#endif
78946+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
78947+ {
78948+ .procname = "tpe_invert",
78949+ .data = &grsec_enable_tpe_invert,
78950+ .maxlen = sizeof(int),
78951+ .mode = 0600,
78952+ .proc_handler = &proc_dointvec,
78953+ },
78954+#endif
78955+#ifdef CONFIG_GRKERNSEC_TPE_ALL
78956+ {
78957+ .procname = "tpe_restrict_all",
78958+ .data = &grsec_enable_tpe_all,
78959+ .maxlen = sizeof(int),
78960+ .mode = 0600,
78961+ .proc_handler = &proc_dointvec,
78962+ },
78963+#endif
78964+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
78965+ {
78966+ .procname = "socket_all",
78967+ .data = &grsec_enable_socket_all,
78968+ .maxlen = sizeof(int),
78969+ .mode = 0600,
78970+ .proc_handler = &proc_dointvec,
78971+ },
78972+ {
78973+ .procname = "socket_all_gid",
78974+ .data = &grsec_socket_all_gid,
78975+ .maxlen = sizeof(int),
78976+ .mode = 0600,
78977+ .proc_handler = &proc_dointvec,
78978+ },
78979+#endif
78980+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
78981+ {
78982+ .procname = "socket_client",
78983+ .data = &grsec_enable_socket_client,
78984+ .maxlen = sizeof(int),
78985+ .mode = 0600,
78986+ .proc_handler = &proc_dointvec,
78987+ },
78988+ {
78989+ .procname = "socket_client_gid",
78990+ .data = &grsec_socket_client_gid,
78991+ .maxlen = sizeof(int),
78992+ .mode = 0600,
78993+ .proc_handler = &proc_dointvec,
78994+ },
78995+#endif
78996+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
78997+ {
78998+ .procname = "socket_server",
78999+ .data = &grsec_enable_socket_server,
79000+ .maxlen = sizeof(int),
79001+ .mode = 0600,
79002+ .proc_handler = &proc_dointvec,
79003+ },
79004+ {
79005+ .procname = "socket_server_gid",
79006+ .data = &grsec_socket_server_gid,
79007+ .maxlen = sizeof(int),
79008+ .mode = 0600,
79009+ .proc_handler = &proc_dointvec,
79010+ },
79011+#endif
79012+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
79013+ {
79014+ .procname = "audit_group",
79015+ .data = &grsec_enable_group,
79016+ .maxlen = sizeof(int),
79017+ .mode = 0600,
79018+ .proc_handler = &proc_dointvec,
79019+ },
79020+ {
79021+ .procname = "audit_gid",
79022+ .data = &grsec_audit_gid,
79023+ .maxlen = sizeof(int),
79024+ .mode = 0600,
79025+ .proc_handler = &proc_dointvec,
79026+ },
79027+#endif
79028+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
79029+ {
79030+ .procname = "audit_chdir",
79031+ .data = &grsec_enable_chdir,
79032+ .maxlen = sizeof(int),
79033+ .mode = 0600,
79034+ .proc_handler = &proc_dointvec,
79035+ },
79036+#endif
79037+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
79038+ {
79039+ .procname = "audit_mount",
79040+ .data = &grsec_enable_mount,
79041+ .maxlen = sizeof(int),
79042+ .mode = 0600,
79043+ .proc_handler = &proc_dointvec,
79044+ },
79045+#endif
79046+#ifdef CONFIG_GRKERNSEC_DMESG
79047+ {
79048+ .procname = "dmesg",
79049+ .data = &grsec_enable_dmesg,
79050+ .maxlen = sizeof(int),
79051+ .mode = 0600,
79052+ .proc_handler = &proc_dointvec,
79053+ },
79054+#endif
79055+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79056+ {
79057+ .procname = "chroot_findtask",
79058+ .data = &grsec_enable_chroot_findtask,
79059+ .maxlen = sizeof(int),
79060+ .mode = 0600,
79061+ .proc_handler = &proc_dointvec,
79062+ },
79063+#endif
79064+#ifdef CONFIG_GRKERNSEC_RESLOG
79065+ {
79066+ .procname = "resource_logging",
79067+ .data = &grsec_resource_logging,
79068+ .maxlen = sizeof(int),
79069+ .mode = 0600,
79070+ .proc_handler = &proc_dointvec,
79071+ },
79072+#endif
79073+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
79074+ {
79075+ .procname = "audit_ptrace",
79076+ .data = &grsec_enable_audit_ptrace,
79077+ .maxlen = sizeof(int),
79078+ .mode = 0600,
79079+ .proc_handler = &proc_dointvec,
79080+ },
79081+#endif
79082+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
79083+ {
79084+ .procname = "harden_ptrace",
79085+ .data = &grsec_enable_harden_ptrace,
79086+ .maxlen = sizeof(int),
79087+ .mode = 0600,
79088+ .proc_handler = &proc_dointvec,
79089+ },
79090+#endif
79091+#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
79092+ {
79093+ .procname = "harden_ipc",
79094+ .data = &grsec_enable_harden_ipc,
79095+ .maxlen = sizeof(int),
79096+ .mode = 0600,
79097+ .proc_handler = &proc_dointvec,
79098+ },
79099+#endif
79100+ {
79101+ .procname = "grsec_lock",
79102+ .data = &grsec_lock,
79103+ .maxlen = sizeof(int),
79104+ .mode = 0600,
79105+ .proc_handler = &proc_dointvec,
79106+ },
79107+#endif
79108+#ifdef CONFIG_GRKERNSEC_ROFS
79109+ {
79110+ .procname = "romount_protect",
79111+ .data = &grsec_enable_rofs,
79112+ .maxlen = sizeof(int),
79113+ .mode = 0600,
79114+ .proc_handler = &proc_dointvec_minmax,
79115+ .extra1 = &one,
79116+ .extra2 = &one,
79117+ },
79118+#endif
79119+#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
79120+ {
79121+ .procname = "deny_new_usb",
79122+ .data = &grsec_deny_new_usb,
79123+ .maxlen = sizeof(int),
79124+ .mode = 0600,
79125+ .proc_handler = &proc_dointvec,
79126+ },
79127+#endif
79128+ { }
79129+};
79130+#endif
79131diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
79132new file mode 100644
79133index 0000000..61b514e
79134--- /dev/null
79135+++ b/grsecurity/grsec_time.c
79136@@ -0,0 +1,16 @@
79137+#include <linux/kernel.h>
79138+#include <linux/sched.h>
79139+#include <linux/grinternal.h>
79140+#include <linux/module.h>
79141+
79142+void
79143+gr_log_timechange(void)
79144+{
79145+#ifdef CONFIG_GRKERNSEC_TIME
79146+ if (grsec_enable_time)
79147+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
79148+#endif
79149+ return;
79150+}
79151+
79152+EXPORT_SYMBOL_GPL(gr_log_timechange);
79153diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
79154new file mode 100644
79155index 0000000..d1953de
79156--- /dev/null
79157+++ b/grsecurity/grsec_tpe.c
79158@@ -0,0 +1,78 @@
79159+#include <linux/kernel.h>
79160+#include <linux/sched.h>
79161+#include <linux/file.h>
79162+#include <linux/fs.h>
79163+#include <linux/grinternal.h>
79164+
79165+extern int gr_acl_tpe_check(void);
79166+
79167+int
79168+gr_tpe_allow(const struct file *file)
79169+{
79170+#ifdef CONFIG_GRKERNSEC
79171+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
79172+ struct inode *file_inode = file->f_path.dentry->d_inode;
79173+ const struct cred *cred = current_cred();
79174+ char *msg = NULL;
79175+ char *msg2 = NULL;
79176+
79177+ // never restrict root
79178+ if (gr_is_global_root(cred->uid))
79179+ return 1;
79180+
79181+ if (grsec_enable_tpe) {
79182+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
79183+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
79184+ msg = "not being in trusted group";
79185+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
79186+ msg = "being in untrusted group";
79187+#else
79188+ if (in_group_p(grsec_tpe_gid))
79189+ msg = "being in untrusted group";
79190+#endif
79191+ }
79192+ if (!msg && gr_acl_tpe_check())
79193+ msg = "being in untrusted role";
79194+
79195+ // not in any affected group/role
79196+ if (!msg)
79197+ goto next_check;
79198+
79199+ if (gr_is_global_nonroot(inode->i_uid))
79200+ msg2 = "file in non-root-owned directory";
79201+ else if (inode->i_mode & S_IWOTH)
79202+ msg2 = "file in world-writable directory";
79203+ else if (inode->i_mode & S_IWGRP)
79204+ msg2 = "file in group-writable directory";
79205+ else if (file_inode->i_mode & S_IWOTH)
79206+ msg2 = "file is world-writable";
79207+
79208+ if (msg && msg2) {
79209+ char fullmsg[70] = {0};
79210+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
79211+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
79212+ return 0;
79213+ }
79214+ msg = NULL;
79215+next_check:
79216+#ifdef CONFIG_GRKERNSEC_TPE_ALL
79217+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
79218+ return 1;
79219+
79220+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
79221+ msg = "directory not owned by user";
79222+ else if (inode->i_mode & S_IWOTH)
79223+ msg = "file in world-writable directory";
79224+ else if (inode->i_mode & S_IWGRP)
79225+ msg = "file in group-writable directory";
79226+ else if (file_inode->i_mode & S_IWOTH)
79227+ msg = "file is world-writable";
79228+
79229+ if (msg) {
79230+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
79231+ return 0;
79232+ }
79233+#endif
79234+#endif
79235+ return 1;
79236+}
79237diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
79238new file mode 100644
79239index 0000000..ae02d8e
79240--- /dev/null
79241+++ b/grsecurity/grsec_usb.c
79242@@ -0,0 +1,15 @@
79243+#include <linux/kernel.h>
79244+#include <linux/grinternal.h>
79245+#include <linux/module.h>
79246+
79247+int gr_handle_new_usb(void)
79248+{
79249+#ifdef CONFIG_GRKERNSEC_DENYUSB
79250+ if (grsec_deny_new_usb) {
79251+ printk(KERN_ALERT "grsec: denied insert of new USB device\n");
79252+ return 1;
79253+ }
79254+#endif
79255+ return 0;
79256+}
79257+EXPORT_SYMBOL_GPL(gr_handle_new_usb);
79258diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
79259new file mode 100644
79260index 0000000..158b330
79261--- /dev/null
79262+++ b/grsecurity/grsum.c
79263@@ -0,0 +1,64 @@
79264+#include <linux/err.h>
79265+#include <linux/kernel.h>
79266+#include <linux/sched.h>
79267+#include <linux/mm.h>
79268+#include <linux/scatterlist.h>
79269+#include <linux/crypto.h>
79270+#include <linux/gracl.h>
79271+
79272+
79273+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
79274+#error "crypto and sha256 must be built into the kernel"
79275+#endif
79276+
79277+int
79278+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
79279+{
79280+ struct crypto_hash *tfm;
79281+ struct hash_desc desc;
79282+ struct scatterlist sg[2];
79283+ unsigned char temp_sum[GR_SHA_LEN] __attribute__((aligned(__alignof__(unsigned long))));
79284+ unsigned long *tmpsumptr = (unsigned long *)temp_sum;
79285+ unsigned long *sumptr = (unsigned long *)sum;
79286+ int cryptres;
79287+ int retval = 1;
79288+ volatile int mismatched = 0;
79289+ volatile int dummy = 0;
79290+ unsigned int i;
79291+
79292+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
79293+ if (IS_ERR(tfm)) {
79294+ /* should never happen, since sha256 should be built in */
79295+ memset(entry->pw, 0, GR_PW_LEN);
79296+ return 1;
79297+ }
79298+
79299+ sg_init_table(sg, 2);
79300+ sg_set_buf(&sg[0], salt, GR_SALT_LEN);
79301+ sg_set_buf(&sg[1], entry->pw, strlen(entry->pw));
79302+
79303+ desc.tfm = tfm;
79304+ desc.flags = 0;
79305+
79306+ cryptres = crypto_hash_digest(&desc, sg, GR_SALT_LEN + strlen(entry->pw),
79307+ temp_sum);
79308+
79309+ memset(entry->pw, 0, GR_PW_LEN);
79310+
79311+ if (cryptres)
79312+ goto out;
79313+
79314+ for (i = 0; i < GR_SHA_LEN/sizeof(tmpsumptr[0]); i++)
79315+ if (sumptr[i] != tmpsumptr[i])
79316+ mismatched = 1;
79317+ else
79318+ dummy = 1; // waste a cycle
79319+
79320+ if (!mismatched)
79321+ retval = dummy - 1;
79322+
79323+out:
79324+ crypto_free_hash(tfm);
79325+
79326+ return retval;
79327+}
79328diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
79329index 77ff547..181834f 100644
79330--- a/include/asm-generic/4level-fixup.h
79331+++ b/include/asm-generic/4level-fixup.h
79332@@ -13,8 +13,10 @@
79333 #define pmd_alloc(mm, pud, address) \
79334 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
79335 NULL: pmd_offset(pud, address))
79336+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
79337
79338 #define pud_alloc(mm, pgd, address) (pgd)
79339+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
79340 #define pud_offset(pgd, start) (pgd)
79341 #define pud_none(pud) 0
79342 #define pud_bad(pud) 0
79343diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
79344index b7babf0..1e4b4f1 100644
79345--- a/include/asm-generic/atomic-long.h
79346+++ b/include/asm-generic/atomic-long.h
79347@@ -22,6 +22,12 @@
79348
79349 typedef atomic64_t atomic_long_t;
79350
79351+#ifdef CONFIG_PAX_REFCOUNT
79352+typedef atomic64_unchecked_t atomic_long_unchecked_t;
79353+#else
79354+typedef atomic64_t atomic_long_unchecked_t;
79355+#endif
79356+
79357 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
79358
79359 static inline long atomic_long_read(atomic_long_t *l)
79360@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79361 return (long)atomic64_read(v);
79362 }
79363
79364+#ifdef CONFIG_PAX_REFCOUNT
79365+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79366+{
79367+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79368+
79369+ return (long)atomic64_read_unchecked(v);
79370+}
79371+#endif
79372+
79373 static inline void atomic_long_set(atomic_long_t *l, long i)
79374 {
79375 atomic64_t *v = (atomic64_t *)l;
79376@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79377 atomic64_set(v, i);
79378 }
79379
79380+#ifdef CONFIG_PAX_REFCOUNT
79381+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79382+{
79383+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79384+
79385+ atomic64_set_unchecked(v, i);
79386+}
79387+#endif
79388+
79389 static inline void atomic_long_inc(atomic_long_t *l)
79390 {
79391 atomic64_t *v = (atomic64_t *)l;
79392@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79393 atomic64_inc(v);
79394 }
79395
79396+#ifdef CONFIG_PAX_REFCOUNT
79397+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79398+{
79399+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79400+
79401+ atomic64_inc_unchecked(v);
79402+}
79403+#endif
79404+
79405 static inline void atomic_long_dec(atomic_long_t *l)
79406 {
79407 atomic64_t *v = (atomic64_t *)l;
79408@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79409 atomic64_dec(v);
79410 }
79411
79412+#ifdef CONFIG_PAX_REFCOUNT
79413+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79414+{
79415+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79416+
79417+ atomic64_dec_unchecked(v);
79418+}
79419+#endif
79420+
79421 static inline void atomic_long_add(long i, atomic_long_t *l)
79422 {
79423 atomic64_t *v = (atomic64_t *)l;
79424@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79425 atomic64_add(i, v);
79426 }
79427
79428+#ifdef CONFIG_PAX_REFCOUNT
79429+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79430+{
79431+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79432+
79433+ atomic64_add_unchecked(i, v);
79434+}
79435+#endif
79436+
79437 static inline void atomic_long_sub(long i, atomic_long_t *l)
79438 {
79439 atomic64_t *v = (atomic64_t *)l;
79440@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79441 atomic64_sub(i, v);
79442 }
79443
79444+#ifdef CONFIG_PAX_REFCOUNT
79445+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79446+{
79447+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79448+
79449+ atomic64_sub_unchecked(i, v);
79450+}
79451+#endif
79452+
79453 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79454 {
79455 atomic64_t *v = (atomic64_t *)l;
79456@@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79457 return atomic64_add_negative(i, v);
79458 }
79459
79460-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79461+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79462 {
79463 atomic64_t *v = (atomic64_t *)l;
79464
79465 return (long)atomic64_add_return(i, v);
79466 }
79467
79468+#ifdef CONFIG_PAX_REFCOUNT
79469+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79470+{
79471+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79472+
79473+ return (long)atomic64_add_return_unchecked(i, v);
79474+}
79475+#endif
79476+
79477 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79478 {
79479 atomic64_t *v = (atomic64_t *)l;
79480@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79481 return (long)atomic64_inc_return(v);
79482 }
79483
79484+#ifdef CONFIG_PAX_REFCOUNT
79485+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79486+{
79487+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
79488+
79489+ return (long)atomic64_inc_return_unchecked(v);
79490+}
79491+#endif
79492+
79493 static inline long atomic_long_dec_return(atomic_long_t *l)
79494 {
79495 atomic64_t *v = (atomic64_t *)l;
79496@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79497
79498 typedef atomic_t atomic_long_t;
79499
79500+#ifdef CONFIG_PAX_REFCOUNT
79501+typedef atomic_unchecked_t atomic_long_unchecked_t;
79502+#else
79503+typedef atomic_t atomic_long_unchecked_t;
79504+#endif
79505+
79506 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
79507 static inline long atomic_long_read(atomic_long_t *l)
79508 {
79509@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
79510 return (long)atomic_read(v);
79511 }
79512
79513+#ifdef CONFIG_PAX_REFCOUNT
79514+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
79515+{
79516+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79517+
79518+ return (long)atomic_read_unchecked(v);
79519+}
79520+#endif
79521+
79522 static inline void atomic_long_set(atomic_long_t *l, long i)
79523 {
79524 atomic_t *v = (atomic_t *)l;
79525@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
79526 atomic_set(v, i);
79527 }
79528
79529+#ifdef CONFIG_PAX_REFCOUNT
79530+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
79531+{
79532+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79533+
79534+ atomic_set_unchecked(v, i);
79535+}
79536+#endif
79537+
79538 static inline void atomic_long_inc(atomic_long_t *l)
79539 {
79540 atomic_t *v = (atomic_t *)l;
79541@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
79542 atomic_inc(v);
79543 }
79544
79545+#ifdef CONFIG_PAX_REFCOUNT
79546+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
79547+{
79548+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79549+
79550+ atomic_inc_unchecked(v);
79551+}
79552+#endif
79553+
79554 static inline void atomic_long_dec(atomic_long_t *l)
79555 {
79556 atomic_t *v = (atomic_t *)l;
79557@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
79558 atomic_dec(v);
79559 }
79560
79561+#ifdef CONFIG_PAX_REFCOUNT
79562+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
79563+{
79564+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79565+
79566+ atomic_dec_unchecked(v);
79567+}
79568+#endif
79569+
79570 static inline void atomic_long_add(long i, atomic_long_t *l)
79571 {
79572 atomic_t *v = (atomic_t *)l;
79573@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
79574 atomic_add(i, v);
79575 }
79576
79577+#ifdef CONFIG_PAX_REFCOUNT
79578+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
79579+{
79580+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79581+
79582+ atomic_add_unchecked(i, v);
79583+}
79584+#endif
79585+
79586 static inline void atomic_long_sub(long i, atomic_long_t *l)
79587 {
79588 atomic_t *v = (atomic_t *)l;
79589@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
79590 atomic_sub(i, v);
79591 }
79592
79593+#ifdef CONFIG_PAX_REFCOUNT
79594+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
79595+{
79596+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79597+
79598+ atomic_sub_unchecked(i, v);
79599+}
79600+#endif
79601+
79602 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
79603 {
79604 atomic_t *v = (atomic_t *)l;
79605@@ -211,13 +349,23 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
79606 return atomic_add_negative(i, v);
79607 }
79608
79609-static inline long atomic_long_add_return(long i, atomic_long_t *l)
79610+static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
79611 {
79612 atomic_t *v = (atomic_t *)l;
79613
79614 return (long)atomic_add_return(i, v);
79615 }
79616
79617+#ifdef CONFIG_PAX_REFCOUNT
79618+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
79619+{
79620+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79621+
79622+ return (long)atomic_add_return_unchecked(i, v);
79623+}
79624+
79625+#endif
79626+
79627 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
79628 {
79629 atomic_t *v = (atomic_t *)l;
79630@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
79631 return (long)atomic_inc_return(v);
79632 }
79633
79634+#ifdef CONFIG_PAX_REFCOUNT
79635+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
79636+{
79637+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
79638+
79639+ return (long)atomic_inc_return_unchecked(v);
79640+}
79641+#endif
79642+
79643 static inline long atomic_long_dec_return(atomic_long_t *l)
79644 {
79645 atomic_t *v = (atomic_t *)l;
79646@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
79647
79648 #endif /* BITS_PER_LONG == 64 */
79649
79650+#ifdef CONFIG_PAX_REFCOUNT
79651+static inline void pax_refcount_needs_these_functions(void)
79652+{
79653+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
79654+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
79655+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
79656+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
79657+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
79658+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
79659+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
79660+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
79661+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
79662+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
79663+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
79664+#ifdef CONFIG_X86
79665+ atomic_clear_mask_unchecked(0, NULL);
79666+ atomic_set_mask_unchecked(0, NULL);
79667+#endif
79668+
79669+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
79670+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
79671+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
79672+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
79673+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
79674+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
79675+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
79676+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
79677+}
79678+#else
79679+#define atomic_read_unchecked(v) atomic_read(v)
79680+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
79681+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
79682+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
79683+#define atomic_inc_unchecked(v) atomic_inc(v)
79684+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
79685+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
79686+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
79687+#define atomic_dec_unchecked(v) atomic_dec(v)
79688+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
79689+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
79690+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
79691+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
79692+
79693+#define atomic_long_read_unchecked(v) atomic_long_read(v)
79694+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
79695+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
79696+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
79697+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
79698+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
79699+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
79700+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
79701+#endif
79702+
79703 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
79704diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
79705index 9c79e76..9f7827d 100644
79706--- a/include/asm-generic/atomic.h
79707+++ b/include/asm-generic/atomic.h
79708@@ -154,7 +154,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
79709 * Atomically clears the bits set in @mask from @v
79710 */
79711 #ifndef atomic_clear_mask
79712-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
79713+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
79714 {
79715 unsigned long flags;
79716
79717diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
79718index b18ce4f..2ee2843 100644
79719--- a/include/asm-generic/atomic64.h
79720+++ b/include/asm-generic/atomic64.h
79721@@ -16,6 +16,8 @@ typedef struct {
79722 long long counter;
79723 } atomic64_t;
79724
79725+typedef atomic64_t atomic64_unchecked_t;
79726+
79727 #define ATOMIC64_INIT(i) { (i) }
79728
79729 extern long long atomic64_read(const atomic64_t *v);
79730@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
79731 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
79732 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
79733
79734+#define atomic64_read_unchecked(v) atomic64_read(v)
79735+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
79736+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
79737+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
79738+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
79739+#define atomic64_inc_unchecked(v) atomic64_inc(v)
79740+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
79741+#define atomic64_dec_unchecked(v) atomic64_dec(v)
79742+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
79743+
79744 #endif /* _ASM_GENERIC_ATOMIC64_H */
79745diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
79746index 1402fa8..025a736 100644
79747--- a/include/asm-generic/barrier.h
79748+++ b/include/asm-generic/barrier.h
79749@@ -74,7 +74,7 @@
79750 do { \
79751 compiletime_assert_atomic_type(*p); \
79752 smp_mb(); \
79753- ACCESS_ONCE(*p) = (v); \
79754+ ACCESS_ONCE_RW(*p) = (v); \
79755 } while (0)
79756
79757 #define smp_load_acquire(p) \
79758diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
79759index a60a7cc..0fe12f2 100644
79760--- a/include/asm-generic/bitops/__fls.h
79761+++ b/include/asm-generic/bitops/__fls.h
79762@@ -9,7 +9,7 @@
79763 *
79764 * Undefined if no set bit exists, so code should check against 0 first.
79765 */
79766-static __always_inline unsigned long __fls(unsigned long word)
79767+static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
79768 {
79769 int num = BITS_PER_LONG - 1;
79770
79771diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
79772index 0576d1f..dad6c71 100644
79773--- a/include/asm-generic/bitops/fls.h
79774+++ b/include/asm-generic/bitops/fls.h
79775@@ -9,7 +9,7 @@
79776 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
79777 */
79778
79779-static __always_inline int fls(int x)
79780+static __always_inline int __intentional_overflow(-1) fls(int x)
79781 {
79782 int r = 32;
79783
79784diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
79785index b097cf8..3d40e14 100644
79786--- a/include/asm-generic/bitops/fls64.h
79787+++ b/include/asm-generic/bitops/fls64.h
79788@@ -15,7 +15,7 @@
79789 * at position 64.
79790 */
79791 #if BITS_PER_LONG == 32
79792-static __always_inline int fls64(__u64 x)
79793+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79794 {
79795 __u32 h = x >> 32;
79796 if (h)
79797@@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
79798 return fls(x);
79799 }
79800 #elif BITS_PER_LONG == 64
79801-static __always_inline int fls64(__u64 x)
79802+static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
79803 {
79804 if (x == 0)
79805 return 0;
79806diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
79807index 1bfcfe5..e04c5c9 100644
79808--- a/include/asm-generic/cache.h
79809+++ b/include/asm-generic/cache.h
79810@@ -6,7 +6,7 @@
79811 * cache lines need to provide their own cache.h.
79812 */
79813
79814-#define L1_CACHE_SHIFT 5
79815-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
79816+#define L1_CACHE_SHIFT 5UL
79817+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
79818
79819 #endif /* __ASM_GENERIC_CACHE_H */
79820diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
79821index 0d68a1e..b74a761 100644
79822--- a/include/asm-generic/emergency-restart.h
79823+++ b/include/asm-generic/emergency-restart.h
79824@@ -1,7 +1,7 @@
79825 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
79826 #define _ASM_GENERIC_EMERGENCY_RESTART_H
79827
79828-static inline void machine_emergency_restart(void)
79829+static inline __noreturn void machine_emergency_restart(void)
79830 {
79831 machine_restart(NULL);
79832 }
79833diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
79834index 975e1cc..0b8a083 100644
79835--- a/include/asm-generic/io.h
79836+++ b/include/asm-generic/io.h
79837@@ -289,7 +289,7 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
79838 * These are pretty trivial
79839 */
79840 #ifndef virt_to_phys
79841-static inline unsigned long virt_to_phys(volatile void *address)
79842+static inline unsigned long __intentional_overflow(-1) virt_to_phys(volatile void *address)
79843 {
79844 return __pa((unsigned long)address);
79845 }
79846diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
79847index 90f99c7..00ce236 100644
79848--- a/include/asm-generic/kmap_types.h
79849+++ b/include/asm-generic/kmap_types.h
79850@@ -2,9 +2,9 @@
79851 #define _ASM_GENERIC_KMAP_TYPES_H
79852
79853 #ifdef __WITH_KM_FENCE
79854-# define KM_TYPE_NR 41
79855+# define KM_TYPE_NR 42
79856 #else
79857-# define KM_TYPE_NR 20
79858+# define KM_TYPE_NR 21
79859 #endif
79860
79861 #endif
79862diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
79863index 9ceb03b..62b0b8f 100644
79864--- a/include/asm-generic/local.h
79865+++ b/include/asm-generic/local.h
79866@@ -23,24 +23,37 @@ typedef struct
79867 atomic_long_t a;
79868 } local_t;
79869
79870+typedef struct {
79871+ atomic_long_unchecked_t a;
79872+} local_unchecked_t;
79873+
79874 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
79875
79876 #define local_read(l) atomic_long_read(&(l)->a)
79877+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
79878 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
79879+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
79880 #define local_inc(l) atomic_long_inc(&(l)->a)
79881+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
79882 #define local_dec(l) atomic_long_dec(&(l)->a)
79883+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
79884 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
79885+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
79886 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
79887+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
79888
79889 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
79890 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
79891 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
79892 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
79893 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
79894+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
79895 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
79896 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
79897+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
79898
79899 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79900+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
79901 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
79902 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
79903 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
79904diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
79905index 725612b..9cc513a 100644
79906--- a/include/asm-generic/pgtable-nopmd.h
79907+++ b/include/asm-generic/pgtable-nopmd.h
79908@@ -1,14 +1,19 @@
79909 #ifndef _PGTABLE_NOPMD_H
79910 #define _PGTABLE_NOPMD_H
79911
79912-#ifndef __ASSEMBLY__
79913-
79914 #include <asm-generic/pgtable-nopud.h>
79915
79916-struct mm_struct;
79917-
79918 #define __PAGETABLE_PMD_FOLDED
79919
79920+#define PMD_SHIFT PUD_SHIFT
79921+#define PTRS_PER_PMD 1
79922+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
79923+#define PMD_MASK (~(PMD_SIZE-1))
79924+
79925+#ifndef __ASSEMBLY__
79926+
79927+struct mm_struct;
79928+
79929 /*
79930 * Having the pmd type consist of a pud gets the size right, and allows
79931 * us to conceptually access the pud entry that this pmd is folded into
79932@@ -16,11 +21,6 @@ struct mm_struct;
79933 */
79934 typedef struct { pud_t pud; } pmd_t;
79935
79936-#define PMD_SHIFT PUD_SHIFT
79937-#define PTRS_PER_PMD 1
79938-#define PMD_SIZE (1UL << PMD_SHIFT)
79939-#define PMD_MASK (~(PMD_SIZE-1))
79940-
79941 /*
79942 * The "pud_xxx()" functions here are trivial for a folded two-level
79943 * setup: the pmd is never bad, and a pmd always exists (as it's folded
79944diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
79945index 810431d..0ec4804f 100644
79946--- a/include/asm-generic/pgtable-nopud.h
79947+++ b/include/asm-generic/pgtable-nopud.h
79948@@ -1,10 +1,15 @@
79949 #ifndef _PGTABLE_NOPUD_H
79950 #define _PGTABLE_NOPUD_H
79951
79952-#ifndef __ASSEMBLY__
79953-
79954 #define __PAGETABLE_PUD_FOLDED
79955
79956+#define PUD_SHIFT PGDIR_SHIFT
79957+#define PTRS_PER_PUD 1
79958+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
79959+#define PUD_MASK (~(PUD_SIZE-1))
79960+
79961+#ifndef __ASSEMBLY__
79962+
79963 /*
79964 * Having the pud type consist of a pgd gets the size right, and allows
79965 * us to conceptually access the pgd entry that this pud is folded into
79966@@ -12,11 +17,6 @@
79967 */
79968 typedef struct { pgd_t pgd; } pud_t;
79969
79970-#define PUD_SHIFT PGDIR_SHIFT
79971-#define PTRS_PER_PUD 1
79972-#define PUD_SIZE (1UL << PUD_SHIFT)
79973-#define PUD_MASK (~(PUD_SIZE-1))
79974-
79975 /*
79976 * The "pgd_xxx()" functions here are trivial for a folded two-level
79977 * setup: the pud is never bad, and a pud always exists (as it's folded
79978@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
79979 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
79980
79981 #define pgd_populate(mm, pgd, pud) do { } while (0)
79982+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
79983 /*
79984 * (puds are folded into pgds so this doesn't get actually called,
79985 * but the define is needed for a generic inline function.)
79986diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
79987index 53b2acc..f4568e7 100644
79988--- a/include/asm-generic/pgtable.h
79989+++ b/include/asm-generic/pgtable.h
79990@@ -819,6 +819,22 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
79991 }
79992 #endif /* CONFIG_NUMA_BALANCING */
79993
79994+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
79995+#ifdef CONFIG_PAX_KERNEXEC
79996+#error KERNEXEC requires pax_open_kernel
79997+#else
79998+static inline unsigned long pax_open_kernel(void) { return 0; }
79999+#endif
80000+#endif
80001+
80002+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
80003+#ifdef CONFIG_PAX_KERNEXEC
80004+#error KERNEXEC requires pax_close_kernel
80005+#else
80006+static inline unsigned long pax_close_kernel(void) { return 0; }
80007+#endif
80008+#endif
80009+
80010 #endif /* CONFIG_MMU */
80011
80012 #endif /* !__ASSEMBLY__ */
80013diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
80014index 72d8803..cb9749c 100644
80015--- a/include/asm-generic/uaccess.h
80016+++ b/include/asm-generic/uaccess.h
80017@@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
80018 return __clear_user(to, n);
80019 }
80020
80021+#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
80022+#ifdef CONFIG_PAX_MEMORY_UDEREF
80023+#error UDEREF requires pax_open_userland
80024+#else
80025+static inline unsigned long pax_open_userland(void) { return 0; }
80026+#endif
80027+#endif
80028+
80029+#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
80030+#ifdef CONFIG_PAX_MEMORY_UDEREF
80031+#error UDEREF requires pax_close_userland
80032+#else
80033+static inline unsigned long pax_close_userland(void) { return 0; }
80034+#endif
80035+#endif
80036+
80037 #endif /* __ASM_GENERIC_UACCESS_H */
80038diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
80039index 5ba0360..e85c934 100644
80040--- a/include/asm-generic/vmlinux.lds.h
80041+++ b/include/asm-generic/vmlinux.lds.h
80042@@ -231,6 +231,7 @@
80043 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
80044 VMLINUX_SYMBOL(__start_rodata) = .; \
80045 *(.rodata) *(.rodata.*) \
80046+ *(.data..read_only) \
80047 *(__vermagic) /* Kernel version magic */ \
80048 . = ALIGN(8); \
80049 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
80050@@ -722,17 +723,18 @@
80051 * section in the linker script will go there too. @phdr should have
80052 * a leading colon.
80053 *
80054- * Note that this macros defines __per_cpu_load as an absolute symbol.
80055+ * Note that this macros defines per_cpu_load as an absolute symbol.
80056 * If there is no need to put the percpu section at a predetermined
80057 * address, use PERCPU_SECTION.
80058 */
80059 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
80060- VMLINUX_SYMBOL(__per_cpu_load) = .; \
80061- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
80062+ per_cpu_load = .; \
80063+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
80064 - LOAD_OFFSET) { \
80065+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
80066 PERCPU_INPUT(cacheline) \
80067 } phdr \
80068- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
80069+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
80070
80071 /**
80072 * PERCPU_SECTION - define output section for percpu area, simple version
80073diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
80074index 623a59c..1e79ab9 100644
80075--- a/include/crypto/algapi.h
80076+++ b/include/crypto/algapi.h
80077@@ -34,7 +34,7 @@ struct crypto_type {
80078 unsigned int maskclear;
80079 unsigned int maskset;
80080 unsigned int tfmsize;
80081-};
80082+} __do_const;
80083
80084 struct crypto_instance {
80085 struct crypto_alg alg;
80086diff --git a/include/drm/drmP.h b/include/drm/drmP.h
80087index 1968907..7d9ed9f 100644
80088--- a/include/drm/drmP.h
80089+++ b/include/drm/drmP.h
80090@@ -68,6 +68,7 @@
80091 #include <linux/workqueue.h>
80092 #include <linux/poll.h>
80093 #include <asm/pgalloc.h>
80094+#include <asm/local.h>
80095 #include <drm/drm.h>
80096 #include <drm/drm_sarea.h>
80097 #include <drm/drm_vma_manager.h>
80098@@ -260,10 +261,12 @@ do { \
80099 * \param cmd command.
80100 * \param arg argument.
80101 */
80102-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
80103+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
80104+ struct drm_file *file_priv);
80105+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
80106 struct drm_file *file_priv);
80107
80108-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80109+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
80110 unsigned long arg);
80111
80112 #define DRM_IOCTL_NR(n) _IOC_NR(n)
80113@@ -279,10 +282,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
80114 struct drm_ioctl_desc {
80115 unsigned int cmd;
80116 int flags;
80117- drm_ioctl_t *func;
80118+ drm_ioctl_t func;
80119 unsigned int cmd_drv;
80120 const char *name;
80121-};
80122+} __do_const;
80123
80124 /**
80125 * Creates a driver or general drm_ioctl_desc array entry for the given
80126@@ -946,7 +949,8 @@ struct drm_info_list {
80127 int (*show)(struct seq_file*, void*); /** show callback */
80128 u32 driver_features; /**< Required driver features for this entry */
80129 void *data;
80130-};
80131+} __do_const;
80132+typedef struct drm_info_list __no_const drm_info_list_no_const;
80133
80134 /**
80135 * debugfs node structure. This structure represents a debugfs file.
80136@@ -1030,7 +1034,7 @@ struct drm_device {
80137
80138 /** \name Usage Counters */
80139 /*@{ */
80140- int open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80141+ local_t open_count; /**< Outstanding files open, protected by drm_global_mutex. */
80142 spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */
80143 int buf_use; /**< Buffers in use -- cannot alloc */
80144 atomic_t buf_alloc; /**< Buffer allocation in progress */
80145diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
80146index a3d75fe..6802f9c 100644
80147--- a/include/drm/drm_crtc_helper.h
80148+++ b/include/drm/drm_crtc_helper.h
80149@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
80150 struct drm_connector *connector);
80151 /* disable encoder when not in use - more explicit than dpms off */
80152 void (*disable)(struct drm_encoder *encoder);
80153-};
80154+} __no_const;
80155
80156 /**
80157 * drm_connector_helper_funcs - helper operations for connectors
80158diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
80159index a70d456..6ea07cd 100644
80160--- a/include/drm/i915_pciids.h
80161+++ b/include/drm/i915_pciids.h
80162@@ -37,7 +37,7 @@
80163 */
80164 #define INTEL_VGA_DEVICE(id, info) { \
80165 0x8086, id, \
80166- ~0, ~0, \
80167+ PCI_ANY_ID, PCI_ANY_ID, \
80168 0x030000, 0xff0000, \
80169 (unsigned long) info }
80170
80171diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
80172index 72dcbe8..8db58d7 100644
80173--- a/include/drm/ttm/ttm_memory.h
80174+++ b/include/drm/ttm/ttm_memory.h
80175@@ -48,7 +48,7 @@
80176
80177 struct ttm_mem_shrink {
80178 int (*do_shrink) (struct ttm_mem_shrink *);
80179-};
80180+} __no_const;
80181
80182 /**
80183 * struct ttm_mem_global - Global memory accounting structure.
80184diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
80185index 49a8284..9643967 100644
80186--- a/include/drm/ttm/ttm_page_alloc.h
80187+++ b/include/drm/ttm/ttm_page_alloc.h
80188@@ -80,6 +80,7 @@ void ttm_dma_page_alloc_fini(void);
80189 */
80190 extern int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
80191
80192+struct device;
80193 extern int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80194 extern void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
80195
80196diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
80197index 4b840e8..155d235 100644
80198--- a/include/keys/asymmetric-subtype.h
80199+++ b/include/keys/asymmetric-subtype.h
80200@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
80201 /* Verify the signature on a key of this subtype (optional) */
80202 int (*verify_signature)(const struct key *key,
80203 const struct public_key_signature *sig);
80204-};
80205+} __do_const;
80206
80207 /**
80208 * asymmetric_key_subtype - Get the subtype from an asymmetric key
80209diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
80210index c1da539..1dcec55 100644
80211--- a/include/linux/atmdev.h
80212+++ b/include/linux/atmdev.h
80213@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
80214 #endif
80215
80216 struct k_atm_aal_stats {
80217-#define __HANDLE_ITEM(i) atomic_t i
80218+#define __HANDLE_ITEM(i) atomic_unchecked_t i
80219 __AAL_STAT_ITEMS
80220 #undef __HANDLE_ITEM
80221 };
80222@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
80223 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
80224 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
80225 struct module *owner;
80226-};
80227+} __do_const ;
80228
80229 struct atmphy_ops {
80230 int (*start)(struct atm_dev *dev);
80231diff --git a/include/linux/audit.h b/include/linux/audit.h
80232index 22cfddb..1514eef 100644
80233--- a/include/linux/audit.h
80234+++ b/include/linux/audit.h
80235@@ -86,7 +86,7 @@ extern unsigned compat_dir_class[];
80236 extern unsigned compat_chattr_class[];
80237 extern unsigned compat_signal_class[];
80238
80239-extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall);
80240+extern int audit_classify_compat_syscall(int abi, unsigned syscall);
80241
80242 /* audit_names->type values */
80243 #define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
80244@@ -210,7 +210,7 @@ static inline void audit_ptrace(struct task_struct *t)
80245 extern unsigned int audit_serial(void);
80246 extern int auditsc_get_stamp(struct audit_context *ctx,
80247 struct timespec *t, unsigned int *serial);
80248-extern int audit_set_loginuid(kuid_t loginuid);
80249+extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
80250
80251 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
80252 {
80253diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
80254index 61f29e5..e67c658 100644
80255--- a/include/linux/binfmts.h
80256+++ b/include/linux/binfmts.h
80257@@ -44,7 +44,7 @@ struct linux_binprm {
80258 unsigned interp_flags;
80259 unsigned interp_data;
80260 unsigned long loader, exec;
80261-};
80262+} __randomize_layout;
80263
80264 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
80265 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
80266@@ -73,8 +73,10 @@ struct linux_binfmt {
80267 int (*load_binary)(struct linux_binprm *);
80268 int (*load_shlib)(struct file *);
80269 int (*core_dump)(struct coredump_params *cprm);
80270+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
80271+ void (*handle_mmap)(struct file *);
80272 unsigned long min_coredump; /* minimal dump size */
80273-};
80274+} __do_const __randomize_layout;
80275
80276 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
80277
80278diff --git a/include/linux/bitops.h b/include/linux/bitops.h
80279index 38b5f5c..645018c 100644
80280--- a/include/linux/bitops.h
80281+++ b/include/linux/bitops.h
80282@@ -125,7 +125,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
80283 * @word: value to rotate
80284 * @shift: bits to roll
80285 */
80286-static inline __u32 rol32(__u32 word, unsigned int shift)
80287+static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
80288 {
80289 return (word << shift) | (word >> (32 - shift));
80290 }
80291@@ -135,7 +135,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
80292 * @word: value to rotate
80293 * @shift: bits to roll
80294 */
80295-static inline __u32 ror32(__u32 word, unsigned int shift)
80296+static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
80297 {
80298 return (word >> shift) | (word << (32 - shift));
80299 }
80300@@ -191,7 +191,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
80301 return (__s32)(value << shift) >> shift;
80302 }
80303
80304-static inline unsigned fls_long(unsigned long l)
80305+static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
80306 {
80307 if (sizeof(l) == 4)
80308 return fls(l);
80309diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
80310index f2057ff8..59dfa2d 100644
80311--- a/include/linux/blkdev.h
80312+++ b/include/linux/blkdev.h
80313@@ -1625,7 +1625,7 @@ struct block_device_operations {
80314 /* this callback is with swap_lock and sometimes page table lock held */
80315 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
80316 struct module *owner;
80317-};
80318+} __do_const;
80319
80320 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
80321 unsigned long);
80322diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
80323index afc1343..9735539 100644
80324--- a/include/linux/blktrace_api.h
80325+++ b/include/linux/blktrace_api.h
80326@@ -25,7 +25,7 @@ struct blk_trace {
80327 struct dentry *dropped_file;
80328 struct dentry *msg_file;
80329 struct list_head running_list;
80330- atomic_t dropped;
80331+ atomic_unchecked_t dropped;
80332 };
80333
80334 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
80335diff --git a/include/linux/cache.h b/include/linux/cache.h
80336index 17e7e82..1d7da26 100644
80337--- a/include/linux/cache.h
80338+++ b/include/linux/cache.h
80339@@ -16,6 +16,14 @@
80340 #define __read_mostly
80341 #endif
80342
80343+#ifndef __read_only
80344+#ifdef CONFIG_PAX_KERNEXEC
80345+#error KERNEXEC requires __read_only
80346+#else
80347+#define __read_only __read_mostly
80348+#endif
80349+#endif
80350+
80351 #ifndef ____cacheline_aligned
80352 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
80353 #endif
80354diff --git a/include/linux/capability.h b/include/linux/capability.h
80355index aa93e5e..985a1b0 100644
80356--- a/include/linux/capability.h
80357+++ b/include/linux/capability.h
80358@@ -214,9 +214,14 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
80359 extern bool capable(int cap);
80360 extern bool ns_capable(struct user_namespace *ns, int cap);
80361 extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
80362+extern bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap);
80363 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
80364+extern bool capable_nolog(int cap);
80365+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
80366
80367 /* audit system wants to get cap info from files as well */
80368 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
80369
80370+extern int is_privileged_binary(const struct dentry *dentry);
80371+
80372 #endif /* !_LINUX_CAPABILITY_H */
80373diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
80374index 8609d57..86e4d79 100644
80375--- a/include/linux/cdrom.h
80376+++ b/include/linux/cdrom.h
80377@@ -87,7 +87,6 @@ struct cdrom_device_ops {
80378
80379 /* driver specifications */
80380 const int capability; /* capability flags */
80381- int n_minors; /* number of active minor devices */
80382 /* handle uniform packets for scsi type devices (scsi,atapi) */
80383 int (*generic_packet) (struct cdrom_device_info *,
80384 struct packet_command *);
80385diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
80386index 4ce9056..86caac6 100644
80387--- a/include/linux/cleancache.h
80388+++ b/include/linux/cleancache.h
80389@@ -31,7 +31,7 @@ struct cleancache_ops {
80390 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
80391 void (*invalidate_inode)(int, struct cleancache_filekey);
80392 void (*invalidate_fs)(int);
80393-};
80394+} __no_const;
80395
80396 extern struct cleancache_ops *
80397 cleancache_register_ops(struct cleancache_ops *ops);
80398diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
80399index da6996e..9d13d5f 100644
80400--- a/include/linux/clk-provider.h
80401+++ b/include/linux/clk-provider.h
80402@@ -180,6 +180,7 @@ struct clk_ops {
80403 void (*init)(struct clk_hw *hw);
80404 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
80405 };
80406+typedef struct clk_ops __no_const clk_ops_no_const;
80407
80408 /**
80409 * struct clk_init_data - holds init data that's common to all clocks and is
80410diff --git a/include/linux/compat.h b/include/linux/compat.h
80411index e649426..a74047b 100644
80412--- a/include/linux/compat.h
80413+++ b/include/linux/compat.h
80414@@ -316,7 +316,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
80415 compat_size_t __user *len_ptr);
80416
80417 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
80418-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
80419+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
80420 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
80421 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
80422 compat_ssize_t msgsz, int msgflg);
80423@@ -436,7 +436,7 @@ extern int compat_ptrace_request(struct task_struct *child,
80424 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80425 compat_ulong_t addr, compat_ulong_t data);
80426 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
80427- compat_long_t addr, compat_long_t data);
80428+ compat_ulong_t addr, compat_ulong_t data);
80429
80430 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
80431 /*
80432diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
80433index 2507fd2..55203f8 100644
80434--- a/include/linux/compiler-gcc4.h
80435+++ b/include/linux/compiler-gcc4.h
80436@@ -39,9 +39,34 @@
80437 # define __compiletime_warning(message) __attribute__((warning(message)))
80438 # define __compiletime_error(message) __attribute__((error(message)))
80439 #endif /* __CHECKER__ */
80440+
80441+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
80442+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
80443+#define __bos0(ptr) __bos((ptr), 0)
80444+#define __bos1(ptr) __bos((ptr), 1)
80445 #endif /* GCC_VERSION >= 40300 */
80446
80447 #if GCC_VERSION >= 40500
80448+
80449+#ifdef RANDSTRUCT_PLUGIN
80450+#define __randomize_layout __attribute__((randomize_layout))
80451+#define __no_randomize_layout __attribute__((no_randomize_layout))
80452+#endif
80453+
80454+#ifdef CONSTIFY_PLUGIN
80455+#define __no_const __attribute__((no_const))
80456+#define __do_const __attribute__((do_const))
80457+#endif
80458+
80459+#ifdef SIZE_OVERFLOW_PLUGIN
80460+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
80461+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
80462+#endif
80463+
80464+#ifdef LATENT_ENTROPY_PLUGIN
80465+#define __latent_entropy __attribute__((latent_entropy))
80466+#endif
80467+
80468 /*
80469 * Mark a position in code as unreachable. This can be used to
80470 * suppress control flow warnings after asm blocks that transfer
80471diff --git a/include/linux/compiler.h b/include/linux/compiler.h
80472index d5ad7b1..3b74638 100644
80473--- a/include/linux/compiler.h
80474+++ b/include/linux/compiler.h
80475@@ -5,11 +5,14 @@
80476
80477 #ifdef __CHECKER__
80478 # define __user __attribute__((noderef, address_space(1)))
80479+# define __force_user __force __user
80480 # define __kernel __attribute__((address_space(0)))
80481+# define __force_kernel __force __kernel
80482 # define __safe __attribute__((safe))
80483 # define __force __attribute__((force))
80484 # define __nocast __attribute__((nocast))
80485 # define __iomem __attribute__((noderef, address_space(2)))
80486+# define __force_iomem __force __iomem
80487 # define __must_hold(x) __attribute__((context(x,1,1)))
80488 # define __acquires(x) __attribute__((context(x,0,1)))
80489 # define __releases(x) __attribute__((context(x,1,0)))
80490@@ -17,20 +20,37 @@
80491 # define __release(x) __context__(x,-1)
80492 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
80493 # define __percpu __attribute__((noderef, address_space(3)))
80494+# define __force_percpu __force __percpu
80495 #ifdef CONFIG_SPARSE_RCU_POINTER
80496 # define __rcu __attribute__((noderef, address_space(4)))
80497+# define __force_rcu __force __rcu
80498 #else
80499 # define __rcu
80500+# define __force_rcu
80501 #endif
80502 extern void __chk_user_ptr(const volatile void __user *);
80503 extern void __chk_io_ptr(const volatile void __iomem *);
80504 #else
80505-# define __user
80506-# define __kernel
80507+# ifdef CHECKER_PLUGIN
80508+//# define __user
80509+//# define __force_user
80510+//# define __kernel
80511+//# define __force_kernel
80512+# else
80513+# ifdef STRUCTLEAK_PLUGIN
80514+# define __user __attribute__((user))
80515+# else
80516+# define __user
80517+# endif
80518+# define __force_user
80519+# define __kernel
80520+# define __force_kernel
80521+# endif
80522 # define __safe
80523 # define __force
80524 # define __nocast
80525 # define __iomem
80526+# define __force_iomem
80527 # define __chk_user_ptr(x) (void)0
80528 # define __chk_io_ptr(x) (void)0
80529 # define __builtin_warning(x, y...) (1)
80530@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
80531 # define __release(x) (void)0
80532 # define __cond_lock(x,c) (c)
80533 # define __percpu
80534+# define __force_percpu
80535 # define __rcu
80536+# define __force_rcu
80537 #endif
80538
80539 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
80540@@ -286,6 +308,34 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
80541 # define __attribute_const__ /* unimplemented */
80542 #endif
80543
80544+#ifndef __randomize_layout
80545+# define __randomize_layout
80546+#endif
80547+
80548+#ifndef __no_randomize_layout
80549+# define __no_randomize_layout
80550+#endif
80551+
80552+#ifndef __no_const
80553+# define __no_const
80554+#endif
80555+
80556+#ifndef __do_const
80557+# define __do_const
80558+#endif
80559+
80560+#ifndef __size_overflow
80561+# define __size_overflow(...)
80562+#endif
80563+
80564+#ifndef __intentional_overflow
80565+# define __intentional_overflow(...)
80566+#endif
80567+
80568+#ifndef __latent_entropy
80569+# define __latent_entropy
80570+#endif
80571+
80572 /*
80573 * Tell gcc if a function is cold. The compiler will assume any path
80574 * directly leading to the call is unlikely.
80575@@ -295,6 +345,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
80576 #define __cold
80577 #endif
80578
80579+#ifndef __alloc_size
80580+#define __alloc_size(...)
80581+#endif
80582+
80583+#ifndef __bos
80584+#define __bos(ptr, arg)
80585+#endif
80586+
80587+#ifndef __bos0
80588+#define __bos0(ptr)
80589+#endif
80590+
80591+#ifndef __bos1
80592+#define __bos1(ptr)
80593+#endif
80594+
80595 /* Simple shorthand for a section definition */
80596 #ifndef __section
80597 # define __section(S) __attribute__ ((__section__(#S)))
80598@@ -378,7 +444,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
80599 * use is to mediate communication between process-level code and irq/NMI
80600 * handlers, all running on the same CPU.
80601 */
80602-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
80603+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
80604+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
80605
80606 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
80607 #ifdef CONFIG_KPROBES
80608diff --git a/include/linux/completion.h b/include/linux/completion.h
80609index 5d5aaae..0ea9b84 100644
80610--- a/include/linux/completion.h
80611+++ b/include/linux/completion.h
80612@@ -90,16 +90,16 @@ static inline void reinit_completion(struct completion *x)
80613
80614 extern void wait_for_completion(struct completion *);
80615 extern void wait_for_completion_io(struct completion *);
80616-extern int wait_for_completion_interruptible(struct completion *x);
80617-extern int wait_for_completion_killable(struct completion *x);
80618+extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
80619+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
80620 extern unsigned long wait_for_completion_timeout(struct completion *x,
80621- unsigned long timeout);
80622+ unsigned long timeout) __intentional_overflow(-1);
80623 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
80624- unsigned long timeout);
80625+ unsigned long timeout) __intentional_overflow(-1);
80626 extern long wait_for_completion_interruptible_timeout(
80627- struct completion *x, unsigned long timeout);
80628+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80629 extern long wait_for_completion_killable_timeout(
80630- struct completion *x, unsigned long timeout);
80631+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
80632 extern bool try_wait_for_completion(struct completion *x);
80633 extern bool completion_done(struct completion *x);
80634
80635diff --git a/include/linux/configfs.h b/include/linux/configfs.h
80636index 34025df..d94bbbc 100644
80637--- a/include/linux/configfs.h
80638+++ b/include/linux/configfs.h
80639@@ -125,7 +125,7 @@ struct configfs_attribute {
80640 const char *ca_name;
80641 struct module *ca_owner;
80642 umode_t ca_mode;
80643-};
80644+} __do_const;
80645
80646 /*
80647 * Users often need to create attribute structures for their configurable
80648diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
80649index 7d1955a..d86a3ca 100644
80650--- a/include/linux/cpufreq.h
80651+++ b/include/linux/cpufreq.h
80652@@ -203,6 +203,7 @@ struct global_attr {
80653 ssize_t (*store)(struct kobject *a, struct attribute *b,
80654 const char *c, size_t count);
80655 };
80656+typedef struct global_attr __no_const global_attr_no_const;
80657
80658 #define define_one_global_ro(_name) \
80659 static struct global_attr _name = \
80660@@ -269,7 +270,7 @@ struct cpufreq_driver {
80661 bool boost_supported;
80662 bool boost_enabled;
80663 int (*set_boost) (int state);
80664-};
80665+} __do_const;
80666
80667 /* flags */
80668 #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
80669diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
80670index 25e0df6..952dffd 100644
80671--- a/include/linux/cpuidle.h
80672+++ b/include/linux/cpuidle.h
80673@@ -50,7 +50,8 @@ struct cpuidle_state {
80674 int index);
80675
80676 int (*enter_dead) (struct cpuidle_device *dev, int index);
80677-};
80678+} __do_const;
80679+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
80680
80681 /* Idle State Flags */
80682 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
80683@@ -209,7 +210,7 @@ struct cpuidle_governor {
80684 void (*reflect) (struct cpuidle_device *dev, int index);
80685
80686 struct module *owner;
80687-};
80688+} __do_const;
80689
80690 #ifdef CONFIG_CPU_IDLE
80691 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
80692diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
80693index 2997af6..424ddc1 100644
80694--- a/include/linux/cpumask.h
80695+++ b/include/linux/cpumask.h
80696@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80697 }
80698
80699 /* Valid inputs for n are -1 and 0. */
80700-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80701+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80702 {
80703 return n+1;
80704 }
80705
80706-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80707+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80708 {
80709 return n+1;
80710 }
80711
80712-static inline unsigned int cpumask_next_and(int n,
80713+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
80714 const struct cpumask *srcp,
80715 const struct cpumask *andp)
80716 {
80717@@ -174,7 +174,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
80718 *
80719 * Returns >= nr_cpu_ids if no further cpus set.
80720 */
80721-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80722+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
80723 {
80724 /* -1 is a legal arg here. */
80725 if (n != -1)
80726@@ -189,7 +189,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
80727 *
80728 * Returns >= nr_cpu_ids if no further cpus unset.
80729 */
80730-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80731+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
80732 {
80733 /* -1 is a legal arg here. */
80734 if (n != -1)
80735@@ -197,7 +197,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
80736 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
80737 }
80738
80739-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
80740+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
80741 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
80742 int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
80743
80744diff --git a/include/linux/cred.h b/include/linux/cred.h
80745index b2d0820..2ecafd3 100644
80746--- a/include/linux/cred.h
80747+++ b/include/linux/cred.h
80748@@ -35,7 +35,7 @@ struct group_info {
80749 int nblocks;
80750 kgid_t small_block[NGROUPS_SMALL];
80751 kgid_t *blocks[0];
80752-};
80753+} __randomize_layout;
80754
80755 /**
80756 * get_group_info - Get a reference to a group info structure
80757@@ -136,7 +136,7 @@ struct cred {
80758 struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
80759 struct group_info *group_info; /* supplementary groups for euid/fsgid */
80760 struct rcu_head rcu; /* RCU deletion hook */
80761-};
80762+} __randomize_layout;
80763
80764 extern void __put_cred(struct cred *);
80765 extern void exit_creds(struct task_struct *);
80766@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
80767 static inline void validate_process_creds(void)
80768 {
80769 }
80770+static inline void validate_task_creds(struct task_struct *task)
80771+{
80772+}
80773 #endif
80774
80775 /**
80776@@ -331,6 +334,7 @@ static inline void put_cred(const struct cred *_cred)
80777
80778 #define task_uid(task) (task_cred_xxx((task), uid))
80779 #define task_euid(task) (task_cred_xxx((task), euid))
80780+#define task_securebits(task) (task_cred_xxx((task), securebits))
80781
80782 #define current_cred_xxx(xxx) \
80783 ({ \
80784diff --git a/include/linux/crypto.h b/include/linux/crypto.h
80785index d45e949..51cf5ea 100644
80786--- a/include/linux/crypto.h
80787+++ b/include/linux/crypto.h
80788@@ -373,7 +373,7 @@ struct cipher_tfm {
80789 const u8 *key, unsigned int keylen);
80790 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80791 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
80792-};
80793+} __no_const;
80794
80795 struct hash_tfm {
80796 int (*init)(struct hash_desc *desc);
80797@@ -394,13 +394,13 @@ struct compress_tfm {
80798 int (*cot_decompress)(struct crypto_tfm *tfm,
80799 const u8 *src, unsigned int slen,
80800 u8 *dst, unsigned int *dlen);
80801-};
80802+} __no_const;
80803
80804 struct rng_tfm {
80805 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
80806 unsigned int dlen);
80807 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
80808-};
80809+} __no_const;
80810
80811 #define crt_ablkcipher crt_u.ablkcipher
80812 #define crt_aead crt_u.aead
80813diff --git a/include/linux/ctype.h b/include/linux/ctype.h
80814index 653589e..4ef254a 100644
80815--- a/include/linux/ctype.h
80816+++ b/include/linux/ctype.h
80817@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
80818 * Fast implementation of tolower() for internal usage. Do not use in your
80819 * code.
80820 */
80821-static inline char _tolower(const char c)
80822+static inline unsigned char _tolower(const unsigned char c)
80823 {
80824 return c | 0x20;
80825 }
80826diff --git a/include/linux/dcache.h b/include/linux/dcache.h
80827index 75a227c..1456987 100644
80828--- a/include/linux/dcache.h
80829+++ b/include/linux/dcache.h
80830@@ -134,7 +134,7 @@ struct dentry {
80831 } d_u;
80832 struct list_head d_subdirs; /* our children */
80833 struct hlist_node d_alias; /* inode alias list */
80834-};
80835+} __randomize_layout;
80836
80837 /*
80838 * dentry->d_lock spinlock nesting subclasses:
80839diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
80840index 7925bf0..d5143d2 100644
80841--- a/include/linux/decompress/mm.h
80842+++ b/include/linux/decompress/mm.h
80843@@ -77,7 +77,7 @@ static void free(void *where)
80844 * warnings when not needed (indeed large_malloc / large_free are not
80845 * needed by inflate */
80846
80847-#define malloc(a) kmalloc(a, GFP_KERNEL)
80848+#define malloc(a) kmalloc((a), GFP_KERNEL)
80849 #define free(a) kfree(a)
80850
80851 #define large_malloc(a) vmalloc(a)
80852diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
80853index f1863dc..5c26074 100644
80854--- a/include/linux/devfreq.h
80855+++ b/include/linux/devfreq.h
80856@@ -114,7 +114,7 @@ struct devfreq_governor {
80857 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
80858 int (*event_handler)(struct devfreq *devfreq,
80859 unsigned int event, void *data);
80860-};
80861+} __do_const;
80862
80863 /**
80864 * struct devfreq - Device devfreq structure
80865diff --git a/include/linux/device.h b/include/linux/device.h
80866index 43d183a..03b6ba2 100644
80867--- a/include/linux/device.h
80868+++ b/include/linux/device.h
80869@@ -310,7 +310,7 @@ struct subsys_interface {
80870 struct list_head node;
80871 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
80872 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
80873-};
80874+} __do_const;
80875
80876 int subsys_interface_register(struct subsys_interface *sif);
80877 void subsys_interface_unregister(struct subsys_interface *sif);
80878@@ -506,7 +506,7 @@ struct device_type {
80879 void (*release)(struct device *dev);
80880
80881 const struct dev_pm_ops *pm;
80882-};
80883+} __do_const;
80884
80885 /* interface for exporting device attributes */
80886 struct device_attribute {
80887@@ -516,11 +516,12 @@ struct device_attribute {
80888 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
80889 const char *buf, size_t count);
80890 };
80891+typedef struct device_attribute __no_const device_attribute_no_const;
80892
80893 struct dev_ext_attribute {
80894 struct device_attribute attr;
80895 void *var;
80896-};
80897+} __do_const;
80898
80899 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
80900 char *buf);
80901diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
80902index 931b709..89b2d89 100644
80903--- a/include/linux/dma-mapping.h
80904+++ b/include/linux/dma-mapping.h
80905@@ -60,7 +60,7 @@ struct dma_map_ops {
80906 u64 (*get_required_mask)(struct device *dev);
80907 #endif
80908 int is_phys;
80909-};
80910+} __do_const;
80911
80912 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
80913
80914diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
80915index 1f9e642..39e4263 100644
80916--- a/include/linux/dmaengine.h
80917+++ b/include/linux/dmaengine.h
80918@@ -1147,9 +1147,9 @@ struct dma_pinned_list {
80919 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
80920 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
80921
80922-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80923+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
80924 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
80925-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80926+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
80927 struct dma_pinned_list *pinned_list, struct page *page,
80928 unsigned int offset, size_t len);
80929
80930diff --git a/include/linux/efi.h b/include/linux/efi.h
80931index 45cb4ff..c9b4912 100644
80932--- a/include/linux/efi.h
80933+++ b/include/linux/efi.h
80934@@ -1036,6 +1036,7 @@ struct efivar_operations {
80935 efi_set_variable_t *set_variable;
80936 efi_query_variable_store_t *query_variable_store;
80937 };
80938+typedef struct efivar_operations __no_const efivar_operations_no_const;
80939
80940 struct efivars {
80941 /*
80942diff --git a/include/linux/elf.h b/include/linux/elf.h
80943index 67a5fa7..b817372 100644
80944--- a/include/linux/elf.h
80945+++ b/include/linux/elf.h
80946@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
80947 #define elf_note elf32_note
80948 #define elf_addr_t Elf32_Off
80949 #define Elf_Half Elf32_Half
80950+#define elf_dyn Elf32_Dyn
80951
80952 #else
80953
80954@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
80955 #define elf_note elf64_note
80956 #define elf_addr_t Elf64_Off
80957 #define Elf_Half Elf64_Half
80958+#define elf_dyn Elf64_Dyn
80959
80960 #endif
80961
80962diff --git a/include/linux/err.h b/include/linux/err.h
80963index a729120..6ede2c9 100644
80964--- a/include/linux/err.h
80965+++ b/include/linux/err.h
80966@@ -20,12 +20,12 @@
80967
80968 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
80969
80970-static inline void * __must_check ERR_PTR(long error)
80971+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
80972 {
80973 return (void *) error;
80974 }
80975
80976-static inline long __must_check PTR_ERR(__force const void *ptr)
80977+static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
80978 {
80979 return (long) ptr;
80980 }
80981diff --git a/include/linux/extcon.h b/include/linux/extcon.h
80982index 36f49c4..a2a1f4c 100644
80983--- a/include/linux/extcon.h
80984+++ b/include/linux/extcon.h
80985@@ -135,7 +135,7 @@ struct extcon_dev {
80986 /* /sys/class/extcon/.../mutually_exclusive/... */
80987 struct attribute_group attr_g_muex;
80988 struct attribute **attrs_muex;
80989- struct device_attribute *d_attrs_muex;
80990+ device_attribute_no_const *d_attrs_muex;
80991 };
80992
80993 /**
80994diff --git a/include/linux/fb.h b/include/linux/fb.h
80995index 09bb7a1..d98870a 100644
80996--- a/include/linux/fb.h
80997+++ b/include/linux/fb.h
80998@@ -305,7 +305,7 @@ struct fb_ops {
80999 /* called at KDB enter and leave time to prepare the console */
81000 int (*fb_debug_enter)(struct fb_info *info);
81001 int (*fb_debug_leave)(struct fb_info *info);
81002-};
81003+} __do_const;
81004
81005 #ifdef CONFIG_FB_TILEBLITTING
81006 #define FB_TILE_CURSOR_NONE 0
81007diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
81008index 230f87b..1fd0485 100644
81009--- a/include/linux/fdtable.h
81010+++ b/include/linux/fdtable.h
81011@@ -100,7 +100,7 @@ struct files_struct *get_files_struct(struct task_struct *);
81012 void put_files_struct(struct files_struct *fs);
81013 void reset_files_struct(struct files_struct *);
81014 int unshare_files(struct files_struct **);
81015-struct files_struct *dup_fd(struct files_struct *, int *);
81016+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
81017 void do_close_on_exec(struct files_struct *);
81018 int iterate_fd(struct files_struct *, unsigned,
81019 int (*)(const void *, struct file *, unsigned),
81020diff --git a/include/linux/filter.h b/include/linux/filter.h
81021index a5227ab..c789945 100644
81022--- a/include/linux/filter.h
81023+++ b/include/linux/filter.h
81024@@ -9,6 +9,11 @@
81025 #include <linux/skbuff.h>
81026 #include <linux/workqueue.h>
81027 #include <uapi/linux/filter.h>
81028+#include <asm/cacheflush.h>
81029+
81030+struct sk_buff;
81031+struct sock;
81032+struct seccomp_data;
81033
81034 /* Internally used and optimized filter representation with extended
81035 * instruction set based on top of classic BPF.
81036@@ -320,20 +325,23 @@ struct sock_fprog_kern {
81037 struct sock_filter *filter;
81038 };
81039
81040-struct sk_buff;
81041-struct sock;
81042-struct seccomp_data;
81043+struct bpf_work_struct {
81044+ struct bpf_prog *prog;
81045+ struct work_struct work;
81046+};
81047
81048 struct bpf_prog {
81049+ u32 pages; /* Number of allocated pages */
81050 u32 jited:1, /* Is our filter JIT'ed? */
81051 len:31; /* Number of filter blocks */
81052 struct sock_fprog_kern *orig_prog; /* Original BPF program */
81053+ struct bpf_work_struct *work; /* Deferred free work struct */
81054 unsigned int (*bpf_func)(const struct sk_buff *skb,
81055 const struct bpf_insn *filter);
81056+ /* Instructions for interpreter */
81057 union {
81058 struct sock_filter insns[0];
81059 struct bpf_insn insnsi[0];
81060- struct work_struct work;
81061 };
81062 };
81063
81064@@ -353,6 +361,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
81065
81066 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
81067
81068+#ifdef CONFIG_DEBUG_SET_MODULE_RONX
81069+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
81070+{
81071+ set_memory_ro((unsigned long)fp, fp->pages);
81072+}
81073+
81074+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
81075+{
81076+ set_memory_rw((unsigned long)fp, fp->pages);
81077+}
81078+#else
81079+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
81080+{
81081+}
81082+
81083+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
81084+{
81085+}
81086+#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
81087+
81088 int sk_filter(struct sock *sk, struct sk_buff *skb);
81089
81090 void bpf_prog_select_runtime(struct bpf_prog *fp);
81091@@ -361,6 +389,17 @@ void bpf_prog_free(struct bpf_prog *fp);
81092 int bpf_convert_filter(struct sock_filter *prog, int len,
81093 struct bpf_insn *new_prog, int *new_len);
81094
81095+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
81096+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
81097+ gfp_t gfp_extra_flags);
81098+void __bpf_prog_free(struct bpf_prog *fp);
81099+
81100+static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
81101+{
81102+ bpf_prog_unlock_ro(fp);
81103+ __bpf_prog_free(fp);
81104+}
81105+
81106 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
81107 void bpf_prog_destroy(struct bpf_prog *fp);
81108
81109@@ -450,7 +489,7 @@ static inline void bpf_jit_compile(struct bpf_prog *fp)
81110
81111 static inline void bpf_jit_free(struct bpf_prog *fp)
81112 {
81113- kfree(fp);
81114+ bpf_prog_unlock_free(fp);
81115 }
81116 #endif /* CONFIG_BPF_JIT */
81117
81118diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
81119index 8293262..2b3b8bd 100644
81120--- a/include/linux/frontswap.h
81121+++ b/include/linux/frontswap.h
81122@@ -11,7 +11,7 @@ struct frontswap_ops {
81123 int (*load)(unsigned, pgoff_t, struct page *);
81124 void (*invalidate_page)(unsigned, pgoff_t);
81125 void (*invalidate_area)(unsigned);
81126-};
81127+} __no_const;
81128
81129 extern bool frontswap_enabled;
81130 extern struct frontswap_ops *
81131diff --git a/include/linux/fs.h b/include/linux/fs.h
81132index 9418772..0155807 100644
81133--- a/include/linux/fs.h
81134+++ b/include/linux/fs.h
81135@@ -401,7 +401,7 @@ struct address_space {
81136 spinlock_t private_lock; /* for use by the address_space */
81137 struct list_head private_list; /* ditto */
81138 void *private_data; /* ditto */
81139-} __attribute__((aligned(sizeof(long))));
81140+} __attribute__((aligned(sizeof(long)))) __randomize_layout;
81141 /*
81142 * On most architectures that alignment is already the case; but
81143 * must be enforced here for CRIS, to let the least significant bit
81144@@ -444,7 +444,7 @@ struct block_device {
81145 int bd_fsfreeze_count;
81146 /* Mutex for freeze */
81147 struct mutex bd_fsfreeze_mutex;
81148-};
81149+} __randomize_layout;
81150
81151 /*
81152 * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
81153@@ -613,7 +613,7 @@ struct inode {
81154 #endif
81155
81156 void *i_private; /* fs or device private pointer */
81157-};
81158+} __randomize_layout;
81159
81160 static inline int inode_unhashed(struct inode *inode)
81161 {
81162@@ -806,7 +806,7 @@ struct file {
81163 struct list_head f_tfile_llink;
81164 #endif /* #ifdef CONFIG_EPOLL */
81165 struct address_space *f_mapping;
81166-} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
81167+} __attribute__((aligned(4))) __randomize_layout; /* lest something weird decides that 2 is OK */
81168
81169 struct file_handle {
81170 __u32 handle_bytes;
81171@@ -934,7 +934,7 @@ struct file_lock {
81172 int state; /* state of grant or error if -ve */
81173 } afs;
81174 } fl_u;
81175-};
81176+} __randomize_layout;
81177
81178 /* The following constant reflects the upper bound of the file/locking space */
81179 #ifndef OFFSET_MAX
81180@@ -1284,7 +1284,7 @@ struct super_block {
81181 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
81182 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
81183 struct rcu_head rcu;
81184-};
81185+} __randomize_layout;
81186
81187 extern struct timespec current_fs_time(struct super_block *sb);
81188
81189@@ -1510,7 +1510,8 @@ struct file_operations {
81190 long (*fallocate)(struct file *file, int mode, loff_t offset,
81191 loff_t len);
81192 int (*show_fdinfo)(struct seq_file *m, struct file *f);
81193-};
81194+} __do_const __randomize_layout;
81195+typedef struct file_operations __no_const file_operations_no_const;
81196
81197 struct inode_operations {
81198 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
81199@@ -2796,4 +2797,14 @@ static inline bool dir_relax(struct inode *inode)
81200 return !IS_DEADDIR(inode);
81201 }
81202
81203+static inline bool is_sidechannel_device(const struct inode *inode)
81204+{
81205+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
81206+ umode_t mode = inode->i_mode;
81207+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
81208+#else
81209+ return false;
81210+#endif
81211+}
81212+
81213 #endif /* _LINUX_FS_H */
81214diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
81215index 0efc3e6..fd23610 100644
81216--- a/include/linux/fs_struct.h
81217+++ b/include/linux/fs_struct.h
81218@@ -6,13 +6,13 @@
81219 #include <linux/seqlock.h>
81220
81221 struct fs_struct {
81222- int users;
81223+ atomic_t users;
81224 spinlock_t lock;
81225 seqcount_t seq;
81226 int umask;
81227 int in_exec;
81228 struct path root, pwd;
81229-};
81230+} __randomize_layout;
81231
81232 extern struct kmem_cache *fs_cachep;
81233
81234diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
81235index 7714849..a4a5c7a 100644
81236--- a/include/linux/fscache-cache.h
81237+++ b/include/linux/fscache-cache.h
81238@@ -113,7 +113,7 @@ struct fscache_operation {
81239 fscache_operation_release_t release;
81240 };
81241
81242-extern atomic_t fscache_op_debug_id;
81243+extern atomic_unchecked_t fscache_op_debug_id;
81244 extern void fscache_op_work_func(struct work_struct *work);
81245
81246 extern void fscache_enqueue_operation(struct fscache_operation *);
81247@@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
81248 INIT_WORK(&op->work, fscache_op_work_func);
81249 atomic_set(&op->usage, 1);
81250 op->state = FSCACHE_OP_ST_INITIALISED;
81251- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
81252+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
81253 op->processor = processor;
81254 op->release = release;
81255 INIT_LIST_HEAD(&op->pend_link);
81256diff --git a/include/linux/fscache.h b/include/linux/fscache.h
81257index 115bb81..e7b812b 100644
81258--- a/include/linux/fscache.h
81259+++ b/include/linux/fscache.h
81260@@ -152,7 +152,7 @@ struct fscache_cookie_def {
81261 * - this is mandatory for any object that may have data
81262 */
81263 void (*now_uncached)(void *cookie_netfs_data);
81264-};
81265+} __do_const;
81266
81267 /*
81268 * fscache cached network filesystem type
81269diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
81270index 1c804b0..1432c2b 100644
81271--- a/include/linux/fsnotify.h
81272+++ b/include/linux/fsnotify.h
81273@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
81274 struct inode *inode = file_inode(file);
81275 __u32 mask = FS_ACCESS;
81276
81277+ if (is_sidechannel_device(inode))
81278+ return;
81279+
81280 if (S_ISDIR(inode->i_mode))
81281 mask |= FS_ISDIR;
81282
81283@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
81284 struct inode *inode = file_inode(file);
81285 __u32 mask = FS_MODIFY;
81286
81287+ if (is_sidechannel_device(inode))
81288+ return;
81289+
81290 if (S_ISDIR(inode->i_mode))
81291 mask |= FS_ISDIR;
81292
81293@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
81294 */
81295 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
81296 {
81297- return kstrdup(name, GFP_KERNEL);
81298+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
81299 }
81300
81301 /*
81302diff --git a/include/linux/genhd.h b/include/linux/genhd.h
81303index ec274e0..e678159 100644
81304--- a/include/linux/genhd.h
81305+++ b/include/linux/genhd.h
81306@@ -194,7 +194,7 @@ struct gendisk {
81307 struct kobject *slave_dir;
81308
81309 struct timer_rand_state *random;
81310- atomic_t sync_io; /* RAID */
81311+ atomic_unchecked_t sync_io; /* RAID */
81312 struct disk_events *ev;
81313 #ifdef CONFIG_BLK_DEV_INTEGRITY
81314 struct blk_integrity *integrity;
81315@@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
81316 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
81317
81318 /* drivers/char/random.c */
81319-extern void add_disk_randomness(struct gendisk *disk);
81320+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
81321 extern void rand_initialize_disk(struct gendisk *disk);
81322
81323 static inline sector_t get_start_sect(struct block_device *bdev)
81324diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
81325index c0894dd..2fbf10c 100644
81326--- a/include/linux/genl_magic_func.h
81327+++ b/include/linux/genl_magic_func.h
81328@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
81329 },
81330
81331 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
81332-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
81333+static struct genl_ops ZZZ_genl_ops[] = {
81334 #include GENL_MAGIC_INCLUDE_FILE
81335 };
81336
81337diff --git a/include/linux/gfp.h b/include/linux/gfp.h
81338index 5e7219d..b1ed627 100644
81339--- a/include/linux/gfp.h
81340+++ b/include/linux/gfp.h
81341@@ -34,6 +34,13 @@ struct vm_area_struct;
81342 #define ___GFP_NO_KSWAPD 0x400000u
81343 #define ___GFP_OTHER_NODE 0x800000u
81344 #define ___GFP_WRITE 0x1000000u
81345+
81346+#ifdef CONFIG_PAX_USERCOPY_SLABS
81347+#define ___GFP_USERCOPY 0x2000000u
81348+#else
81349+#define ___GFP_USERCOPY 0
81350+#endif
81351+
81352 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
81353
81354 /*
81355@@ -90,6 +97,7 @@ struct vm_area_struct;
81356 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
81357 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
81358 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
81359+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
81360
81361 /*
81362 * This may seem redundant, but it's a way of annotating false positives vs.
81363@@ -97,7 +105,7 @@ struct vm_area_struct;
81364 */
81365 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
81366
81367-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
81368+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
81369 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
81370
81371 /* This equals 0, but use constants in case they ever change */
81372@@ -155,6 +163,8 @@ struct vm_area_struct;
81373 /* 4GB DMA on some platforms */
81374 #define GFP_DMA32 __GFP_DMA32
81375
81376+#define GFP_USERCOPY __GFP_USERCOPY
81377+
81378 /* Convert GFP flags to their corresponding migrate type */
81379 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
81380 {
81381diff --git a/include/linux/gracl.h b/include/linux/gracl.h
81382new file mode 100644
81383index 0000000..edb2cb6
81384--- /dev/null
81385+++ b/include/linux/gracl.h
81386@@ -0,0 +1,340 @@
81387+#ifndef GR_ACL_H
81388+#define GR_ACL_H
81389+
81390+#include <linux/grdefs.h>
81391+#include <linux/resource.h>
81392+#include <linux/capability.h>
81393+#include <linux/dcache.h>
81394+#include <asm/resource.h>
81395+
81396+/* Major status information */
81397+
81398+#define GR_VERSION "grsecurity 3.0"
81399+#define GRSECURITY_VERSION 0x3000
81400+
81401+enum {
81402+ GR_SHUTDOWN = 0,
81403+ GR_ENABLE = 1,
81404+ GR_SPROLE = 2,
81405+ GR_OLDRELOAD = 3,
81406+ GR_SEGVMOD = 4,
81407+ GR_STATUS = 5,
81408+ GR_UNSPROLE = 6,
81409+ GR_PASSSET = 7,
81410+ GR_SPROLEPAM = 8,
81411+ GR_RELOAD = 9,
81412+};
81413+
81414+/* Password setup definitions
81415+ * kernel/grhash.c */
81416+enum {
81417+ GR_PW_LEN = 128,
81418+ GR_SALT_LEN = 16,
81419+ GR_SHA_LEN = 32,
81420+};
81421+
81422+enum {
81423+ GR_SPROLE_LEN = 64,
81424+};
81425+
81426+enum {
81427+ GR_NO_GLOB = 0,
81428+ GR_REG_GLOB,
81429+ GR_CREATE_GLOB
81430+};
81431+
81432+#define GR_NLIMITS 32
81433+
81434+/* Begin Data Structures */
81435+
81436+struct sprole_pw {
81437+ unsigned char *rolename;
81438+ unsigned char salt[GR_SALT_LEN];
81439+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
81440+};
81441+
81442+struct name_entry {
81443+ __u32 key;
81444+ ino_t inode;
81445+ dev_t device;
81446+ char *name;
81447+ __u16 len;
81448+ __u8 deleted;
81449+ struct name_entry *prev;
81450+ struct name_entry *next;
81451+};
81452+
81453+struct inodev_entry {
81454+ struct name_entry *nentry;
81455+ struct inodev_entry *prev;
81456+ struct inodev_entry *next;
81457+};
81458+
81459+struct acl_role_db {
81460+ struct acl_role_label **r_hash;
81461+ __u32 r_size;
81462+};
81463+
81464+struct inodev_db {
81465+ struct inodev_entry **i_hash;
81466+ __u32 i_size;
81467+};
81468+
81469+struct name_db {
81470+ struct name_entry **n_hash;
81471+ __u32 n_size;
81472+};
81473+
81474+struct crash_uid {
81475+ uid_t uid;
81476+ unsigned long expires;
81477+};
81478+
81479+struct gr_hash_struct {
81480+ void **table;
81481+ void **nametable;
81482+ void *first;
81483+ __u32 table_size;
81484+ __u32 used_size;
81485+ int type;
81486+};
81487+
81488+/* Userspace Grsecurity ACL data structures */
81489+
81490+struct acl_subject_label {
81491+ char *filename;
81492+ ino_t inode;
81493+ dev_t device;
81494+ __u32 mode;
81495+ kernel_cap_t cap_mask;
81496+ kernel_cap_t cap_lower;
81497+ kernel_cap_t cap_invert_audit;
81498+
81499+ struct rlimit res[GR_NLIMITS];
81500+ __u32 resmask;
81501+
81502+ __u8 user_trans_type;
81503+ __u8 group_trans_type;
81504+ uid_t *user_transitions;
81505+ gid_t *group_transitions;
81506+ __u16 user_trans_num;
81507+ __u16 group_trans_num;
81508+
81509+ __u32 sock_families[2];
81510+ __u32 ip_proto[8];
81511+ __u32 ip_type;
81512+ struct acl_ip_label **ips;
81513+ __u32 ip_num;
81514+ __u32 inaddr_any_override;
81515+
81516+ __u32 crashes;
81517+ unsigned long expires;
81518+
81519+ struct acl_subject_label *parent_subject;
81520+ struct gr_hash_struct *hash;
81521+ struct acl_subject_label *prev;
81522+ struct acl_subject_label *next;
81523+
81524+ struct acl_object_label **obj_hash;
81525+ __u32 obj_hash_size;
81526+ __u16 pax_flags;
81527+};
81528+
81529+struct role_allowed_ip {
81530+ __u32 addr;
81531+ __u32 netmask;
81532+
81533+ struct role_allowed_ip *prev;
81534+ struct role_allowed_ip *next;
81535+};
81536+
81537+struct role_transition {
81538+ char *rolename;
81539+
81540+ struct role_transition *prev;
81541+ struct role_transition *next;
81542+};
81543+
81544+struct acl_role_label {
81545+ char *rolename;
81546+ uid_t uidgid;
81547+ __u16 roletype;
81548+
81549+ __u16 auth_attempts;
81550+ unsigned long expires;
81551+
81552+ struct acl_subject_label *root_label;
81553+ struct gr_hash_struct *hash;
81554+
81555+ struct acl_role_label *prev;
81556+ struct acl_role_label *next;
81557+
81558+ struct role_transition *transitions;
81559+ struct role_allowed_ip *allowed_ips;
81560+ uid_t *domain_children;
81561+ __u16 domain_child_num;
81562+
81563+ umode_t umask;
81564+
81565+ struct acl_subject_label **subj_hash;
81566+ __u32 subj_hash_size;
81567+};
81568+
81569+struct user_acl_role_db {
81570+ struct acl_role_label **r_table;
81571+ __u32 num_pointers; /* Number of allocations to track */
81572+ __u32 num_roles; /* Number of roles */
81573+ __u32 num_domain_children; /* Number of domain children */
81574+ __u32 num_subjects; /* Number of subjects */
81575+ __u32 num_objects; /* Number of objects */
81576+};
81577+
81578+struct acl_object_label {
81579+ char *filename;
81580+ ino_t inode;
81581+ dev_t device;
81582+ __u32 mode;
81583+
81584+ struct acl_subject_label *nested;
81585+ struct acl_object_label *globbed;
81586+
81587+ /* next two structures not used */
81588+
81589+ struct acl_object_label *prev;
81590+ struct acl_object_label *next;
81591+};
81592+
81593+struct acl_ip_label {
81594+ char *iface;
81595+ __u32 addr;
81596+ __u32 netmask;
81597+ __u16 low, high;
81598+ __u8 mode;
81599+ __u32 type;
81600+ __u32 proto[8];
81601+
81602+ /* next two structures not used */
81603+
81604+ struct acl_ip_label *prev;
81605+ struct acl_ip_label *next;
81606+};
81607+
81608+struct gr_arg {
81609+ struct user_acl_role_db role_db;
81610+ unsigned char pw[GR_PW_LEN];
81611+ unsigned char salt[GR_SALT_LEN];
81612+ unsigned char sum[GR_SHA_LEN];
81613+ unsigned char sp_role[GR_SPROLE_LEN];
81614+ struct sprole_pw *sprole_pws;
81615+ dev_t segv_device;
81616+ ino_t segv_inode;
81617+ uid_t segv_uid;
81618+ __u16 num_sprole_pws;
81619+ __u16 mode;
81620+};
81621+
81622+struct gr_arg_wrapper {
81623+ struct gr_arg *arg;
81624+ __u32 version;
81625+ __u32 size;
81626+};
81627+
81628+struct subject_map {
81629+ struct acl_subject_label *user;
81630+ struct acl_subject_label *kernel;
81631+ struct subject_map *prev;
81632+ struct subject_map *next;
81633+};
81634+
81635+struct acl_subj_map_db {
81636+ struct subject_map **s_hash;
81637+ __u32 s_size;
81638+};
81639+
81640+struct gr_policy_state {
81641+ struct sprole_pw **acl_special_roles;
81642+ __u16 num_sprole_pws;
81643+ struct acl_role_label *kernel_role;
81644+ struct acl_role_label *role_list;
81645+ struct acl_role_label *default_role;
81646+ struct acl_role_db acl_role_set;
81647+ struct acl_subj_map_db subj_map_set;
81648+ struct name_db name_set;
81649+ struct inodev_db inodev_set;
81650+};
81651+
81652+struct gr_alloc_state {
81653+ unsigned long alloc_stack_next;
81654+ unsigned long alloc_stack_size;
81655+ void **alloc_stack;
81656+};
81657+
81658+struct gr_reload_state {
81659+ struct gr_policy_state oldpolicy;
81660+ struct gr_alloc_state oldalloc;
81661+ struct gr_policy_state newpolicy;
81662+ struct gr_alloc_state newalloc;
81663+ struct gr_policy_state *oldpolicy_ptr;
81664+ struct gr_alloc_state *oldalloc_ptr;
81665+ unsigned char oldmode;
81666+};
81667+
81668+/* End Data Structures Section */
81669+
81670+/* Hash functions generated by empirical testing by Brad Spengler
81671+ Makes good use of the low bits of the inode. Generally 0-1 times
81672+ in loop for successful match. 0-3 for unsuccessful match.
81673+ Shift/add algorithm with modulus of table size and an XOR*/
81674+
81675+static __inline__ unsigned int
81676+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
81677+{
81678+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
81679+}
81680+
81681+ static __inline__ unsigned int
81682+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
81683+{
81684+ return ((const unsigned long)userp % sz);
81685+}
81686+
81687+static __inline__ unsigned int
81688+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
81689+{
81690+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
81691+}
81692+
81693+static __inline__ unsigned int
81694+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
81695+{
81696+ return full_name_hash((const unsigned char *)name, len) % sz;
81697+}
81698+
81699+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
81700+ subj = NULL; \
81701+ iter = 0; \
81702+ while (iter < role->subj_hash_size) { \
81703+ if (subj == NULL) \
81704+ subj = role->subj_hash[iter]; \
81705+ if (subj == NULL) { \
81706+ iter++; \
81707+ continue; \
81708+ }
81709+
81710+#define FOR_EACH_SUBJECT_END(subj,iter) \
81711+ subj = subj->next; \
81712+ if (subj == NULL) \
81713+ iter++; \
81714+ }
81715+
81716+
81717+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
81718+ subj = role->hash->first; \
81719+ while (subj != NULL) {
81720+
81721+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
81722+ subj = subj->next; \
81723+ }
81724+
81725+#endif
81726+
81727diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
81728new file mode 100644
81729index 0000000..33ebd1f
81730--- /dev/null
81731+++ b/include/linux/gracl_compat.h
81732@@ -0,0 +1,156 @@
81733+#ifndef GR_ACL_COMPAT_H
81734+#define GR_ACL_COMPAT_H
81735+
81736+#include <linux/resource.h>
81737+#include <asm/resource.h>
81738+
81739+struct sprole_pw_compat {
81740+ compat_uptr_t rolename;
81741+ unsigned char salt[GR_SALT_LEN];
81742+ unsigned char sum[GR_SHA_LEN];
81743+};
81744+
81745+struct gr_hash_struct_compat {
81746+ compat_uptr_t table;
81747+ compat_uptr_t nametable;
81748+ compat_uptr_t first;
81749+ __u32 table_size;
81750+ __u32 used_size;
81751+ int type;
81752+};
81753+
81754+struct acl_subject_label_compat {
81755+ compat_uptr_t filename;
81756+ compat_ino_t inode;
81757+ __u32 device;
81758+ __u32 mode;
81759+ kernel_cap_t cap_mask;
81760+ kernel_cap_t cap_lower;
81761+ kernel_cap_t cap_invert_audit;
81762+
81763+ struct compat_rlimit res[GR_NLIMITS];
81764+ __u32 resmask;
81765+
81766+ __u8 user_trans_type;
81767+ __u8 group_trans_type;
81768+ compat_uptr_t user_transitions;
81769+ compat_uptr_t group_transitions;
81770+ __u16 user_trans_num;
81771+ __u16 group_trans_num;
81772+
81773+ __u32 sock_families[2];
81774+ __u32 ip_proto[8];
81775+ __u32 ip_type;
81776+ compat_uptr_t ips;
81777+ __u32 ip_num;
81778+ __u32 inaddr_any_override;
81779+
81780+ __u32 crashes;
81781+ compat_ulong_t expires;
81782+
81783+ compat_uptr_t parent_subject;
81784+ compat_uptr_t hash;
81785+ compat_uptr_t prev;
81786+ compat_uptr_t next;
81787+
81788+ compat_uptr_t obj_hash;
81789+ __u32 obj_hash_size;
81790+ __u16 pax_flags;
81791+};
81792+
81793+struct role_allowed_ip_compat {
81794+ __u32 addr;
81795+ __u32 netmask;
81796+
81797+ compat_uptr_t prev;
81798+ compat_uptr_t next;
81799+};
81800+
81801+struct role_transition_compat {
81802+ compat_uptr_t rolename;
81803+
81804+ compat_uptr_t prev;
81805+ compat_uptr_t next;
81806+};
81807+
81808+struct acl_role_label_compat {
81809+ compat_uptr_t rolename;
81810+ uid_t uidgid;
81811+ __u16 roletype;
81812+
81813+ __u16 auth_attempts;
81814+ compat_ulong_t expires;
81815+
81816+ compat_uptr_t root_label;
81817+ compat_uptr_t hash;
81818+
81819+ compat_uptr_t prev;
81820+ compat_uptr_t next;
81821+
81822+ compat_uptr_t transitions;
81823+ compat_uptr_t allowed_ips;
81824+ compat_uptr_t domain_children;
81825+ __u16 domain_child_num;
81826+
81827+ umode_t umask;
81828+
81829+ compat_uptr_t subj_hash;
81830+ __u32 subj_hash_size;
81831+};
81832+
81833+struct user_acl_role_db_compat {
81834+ compat_uptr_t r_table;
81835+ __u32 num_pointers;
81836+ __u32 num_roles;
81837+ __u32 num_domain_children;
81838+ __u32 num_subjects;
81839+ __u32 num_objects;
81840+};
81841+
81842+struct acl_object_label_compat {
81843+ compat_uptr_t filename;
81844+ compat_ino_t inode;
81845+ __u32 device;
81846+ __u32 mode;
81847+
81848+ compat_uptr_t nested;
81849+ compat_uptr_t globbed;
81850+
81851+ compat_uptr_t prev;
81852+ compat_uptr_t next;
81853+};
81854+
81855+struct acl_ip_label_compat {
81856+ compat_uptr_t iface;
81857+ __u32 addr;
81858+ __u32 netmask;
81859+ __u16 low, high;
81860+ __u8 mode;
81861+ __u32 type;
81862+ __u32 proto[8];
81863+
81864+ compat_uptr_t prev;
81865+ compat_uptr_t next;
81866+};
81867+
81868+struct gr_arg_compat {
81869+ struct user_acl_role_db_compat role_db;
81870+ unsigned char pw[GR_PW_LEN];
81871+ unsigned char salt[GR_SALT_LEN];
81872+ unsigned char sum[GR_SHA_LEN];
81873+ unsigned char sp_role[GR_SPROLE_LEN];
81874+ compat_uptr_t sprole_pws;
81875+ __u32 segv_device;
81876+ compat_ino_t segv_inode;
81877+ uid_t segv_uid;
81878+ __u16 num_sprole_pws;
81879+ __u16 mode;
81880+};
81881+
81882+struct gr_arg_wrapper_compat {
81883+ compat_uptr_t arg;
81884+ __u32 version;
81885+ __u32 size;
81886+};
81887+
81888+#endif
81889diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
81890new file mode 100644
81891index 0000000..323ecf2
81892--- /dev/null
81893+++ b/include/linux/gralloc.h
81894@@ -0,0 +1,9 @@
81895+#ifndef __GRALLOC_H
81896+#define __GRALLOC_H
81897+
81898+void acl_free_all(void);
81899+int acl_alloc_stack_init(unsigned long size);
81900+void *acl_alloc(unsigned long len);
81901+void *acl_alloc_num(unsigned long num, unsigned long len);
81902+
81903+#endif
81904diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
81905new file mode 100644
81906index 0000000..be66033
81907--- /dev/null
81908+++ b/include/linux/grdefs.h
81909@@ -0,0 +1,140 @@
81910+#ifndef GRDEFS_H
81911+#define GRDEFS_H
81912+
81913+/* Begin grsecurity status declarations */
81914+
81915+enum {
81916+ GR_READY = 0x01,
81917+ GR_STATUS_INIT = 0x00 // disabled state
81918+};
81919+
81920+/* Begin ACL declarations */
81921+
81922+/* Role flags */
81923+
81924+enum {
81925+ GR_ROLE_USER = 0x0001,
81926+ GR_ROLE_GROUP = 0x0002,
81927+ GR_ROLE_DEFAULT = 0x0004,
81928+ GR_ROLE_SPECIAL = 0x0008,
81929+ GR_ROLE_AUTH = 0x0010,
81930+ GR_ROLE_NOPW = 0x0020,
81931+ GR_ROLE_GOD = 0x0040,
81932+ GR_ROLE_LEARN = 0x0080,
81933+ GR_ROLE_TPE = 0x0100,
81934+ GR_ROLE_DOMAIN = 0x0200,
81935+ GR_ROLE_PAM = 0x0400,
81936+ GR_ROLE_PERSIST = 0x0800
81937+};
81938+
81939+/* ACL Subject and Object mode flags */
81940+enum {
81941+ GR_DELETED = 0x80000000
81942+};
81943+
81944+/* ACL Object-only mode flags */
81945+enum {
81946+ GR_READ = 0x00000001,
81947+ GR_APPEND = 0x00000002,
81948+ GR_WRITE = 0x00000004,
81949+ GR_EXEC = 0x00000008,
81950+ GR_FIND = 0x00000010,
81951+ GR_INHERIT = 0x00000020,
81952+ GR_SETID = 0x00000040,
81953+ GR_CREATE = 0x00000080,
81954+ GR_DELETE = 0x00000100,
81955+ GR_LINK = 0x00000200,
81956+ GR_AUDIT_READ = 0x00000400,
81957+ GR_AUDIT_APPEND = 0x00000800,
81958+ GR_AUDIT_WRITE = 0x00001000,
81959+ GR_AUDIT_EXEC = 0x00002000,
81960+ GR_AUDIT_FIND = 0x00004000,
81961+ GR_AUDIT_INHERIT= 0x00008000,
81962+ GR_AUDIT_SETID = 0x00010000,
81963+ GR_AUDIT_CREATE = 0x00020000,
81964+ GR_AUDIT_DELETE = 0x00040000,
81965+ GR_AUDIT_LINK = 0x00080000,
81966+ GR_PTRACERD = 0x00100000,
81967+ GR_NOPTRACE = 0x00200000,
81968+ GR_SUPPRESS = 0x00400000,
81969+ GR_NOLEARN = 0x00800000,
81970+ GR_INIT_TRANSFER= 0x01000000
81971+};
81972+
81973+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
81974+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
81975+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
81976+
81977+/* ACL subject-only mode flags */
81978+enum {
81979+ GR_KILL = 0x00000001,
81980+ GR_VIEW = 0x00000002,
81981+ GR_PROTECTED = 0x00000004,
81982+ GR_LEARN = 0x00000008,
81983+ GR_OVERRIDE = 0x00000010,
81984+ /* just a placeholder, this mode is only used in userspace */
81985+ GR_DUMMY = 0x00000020,
81986+ GR_PROTSHM = 0x00000040,
81987+ GR_KILLPROC = 0x00000080,
81988+ GR_KILLIPPROC = 0x00000100,
81989+ /* just a placeholder, this mode is only used in userspace */
81990+ GR_NOTROJAN = 0x00000200,
81991+ GR_PROTPROCFD = 0x00000400,
81992+ GR_PROCACCT = 0x00000800,
81993+ GR_RELAXPTRACE = 0x00001000,
81994+ //GR_NESTED = 0x00002000,
81995+ GR_INHERITLEARN = 0x00004000,
81996+ GR_PROCFIND = 0x00008000,
81997+ GR_POVERRIDE = 0x00010000,
81998+ GR_KERNELAUTH = 0x00020000,
81999+ GR_ATSECURE = 0x00040000,
82000+ GR_SHMEXEC = 0x00080000
82001+};
82002+
82003+enum {
82004+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
82005+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
82006+ GR_PAX_ENABLE_MPROTECT = 0x0004,
82007+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
82008+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
82009+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
82010+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
82011+ GR_PAX_DISABLE_MPROTECT = 0x0400,
82012+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
82013+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
82014+};
82015+
82016+enum {
82017+ GR_ID_USER = 0x01,
82018+ GR_ID_GROUP = 0x02,
82019+};
82020+
82021+enum {
82022+ GR_ID_ALLOW = 0x01,
82023+ GR_ID_DENY = 0x02,
82024+};
82025+
82026+#define GR_CRASH_RES 31
82027+#define GR_UIDTABLE_MAX 500
82028+
82029+/* begin resource learning section */
82030+enum {
82031+ GR_RLIM_CPU_BUMP = 60,
82032+ GR_RLIM_FSIZE_BUMP = 50000,
82033+ GR_RLIM_DATA_BUMP = 10000,
82034+ GR_RLIM_STACK_BUMP = 1000,
82035+ GR_RLIM_CORE_BUMP = 10000,
82036+ GR_RLIM_RSS_BUMP = 500000,
82037+ GR_RLIM_NPROC_BUMP = 1,
82038+ GR_RLIM_NOFILE_BUMP = 5,
82039+ GR_RLIM_MEMLOCK_BUMP = 50000,
82040+ GR_RLIM_AS_BUMP = 500000,
82041+ GR_RLIM_LOCKS_BUMP = 2,
82042+ GR_RLIM_SIGPENDING_BUMP = 5,
82043+ GR_RLIM_MSGQUEUE_BUMP = 10000,
82044+ GR_RLIM_NICE_BUMP = 1,
82045+ GR_RLIM_RTPRIO_BUMP = 1,
82046+ GR_RLIM_RTTIME_BUMP = 1000000
82047+};
82048+
82049+#endif
82050diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
82051new file mode 100644
82052index 0000000..d25522e
82053--- /dev/null
82054+++ b/include/linux/grinternal.h
82055@@ -0,0 +1,229 @@
82056+#ifndef __GRINTERNAL_H
82057+#define __GRINTERNAL_H
82058+
82059+#ifdef CONFIG_GRKERNSEC
82060+
82061+#include <linux/fs.h>
82062+#include <linux/mnt_namespace.h>
82063+#include <linux/nsproxy.h>
82064+#include <linux/gracl.h>
82065+#include <linux/grdefs.h>
82066+#include <linux/grmsg.h>
82067+
82068+void gr_add_learn_entry(const char *fmt, ...)
82069+ __attribute__ ((format (printf, 1, 2)));
82070+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
82071+ const struct vfsmount *mnt);
82072+__u32 gr_check_create(const struct dentry *new_dentry,
82073+ const struct dentry *parent,
82074+ const struct vfsmount *mnt, const __u32 mode);
82075+int gr_check_protected_task(const struct task_struct *task);
82076+__u32 to_gr_audit(const __u32 reqmode);
82077+int gr_set_acls(const int type);
82078+int gr_acl_is_enabled(void);
82079+char gr_roletype_to_char(void);
82080+
82081+void gr_handle_alertkill(struct task_struct *task);
82082+char *gr_to_filename(const struct dentry *dentry,
82083+ const struct vfsmount *mnt);
82084+char *gr_to_filename1(const struct dentry *dentry,
82085+ const struct vfsmount *mnt);
82086+char *gr_to_filename2(const struct dentry *dentry,
82087+ const struct vfsmount *mnt);
82088+char *gr_to_filename3(const struct dentry *dentry,
82089+ const struct vfsmount *mnt);
82090+
82091+extern int grsec_enable_ptrace_readexec;
82092+extern int grsec_enable_harden_ptrace;
82093+extern int grsec_enable_link;
82094+extern int grsec_enable_fifo;
82095+extern int grsec_enable_execve;
82096+extern int grsec_enable_shm;
82097+extern int grsec_enable_execlog;
82098+extern int grsec_enable_signal;
82099+extern int grsec_enable_audit_ptrace;
82100+extern int grsec_enable_forkfail;
82101+extern int grsec_enable_time;
82102+extern int grsec_enable_rofs;
82103+extern int grsec_deny_new_usb;
82104+extern int grsec_enable_chroot_shmat;
82105+extern int grsec_enable_chroot_mount;
82106+extern int grsec_enable_chroot_double;
82107+extern int grsec_enable_chroot_pivot;
82108+extern int grsec_enable_chroot_chdir;
82109+extern int grsec_enable_chroot_chmod;
82110+extern int grsec_enable_chroot_mknod;
82111+extern int grsec_enable_chroot_fchdir;
82112+extern int grsec_enable_chroot_nice;
82113+extern int grsec_enable_chroot_execlog;
82114+extern int grsec_enable_chroot_caps;
82115+extern int grsec_enable_chroot_sysctl;
82116+extern int grsec_enable_chroot_unix;
82117+extern int grsec_enable_symlinkown;
82118+extern kgid_t grsec_symlinkown_gid;
82119+extern int grsec_enable_tpe;
82120+extern kgid_t grsec_tpe_gid;
82121+extern int grsec_enable_tpe_all;
82122+extern int grsec_enable_tpe_invert;
82123+extern int grsec_enable_socket_all;
82124+extern kgid_t grsec_socket_all_gid;
82125+extern int grsec_enable_socket_client;
82126+extern kgid_t grsec_socket_client_gid;
82127+extern int grsec_enable_socket_server;
82128+extern kgid_t grsec_socket_server_gid;
82129+extern kgid_t grsec_audit_gid;
82130+extern int grsec_enable_group;
82131+extern int grsec_enable_log_rwxmaps;
82132+extern int grsec_enable_mount;
82133+extern int grsec_enable_chdir;
82134+extern int grsec_resource_logging;
82135+extern int grsec_enable_blackhole;
82136+extern int grsec_lastack_retries;
82137+extern int grsec_enable_brute;
82138+extern int grsec_enable_harden_ipc;
82139+extern int grsec_lock;
82140+
82141+extern spinlock_t grsec_alert_lock;
82142+extern unsigned long grsec_alert_wtime;
82143+extern unsigned long grsec_alert_fyet;
82144+
82145+extern spinlock_t grsec_audit_lock;
82146+
82147+extern rwlock_t grsec_exec_file_lock;
82148+
82149+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
82150+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
82151+ (tsk)->exec_file->f_path.mnt) : "/")
82152+
82153+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
82154+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
82155+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82156+
82157+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
82158+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
82159+ (tsk)->exec_file->f_path.mnt) : "/")
82160+
82161+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
82162+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
82163+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
82164+
82165+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
82166+
82167+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
82168+
82169+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
82170+{
82171+ if (file1 && file2) {
82172+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
82173+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
82174+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
82175+ return true;
82176+ }
82177+
82178+ return false;
82179+}
82180+
82181+#define GR_CHROOT_CAPS {{ \
82182+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
82183+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
82184+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
82185+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
82186+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
82187+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
82188+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
82189+
82190+#define security_learn(normal_msg,args...) \
82191+({ \
82192+ read_lock(&grsec_exec_file_lock); \
82193+ gr_add_learn_entry(normal_msg "\n", ## args); \
82194+ read_unlock(&grsec_exec_file_lock); \
82195+})
82196+
82197+enum {
82198+ GR_DO_AUDIT,
82199+ GR_DONT_AUDIT,
82200+ /* used for non-audit messages that we shouldn't kill the task on */
82201+ GR_DONT_AUDIT_GOOD
82202+};
82203+
82204+enum {
82205+ GR_TTYSNIFF,
82206+ GR_RBAC,
82207+ GR_RBAC_STR,
82208+ GR_STR_RBAC,
82209+ GR_RBAC_MODE2,
82210+ GR_RBAC_MODE3,
82211+ GR_FILENAME,
82212+ GR_SYSCTL_HIDDEN,
82213+ GR_NOARGS,
82214+ GR_ONE_INT,
82215+ GR_ONE_INT_TWO_STR,
82216+ GR_ONE_STR,
82217+ GR_STR_INT,
82218+ GR_TWO_STR_INT,
82219+ GR_TWO_INT,
82220+ GR_TWO_U64,
82221+ GR_THREE_INT,
82222+ GR_FIVE_INT_TWO_STR,
82223+ GR_TWO_STR,
82224+ GR_THREE_STR,
82225+ GR_FOUR_STR,
82226+ GR_STR_FILENAME,
82227+ GR_FILENAME_STR,
82228+ GR_FILENAME_TWO_INT,
82229+ GR_FILENAME_TWO_INT_STR,
82230+ GR_TEXTREL,
82231+ GR_PTRACE,
82232+ GR_RESOURCE,
82233+ GR_CAP,
82234+ GR_SIG,
82235+ GR_SIG2,
82236+ GR_CRASH1,
82237+ GR_CRASH2,
82238+ GR_PSACCT,
82239+ GR_RWXMAP,
82240+ GR_RWXMAPVMA
82241+};
82242+
82243+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
82244+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
82245+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
82246+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
82247+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
82248+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
82249+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
82250+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
82251+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
82252+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
82253+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
82254+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
82255+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
82256+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
82257+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
82258+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
82259+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
82260+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
82261+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
82262+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
82263+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
82264+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
82265+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
82266+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
82267+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
82268+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
82269+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
82270+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
82271+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
82272+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
82273+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
82274+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
82275+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
82276+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
82277+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
82278+#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
82279+
82280+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
82281+
82282+#endif
82283+
82284+#endif
82285diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
82286new file mode 100644
82287index 0000000..b02ba9d
82288--- /dev/null
82289+++ b/include/linux/grmsg.h
82290@@ -0,0 +1,117 @@
82291+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
82292+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
82293+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
82294+#define GR_STOPMOD_MSG "denied modification of module state by "
82295+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
82296+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
82297+#define GR_IOPERM_MSG "denied use of ioperm() by "
82298+#define GR_IOPL_MSG "denied use of iopl() by "
82299+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
82300+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
82301+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
82302+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
82303+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
82304+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
82305+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
82306+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
82307+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
82308+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
82309+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
82310+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
82311+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
82312+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
82313+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
82314+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
82315+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
82316+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
82317+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
82318+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
82319+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
82320+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
82321+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
82322+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
82323+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
82324+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
82325+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
82326+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
82327+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
82328+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
82329+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
82330+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
82331+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
82332+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
82333+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
82334+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
82335+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
82336+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
82337+#define GR_CHROOT_FHANDLE_MSG "denied use of file handles inside chroot by "
82338+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
82339+#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
82340+#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
82341+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
82342+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
82343+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
82344+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
82345+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
82346+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
82347+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
82348+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
82349+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
82350+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
82351+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
82352+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
82353+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
82354+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
82355+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
82356+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
82357+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
82358+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
82359+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
82360+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
82361+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
82362+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
82363+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
82364+#define GR_FAILFORK_MSG "failed fork with errno %s by "
82365+#define GR_NICE_CHROOT_MSG "denied priority change by "
82366+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
82367+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
82368+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
82369+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
82370+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
82371+#define GR_TIME_MSG "time set by "
82372+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
82373+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
82374+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
82375+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
82376+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
82377+#define GR_BIND_MSG "denied bind() by "
82378+#define GR_CONNECT_MSG "denied connect() by "
82379+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
82380+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
82381+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
82382+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
82383+#define GR_CAP_ACL_MSG "use of %s denied for "
82384+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
82385+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
82386+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
82387+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
82388+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
82389+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
82390+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
82391+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
82392+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
82393+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
82394+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
82395+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
82396+#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
82397+#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
82398+#define GR_VM86_MSG "denied use of vm86 by "
82399+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
82400+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
82401+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
82402+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
82403+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
82404+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
82405+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
82406+#define GR_IPC_DENIED_MSG "denied %s of overly-permissive IPC object with creator uid %u by "
82407+#define GR_MSRWRITE_MSG "denied write to CPU MSR by "
82408diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
82409new file mode 100644
82410index 0000000..10b9635
82411--- /dev/null
82412+++ b/include/linux/grsecurity.h
82413@@ -0,0 +1,254 @@
82414+#ifndef GR_SECURITY_H
82415+#define GR_SECURITY_H
82416+#include <linux/fs.h>
82417+#include <linux/fs_struct.h>
82418+#include <linux/binfmts.h>
82419+#include <linux/gracl.h>
82420+
82421+/* notify of brain-dead configs */
82422+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82423+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
82424+#endif
82425+#if defined(CONFIG_GRKERNSEC_PROC) && !defined(CONFIG_GRKERNSEC_PROC_USER) && !defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
82426+#error "CONFIG_GRKERNSEC_PROC enabled, but neither CONFIG_GRKERNSEC_PROC_USER nor CONFIG_GRKERNSEC_PROC_USERGROUP enabled"
82427+#endif
82428+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
82429+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
82430+#endif
82431+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
82432+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
82433+#endif
82434+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
82435+#error "CONFIG_PAX enabled, but no PaX options are enabled."
82436+#endif
82437+
82438+int gr_handle_new_usb(void);
82439+
82440+void gr_handle_brute_attach(int dumpable);
82441+void gr_handle_brute_check(void);
82442+void gr_handle_kernel_exploit(void);
82443+
82444+char gr_roletype_to_char(void);
82445+
82446+int gr_proc_is_restricted(void);
82447+
82448+int gr_acl_enable_at_secure(void);
82449+
82450+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
82451+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
82452+
82453+int gr_learn_cap(const struct task_struct *task, const struct cred *cred, const int cap);
82454+
82455+void gr_del_task_from_ip_table(struct task_struct *p);
82456+
82457+int gr_pid_is_chrooted(struct task_struct *p);
82458+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
82459+int gr_handle_chroot_nice(void);
82460+int gr_handle_chroot_sysctl(const int op);
82461+int gr_handle_chroot_setpriority(struct task_struct *p,
82462+ const int niceval);
82463+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
82464+int gr_chroot_fhandle(void);
82465+int gr_handle_chroot_chroot(const struct dentry *dentry,
82466+ const struct vfsmount *mnt);
82467+void gr_handle_chroot_chdir(const struct path *path);
82468+int gr_handle_chroot_chmod(const struct dentry *dentry,
82469+ const struct vfsmount *mnt, const int mode);
82470+int gr_handle_chroot_mknod(const struct dentry *dentry,
82471+ const struct vfsmount *mnt, const int mode);
82472+int gr_handle_chroot_mount(const struct dentry *dentry,
82473+ const struct vfsmount *mnt,
82474+ const char *dev_name);
82475+int gr_handle_chroot_pivot(void);
82476+int gr_handle_chroot_unix(const pid_t pid);
82477+
82478+int gr_handle_rawio(const struct inode *inode);
82479+
82480+void gr_handle_ioperm(void);
82481+void gr_handle_iopl(void);
82482+void gr_handle_msr_write(void);
82483+
82484+umode_t gr_acl_umask(void);
82485+
82486+int gr_tpe_allow(const struct file *file);
82487+
82488+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
82489+void gr_clear_chroot_entries(struct task_struct *task);
82490+
82491+void gr_log_forkfail(const int retval);
82492+void gr_log_timechange(void);
82493+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
82494+void gr_log_chdir(const struct dentry *dentry,
82495+ const struct vfsmount *mnt);
82496+void gr_log_chroot_exec(const struct dentry *dentry,
82497+ const struct vfsmount *mnt);
82498+void gr_log_remount(const char *devname, const int retval);
82499+void gr_log_unmount(const char *devname, const int retval);
82500+void gr_log_mount(const char *from, const char *to, const int retval);
82501+void gr_log_textrel(struct vm_area_struct *vma);
82502+void gr_log_ptgnustack(struct file *file);
82503+void gr_log_rwxmmap(struct file *file);
82504+void gr_log_rwxmprotect(struct vm_area_struct *vma);
82505+
82506+int gr_handle_follow_link(const struct inode *parent,
82507+ const struct inode *inode,
82508+ const struct dentry *dentry,
82509+ const struct vfsmount *mnt);
82510+int gr_handle_fifo(const struct dentry *dentry,
82511+ const struct vfsmount *mnt,
82512+ const struct dentry *dir, const int flag,
82513+ const int acc_mode);
82514+int gr_handle_hardlink(const struct dentry *dentry,
82515+ const struct vfsmount *mnt,
82516+ struct inode *inode,
82517+ const int mode, const struct filename *to);
82518+
82519+int gr_is_capable(const int cap);
82520+int gr_is_capable_nolog(const int cap);
82521+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
82522+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
82523+
82524+void gr_copy_label(struct task_struct *tsk);
82525+void gr_handle_crash(struct task_struct *task, const int sig);
82526+int gr_handle_signal(const struct task_struct *p, const int sig);
82527+int gr_check_crash_uid(const kuid_t uid);
82528+int gr_check_protected_task(const struct task_struct *task);
82529+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
82530+int gr_acl_handle_mmap(const struct file *file,
82531+ const unsigned long prot);
82532+int gr_acl_handle_mprotect(const struct file *file,
82533+ const unsigned long prot);
82534+int gr_check_hidden_task(const struct task_struct *tsk);
82535+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
82536+ const struct vfsmount *mnt);
82537+__u32 gr_acl_handle_utime(const struct dentry *dentry,
82538+ const struct vfsmount *mnt);
82539+__u32 gr_acl_handle_access(const struct dentry *dentry,
82540+ const struct vfsmount *mnt, const int fmode);
82541+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
82542+ const struct vfsmount *mnt, umode_t *mode);
82543+__u32 gr_acl_handle_chown(const struct dentry *dentry,
82544+ const struct vfsmount *mnt);
82545+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
82546+ const struct vfsmount *mnt);
82547+__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
82548+ const struct vfsmount *mnt);
82549+int gr_handle_ptrace(struct task_struct *task, const long request);
82550+int gr_handle_proc_ptrace(struct task_struct *task);
82551+__u32 gr_acl_handle_execve(const struct dentry *dentry,
82552+ const struct vfsmount *mnt);
82553+int gr_check_crash_exec(const struct file *filp);
82554+int gr_acl_is_enabled(void);
82555+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
82556+ const kgid_t gid);
82557+int gr_set_proc_label(const struct dentry *dentry,
82558+ const struct vfsmount *mnt,
82559+ const int unsafe_flags);
82560+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
82561+ const struct vfsmount *mnt);
82562+__u32 gr_acl_handle_open(const struct dentry *dentry,
82563+ const struct vfsmount *mnt, int acc_mode);
82564+__u32 gr_acl_handle_creat(const struct dentry *dentry,
82565+ const struct dentry *p_dentry,
82566+ const struct vfsmount *p_mnt,
82567+ int open_flags, int acc_mode, const int imode);
82568+void gr_handle_create(const struct dentry *dentry,
82569+ const struct vfsmount *mnt);
82570+void gr_handle_proc_create(const struct dentry *dentry,
82571+ const struct inode *inode);
82572+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
82573+ const struct dentry *parent_dentry,
82574+ const struct vfsmount *parent_mnt,
82575+ const int mode);
82576+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
82577+ const struct dentry *parent_dentry,
82578+ const struct vfsmount *parent_mnt);
82579+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
82580+ const struct vfsmount *mnt);
82581+void gr_handle_delete(const ino_t ino, const dev_t dev);
82582+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
82583+ const struct vfsmount *mnt);
82584+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
82585+ const struct dentry *parent_dentry,
82586+ const struct vfsmount *parent_mnt,
82587+ const struct filename *from);
82588+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
82589+ const struct dentry *parent_dentry,
82590+ const struct vfsmount *parent_mnt,
82591+ const struct dentry *old_dentry,
82592+ const struct vfsmount *old_mnt, const struct filename *to);
82593+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
82594+int gr_acl_handle_rename(struct dentry *new_dentry,
82595+ struct dentry *parent_dentry,
82596+ const struct vfsmount *parent_mnt,
82597+ struct dentry *old_dentry,
82598+ struct inode *old_parent_inode,
82599+ struct vfsmount *old_mnt, const struct filename *newname, unsigned int flags);
82600+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
82601+ struct dentry *old_dentry,
82602+ struct dentry *new_dentry,
82603+ struct vfsmount *mnt, const __u8 replace, unsigned int flags);
82604+__u32 gr_check_link(const struct dentry *new_dentry,
82605+ const struct dentry *parent_dentry,
82606+ const struct vfsmount *parent_mnt,
82607+ const struct dentry *old_dentry,
82608+ const struct vfsmount *old_mnt);
82609+int gr_acl_handle_filldir(const struct file *file, const char *name,
82610+ const unsigned int namelen, const ino_t ino);
82611+
82612+__u32 gr_acl_handle_unix(const struct dentry *dentry,
82613+ const struct vfsmount *mnt);
82614+void gr_acl_handle_exit(void);
82615+void gr_acl_handle_psacct(struct task_struct *task, const long code);
82616+int gr_acl_handle_procpidmem(const struct task_struct *task);
82617+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
82618+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
82619+void gr_audit_ptrace(struct task_struct *task);
82620+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
82621+void gr_put_exec_file(struct task_struct *task);
82622+
82623+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
82624+
82625+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
82626+extern void gr_learn_resource(const struct task_struct *task, const int res,
82627+ const unsigned long wanted, const int gt);
82628+#else
82629+static inline void gr_learn_resource(const struct task_struct *task, const int res,
82630+ const unsigned long wanted, const int gt)
82631+{
82632+}
82633+#endif
82634+
82635+#ifdef CONFIG_GRKERNSEC_RESLOG
82636+extern void gr_log_resource(const struct task_struct *task, const int res,
82637+ const unsigned long wanted, const int gt);
82638+#else
82639+static inline void gr_log_resource(const struct task_struct *task, const int res,
82640+ const unsigned long wanted, const int gt)
82641+{
82642+}
82643+#endif
82644+
82645+#ifdef CONFIG_GRKERNSEC
82646+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
82647+void gr_handle_vm86(void);
82648+void gr_handle_mem_readwrite(u64 from, u64 to);
82649+
82650+void gr_log_badprocpid(const char *entry);
82651+
82652+extern int grsec_enable_dmesg;
82653+extern int grsec_disable_privio;
82654+
82655+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
82656+extern kgid_t grsec_proc_gid;
82657+#endif
82658+
82659+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
82660+extern int grsec_enable_chroot_findtask;
82661+#endif
82662+#ifdef CONFIG_GRKERNSEC_SETXID
82663+extern int grsec_enable_setxid;
82664+#endif
82665+#endif
82666+
82667+#endif
82668diff --git a/include/linux/grsock.h b/include/linux/grsock.h
82669new file mode 100644
82670index 0000000..e7ffaaf
82671--- /dev/null
82672+++ b/include/linux/grsock.h
82673@@ -0,0 +1,19 @@
82674+#ifndef __GRSOCK_H
82675+#define __GRSOCK_H
82676+
82677+extern void gr_attach_curr_ip(const struct sock *sk);
82678+extern int gr_handle_sock_all(const int family, const int type,
82679+ const int protocol);
82680+extern int gr_handle_sock_server(const struct sockaddr *sck);
82681+extern int gr_handle_sock_server_other(const struct sock *sck);
82682+extern int gr_handle_sock_client(const struct sockaddr *sck);
82683+extern int gr_search_connect(struct socket * sock,
82684+ struct sockaddr_in * addr);
82685+extern int gr_search_bind(struct socket * sock,
82686+ struct sockaddr_in * addr);
82687+extern int gr_search_listen(struct socket * sock);
82688+extern int gr_search_accept(struct socket * sock);
82689+extern int gr_search_socket(const int domain, const int type,
82690+ const int protocol);
82691+
82692+#endif
82693diff --git a/include/linux/hash.h b/include/linux/hash.h
82694index d0494c3..69b7715 100644
82695--- a/include/linux/hash.h
82696+++ b/include/linux/hash.h
82697@@ -87,7 +87,7 @@ static inline u32 hash32_ptr(const void *ptr)
82698 struct fast_hash_ops {
82699 u32 (*hash)(const void *data, u32 len, u32 seed);
82700 u32 (*hash2)(const u32 *data, u32 len, u32 seed);
82701-};
82702+} __no_const;
82703
82704 /**
82705 * arch_fast_hash - Caclulates a hash over a given buffer that can have
82706diff --git a/include/linux/highmem.h b/include/linux/highmem.h
82707index 9286a46..373f27f 100644
82708--- a/include/linux/highmem.h
82709+++ b/include/linux/highmem.h
82710@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
82711 kunmap_atomic(kaddr);
82712 }
82713
82714+static inline void sanitize_highpage(struct page *page)
82715+{
82716+ void *kaddr;
82717+ unsigned long flags;
82718+
82719+ local_irq_save(flags);
82720+ kaddr = kmap_atomic(page);
82721+ clear_page(kaddr);
82722+ kunmap_atomic(kaddr);
82723+ local_irq_restore(flags);
82724+}
82725+
82726 static inline void zero_user_segments(struct page *page,
82727 unsigned start1, unsigned end1,
82728 unsigned start2, unsigned end2)
82729diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
82730index 1c7b89a..7dda400 100644
82731--- a/include/linux/hwmon-sysfs.h
82732+++ b/include/linux/hwmon-sysfs.h
82733@@ -25,7 +25,8 @@
82734 struct sensor_device_attribute{
82735 struct device_attribute dev_attr;
82736 int index;
82737-};
82738+} __do_const;
82739+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
82740 #define to_sensor_dev_attr(_dev_attr) \
82741 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
82742
82743@@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
82744 struct device_attribute dev_attr;
82745 u8 index;
82746 u8 nr;
82747-};
82748+} __do_const;
82749+typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
82750 #define to_sensor_dev_attr_2(_dev_attr) \
82751 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
82752
82753diff --git a/include/linux/i2c.h b/include/linux/i2c.h
82754index b556e0a..c10a515 100644
82755--- a/include/linux/i2c.h
82756+++ b/include/linux/i2c.h
82757@@ -378,6 +378,7 @@ struct i2c_algorithm {
82758 /* To determine what the adapter supports */
82759 u32 (*functionality) (struct i2c_adapter *);
82760 };
82761+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
82762
82763 /**
82764 * struct i2c_bus_recovery_info - I2C bus recovery information
82765diff --git a/include/linux/i2o.h b/include/linux/i2o.h
82766index d23c3c2..eb63c81 100644
82767--- a/include/linux/i2o.h
82768+++ b/include/linux/i2o.h
82769@@ -565,7 +565,7 @@ struct i2o_controller {
82770 struct i2o_device *exec; /* Executive */
82771 #if BITS_PER_LONG == 64
82772 spinlock_t context_list_lock; /* lock for context_list */
82773- atomic_t context_list_counter; /* needed for unique contexts */
82774+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
82775 struct list_head context_list; /* list of context id's
82776 and pointers */
82777 #endif
82778diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
82779index aff7ad8..3942bbd 100644
82780--- a/include/linux/if_pppox.h
82781+++ b/include/linux/if_pppox.h
82782@@ -76,7 +76,7 @@ struct pppox_proto {
82783 int (*ioctl)(struct socket *sock, unsigned int cmd,
82784 unsigned long arg);
82785 struct module *owner;
82786-};
82787+} __do_const;
82788
82789 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
82790 extern void unregister_pppox_proto(int proto_num);
82791diff --git a/include/linux/init.h b/include/linux/init.h
82792index 2df8e8d..3e1280d 100644
82793--- a/include/linux/init.h
82794+++ b/include/linux/init.h
82795@@ -37,9 +37,17 @@
82796 * section.
82797 */
82798
82799+#define add_init_latent_entropy __latent_entropy
82800+
82801+#ifdef CONFIG_MEMORY_HOTPLUG
82802+#define add_meminit_latent_entropy
82803+#else
82804+#define add_meminit_latent_entropy __latent_entropy
82805+#endif
82806+
82807 /* These are for everybody (although not all archs will actually
82808 discard it in modules) */
82809-#define __init __section(.init.text) __cold notrace
82810+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
82811 #define __initdata __section(.init.data)
82812 #define __initconst __constsection(.init.rodata)
82813 #define __exitdata __section(.exit.data)
82814@@ -100,7 +108,7 @@
82815 #define __cpuexitconst
82816
82817 /* Used for MEMORY_HOTPLUG */
82818-#define __meminit __section(.meminit.text) __cold notrace
82819+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
82820 #define __meminitdata __section(.meminit.data)
82821 #define __meminitconst __constsection(.meminit.rodata)
82822 #define __memexit __section(.memexit.text) __exitused __cold notrace
82823diff --git a/include/linux/init_task.h b/include/linux/init_task.h
82824index 2bb4c4f3..e0fac69 100644
82825--- a/include/linux/init_task.h
82826+++ b/include/linux/init_task.h
82827@@ -149,6 +149,12 @@ extern struct task_group root_task_group;
82828
82829 #define INIT_TASK_COMM "swapper"
82830
82831+#ifdef CONFIG_X86
82832+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
82833+#else
82834+#define INIT_TASK_THREAD_INFO
82835+#endif
82836+
82837 #ifdef CONFIG_RT_MUTEXES
82838 # define INIT_RT_MUTEXES(tsk) \
82839 .pi_waiters = RB_ROOT, \
82840@@ -196,6 +202,7 @@ extern struct task_group root_task_group;
82841 RCU_POINTER_INITIALIZER(cred, &init_cred), \
82842 .comm = INIT_TASK_COMM, \
82843 .thread = INIT_THREAD, \
82844+ INIT_TASK_THREAD_INFO \
82845 .fs = &init_fs, \
82846 .files = &init_files, \
82847 .signal = &init_signals, \
82848diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
82849index 698ad05..8601bb7 100644
82850--- a/include/linux/interrupt.h
82851+++ b/include/linux/interrupt.h
82852@@ -418,8 +418,8 @@ extern const char * const softirq_to_name[NR_SOFTIRQS];
82853
82854 struct softirq_action
82855 {
82856- void (*action)(struct softirq_action *);
82857-};
82858+ void (*action)(void);
82859+} __no_const;
82860
82861 asmlinkage void do_softirq(void);
82862 asmlinkage void __do_softirq(void);
82863@@ -433,7 +433,7 @@ static inline void do_softirq_own_stack(void)
82864 }
82865 #endif
82866
82867-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
82868+extern void open_softirq(int nr, void (*action)(void));
82869 extern void softirq_init(void);
82870 extern void __raise_softirq_irqoff(unsigned int nr);
82871
82872diff --git a/include/linux/iommu.h b/include/linux/iommu.h
82873index 20f9a52..63ee2e3 100644
82874--- a/include/linux/iommu.h
82875+++ b/include/linux/iommu.h
82876@@ -131,7 +131,7 @@ struct iommu_ops {
82877 u32 (*domain_get_windows)(struct iommu_domain *domain);
82878
82879 unsigned long pgsize_bitmap;
82880-};
82881+} __do_const;
82882
82883 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
82884 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
82885diff --git a/include/linux/ioport.h b/include/linux/ioport.h
82886index 142ec54..873e033 100644
82887--- a/include/linux/ioport.h
82888+++ b/include/linux/ioport.h
82889@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
82890 int adjust_resource(struct resource *res, resource_size_t start,
82891 resource_size_t size);
82892 resource_size_t resource_alignment(struct resource *res);
82893-static inline resource_size_t resource_size(const struct resource *res)
82894+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
82895 {
82896 return res->end - res->start + 1;
82897 }
82898diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
82899index 35e7eca..6afb7ad 100644
82900--- a/include/linux/ipc_namespace.h
82901+++ b/include/linux/ipc_namespace.h
82902@@ -69,7 +69,7 @@ struct ipc_namespace {
82903 struct user_namespace *user_ns;
82904
82905 unsigned int proc_inum;
82906-};
82907+} __randomize_layout;
82908
82909 extern struct ipc_namespace init_ipc_ns;
82910 extern atomic_t nr_ipc_ns;
82911diff --git a/include/linux/irq.h b/include/linux/irq.h
82912index 62af592..cc3b0d0 100644
82913--- a/include/linux/irq.h
82914+++ b/include/linux/irq.h
82915@@ -344,7 +344,8 @@ struct irq_chip {
82916 void (*irq_release_resources)(struct irq_data *data);
82917
82918 unsigned long flags;
82919-};
82920+} __do_const;
82921+typedef struct irq_chip __no_const irq_chip_no_const;
82922
82923 /*
82924 * irq_chip specific flags
82925diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
82926index 45e2d8c..26d85da 100644
82927--- a/include/linux/irqchip/arm-gic.h
82928+++ b/include/linux/irqchip/arm-gic.h
82929@@ -75,9 +75,11 @@
82930
82931 #ifndef __ASSEMBLY__
82932
82933+#include <linux/irq.h>
82934+
82935 struct device_node;
82936
82937-extern struct irq_chip gic_arch_extn;
82938+extern irq_chip_no_const gic_arch_extn;
82939
82940 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
82941 u32 offset, struct device_node *);
82942diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
82943index c367cbd..c9b79e6 100644
82944--- a/include/linux/jiffies.h
82945+++ b/include/linux/jiffies.h
82946@@ -280,20 +280,20 @@ extern unsigned long preset_lpj;
82947 /*
82948 * Convert various time units to each other:
82949 */
82950-extern unsigned int jiffies_to_msecs(const unsigned long j);
82951-extern unsigned int jiffies_to_usecs(const unsigned long j);
82952+extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
82953+extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
82954
82955-static inline u64 jiffies_to_nsecs(const unsigned long j)
82956+static inline u64 __intentional_overflow(-1) jiffies_to_nsecs(const unsigned long j)
82957 {
82958 return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
82959 }
82960
82961-extern unsigned long msecs_to_jiffies(const unsigned int m);
82962-extern unsigned long usecs_to_jiffies(const unsigned int u);
82963+extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
82964+extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
82965 extern unsigned long timespec_to_jiffies(const struct timespec *value);
82966 extern void jiffies_to_timespec(const unsigned long jiffies,
82967- struct timespec *value);
82968-extern unsigned long timeval_to_jiffies(const struct timeval *value);
82969+ struct timespec *value) __intentional_overflow(-1);
82970+extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
82971 extern void jiffies_to_timeval(const unsigned long jiffies,
82972 struct timeval *value);
82973
82974diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
82975index 6883e19..e854fcb 100644
82976--- a/include/linux/kallsyms.h
82977+++ b/include/linux/kallsyms.h
82978@@ -15,7 +15,8 @@
82979
82980 struct module;
82981
82982-#ifdef CONFIG_KALLSYMS
82983+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
82984+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
82985 /* Lookup the address for a symbol. Returns 0 if not found. */
82986 unsigned long kallsyms_lookup_name(const char *name);
82987
82988@@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
82989 /* Stupid that this does nothing, but I didn't create this mess. */
82990 #define __print_symbol(fmt, addr)
82991 #endif /*CONFIG_KALLSYMS*/
82992+#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
82993+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
82994+extern unsigned long kallsyms_lookup_name(const char *name);
82995+extern void __print_symbol(const char *fmt, unsigned long address);
82996+extern int sprint_backtrace(char *buffer, unsigned long address);
82997+extern int sprint_symbol(char *buffer, unsigned long address);
82998+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
82999+const char *kallsyms_lookup(unsigned long addr,
83000+ unsigned long *symbolsize,
83001+ unsigned long *offset,
83002+ char **modname, char *namebuf);
83003+extern int kallsyms_lookup_size_offset(unsigned long addr,
83004+ unsigned long *symbolsize,
83005+ unsigned long *offset);
83006+#endif
83007
83008 /* This macro allows us to keep printk typechecking */
83009 static __printf(1, 2)
83010diff --git a/include/linux/key-type.h b/include/linux/key-type.h
83011index 44792ee..6172f2a 100644
83012--- a/include/linux/key-type.h
83013+++ b/include/linux/key-type.h
83014@@ -132,7 +132,7 @@ struct key_type {
83015 /* internal fields */
83016 struct list_head link; /* link in types list */
83017 struct lock_class_key lock_class; /* key->sem lock class */
83018-};
83019+} __do_const;
83020
83021 extern struct key_type key_type_keyring;
83022
83023diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
83024index e465bb1..19f605f 100644
83025--- a/include/linux/kgdb.h
83026+++ b/include/linux/kgdb.h
83027@@ -52,7 +52,7 @@ extern int kgdb_connected;
83028 extern int kgdb_io_module_registered;
83029
83030 extern atomic_t kgdb_setting_breakpoint;
83031-extern atomic_t kgdb_cpu_doing_single_step;
83032+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
83033
83034 extern struct task_struct *kgdb_usethread;
83035 extern struct task_struct *kgdb_contthread;
83036@@ -254,7 +254,7 @@ struct kgdb_arch {
83037 void (*correct_hw_break)(void);
83038
83039 void (*enable_nmi)(bool on);
83040-};
83041+} __do_const;
83042
83043 /**
83044 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
83045@@ -279,7 +279,7 @@ struct kgdb_io {
83046 void (*pre_exception) (void);
83047 void (*post_exception) (void);
83048 int is_console;
83049-};
83050+} __do_const;
83051
83052 extern struct kgdb_arch arch_kgdb_ops;
83053
83054diff --git a/include/linux/kmod.h b/include/linux/kmod.h
83055index 0555cc6..40116ce 100644
83056--- a/include/linux/kmod.h
83057+++ b/include/linux/kmod.h
83058@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
83059 * usually useless though. */
83060 extern __printf(2, 3)
83061 int __request_module(bool wait, const char *name, ...);
83062+extern __printf(3, 4)
83063+int ___request_module(bool wait, char *param_name, const char *name, ...);
83064 #define request_module(mod...) __request_module(true, mod)
83065 #define request_module_nowait(mod...) __request_module(false, mod)
83066 #define try_then_request_module(x, mod...) \
83067@@ -57,6 +59,9 @@ struct subprocess_info {
83068 struct work_struct work;
83069 struct completion *complete;
83070 char *path;
83071+#ifdef CONFIG_GRKERNSEC
83072+ char *origpath;
83073+#endif
83074 char **argv;
83075 char **envp;
83076 int wait;
83077diff --git a/include/linux/kobject.h b/include/linux/kobject.h
83078index 2d61b90..a1d0a13 100644
83079--- a/include/linux/kobject.h
83080+++ b/include/linux/kobject.h
83081@@ -118,7 +118,7 @@ struct kobj_type {
83082 struct attribute **default_attrs;
83083 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
83084 const void *(*namespace)(struct kobject *kobj);
83085-};
83086+} __do_const;
83087
83088 struct kobj_uevent_env {
83089 char *argv[3];
83090@@ -142,6 +142,7 @@ struct kobj_attribute {
83091 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
83092 const char *buf, size_t count);
83093 };
83094+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
83095
83096 extern const struct sysfs_ops kobj_sysfs_ops;
83097
83098@@ -169,7 +170,7 @@ struct kset {
83099 spinlock_t list_lock;
83100 struct kobject kobj;
83101 const struct kset_uevent_ops *uevent_ops;
83102-};
83103+} __randomize_layout;
83104
83105 extern void kset_init(struct kset *kset);
83106 extern int __must_check kset_register(struct kset *kset);
83107diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
83108index df32d25..fb52e27 100644
83109--- a/include/linux/kobject_ns.h
83110+++ b/include/linux/kobject_ns.h
83111@@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
83112 const void *(*netlink_ns)(struct sock *sk);
83113 const void *(*initial_ns)(void);
83114 void (*drop_ns)(void *);
83115-};
83116+} __do_const;
83117
83118 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
83119 int kobj_ns_type_registered(enum kobj_ns_type type);
83120diff --git a/include/linux/kref.h b/include/linux/kref.h
83121index 484604d..0f6c5b6 100644
83122--- a/include/linux/kref.h
83123+++ b/include/linux/kref.h
83124@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
83125 static inline int kref_sub(struct kref *kref, unsigned int count,
83126 void (*release)(struct kref *kref))
83127 {
83128- WARN_ON(release == NULL);
83129+ BUG_ON(release == NULL);
83130
83131 if (atomic_sub_and_test((int) count, &kref->refcount)) {
83132 release(kref);
83133diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
83134index a4c33b3..e854710 100644
83135--- a/include/linux/kvm_host.h
83136+++ b/include/linux/kvm_host.h
83137@@ -452,7 +452,7 @@ static inline void kvm_irqfd_exit(void)
83138 {
83139 }
83140 #endif
83141-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83142+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
83143 struct module *module);
83144 void kvm_exit(void);
83145
83146@@ -618,7 +618,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
83147 struct kvm_guest_debug *dbg);
83148 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
83149
83150-int kvm_arch_init(void *opaque);
83151+int kvm_arch_init(const void *opaque);
83152 void kvm_arch_exit(void);
83153
83154 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
83155diff --git a/include/linux/libata.h b/include/linux/libata.h
83156index 92abb49..e7fff2a 100644
83157--- a/include/linux/libata.h
83158+++ b/include/linux/libata.h
83159@@ -976,7 +976,7 @@ struct ata_port_operations {
83160 * fields must be pointers.
83161 */
83162 const struct ata_port_operations *inherits;
83163-};
83164+} __do_const;
83165
83166 struct ata_port_info {
83167 unsigned long flags;
83168diff --git a/include/linux/linkage.h b/include/linux/linkage.h
83169index a6a42dd..6c5ebce 100644
83170--- a/include/linux/linkage.h
83171+++ b/include/linux/linkage.h
83172@@ -36,6 +36,7 @@
83173 #endif
83174
83175 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
83176+#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
83177 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
83178
83179 /*
83180diff --git a/include/linux/list.h b/include/linux/list.h
83181index cbbb96f..602d023 100644
83182--- a/include/linux/list.h
83183+++ b/include/linux/list.h
83184@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
83185 extern void list_del(struct list_head *entry);
83186 #endif
83187
83188+extern void __pax_list_add(struct list_head *new,
83189+ struct list_head *prev,
83190+ struct list_head *next);
83191+static inline void pax_list_add(struct list_head *new, struct list_head *head)
83192+{
83193+ __pax_list_add(new, head, head->next);
83194+}
83195+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
83196+{
83197+ __pax_list_add(new, head->prev, head);
83198+}
83199+extern void pax_list_del(struct list_head *entry);
83200+
83201 /**
83202 * list_replace - replace old entry by new one
83203 * @old : the element to be replaced
83204@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
83205 INIT_LIST_HEAD(entry);
83206 }
83207
83208+extern void pax_list_del_init(struct list_head *entry);
83209+
83210 /**
83211 * list_move - delete from one list and add as another's head
83212 * @list: the entry to move
83213diff --git a/include/linux/lockref.h b/include/linux/lockref.h
83214index 4bfde0e..d6e2e09 100644
83215--- a/include/linux/lockref.h
83216+++ b/include/linux/lockref.h
83217@@ -47,4 +47,36 @@ static inline int __lockref_is_dead(const struct lockref *l)
83218 return ((int)l->count < 0);
83219 }
83220
83221+static inline unsigned int __lockref_read(struct lockref *lockref)
83222+{
83223+ return lockref->count;
83224+}
83225+
83226+static inline void __lockref_set(struct lockref *lockref, unsigned int count)
83227+{
83228+ lockref->count = count;
83229+}
83230+
83231+static inline void __lockref_inc(struct lockref *lockref)
83232+{
83233+
83234+#ifdef CONFIG_PAX_REFCOUNT
83235+ atomic_inc((atomic_t *)&lockref->count);
83236+#else
83237+ lockref->count++;
83238+#endif
83239+
83240+}
83241+
83242+static inline void __lockref_dec(struct lockref *lockref)
83243+{
83244+
83245+#ifdef CONFIG_PAX_REFCOUNT
83246+ atomic_dec((atomic_t *)&lockref->count);
83247+#else
83248+ lockref->count--;
83249+#endif
83250+
83251+}
83252+
83253 #endif /* __LINUX_LOCKREF_H */
83254diff --git a/include/linux/math64.h b/include/linux/math64.h
83255index c45c089..298841c 100644
83256--- a/include/linux/math64.h
83257+++ b/include/linux/math64.h
83258@@ -15,7 +15,7 @@
83259 * This is commonly provided by 32bit archs to provide an optimized 64bit
83260 * divide.
83261 */
83262-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83263+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83264 {
83265 *remainder = dividend % divisor;
83266 return dividend / divisor;
83267@@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
83268 /**
83269 * div64_u64 - unsigned 64bit divide with 64bit divisor
83270 */
83271-static inline u64 div64_u64(u64 dividend, u64 divisor)
83272+static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
83273 {
83274 return dividend / divisor;
83275 }
83276@@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
83277 #define div64_ul(x, y) div_u64((x), (y))
83278
83279 #ifndef div_u64_rem
83280-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83281+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
83282 {
83283 *remainder = do_div(dividend, divisor);
83284 return dividend;
83285@@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
83286 #endif
83287
83288 #ifndef div64_u64
83289-extern u64 div64_u64(u64 dividend, u64 divisor);
83290+extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
83291 #endif
83292
83293 #ifndef div64_s64
83294@@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
83295 * divide.
83296 */
83297 #ifndef div_u64
83298-static inline u64 div_u64(u64 dividend, u32 divisor)
83299+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
83300 {
83301 u32 remainder;
83302 return div_u64_rem(dividend, divisor, &remainder);
83303diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
83304index f230a97..714c006 100644
83305--- a/include/linux/mempolicy.h
83306+++ b/include/linux/mempolicy.h
83307@@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
83308 }
83309
83310 #define vma_policy(vma) ((vma)->vm_policy)
83311+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83312+{
83313+ vma->vm_policy = pol;
83314+}
83315
83316 static inline void mpol_get(struct mempolicy *pol)
83317 {
83318@@ -228,6 +232,9 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
83319 }
83320
83321 #define vma_policy(vma) NULL
83322+static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
83323+{
83324+}
83325
83326 static inline int
83327 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
83328diff --git a/include/linux/mm.h b/include/linux/mm.h
83329index f952cc8..b9f6135 100644
83330--- a/include/linux/mm.h
83331+++ b/include/linux/mm.h
83332@@ -127,6 +127,11 @@ extern unsigned int kobjsize(const void *objp);
83333 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
83334 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
83335 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
83336+
83337+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
83338+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
83339+#endif
83340+
83341 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
83342
83343 #ifdef CONFIG_MEM_SOFT_DIRTY
83344@@ -237,8 +242,8 @@ struct vm_operations_struct {
83345 /* called by access_process_vm when get_user_pages() fails, typically
83346 * for use by special VMAs that can switch between memory and hardware
83347 */
83348- int (*access)(struct vm_area_struct *vma, unsigned long addr,
83349- void *buf, int len, int write);
83350+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
83351+ void *buf, size_t len, int write);
83352
83353 /* Called by the /proc/PID/maps code to ask the vma whether it
83354 * has a special name. Returning non-NULL will also cause this
83355@@ -274,6 +279,7 @@ struct vm_operations_struct {
83356 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
83357 unsigned long size, pgoff_t pgoff);
83358 };
83359+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
83360
83361 struct mmu_gather;
83362 struct inode;
83363@@ -1163,8 +1169,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
83364 unsigned long *pfn);
83365 int follow_phys(struct vm_area_struct *vma, unsigned long address,
83366 unsigned int flags, unsigned long *prot, resource_size_t *phys);
83367-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83368- void *buf, int len, int write);
83369+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
83370+ void *buf, size_t len, int write);
83371
83372 static inline void unmap_shared_mapping_range(struct address_space *mapping,
83373 loff_t const holebegin, loff_t const holelen)
83374@@ -1204,9 +1210,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
83375 }
83376 #endif
83377
83378-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
83379-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
83380- void *buf, int len, int write);
83381+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
83382+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
83383+ void *buf, size_t len, int write);
83384
83385 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
83386 unsigned long start, unsigned long nr_pages,
83387@@ -1238,34 +1244,6 @@ int set_page_dirty_lock(struct page *page);
83388 int clear_page_dirty_for_io(struct page *page);
83389 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
83390
83391-/* Is the vma a continuation of the stack vma above it? */
83392-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
83393-{
83394- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
83395-}
83396-
83397-static inline int stack_guard_page_start(struct vm_area_struct *vma,
83398- unsigned long addr)
83399-{
83400- return (vma->vm_flags & VM_GROWSDOWN) &&
83401- (vma->vm_start == addr) &&
83402- !vma_growsdown(vma->vm_prev, addr);
83403-}
83404-
83405-/* Is the vma a continuation of the stack vma below it? */
83406-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
83407-{
83408- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
83409-}
83410-
83411-static inline int stack_guard_page_end(struct vm_area_struct *vma,
83412- unsigned long addr)
83413-{
83414- return (vma->vm_flags & VM_GROWSUP) &&
83415- (vma->vm_end == addr) &&
83416- !vma_growsup(vma->vm_next, addr);
83417-}
83418-
83419 extern pid_t
83420 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
83421
83422@@ -1365,6 +1343,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
83423 }
83424 #endif
83425
83426+#ifdef CONFIG_MMU
83427+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
83428+#else
83429+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
83430+{
83431+ return __pgprot(0);
83432+}
83433+#endif
83434+
83435 int vma_wants_writenotify(struct vm_area_struct *vma);
83436
83437 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
83438@@ -1383,8 +1370,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
83439 {
83440 return 0;
83441 }
83442+
83443+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
83444+ unsigned long address)
83445+{
83446+ return 0;
83447+}
83448 #else
83449 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83450+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
83451 #endif
83452
83453 #ifdef __PAGETABLE_PMD_FOLDED
83454@@ -1393,8 +1387,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
83455 {
83456 return 0;
83457 }
83458+
83459+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
83460+ unsigned long address)
83461+{
83462+ return 0;
83463+}
83464 #else
83465 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
83466+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
83467 #endif
83468
83469 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
83470@@ -1412,11 +1413,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
83471 NULL: pud_offset(pgd, address);
83472 }
83473
83474+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
83475+{
83476+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
83477+ NULL: pud_offset(pgd, address);
83478+}
83479+
83480 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
83481 {
83482 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
83483 NULL: pmd_offset(pud, address);
83484 }
83485+
83486+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
83487+{
83488+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
83489+ NULL: pmd_offset(pud, address);
83490+}
83491 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
83492
83493 #if USE_SPLIT_PTE_PTLOCKS
83494@@ -1815,7 +1828,7 @@ extern int install_special_mapping(struct mm_struct *mm,
83495 unsigned long addr, unsigned long len,
83496 unsigned long flags, struct page **pages);
83497
83498-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
83499+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
83500
83501 extern unsigned long mmap_region(struct file *file, unsigned long addr,
83502 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
83503@@ -1823,6 +1836,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
83504 unsigned long len, unsigned long prot, unsigned long flags,
83505 unsigned long pgoff, unsigned long *populate);
83506 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
83507+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
83508
83509 #ifdef CONFIG_MMU
83510 extern int __mm_populate(unsigned long addr, unsigned long len,
83511@@ -1851,10 +1865,11 @@ struct vm_unmapped_area_info {
83512 unsigned long high_limit;
83513 unsigned long align_mask;
83514 unsigned long align_offset;
83515+ unsigned long threadstack_offset;
83516 };
83517
83518-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
83519-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83520+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
83521+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
83522
83523 /*
83524 * Search for an unmapped address range.
83525@@ -1866,7 +1881,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
83526 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
83527 */
83528 static inline unsigned long
83529-vm_unmapped_area(struct vm_unmapped_area_info *info)
83530+vm_unmapped_area(const struct vm_unmapped_area_info *info)
83531 {
83532 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
83533 return unmapped_area(info);
83534@@ -1928,6 +1943,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
83535 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
83536 struct vm_area_struct **pprev);
83537
83538+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
83539+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
83540+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
83541+
83542 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
83543 NULL if none. Assume start_addr < end_addr. */
83544 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
83545@@ -1956,15 +1975,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
83546 return vma;
83547 }
83548
83549-#ifdef CONFIG_MMU
83550-pgprot_t vm_get_page_prot(unsigned long vm_flags);
83551-#else
83552-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
83553-{
83554- return __pgprot(0);
83555-}
83556-#endif
83557-
83558 #ifdef CONFIG_NUMA_BALANCING
83559 unsigned long change_prot_numa(struct vm_area_struct *vma,
83560 unsigned long start, unsigned long end);
83561@@ -2016,6 +2026,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
83562 static inline void vm_stat_account(struct mm_struct *mm,
83563 unsigned long flags, struct file *file, long pages)
83564 {
83565+
83566+#ifdef CONFIG_PAX_RANDMMAP
83567+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
83568+#endif
83569+
83570 mm->total_vm += pages;
83571 }
83572 #endif /* CONFIG_PROC_FS */
83573@@ -2104,7 +2119,7 @@ extern int unpoison_memory(unsigned long pfn);
83574 extern int sysctl_memory_failure_early_kill;
83575 extern int sysctl_memory_failure_recovery;
83576 extern void shake_page(struct page *p, int access);
83577-extern atomic_long_t num_poisoned_pages;
83578+extern atomic_long_unchecked_t num_poisoned_pages;
83579 extern int soft_offline_page(struct page *page, int flags);
83580
83581 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
83582@@ -2139,5 +2154,11 @@ void __init setup_nr_node_ids(void);
83583 static inline void setup_nr_node_ids(void) {}
83584 #endif
83585
83586+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83587+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
83588+#else
83589+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
83590+#endif
83591+
83592 #endif /* __KERNEL__ */
83593 #endif /* _LINUX_MM_H */
83594diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
83595index 6e0b286..90d9c0d 100644
83596--- a/include/linux/mm_types.h
83597+++ b/include/linux/mm_types.h
83598@@ -308,7 +308,9 @@ struct vm_area_struct {
83599 #ifdef CONFIG_NUMA
83600 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
83601 #endif
83602-};
83603+
83604+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
83605+} __randomize_layout;
83606
83607 struct core_thread {
83608 struct task_struct *task;
83609@@ -454,7 +456,25 @@ struct mm_struct {
83610 bool tlb_flush_pending;
83611 #endif
83612 struct uprobes_state uprobes_state;
83613-};
83614+
83615+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
83616+ unsigned long pax_flags;
83617+#endif
83618+
83619+#ifdef CONFIG_PAX_DLRESOLVE
83620+ unsigned long call_dl_resolve;
83621+#endif
83622+
83623+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
83624+ unsigned long call_syscall;
83625+#endif
83626+
83627+#ifdef CONFIG_PAX_ASLR
83628+ unsigned long delta_mmap; /* randomized offset */
83629+ unsigned long delta_stack; /* randomized offset */
83630+#endif
83631+
83632+} __randomize_layout;
83633
83634 static inline void mm_init_cpumask(struct mm_struct *mm)
83635 {
83636diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
83637index c5d5278..f0b68c8 100644
83638--- a/include/linux/mmiotrace.h
83639+++ b/include/linux/mmiotrace.h
83640@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
83641 /* Called from ioremap.c */
83642 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
83643 void __iomem *addr);
83644-extern void mmiotrace_iounmap(volatile void __iomem *addr);
83645+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
83646
83647 /* For anyone to insert markers. Remember trailing newline. */
83648 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
83649@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
83650 {
83651 }
83652
83653-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
83654+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
83655 {
83656 }
83657
83658diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
83659index b21bac4..94142ca 100644
83660--- a/include/linux/mmzone.h
83661+++ b/include/linux/mmzone.h
83662@@ -527,7 +527,7 @@ struct zone {
83663
83664 ZONE_PADDING(_pad3_)
83665 /* Zone statistics */
83666- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83667+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
83668 } ____cacheline_internodealigned_in_smp;
83669
83670 typedef enum {
83671diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
83672index 44eeef0..a92d3f9 100644
83673--- a/include/linux/mod_devicetable.h
83674+++ b/include/linux/mod_devicetable.h
83675@@ -139,7 +139,7 @@ struct usb_device_id {
83676 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
83677 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
83678
83679-#define HID_ANY_ID (~0)
83680+#define HID_ANY_ID (~0U)
83681 #define HID_BUS_ANY 0xffff
83682 #define HID_GROUP_ANY 0x0000
83683
83684@@ -475,7 +475,7 @@ struct dmi_system_id {
83685 const char *ident;
83686 struct dmi_strmatch matches[4];
83687 void *driver_data;
83688-};
83689+} __do_const;
83690 /*
83691 * struct dmi_device_id appears during expansion of
83692 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
83693diff --git a/include/linux/module.h b/include/linux/module.h
83694index 71f282a..b2387e2 100644
83695--- a/include/linux/module.h
83696+++ b/include/linux/module.h
83697@@ -17,9 +17,11 @@
83698 #include <linux/moduleparam.h>
83699 #include <linux/jump_label.h>
83700 #include <linux/export.h>
83701+#include <linux/fs.h>
83702
83703 #include <linux/percpu.h>
83704 #include <asm/module.h>
83705+#include <asm/pgtable.h>
83706
83707 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
83708 #define MODULE_SIG_STRING "~Module signature appended~\n"
83709@@ -42,7 +44,7 @@ struct module_kobject {
83710 struct kobject *drivers_dir;
83711 struct module_param_attrs *mp;
83712 struct completion *kobj_completion;
83713-};
83714+} __randomize_layout;
83715
83716 struct module_attribute {
83717 struct attribute attr;
83718@@ -54,12 +56,13 @@ struct module_attribute {
83719 int (*test)(struct module *);
83720 void (*free)(struct module *);
83721 };
83722+typedef struct module_attribute __no_const module_attribute_no_const;
83723
83724 struct module_version_attribute {
83725 struct module_attribute mattr;
83726 const char *module_name;
83727 const char *version;
83728-} __attribute__ ((__aligned__(sizeof(void *))));
83729+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
83730
83731 extern ssize_t __modver_version_show(struct module_attribute *,
83732 struct module_kobject *, char *);
83733@@ -235,7 +238,7 @@ struct module {
83734
83735 /* Sysfs stuff. */
83736 struct module_kobject mkobj;
83737- struct module_attribute *modinfo_attrs;
83738+ module_attribute_no_const *modinfo_attrs;
83739 const char *version;
83740 const char *srcversion;
83741 struct kobject *holders_dir;
83742@@ -284,19 +287,16 @@ struct module {
83743 int (*init)(void);
83744
83745 /* If this is non-NULL, vfree after init() returns */
83746- void *module_init;
83747+ void *module_init_rx, *module_init_rw;
83748
83749 /* Here is the actual code + data, vfree'd on unload. */
83750- void *module_core;
83751+ void *module_core_rx, *module_core_rw;
83752
83753 /* Here are the sizes of the init and core sections */
83754- unsigned int init_size, core_size;
83755+ unsigned int init_size_rw, core_size_rw;
83756
83757 /* The size of the executable code in each section. */
83758- unsigned int init_text_size, core_text_size;
83759-
83760- /* Size of RO sections of the module (text+rodata) */
83761- unsigned int init_ro_size, core_ro_size;
83762+ unsigned int init_size_rx, core_size_rx;
83763
83764 /* Arch-specific module values */
83765 struct mod_arch_specific arch;
83766@@ -352,6 +352,10 @@ struct module {
83767 #ifdef CONFIG_EVENT_TRACING
83768 struct ftrace_event_call **trace_events;
83769 unsigned int num_trace_events;
83770+ struct file_operations trace_id;
83771+ struct file_operations trace_enable;
83772+ struct file_operations trace_format;
83773+ struct file_operations trace_filter;
83774 #endif
83775 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
83776 unsigned int num_ftrace_callsites;
83777@@ -375,7 +379,7 @@ struct module {
83778 ctor_fn_t *ctors;
83779 unsigned int num_ctors;
83780 #endif
83781-};
83782+} __randomize_layout;
83783 #ifndef MODULE_ARCH_INIT
83784 #define MODULE_ARCH_INIT {}
83785 #endif
83786@@ -396,18 +400,48 @@ bool is_module_address(unsigned long addr);
83787 bool is_module_percpu_address(unsigned long addr);
83788 bool is_module_text_address(unsigned long addr);
83789
83790+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
83791+{
83792+
83793+#ifdef CONFIG_PAX_KERNEXEC
83794+ if (ktla_ktva(addr) >= (unsigned long)start &&
83795+ ktla_ktva(addr) < (unsigned long)start + size)
83796+ return 1;
83797+#endif
83798+
83799+ return ((void *)addr >= start && (void *)addr < start + size);
83800+}
83801+
83802+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
83803+{
83804+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
83805+}
83806+
83807+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
83808+{
83809+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
83810+}
83811+
83812+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
83813+{
83814+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
83815+}
83816+
83817+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
83818+{
83819+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
83820+}
83821+
83822 static inline bool within_module_core(unsigned long addr,
83823 const struct module *mod)
83824 {
83825- return (unsigned long)mod->module_core <= addr &&
83826- addr < (unsigned long)mod->module_core + mod->core_size;
83827+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
83828 }
83829
83830 static inline bool within_module_init(unsigned long addr,
83831 const struct module *mod)
83832 {
83833- return (unsigned long)mod->module_init <= addr &&
83834- addr < (unsigned long)mod->module_init + mod->init_size;
83835+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
83836 }
83837
83838 static inline bool within_module(unsigned long addr, const struct module *mod)
83839diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
83840index 7eeb9bb..68f37e0 100644
83841--- a/include/linux/moduleloader.h
83842+++ b/include/linux/moduleloader.h
83843@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
83844 sections. Returns NULL on failure. */
83845 void *module_alloc(unsigned long size);
83846
83847+#ifdef CONFIG_PAX_KERNEXEC
83848+void *module_alloc_exec(unsigned long size);
83849+#else
83850+#define module_alloc_exec(x) module_alloc(x)
83851+#endif
83852+
83853 /* Free memory returned from module_alloc. */
83854 void module_free(struct module *mod, void *module_region);
83855
83856+#ifdef CONFIG_PAX_KERNEXEC
83857+void module_free_exec(struct module *mod, void *module_region);
83858+#else
83859+#define module_free_exec(x, y) module_free((x), (y))
83860+#endif
83861+
83862 /*
83863 * Apply the given relocation to the (simplified) ELF. Return -error
83864 * or 0.
83865@@ -45,8 +57,10 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
83866 unsigned int relsec,
83867 struct module *me)
83868 {
83869+#ifdef CONFIG_MODULES
83870 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83871 module_name(me));
83872+#endif
83873 return -ENOEXEC;
83874 }
83875 #endif
83876@@ -68,8 +82,10 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
83877 unsigned int relsec,
83878 struct module *me)
83879 {
83880+#ifdef CONFIG_MODULES
83881 printk(KERN_ERR "module %s: REL relocation unsupported\n",
83882 module_name(me));
83883+#endif
83884 return -ENOEXEC;
83885 }
83886 #endif
83887diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
83888index 494f99e..5059f63 100644
83889--- a/include/linux/moduleparam.h
83890+++ b/include/linux/moduleparam.h
83891@@ -293,7 +293,7 @@ static inline void __kernel_param_unlock(void)
83892 * @len is usually just sizeof(string).
83893 */
83894 #define module_param_string(name, string, len, perm) \
83895- static const struct kparam_string __param_string_##name \
83896+ static const struct kparam_string __param_string_##name __used \
83897 = { len, string }; \
83898 __module_param_call(MODULE_PARAM_PREFIX, name, \
83899 &param_ops_string, \
83900@@ -437,7 +437,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
83901 */
83902 #define module_param_array_named(name, array, type, nump, perm) \
83903 param_check_##type(name, &(array)[0]); \
83904- static const struct kparam_array __param_arr_##name \
83905+ static const struct kparam_array __param_arr_##name __used \
83906 = { .max = ARRAY_SIZE(array), .num = nump, \
83907 .ops = &param_ops_##type, \
83908 .elemsize = sizeof(array[0]), .elem = array }; \
83909diff --git a/include/linux/mount.h b/include/linux/mount.h
83910index 9262e4b..0a45f98 100644
83911--- a/include/linux/mount.h
83912+++ b/include/linux/mount.h
83913@@ -66,7 +66,7 @@ struct vfsmount {
83914 struct dentry *mnt_root; /* root of the mounted tree */
83915 struct super_block *mnt_sb; /* pointer to superblock */
83916 int mnt_flags;
83917-};
83918+} __randomize_layout;
83919
83920 struct file; /* forward dec */
83921 struct path;
83922diff --git a/include/linux/namei.h b/include/linux/namei.h
83923index 492de72..1bddcd4 100644
83924--- a/include/linux/namei.h
83925+++ b/include/linux/namei.h
83926@@ -19,7 +19,7 @@ struct nameidata {
83927 unsigned seq, m_seq;
83928 int last_type;
83929 unsigned depth;
83930- char *saved_names[MAX_NESTED_LINKS + 1];
83931+ const char *saved_names[MAX_NESTED_LINKS + 1];
83932 };
83933
83934 /*
83935@@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
83936
83937 extern void nd_jump_link(struct nameidata *nd, struct path *path);
83938
83939-static inline void nd_set_link(struct nameidata *nd, char *path)
83940+static inline void nd_set_link(struct nameidata *nd, const char *path)
83941 {
83942 nd->saved_names[nd->depth] = path;
83943 }
83944
83945-static inline char *nd_get_link(struct nameidata *nd)
83946+static inline const char *nd_get_link(const struct nameidata *nd)
83947 {
83948 return nd->saved_names[nd->depth];
83949 }
83950diff --git a/include/linux/net.h b/include/linux/net.h
83951index 17d8339..81656c0 100644
83952--- a/include/linux/net.h
83953+++ b/include/linux/net.h
83954@@ -192,7 +192,7 @@ struct net_proto_family {
83955 int (*create)(struct net *net, struct socket *sock,
83956 int protocol, int kern);
83957 struct module *owner;
83958-};
83959+} __do_const;
83960
83961 struct iovec;
83962 struct kvec;
83963diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
83964index c8e388e..5d8cd9b 100644
83965--- a/include/linux/netdevice.h
83966+++ b/include/linux/netdevice.h
83967@@ -1147,6 +1147,7 @@ struct net_device_ops {
83968 void *priv);
83969 int (*ndo_get_lock_subclass)(struct net_device *dev);
83970 };
83971+typedef struct net_device_ops __no_const net_device_ops_no_const;
83972
83973 /**
83974 * enum net_device_priv_flags - &struct net_device priv_flags
83975@@ -1485,10 +1486,10 @@ struct net_device {
83976
83977 struct net_device_stats stats;
83978
83979- atomic_long_t rx_dropped;
83980- atomic_long_t tx_dropped;
83981+ atomic_long_unchecked_t rx_dropped;
83982+ atomic_long_unchecked_t tx_dropped;
83983
83984- atomic_t carrier_changes;
83985+ atomic_unchecked_t carrier_changes;
83986
83987 #ifdef CONFIG_WIRELESS_EXT
83988 const struct iw_handler_def * wireless_handlers;
83989diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
83990index 2517ece..0bbfcfb 100644
83991--- a/include/linux/netfilter.h
83992+++ b/include/linux/netfilter.h
83993@@ -85,7 +85,7 @@ struct nf_sockopt_ops {
83994 #endif
83995 /* Use the module struct to lock set/get code in place */
83996 struct module *owner;
83997-};
83998+} __do_const;
83999
84000 /* Function to register/unregister hook points. */
84001 int nf_register_hook(struct nf_hook_ops *reg);
84002diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
84003index e955d47..04a5338 100644
84004--- a/include/linux/netfilter/nfnetlink.h
84005+++ b/include/linux/netfilter/nfnetlink.h
84006@@ -19,7 +19,7 @@ struct nfnl_callback {
84007 const struct nlattr * const cda[]);
84008 const struct nla_policy *policy; /* netlink attribute policy */
84009 const u_int16_t attr_count; /* number of nlattr's */
84010-};
84011+} __do_const;
84012
84013 struct nfnetlink_subsystem {
84014 const char *name;
84015diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
84016new file mode 100644
84017index 0000000..33f4af8
84018--- /dev/null
84019+++ b/include/linux/netfilter/xt_gradm.h
84020@@ -0,0 +1,9 @@
84021+#ifndef _LINUX_NETFILTER_XT_GRADM_H
84022+#define _LINUX_NETFILTER_XT_GRADM_H 1
84023+
84024+struct xt_gradm_mtinfo {
84025+ __u16 flags;
84026+ __u16 invflags;
84027+};
84028+
84029+#endif
84030diff --git a/include/linux/nls.h b/include/linux/nls.h
84031index 520681b..2b7fabb 100644
84032--- a/include/linux/nls.h
84033+++ b/include/linux/nls.h
84034@@ -31,7 +31,7 @@ struct nls_table {
84035 const unsigned char *charset2upper;
84036 struct module *owner;
84037 struct nls_table *next;
84038-};
84039+} __do_const;
84040
84041 /* this value hold the maximum octet of charset */
84042 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
84043@@ -46,7 +46,7 @@ enum utf16_endian {
84044 /* nls_base.c */
84045 extern int __register_nls(struct nls_table *, struct module *);
84046 extern int unregister_nls(struct nls_table *);
84047-extern struct nls_table *load_nls(char *);
84048+extern struct nls_table *load_nls(const char *);
84049 extern void unload_nls(struct nls_table *);
84050 extern struct nls_table *load_nls_default(void);
84051 #define register_nls(nls) __register_nls((nls), THIS_MODULE)
84052diff --git a/include/linux/notifier.h b/include/linux/notifier.h
84053index d14a4c3..a078786 100644
84054--- a/include/linux/notifier.h
84055+++ b/include/linux/notifier.h
84056@@ -54,7 +54,8 @@ struct notifier_block {
84057 notifier_fn_t notifier_call;
84058 struct notifier_block __rcu *next;
84059 int priority;
84060-};
84061+} __do_const;
84062+typedef struct notifier_block __no_const notifier_block_no_const;
84063
84064 struct atomic_notifier_head {
84065 spinlock_t lock;
84066diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
84067index b2a0f15..4d7da32 100644
84068--- a/include/linux/oprofile.h
84069+++ b/include/linux/oprofile.h
84070@@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
84071 int oprofilefs_create_ro_ulong(struct dentry * root,
84072 char const * name, ulong * val);
84073
84074-/** Create a file for read-only access to an atomic_t. */
84075+/** Create a file for read-only access to an atomic_unchecked_t. */
84076 int oprofilefs_create_ro_atomic(struct dentry * root,
84077- char const * name, atomic_t * val);
84078+ char const * name, atomic_unchecked_t * val);
84079
84080 /** create a directory */
84081 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
84082diff --git a/include/linux/padata.h b/include/linux/padata.h
84083index 4386946..f50c615 100644
84084--- a/include/linux/padata.h
84085+++ b/include/linux/padata.h
84086@@ -129,7 +129,7 @@ struct parallel_data {
84087 struct padata_serial_queue __percpu *squeue;
84088 atomic_t reorder_objects;
84089 atomic_t refcnt;
84090- atomic_t seq_nr;
84091+ atomic_unchecked_t seq_nr;
84092 struct padata_cpumask cpumask;
84093 spinlock_t lock ____cacheline_aligned;
84094 unsigned int processed;
84095diff --git a/include/linux/path.h b/include/linux/path.h
84096index d137218..be0c176 100644
84097--- a/include/linux/path.h
84098+++ b/include/linux/path.h
84099@@ -1,13 +1,15 @@
84100 #ifndef _LINUX_PATH_H
84101 #define _LINUX_PATH_H
84102
84103+#include <linux/compiler.h>
84104+
84105 struct dentry;
84106 struct vfsmount;
84107
84108 struct path {
84109 struct vfsmount *mnt;
84110 struct dentry *dentry;
84111-};
84112+} __randomize_layout;
84113
84114 extern void path_get(const struct path *);
84115 extern void path_put(const struct path *);
84116diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
84117index 5f2e559..7d59314 100644
84118--- a/include/linux/pci_hotplug.h
84119+++ b/include/linux/pci_hotplug.h
84120@@ -71,7 +71,8 @@ struct hotplug_slot_ops {
84121 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
84122 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
84123 int (*reset_slot) (struct hotplug_slot *slot, int probe);
84124-};
84125+} __do_const;
84126+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
84127
84128 /**
84129 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
84130diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
84131index 707617a..28a2e7e 100644
84132--- a/include/linux/perf_event.h
84133+++ b/include/linux/perf_event.h
84134@@ -339,8 +339,8 @@ struct perf_event {
84135
84136 enum perf_event_active_state state;
84137 unsigned int attach_state;
84138- local64_t count;
84139- atomic64_t child_count;
84140+ local64_t count; /* PaX: fix it one day */
84141+ atomic64_unchecked_t child_count;
84142
84143 /*
84144 * These are the total time in nanoseconds that the event
84145@@ -391,8 +391,8 @@ struct perf_event {
84146 * These accumulate total time (in nanoseconds) that children
84147 * events have been enabled and running, respectively.
84148 */
84149- atomic64_t child_total_time_enabled;
84150- atomic64_t child_total_time_running;
84151+ atomic64_unchecked_t child_total_time_enabled;
84152+ atomic64_unchecked_t child_total_time_running;
84153
84154 /*
84155 * Protect attach/detach and child_list:
84156@@ -722,7 +722,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
84157 entry->ip[entry->nr++] = ip;
84158 }
84159
84160-extern int sysctl_perf_event_paranoid;
84161+extern int sysctl_perf_event_legitimately_concerned;
84162 extern int sysctl_perf_event_mlock;
84163 extern int sysctl_perf_event_sample_rate;
84164 extern int sysctl_perf_cpu_time_max_percent;
84165@@ -737,19 +737,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
84166 loff_t *ppos);
84167
84168
84169+static inline bool perf_paranoid_any(void)
84170+{
84171+ return sysctl_perf_event_legitimately_concerned > 2;
84172+}
84173+
84174 static inline bool perf_paranoid_tracepoint_raw(void)
84175 {
84176- return sysctl_perf_event_paranoid > -1;
84177+ return sysctl_perf_event_legitimately_concerned > -1;
84178 }
84179
84180 static inline bool perf_paranoid_cpu(void)
84181 {
84182- return sysctl_perf_event_paranoid > 0;
84183+ return sysctl_perf_event_legitimately_concerned > 0;
84184 }
84185
84186 static inline bool perf_paranoid_kernel(void)
84187 {
84188- return sysctl_perf_event_paranoid > 1;
84189+ return sysctl_perf_event_legitimately_concerned > 1;
84190 }
84191
84192 extern void perf_event_init(void);
84193@@ -880,7 +885,7 @@ struct perf_pmu_events_attr {
84194 struct device_attribute attr;
84195 u64 id;
84196 const char *event_str;
84197-};
84198+} __do_const;
84199
84200 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
84201 static struct perf_pmu_events_attr _var = { \
84202diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
84203index 1997ffc..4f1f44d 100644
84204--- a/include/linux/pid_namespace.h
84205+++ b/include/linux/pid_namespace.h
84206@@ -44,7 +44,7 @@ struct pid_namespace {
84207 int hide_pid;
84208 int reboot; /* group exit code if this pidns was rebooted */
84209 unsigned int proc_inum;
84210-};
84211+} __randomize_layout;
84212
84213 extern struct pid_namespace init_pid_ns;
84214
84215diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
84216index eb8b8ac..62649e1 100644
84217--- a/include/linux/pipe_fs_i.h
84218+++ b/include/linux/pipe_fs_i.h
84219@@ -47,10 +47,10 @@ struct pipe_inode_info {
84220 struct mutex mutex;
84221 wait_queue_head_t wait;
84222 unsigned int nrbufs, curbuf, buffers;
84223- unsigned int readers;
84224- unsigned int writers;
84225- unsigned int files;
84226- unsigned int waiting_writers;
84227+ atomic_t readers;
84228+ atomic_t writers;
84229+ atomic_t files;
84230+ atomic_t waiting_writers;
84231 unsigned int r_counter;
84232 unsigned int w_counter;
84233 struct page *tmp_page;
84234diff --git a/include/linux/pm.h b/include/linux/pm.h
84235index 72c0fe0..26918ed 100644
84236--- a/include/linux/pm.h
84237+++ b/include/linux/pm.h
84238@@ -620,6 +620,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
84239 struct dev_pm_domain {
84240 struct dev_pm_ops ops;
84241 };
84242+typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
84243
84244 /*
84245 * The PM_EVENT_ messages are also used by drivers implementing the legacy
84246diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
84247index ebc4c76..7fab7b0 100644
84248--- a/include/linux/pm_domain.h
84249+++ b/include/linux/pm_domain.h
84250@@ -44,11 +44,11 @@ struct gpd_dev_ops {
84251 int (*thaw_early)(struct device *dev);
84252 int (*thaw)(struct device *dev);
84253 bool (*active_wakeup)(struct device *dev);
84254-};
84255+} __no_const;
84256
84257 struct gpd_cpu_data {
84258 unsigned int saved_exit_latency;
84259- struct cpuidle_state *idle_state;
84260+ cpuidle_state_no_const *idle_state;
84261 };
84262
84263 struct generic_pm_domain {
84264diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
84265index 367f49b..d2f5a14 100644
84266--- a/include/linux/pm_runtime.h
84267+++ b/include/linux/pm_runtime.h
84268@@ -125,7 +125,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
84269
84270 static inline void pm_runtime_mark_last_busy(struct device *dev)
84271 {
84272- ACCESS_ONCE(dev->power.last_busy) = jiffies;
84273+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
84274 }
84275
84276 #else /* !CONFIG_PM_RUNTIME */
84277diff --git a/include/linux/pnp.h b/include/linux/pnp.h
84278index 195aafc..49a7bc2 100644
84279--- a/include/linux/pnp.h
84280+++ b/include/linux/pnp.h
84281@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
84282 struct pnp_fixup {
84283 char id[7];
84284 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
84285-};
84286+} __do_const;
84287
84288 /* config parameters */
84289 #define PNP_CONFIG_NORMAL 0x0001
84290diff --git a/include/linux/poison.h b/include/linux/poison.h
84291index 2110a81..13a11bb 100644
84292--- a/include/linux/poison.h
84293+++ b/include/linux/poison.h
84294@@ -19,8 +19,8 @@
84295 * under normal circumstances, used to verify that nobody uses
84296 * non-initialized list entries.
84297 */
84298-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
84299-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
84300+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
84301+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
84302
84303 /********** include/linux/timer.h **********/
84304 /*
84305diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
84306index d8b187c3..9a9257a 100644
84307--- a/include/linux/power/smartreflex.h
84308+++ b/include/linux/power/smartreflex.h
84309@@ -238,7 +238,7 @@ struct omap_sr_class_data {
84310 int (*notify)(struct omap_sr *sr, u32 status);
84311 u8 notify_flags;
84312 u8 class_type;
84313-};
84314+} __do_const;
84315
84316 /**
84317 * struct omap_sr_nvalue_table - Smartreflex n-target value info
84318diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
84319index 4ea1d37..80f4b33 100644
84320--- a/include/linux/ppp-comp.h
84321+++ b/include/linux/ppp-comp.h
84322@@ -84,7 +84,7 @@ struct compressor {
84323 struct module *owner;
84324 /* Extra skb space needed by the compressor algorithm */
84325 unsigned int comp_extra;
84326-};
84327+} __do_const;
84328
84329 /*
84330 * The return value from decompress routine is the length of the
84331diff --git a/include/linux/preempt.h b/include/linux/preempt.h
84332index de83b4e..c4b997d 100644
84333--- a/include/linux/preempt.h
84334+++ b/include/linux/preempt.h
84335@@ -27,11 +27,16 @@ extern void preempt_count_sub(int val);
84336 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
84337 #endif
84338
84339+#define raw_preempt_count_add(val) __preempt_count_add(val)
84340+#define raw_preempt_count_sub(val) __preempt_count_sub(val)
84341+
84342 #define __preempt_count_inc() __preempt_count_add(1)
84343 #define __preempt_count_dec() __preempt_count_sub(1)
84344
84345 #define preempt_count_inc() preempt_count_add(1)
84346+#define raw_preempt_count_inc() raw_preempt_count_add(1)
84347 #define preempt_count_dec() preempt_count_sub(1)
84348+#define raw_preempt_count_dec() raw_preempt_count_sub(1)
84349
84350 #ifdef CONFIG_PREEMPT_COUNT
84351
84352@@ -41,6 +46,12 @@ do { \
84353 barrier(); \
84354 } while (0)
84355
84356+#define raw_preempt_disable() \
84357+do { \
84358+ raw_preempt_count_inc(); \
84359+ barrier(); \
84360+} while (0)
84361+
84362 #define sched_preempt_enable_no_resched() \
84363 do { \
84364 barrier(); \
84365@@ -49,6 +60,12 @@ do { \
84366
84367 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
84368
84369+#define raw_preempt_enable_no_resched() \
84370+do { \
84371+ barrier(); \
84372+ raw_preempt_count_dec(); \
84373+} while (0)
84374+
84375 #ifdef CONFIG_PREEMPT
84376 #define preempt_enable() \
84377 do { \
84378@@ -113,8 +130,10 @@ do { \
84379 * region.
84380 */
84381 #define preempt_disable() barrier()
84382+#define raw_preempt_disable() barrier()
84383 #define sched_preempt_enable_no_resched() barrier()
84384 #define preempt_enable_no_resched() barrier()
84385+#define raw_preempt_enable_no_resched() barrier()
84386 #define preempt_enable() barrier()
84387 #define preempt_check_resched() do { } while (0)
84388
84389@@ -128,11 +147,13 @@ do { \
84390 /*
84391 * Modules have no business playing preemption tricks.
84392 */
84393+#ifndef CONFIG_PAX_KERNEXEC
84394 #undef sched_preempt_enable_no_resched
84395 #undef preempt_enable_no_resched
84396 #undef preempt_enable_no_resched_notrace
84397 #undef preempt_check_resched
84398 #endif
84399+#endif
84400
84401 #define preempt_set_need_resched() \
84402 do { \
84403diff --git a/include/linux/printk.h b/include/linux/printk.h
84404index d78125f..7f36596 100644
84405--- a/include/linux/printk.h
84406+++ b/include/linux/printk.h
84407@@ -124,6 +124,8 @@ static inline __printf(1, 2) __cold
84408 void early_printk(const char *s, ...) { }
84409 #endif
84410
84411+extern int kptr_restrict;
84412+
84413 #ifdef CONFIG_PRINTK
84414 asmlinkage __printf(5, 0)
84415 int vprintk_emit(int facility, int level,
84416@@ -158,7 +160,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
84417
84418 extern int printk_delay_msec;
84419 extern int dmesg_restrict;
84420-extern int kptr_restrict;
84421
84422 extern void wake_up_klogd(void);
84423
84424diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
84425index 9d117f6..d832b31 100644
84426--- a/include/linux/proc_fs.h
84427+++ b/include/linux/proc_fs.h
84428@@ -17,8 +17,11 @@ extern void proc_flush_task(struct task_struct *);
84429 extern struct proc_dir_entry *proc_symlink(const char *,
84430 struct proc_dir_entry *, const char *);
84431 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
84432+extern struct proc_dir_entry *proc_mkdir_restrict(const char *, struct proc_dir_entry *);
84433 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
84434 struct proc_dir_entry *, void *);
84435+extern struct proc_dir_entry *proc_mkdir_data_restrict(const char *, umode_t,
84436+ struct proc_dir_entry *, void *);
84437 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
84438 struct proc_dir_entry *);
84439
84440@@ -34,6 +37,19 @@ static inline struct proc_dir_entry *proc_create(
84441 return proc_create_data(name, mode, parent, proc_fops, NULL);
84442 }
84443
84444+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
84445+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
84446+{
84447+#ifdef CONFIG_GRKERNSEC_PROC_USER
84448+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
84449+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84450+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
84451+#else
84452+ return proc_create_data(name, mode, parent, proc_fops, NULL);
84453+#endif
84454+}
84455+
84456+
84457 extern void proc_set_size(struct proc_dir_entry *, loff_t);
84458 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
84459 extern void *PDE_DATA(const struct inode *);
84460@@ -56,8 +72,12 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
84461 struct proc_dir_entry *parent,const char *dest) { return NULL;}
84462 static inline struct proc_dir_entry *proc_mkdir(const char *name,
84463 struct proc_dir_entry *parent) {return NULL;}
84464+static inline struct proc_dir_entry *proc_mkdir_restrict(const char *name,
84465+ struct proc_dir_entry *parent) { return NULL; }
84466 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
84467 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84468+static inline struct proc_dir_entry *proc_mkdir_data_restrict(const char *name,
84469+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
84470 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
84471 umode_t mode, struct proc_dir_entry *parent) { return NULL; }
84472 #define proc_create(name, mode, parent, proc_fops) ({NULL;})
84473@@ -77,7 +97,7 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
84474 static inline struct proc_dir_entry *proc_net_mkdir(
84475 struct net *net, const char *name, struct proc_dir_entry *parent)
84476 {
84477- return proc_mkdir_data(name, 0, parent, net);
84478+ return proc_mkdir_data_restrict(name, 0, parent, net);
84479 }
84480
84481 #endif /* _LINUX_PROC_FS_H */
84482diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
84483index 34a1e10..70f6bde 100644
84484--- a/include/linux/proc_ns.h
84485+++ b/include/linux/proc_ns.h
84486@@ -14,7 +14,7 @@ struct proc_ns_operations {
84487 void (*put)(void *ns);
84488 int (*install)(struct nsproxy *nsproxy, void *ns);
84489 unsigned int (*inum)(void *ns);
84490-};
84491+} __do_const __randomize_layout;
84492
84493 struct proc_ns {
84494 void *ns;
84495diff --git a/include/linux/quota.h b/include/linux/quota.h
84496index 80d345a..9e89a9a 100644
84497--- a/include/linux/quota.h
84498+++ b/include/linux/quota.h
84499@@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
84500
84501 extern bool qid_eq(struct kqid left, struct kqid right);
84502 extern bool qid_lt(struct kqid left, struct kqid right);
84503-extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
84504+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
84505 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
84506 extern bool qid_valid(struct kqid qid);
84507
84508diff --git a/include/linux/random.h b/include/linux/random.h
84509index 57fbbff..2170304 100644
84510--- a/include/linux/random.h
84511+++ b/include/linux/random.h
84512@@ -9,9 +9,19 @@
84513 #include <uapi/linux/random.h>
84514
84515 extern void add_device_randomness(const void *, unsigned int);
84516+
84517+static inline void add_latent_entropy(void)
84518+{
84519+
84520+#ifdef LATENT_ENTROPY_PLUGIN
84521+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84522+#endif
84523+
84524+}
84525+
84526 extern void add_input_randomness(unsigned int type, unsigned int code,
84527- unsigned int value);
84528-extern void add_interrupt_randomness(int irq, int irq_flags);
84529+ unsigned int value) __latent_entropy;
84530+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
84531
84532 extern void get_random_bytes(void *buf, int nbytes);
84533 extern void get_random_bytes_arch(void *buf, int nbytes);
84534@@ -22,10 +32,10 @@ extern int random_int_secret_init(void);
84535 extern const struct file_operations random_fops, urandom_fops;
84536 #endif
84537
84538-unsigned int get_random_int(void);
84539+unsigned int __intentional_overflow(-1) get_random_int(void);
84540 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
84541
84542-u32 prandom_u32(void);
84543+u32 prandom_u32(void) __intentional_overflow(-1);
84544 void prandom_bytes(void *buf, int nbytes);
84545 void prandom_seed(u32 seed);
84546 void prandom_reseed_late(void);
84547@@ -37,6 +47,11 @@ struct rnd_state {
84548 u32 prandom_u32_state(struct rnd_state *state);
84549 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
84550
84551+static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
84552+{
84553+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
84554+}
84555+
84556 /**
84557 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
84558 * @ep_ro: right open interval endpoint
84559@@ -49,7 +64,7 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
84560 *
84561 * Returns: pseudo-random number in interval [0, ep_ro)
84562 */
84563-static inline u32 prandom_u32_max(u32 ep_ro)
84564+static inline u32 __intentional_overflow(-1) prandom_u32_max(u32 ep_ro)
84565 {
84566 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
84567 }
84568diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
84569index fea49b5..2ac22bb 100644
84570--- a/include/linux/rbtree_augmented.h
84571+++ b/include/linux/rbtree_augmented.h
84572@@ -80,7 +80,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
84573 old->rbaugmented = rbcompute(old); \
84574 } \
84575 rbstatic const struct rb_augment_callbacks rbname = { \
84576- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
84577+ .propagate = rbname ## _propagate, \
84578+ .copy = rbname ## _copy, \
84579+ .rotate = rbname ## _rotate \
84580 };
84581
84582
84583diff --git a/include/linux/rculist.h b/include/linux/rculist.h
84584index 372ad5e..d4373f8 100644
84585--- a/include/linux/rculist.h
84586+++ b/include/linux/rculist.h
84587@@ -29,8 +29,8 @@
84588 */
84589 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
84590 {
84591- ACCESS_ONCE(list->next) = list;
84592- ACCESS_ONCE(list->prev) = list;
84593+ ACCESS_ONCE_RW(list->next) = list;
84594+ ACCESS_ONCE_RW(list->prev) = list;
84595 }
84596
84597 /*
84598@@ -59,6 +59,9 @@ void __list_add_rcu(struct list_head *new,
84599 struct list_head *prev, struct list_head *next);
84600 #endif
84601
84602+void __pax_list_add_rcu(struct list_head *new,
84603+ struct list_head *prev, struct list_head *next);
84604+
84605 /**
84606 * list_add_rcu - add a new entry to rcu-protected list
84607 * @new: new entry to be added
84608@@ -80,6 +83,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
84609 __list_add_rcu(new, head, head->next);
84610 }
84611
84612+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
84613+{
84614+ __pax_list_add_rcu(new, head, head->next);
84615+}
84616+
84617 /**
84618 * list_add_tail_rcu - add a new entry to rcu-protected list
84619 * @new: new entry to be added
84620@@ -102,6 +110,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
84621 __list_add_rcu(new, head->prev, head);
84622 }
84623
84624+static inline void pax_list_add_tail_rcu(struct list_head *new,
84625+ struct list_head *head)
84626+{
84627+ __pax_list_add_rcu(new, head->prev, head);
84628+}
84629+
84630 /**
84631 * list_del_rcu - deletes entry from list without re-initialization
84632 * @entry: the element to delete from the list.
84633@@ -132,6 +146,8 @@ static inline void list_del_rcu(struct list_head *entry)
84634 entry->prev = LIST_POISON2;
84635 }
84636
84637+extern void pax_list_del_rcu(struct list_head *entry);
84638+
84639 /**
84640 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
84641 * @n: the element to delete from the hash list.
84642diff --git a/include/linux/reboot.h b/include/linux/reboot.h
84643index 48bf152..d38b785 100644
84644--- a/include/linux/reboot.h
84645+++ b/include/linux/reboot.h
84646@@ -44,9 +44,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
84647 */
84648
84649 extern void migrate_to_reboot_cpu(void);
84650-extern void machine_restart(char *cmd);
84651-extern void machine_halt(void);
84652-extern void machine_power_off(void);
84653+extern void machine_restart(char *cmd) __noreturn;
84654+extern void machine_halt(void) __noreturn;
84655+extern void machine_power_off(void) __noreturn;
84656
84657 extern void machine_shutdown(void);
84658 struct pt_regs;
84659@@ -57,9 +57,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
84660 */
84661
84662 extern void kernel_restart_prepare(char *cmd);
84663-extern void kernel_restart(char *cmd);
84664-extern void kernel_halt(void);
84665-extern void kernel_power_off(void);
84666+extern void kernel_restart(char *cmd) __noreturn;
84667+extern void kernel_halt(void) __noreturn;
84668+extern void kernel_power_off(void) __noreturn;
84669
84670 extern int C_A_D; /* for sysctl */
84671 void ctrl_alt_del(void);
84672@@ -73,7 +73,7 @@ extern int orderly_poweroff(bool force);
84673 * Emergency restart, callable from an interrupt handler.
84674 */
84675
84676-extern void emergency_restart(void);
84677+extern void emergency_restart(void) __noreturn;
84678 #include <asm/emergency-restart.h>
84679
84680 #endif /* _LINUX_REBOOT_H */
84681diff --git a/include/linux/regset.h b/include/linux/regset.h
84682index 8e0c9fe..ac4d221 100644
84683--- a/include/linux/regset.h
84684+++ b/include/linux/regset.h
84685@@ -161,7 +161,8 @@ struct user_regset {
84686 unsigned int align;
84687 unsigned int bias;
84688 unsigned int core_note_type;
84689-};
84690+} __do_const;
84691+typedef struct user_regset __no_const user_regset_no_const;
84692
84693 /**
84694 * struct user_regset_view - available regsets
84695diff --git a/include/linux/relay.h b/include/linux/relay.h
84696index d7c8359..818daf5 100644
84697--- a/include/linux/relay.h
84698+++ b/include/linux/relay.h
84699@@ -157,7 +157,7 @@ struct rchan_callbacks
84700 * The callback should return 0 if successful, negative if not.
84701 */
84702 int (*remove_buf_file)(struct dentry *dentry);
84703-};
84704+} __no_const;
84705
84706 /*
84707 * CONFIG_RELAY kernel API, kernel/relay.c
84708diff --git a/include/linux/rio.h b/include/linux/rio.h
84709index 6bda06f..bf39a9b 100644
84710--- a/include/linux/rio.h
84711+++ b/include/linux/rio.h
84712@@ -358,7 +358,7 @@ struct rio_ops {
84713 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
84714 u64 rstart, u32 size, u32 flags);
84715 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
84716-};
84717+} __no_const;
84718
84719 #define RIO_RESOURCE_MEM 0x00000100
84720 #define RIO_RESOURCE_DOORBELL 0x00000200
84721diff --git a/include/linux/rmap.h b/include/linux/rmap.h
84722index be57450..31cf65e 100644
84723--- a/include/linux/rmap.h
84724+++ b/include/linux/rmap.h
84725@@ -144,8 +144,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
84726 void anon_vma_init(void); /* create anon_vma_cachep */
84727 int anon_vma_prepare(struct vm_area_struct *);
84728 void unlink_anon_vmas(struct vm_area_struct *);
84729-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
84730-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
84731+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
84732+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
84733
84734 static inline void anon_vma_merge(struct vm_area_struct *vma,
84735 struct vm_area_struct *next)
84736diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
84737index ed8f9e7..999bc96 100644
84738--- a/include/linux/scatterlist.h
84739+++ b/include/linux/scatterlist.h
84740@@ -1,6 +1,7 @@
84741 #ifndef _LINUX_SCATTERLIST_H
84742 #define _LINUX_SCATTERLIST_H
84743
84744+#include <linux/sched.h>
84745 #include <linux/string.h>
84746 #include <linux/bug.h>
84747 #include <linux/mm.h>
84748@@ -114,6 +115,12 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
84749 #ifdef CONFIG_DEBUG_SG
84750 BUG_ON(!virt_addr_valid(buf));
84751 #endif
84752+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84753+ if (object_starts_on_stack(buf)) {
84754+ void *adjbuf = buf - current->stack + current->lowmem_stack;
84755+ sg_set_page(sg, virt_to_page(adjbuf), buflen, offset_in_page(adjbuf));
84756+ } else
84757+#endif
84758 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
84759 }
84760
84761diff --git a/include/linux/sched.h b/include/linux/sched.h
84762index 2b1d9e9..10ba706 100644
84763--- a/include/linux/sched.h
84764+++ b/include/linux/sched.h
84765@@ -132,6 +132,7 @@ struct fs_struct;
84766 struct perf_event_context;
84767 struct blk_plug;
84768 struct filename;
84769+struct linux_binprm;
84770
84771 #define VMACACHE_BITS 2
84772 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
84773@@ -374,7 +375,7 @@ extern char __sched_text_start[], __sched_text_end[];
84774 extern int in_sched_functions(unsigned long addr);
84775
84776 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
84777-extern signed long schedule_timeout(signed long timeout);
84778+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
84779 extern signed long schedule_timeout_interruptible(signed long timeout);
84780 extern signed long schedule_timeout_killable(signed long timeout);
84781 extern signed long schedule_timeout_uninterruptible(signed long timeout);
84782@@ -385,6 +386,19 @@ struct nsproxy;
84783 struct user_namespace;
84784
84785 #ifdef CONFIG_MMU
84786+
84787+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
84788+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
84789+#else
84790+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
84791+{
84792+ return 0;
84793+}
84794+#endif
84795+
84796+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
84797+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
84798+
84799 extern void arch_pick_mmap_layout(struct mm_struct *mm);
84800 extern unsigned long
84801 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
84802@@ -682,6 +696,17 @@ struct signal_struct {
84803 #ifdef CONFIG_TASKSTATS
84804 struct taskstats *stats;
84805 #endif
84806+
84807+#ifdef CONFIG_GRKERNSEC
84808+ u32 curr_ip;
84809+ u32 saved_ip;
84810+ u32 gr_saddr;
84811+ u32 gr_daddr;
84812+ u16 gr_sport;
84813+ u16 gr_dport;
84814+ u8 used_accept:1;
84815+#endif
84816+
84817 #ifdef CONFIG_AUDIT
84818 unsigned audit_tty;
84819 unsigned audit_tty_log_passwd;
84820@@ -708,7 +733,7 @@ struct signal_struct {
84821 struct mutex cred_guard_mutex; /* guard against foreign influences on
84822 * credential calculations
84823 * (notably. ptrace) */
84824-};
84825+} __randomize_layout;
84826
84827 /*
84828 * Bits in flags field of signal_struct.
84829@@ -761,6 +786,14 @@ struct user_struct {
84830 struct key *session_keyring; /* UID's default session keyring */
84831 #endif
84832
84833+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
84834+ unsigned char kernel_banned;
84835+#endif
84836+#ifdef CONFIG_GRKERNSEC_BRUTE
84837+ unsigned char suid_banned;
84838+ unsigned long suid_ban_expires;
84839+#endif
84840+
84841 /* Hash table maintenance information */
84842 struct hlist_node uidhash_node;
84843 kuid_t uid;
84844@@ -768,7 +801,7 @@ struct user_struct {
84845 #ifdef CONFIG_PERF_EVENTS
84846 atomic_long_t locked_vm;
84847 #endif
84848-};
84849+} __randomize_layout;
84850
84851 extern int uids_sysfs_init(void);
84852
84853@@ -1224,6 +1257,9 @@ enum perf_event_task_context {
84854 struct task_struct {
84855 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
84856 void *stack;
84857+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
84858+ void *lowmem_stack;
84859+#endif
84860 atomic_t usage;
84861 unsigned int flags; /* per process flags, defined below */
84862 unsigned int ptrace;
84863@@ -1345,8 +1381,8 @@ struct task_struct {
84864 struct list_head thread_node;
84865
84866 struct completion *vfork_done; /* for vfork() */
84867- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
84868- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84869+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
84870+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
84871
84872 cputime_t utime, stime, utimescaled, stimescaled;
84873 cputime_t gtime;
84874@@ -1371,11 +1407,6 @@ struct task_struct {
84875 struct task_cputime cputime_expires;
84876 struct list_head cpu_timers[3];
84877
84878-/* process credentials */
84879- const struct cred __rcu *real_cred; /* objective and real subjective task
84880- * credentials (COW) */
84881- const struct cred __rcu *cred; /* effective (overridable) subjective task
84882- * credentials (COW) */
84883 char comm[TASK_COMM_LEN]; /* executable name excluding path
84884 - access with [gs]et_task_comm (which lock
84885 it with task_lock())
84886@@ -1393,6 +1424,10 @@ struct task_struct {
84887 #endif
84888 /* CPU-specific state of this task */
84889 struct thread_struct thread;
84890+/* thread_info moved to task_struct */
84891+#ifdef CONFIG_X86
84892+ struct thread_info tinfo;
84893+#endif
84894 /* filesystem information */
84895 struct fs_struct *fs;
84896 /* open file information */
84897@@ -1467,6 +1502,10 @@ struct task_struct {
84898 gfp_t lockdep_reclaim_gfp;
84899 #endif
84900
84901+/* process credentials */
84902+ const struct cred __rcu *real_cred; /* objective and real subjective task
84903+ * credentials (COW) */
84904+
84905 /* journalling filesystem info */
84906 void *journal_info;
84907
84908@@ -1505,6 +1544,10 @@ struct task_struct {
84909 /* cg_list protected by css_set_lock and tsk->alloc_lock */
84910 struct list_head cg_list;
84911 #endif
84912+
84913+ const struct cred __rcu *cred; /* effective (overridable) subjective task
84914+ * credentials (COW) */
84915+
84916 #ifdef CONFIG_FUTEX
84917 struct robust_list_head __user *robust_list;
84918 #ifdef CONFIG_COMPAT
84919@@ -1644,7 +1687,78 @@ struct task_struct {
84920 unsigned int sequential_io;
84921 unsigned int sequential_io_avg;
84922 #endif
84923-};
84924+
84925+#ifdef CONFIG_GRKERNSEC
84926+ /* grsecurity */
84927+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
84928+ u64 exec_id;
84929+#endif
84930+#ifdef CONFIG_GRKERNSEC_SETXID
84931+ const struct cred *delayed_cred;
84932+#endif
84933+ struct dentry *gr_chroot_dentry;
84934+ struct acl_subject_label *acl;
84935+ struct acl_subject_label *tmpacl;
84936+ struct acl_role_label *role;
84937+ struct file *exec_file;
84938+ unsigned long brute_expires;
84939+ u16 acl_role_id;
84940+ u8 inherited;
84941+ /* is this the task that authenticated to the special role */
84942+ u8 acl_sp_role;
84943+ u8 is_writable;
84944+ u8 brute;
84945+ u8 gr_is_chrooted;
84946+#endif
84947+
84948+} __randomize_layout;
84949+
84950+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
84951+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
84952+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
84953+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
84954+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
84955+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
84956+
84957+#ifdef CONFIG_PAX_SOFTMODE
84958+extern int pax_softmode;
84959+#endif
84960+
84961+extern int pax_check_flags(unsigned long *);
84962+#define PAX_PARSE_FLAGS_FALLBACK (~0UL)
84963+
84964+/* if tsk != current then task_lock must be held on it */
84965+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
84966+static inline unsigned long pax_get_flags(struct task_struct *tsk)
84967+{
84968+ if (likely(tsk->mm))
84969+ return tsk->mm->pax_flags;
84970+ else
84971+ return 0UL;
84972+}
84973+
84974+/* if tsk != current then task_lock must be held on it */
84975+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
84976+{
84977+ if (likely(tsk->mm)) {
84978+ tsk->mm->pax_flags = flags;
84979+ return 0;
84980+ }
84981+ return -EINVAL;
84982+}
84983+#endif
84984+
84985+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
84986+extern void pax_set_initial_flags(struct linux_binprm *bprm);
84987+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
84988+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
84989+#endif
84990+
84991+struct path;
84992+extern char *pax_get_path(const struct path *path, char *buf, int buflen);
84993+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
84994+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
84995+extern void pax_report_refcount_overflow(struct pt_regs *regs);
84996
84997 /* Future-safe accessor for struct task_struct's cpus_allowed. */
84998 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
84999@@ -1726,7 +1840,7 @@ struct pid_namespace;
85000 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
85001 struct pid_namespace *ns);
85002
85003-static inline pid_t task_pid_nr(struct task_struct *tsk)
85004+static inline pid_t task_pid_nr(const struct task_struct *tsk)
85005 {
85006 return tsk->pid;
85007 }
85008@@ -2097,6 +2211,25 @@ extern u64 sched_clock_cpu(int cpu);
85009
85010 extern void sched_clock_init(void);
85011
85012+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
85013+static inline void populate_stack(void)
85014+{
85015+ struct task_struct *curtask = current;
85016+ int c;
85017+ int *ptr = curtask->stack;
85018+ int *end = curtask->stack + THREAD_SIZE;
85019+
85020+ while (ptr < end) {
85021+ c = *(volatile int *)ptr;
85022+ ptr += PAGE_SIZE/sizeof(int);
85023+ }
85024+}
85025+#else
85026+static inline void populate_stack(void)
85027+{
85028+}
85029+#endif
85030+
85031 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
85032 static inline void sched_clock_tick(void)
85033 {
85034@@ -2230,7 +2363,9 @@ void yield(void);
85035 extern struct exec_domain default_exec_domain;
85036
85037 union thread_union {
85038+#ifndef CONFIG_X86
85039 struct thread_info thread_info;
85040+#endif
85041 unsigned long stack[THREAD_SIZE/sizeof(long)];
85042 };
85043
85044@@ -2263,6 +2398,7 @@ extern struct pid_namespace init_pid_ns;
85045 */
85046
85047 extern struct task_struct *find_task_by_vpid(pid_t nr);
85048+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
85049 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
85050 struct pid_namespace *ns);
85051
85052@@ -2427,7 +2563,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
85053 extern void exit_itimers(struct signal_struct *);
85054 extern void flush_itimer_signals(void);
85055
85056-extern void do_group_exit(int);
85057+extern __noreturn void do_group_exit(int);
85058
85059 extern int do_execve(struct filename *,
85060 const char __user * const __user *,
85061@@ -2642,9 +2778,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
85062
85063 #endif
85064
85065-static inline int object_is_on_stack(void *obj)
85066+static inline int object_starts_on_stack(const void *obj)
85067 {
85068- void *stack = task_stack_page(current);
85069+ const void *stack = task_stack_page(current);
85070
85071 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
85072 }
85073diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
85074index 596a0e0..bea77ec 100644
85075--- a/include/linux/sched/sysctl.h
85076+++ b/include/linux/sched/sysctl.h
85077@@ -34,6 +34,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
85078 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
85079
85080 extern int sysctl_max_map_count;
85081+extern unsigned long sysctl_heap_stack_gap;
85082
85083 extern unsigned int sysctl_sched_latency;
85084 extern unsigned int sysctl_sched_min_granularity;
85085diff --git a/include/linux/security.h b/include/linux/security.h
85086index 623f90e..90b39da 100644
85087--- a/include/linux/security.h
85088+++ b/include/linux/security.h
85089@@ -27,6 +27,7 @@
85090 #include <linux/slab.h>
85091 #include <linux/err.h>
85092 #include <linux/string.h>
85093+#include <linux/grsecurity.h>
85094
85095 struct linux_binprm;
85096 struct cred;
85097@@ -116,8 +117,6 @@ struct seq_file;
85098
85099 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
85100
85101-void reset_security_ops(void);
85102-
85103 #ifdef CONFIG_MMU
85104 extern unsigned long mmap_min_addr;
85105 extern unsigned long dac_mmap_min_addr;
85106@@ -1729,7 +1728,7 @@ struct security_operations {
85107 struct audit_context *actx);
85108 void (*audit_rule_free) (void *lsmrule);
85109 #endif /* CONFIG_AUDIT */
85110-};
85111+} __randomize_layout;
85112
85113 /* prototypes */
85114 extern int security_init(void);
85115diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
85116index dc368b8..e895209 100644
85117--- a/include/linux/semaphore.h
85118+++ b/include/linux/semaphore.h
85119@@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
85120 }
85121
85122 extern void down(struct semaphore *sem);
85123-extern int __must_check down_interruptible(struct semaphore *sem);
85124+extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
85125 extern int __must_check down_killable(struct semaphore *sem);
85126 extern int __must_check down_trylock(struct semaphore *sem);
85127 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
85128diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
85129index 52e0097..383f21d 100644
85130--- a/include/linux/seq_file.h
85131+++ b/include/linux/seq_file.h
85132@@ -27,6 +27,9 @@ struct seq_file {
85133 struct mutex lock;
85134 const struct seq_operations *op;
85135 int poll_event;
85136+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
85137+ u64 exec_id;
85138+#endif
85139 #ifdef CONFIG_USER_NS
85140 struct user_namespace *user_ns;
85141 #endif
85142@@ -39,6 +42,7 @@ struct seq_operations {
85143 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
85144 int (*show) (struct seq_file *m, void *v);
85145 };
85146+typedef struct seq_operations __no_const seq_operations_no_const;
85147
85148 #define SEQ_SKIP 1
85149
85150@@ -96,6 +100,7 @@ void seq_pad(struct seq_file *m, char c);
85151
85152 char *mangle_path(char *s, const char *p, const char *esc);
85153 int seq_open(struct file *, const struct seq_operations *);
85154+int seq_open_restrict(struct file *, const struct seq_operations *);
85155 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
85156 loff_t seq_lseek(struct file *, loff_t, int);
85157 int seq_release(struct inode *, struct file *);
85158@@ -138,6 +143,7 @@ static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
85159 }
85160
85161 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
85162+int single_open_restrict(struct file *, int (*)(struct seq_file *, void *), void *);
85163 int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
85164 int single_release(struct inode *, struct file *);
85165 void *__seq_open_private(struct file *, const struct seq_operations *, int);
85166diff --git a/include/linux/shm.h b/include/linux/shm.h
85167index 6fb8016..ab4465e 100644
85168--- a/include/linux/shm.h
85169+++ b/include/linux/shm.h
85170@@ -22,6 +22,10 @@ struct shmid_kernel /* private to the kernel */
85171 /* The task created the shm object. NULL if the task is dead. */
85172 struct task_struct *shm_creator;
85173 struct list_head shm_clist; /* list by creator */
85174+#ifdef CONFIG_GRKERNSEC
85175+ u64 shm_createtime;
85176+ pid_t shm_lapid;
85177+#endif
85178 };
85179
85180 /* shm_mode upper byte flags */
85181diff --git a/include/linux/signal.h b/include/linux/signal.h
85182index 750196f..ae7a3a4 100644
85183--- a/include/linux/signal.h
85184+++ b/include/linux/signal.h
85185@@ -292,7 +292,7 @@ static inline void allow_signal(int sig)
85186 * know it'll be handled, so that they don't get converted to
85187 * SIGKILL or just silently dropped.
85188 */
85189- kernel_sigaction(sig, (__force __sighandler_t)2);
85190+ kernel_sigaction(sig, (__force_user __sighandler_t)2);
85191 }
85192
85193 static inline void disallow_signal(int sig)
85194diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
85195index abde271..bc9ece1 100644
85196--- a/include/linux/skbuff.h
85197+++ b/include/linux/skbuff.h
85198@@ -728,7 +728,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
85199 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
85200 int node);
85201 struct sk_buff *build_skb(void *data, unsigned int frag_size);
85202-static inline struct sk_buff *alloc_skb(unsigned int size,
85203+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
85204 gfp_t priority)
85205 {
85206 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
85207@@ -1845,7 +1845,7 @@ static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
85208 return skb->inner_transport_header - skb->inner_network_header;
85209 }
85210
85211-static inline int skb_network_offset(const struct sk_buff *skb)
85212+static inline int __intentional_overflow(0) skb_network_offset(const struct sk_buff *skb)
85213 {
85214 return skb_network_header(skb) - skb->data;
85215 }
85216@@ -1917,7 +1917,7 @@ static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
85217 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
85218 */
85219 #ifndef NET_SKB_PAD
85220-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
85221+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
85222 #endif
85223
85224 int ___pskb_trim(struct sk_buff *skb, unsigned int len);
85225@@ -2524,7 +2524,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
85226 int *err);
85227 unsigned int datagram_poll(struct file *file, struct socket *sock,
85228 struct poll_table_struct *wait);
85229-int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
85230+int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
85231 struct iovec *to, int size);
85232 int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
85233 struct iovec *iov);
85234@@ -2918,6 +2918,9 @@ static inline void nf_reset(struct sk_buff *skb)
85235 nf_bridge_put(skb->nf_bridge);
85236 skb->nf_bridge = NULL;
85237 #endif
85238+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
85239+ skb->nf_trace = 0;
85240+#endif
85241 }
85242
85243 static inline void nf_reset_trace(struct sk_buff *skb)
85244diff --git a/include/linux/slab.h b/include/linux/slab.h
85245index 1d9abb7..b1e8b10 100644
85246--- a/include/linux/slab.h
85247+++ b/include/linux/slab.h
85248@@ -14,15 +14,29 @@
85249 #include <linux/gfp.h>
85250 #include <linux/types.h>
85251 #include <linux/workqueue.h>
85252-
85253+#include <linux/err.h>
85254
85255 /*
85256 * Flags to pass to kmem_cache_create().
85257 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
85258 */
85259 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
85260+
85261+#ifdef CONFIG_PAX_USERCOPY_SLABS
85262+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
85263+#else
85264+#define SLAB_USERCOPY 0x00000000UL
85265+#endif
85266+
85267 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
85268 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
85269+
85270+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85271+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
85272+#else
85273+#define SLAB_NO_SANITIZE 0x00000000UL
85274+#endif
85275+
85276 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
85277 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
85278 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
85279@@ -98,10 +112,13 @@
85280 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
85281 * Both make kfree a no-op.
85282 */
85283-#define ZERO_SIZE_PTR ((void *)16)
85284+#define ZERO_SIZE_PTR \
85285+({ \
85286+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
85287+ (void *)(-MAX_ERRNO-1L); \
85288+})
85289
85290-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
85291- (unsigned long)ZERO_SIZE_PTR)
85292+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
85293
85294 #include <linux/kmemleak.h>
85295
85296@@ -144,6 +161,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
85297 void kfree(const void *);
85298 void kzfree(const void *);
85299 size_t ksize(const void *);
85300+const char *check_heap_object(const void *ptr, unsigned long n);
85301+bool is_usercopy_object(const void *ptr);
85302
85303 /*
85304 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
85305@@ -176,7 +195,7 @@ struct kmem_cache {
85306 unsigned int align; /* Alignment as calculated */
85307 unsigned long flags; /* Active flags on the slab */
85308 const char *name; /* Slab name for sysfs */
85309- int refcount; /* Use counter */
85310+ atomic_t refcount; /* Use counter */
85311 void (*ctor)(void *); /* Called on object slot creation */
85312 struct list_head list; /* List of all slab caches on the system */
85313 };
85314@@ -261,6 +280,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
85315 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85316 #endif
85317
85318+#ifdef CONFIG_PAX_USERCOPY_SLABS
85319+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
85320+#endif
85321+
85322 /*
85323 * Figure out which kmalloc slab an allocation of a certain size
85324 * belongs to.
85325@@ -269,7 +292,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
85326 * 2 = 120 .. 192 bytes
85327 * n = 2^(n-1) .. 2^n -1
85328 */
85329-static __always_inline int kmalloc_index(size_t size)
85330+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
85331 {
85332 if (!size)
85333 return 0;
85334@@ -312,11 +335,11 @@ static __always_inline int kmalloc_index(size_t size)
85335 }
85336 #endif /* !CONFIG_SLOB */
85337
85338-void *__kmalloc(size_t size, gfp_t flags);
85339+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
85340 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
85341
85342 #ifdef CONFIG_NUMA
85343-void *__kmalloc_node(size_t size, gfp_t flags, int node);
85344+void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
85345 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
85346 #else
85347 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
85348diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
85349index 8235dfb..47ce586 100644
85350--- a/include/linux/slab_def.h
85351+++ b/include/linux/slab_def.h
85352@@ -38,7 +38,7 @@ struct kmem_cache {
85353 /* 4) cache creation/removal */
85354 const char *name;
85355 struct list_head list;
85356- int refcount;
85357+ atomic_t refcount;
85358 int object_size;
85359 int align;
85360
85361@@ -54,10 +54,14 @@ struct kmem_cache {
85362 unsigned long node_allocs;
85363 unsigned long node_frees;
85364 unsigned long node_overflow;
85365- atomic_t allochit;
85366- atomic_t allocmiss;
85367- atomic_t freehit;
85368- atomic_t freemiss;
85369+ atomic_unchecked_t allochit;
85370+ atomic_unchecked_t allocmiss;
85371+ atomic_unchecked_t freehit;
85372+ atomic_unchecked_t freemiss;
85373+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85374+ atomic_unchecked_t sanitized;
85375+ atomic_unchecked_t not_sanitized;
85376+#endif
85377
85378 /*
85379 * If debugging is enabled, then the allocator can add additional
85380diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
85381index d82abd4..408c3a0 100644
85382--- a/include/linux/slub_def.h
85383+++ b/include/linux/slub_def.h
85384@@ -74,7 +74,7 @@ struct kmem_cache {
85385 struct kmem_cache_order_objects max;
85386 struct kmem_cache_order_objects min;
85387 gfp_t allocflags; /* gfp flags to use on each alloc */
85388- int refcount; /* Refcount for slab cache destroy */
85389+ atomic_t refcount; /* Refcount for slab cache destroy */
85390 void (*ctor)(void *);
85391 int inuse; /* Offset to metadata */
85392 int align; /* Alignment */
85393diff --git a/include/linux/smp.h b/include/linux/smp.h
85394index 34347f2..8739978 100644
85395--- a/include/linux/smp.h
85396+++ b/include/linux/smp.h
85397@@ -174,7 +174,9 @@ static inline void kick_all_cpus_sync(void) { }
85398 #endif
85399
85400 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
85401+#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
85402 #define put_cpu() preempt_enable()
85403+#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
85404
85405 /*
85406 * Callback to arch code if there's nosmp or maxcpus=0 on the
85407diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
85408index 46cca4c..3323536 100644
85409--- a/include/linux/sock_diag.h
85410+++ b/include/linux/sock_diag.h
85411@@ -11,7 +11,7 @@ struct sock;
85412 struct sock_diag_handler {
85413 __u8 family;
85414 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
85415-};
85416+} __do_const;
85417
85418 int sock_diag_register(const struct sock_diag_handler *h);
85419 void sock_diag_unregister(const struct sock_diag_handler *h);
85420diff --git a/include/linux/sonet.h b/include/linux/sonet.h
85421index 680f9a3..f13aeb0 100644
85422--- a/include/linux/sonet.h
85423+++ b/include/linux/sonet.h
85424@@ -7,7 +7,7 @@
85425 #include <uapi/linux/sonet.h>
85426
85427 struct k_sonet_stats {
85428-#define __HANDLE_ITEM(i) atomic_t i
85429+#define __HANDLE_ITEM(i) atomic_unchecked_t i
85430 __SONET_ITEMS
85431 #undef __HANDLE_ITEM
85432 };
85433diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
85434index 07d8e53..dc934c9 100644
85435--- a/include/linux/sunrpc/addr.h
85436+++ b/include/linux/sunrpc/addr.h
85437@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
85438 {
85439 switch (sap->sa_family) {
85440 case AF_INET:
85441- return ntohs(((struct sockaddr_in *)sap)->sin_port);
85442+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
85443 case AF_INET6:
85444- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
85445+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
85446 }
85447 return 0;
85448 }
85449@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
85450 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
85451 const struct sockaddr *src)
85452 {
85453- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
85454+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
85455 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
85456
85457 dsin->sin_family = ssin->sin_family;
85458@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
85459 if (sa->sa_family != AF_INET6)
85460 return 0;
85461
85462- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
85463+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
85464 }
85465
85466 #endif /* _LINUX_SUNRPC_ADDR_H */
85467diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
85468index 70736b9..37f33db 100644
85469--- a/include/linux/sunrpc/clnt.h
85470+++ b/include/linux/sunrpc/clnt.h
85471@@ -97,7 +97,7 @@ struct rpc_procinfo {
85472 unsigned int p_timer; /* Which RTT timer to use */
85473 u32 p_statidx; /* Which procedure to account */
85474 const char * p_name; /* name of procedure */
85475-};
85476+} __do_const;
85477
85478 #ifdef __KERNEL__
85479
85480diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
85481index cf61ecd..a4a9bc0 100644
85482--- a/include/linux/sunrpc/svc.h
85483+++ b/include/linux/sunrpc/svc.h
85484@@ -417,7 +417,7 @@ struct svc_procedure {
85485 unsigned int pc_count; /* call count */
85486 unsigned int pc_cachetype; /* cache info (NFS) */
85487 unsigned int pc_xdrressize; /* maximum size of XDR reply */
85488-};
85489+} __do_const;
85490
85491 /*
85492 * Function prototypes.
85493diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
85494index 975da75..318c083 100644
85495--- a/include/linux/sunrpc/svc_rdma.h
85496+++ b/include/linux/sunrpc/svc_rdma.h
85497@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
85498 extern unsigned int svcrdma_max_requests;
85499 extern unsigned int svcrdma_max_req_size;
85500
85501-extern atomic_t rdma_stat_recv;
85502-extern atomic_t rdma_stat_read;
85503-extern atomic_t rdma_stat_write;
85504-extern atomic_t rdma_stat_sq_starve;
85505-extern atomic_t rdma_stat_rq_starve;
85506-extern atomic_t rdma_stat_rq_poll;
85507-extern atomic_t rdma_stat_rq_prod;
85508-extern atomic_t rdma_stat_sq_poll;
85509-extern atomic_t rdma_stat_sq_prod;
85510+extern atomic_unchecked_t rdma_stat_recv;
85511+extern atomic_unchecked_t rdma_stat_read;
85512+extern atomic_unchecked_t rdma_stat_write;
85513+extern atomic_unchecked_t rdma_stat_sq_starve;
85514+extern atomic_unchecked_t rdma_stat_rq_starve;
85515+extern atomic_unchecked_t rdma_stat_rq_poll;
85516+extern atomic_unchecked_t rdma_stat_rq_prod;
85517+extern atomic_unchecked_t rdma_stat_sq_poll;
85518+extern atomic_unchecked_t rdma_stat_sq_prod;
85519
85520 #define RPCRDMA_VERSION 1
85521
85522diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
85523index 8d71d65..f79586e 100644
85524--- a/include/linux/sunrpc/svcauth.h
85525+++ b/include/linux/sunrpc/svcauth.h
85526@@ -120,7 +120,7 @@ struct auth_ops {
85527 int (*release)(struct svc_rqst *rq);
85528 void (*domain_release)(struct auth_domain *);
85529 int (*set_client)(struct svc_rqst *rq);
85530-};
85531+} __do_const;
85532
85533 #define SVC_GARBAGE 1
85534 #define SVC_SYSERR 2
85535diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
85536index e7a018e..49f8b17 100644
85537--- a/include/linux/swiotlb.h
85538+++ b/include/linux/swiotlb.h
85539@@ -60,7 +60,8 @@ extern void
85540
85541 extern void
85542 swiotlb_free_coherent(struct device *hwdev, size_t size,
85543- void *vaddr, dma_addr_t dma_handle);
85544+ void *vaddr, dma_addr_t dma_handle,
85545+ struct dma_attrs *attrs);
85546
85547 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
85548 unsigned long offset, size_t size,
85549diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
85550index 0f86d85..dff3419 100644
85551--- a/include/linux/syscalls.h
85552+++ b/include/linux/syscalls.h
85553@@ -98,10 +98,16 @@ struct sigaltstack;
85554 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
85555
85556 #define __SC_DECL(t, a) t a
85557+#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
85558 #define __TYPE_IS_L(t) (__same_type((t)0, 0L))
85559 #define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
85560 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
85561-#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
85562+#define __SC_LONG(t, a) __typeof( \
85563+ __builtin_choose_expr( \
85564+ sizeof(t) > sizeof(int), \
85565+ (t) 0, \
85566+ __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
85567+ )) a
85568 #define __SC_CAST(t, a) (t) a
85569 #define __SC_ARGS(t, a) a
85570 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
85571@@ -383,11 +389,11 @@ asmlinkage long sys_sync(void);
85572 asmlinkage long sys_fsync(unsigned int fd);
85573 asmlinkage long sys_fdatasync(unsigned int fd);
85574 asmlinkage long sys_bdflush(int func, long data);
85575-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
85576- char __user *type, unsigned long flags,
85577+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
85578+ const char __user *type, unsigned long flags,
85579 void __user *data);
85580-asmlinkage long sys_umount(char __user *name, int flags);
85581-asmlinkage long sys_oldumount(char __user *name);
85582+asmlinkage long sys_umount(const char __user *name, int flags);
85583+asmlinkage long sys_oldumount(const char __user *name);
85584 asmlinkage long sys_truncate(const char __user *path, long length);
85585 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
85586 asmlinkage long sys_stat(const char __user *filename,
85587@@ -599,7 +605,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
85588 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
85589 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
85590 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
85591- struct sockaddr __user *, int);
85592+ struct sockaddr __user *, int) __intentional_overflow(0);
85593 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
85594 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
85595 unsigned int vlen, unsigned flags);
85596diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
85597index 27b3b0b..e093dd9 100644
85598--- a/include/linux/syscore_ops.h
85599+++ b/include/linux/syscore_ops.h
85600@@ -16,7 +16,7 @@ struct syscore_ops {
85601 int (*suspend)(void);
85602 void (*resume)(void);
85603 void (*shutdown)(void);
85604-};
85605+} __do_const;
85606
85607 extern void register_syscore_ops(struct syscore_ops *ops);
85608 extern void unregister_syscore_ops(struct syscore_ops *ops);
85609diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
85610index b7361f8..341a15a 100644
85611--- a/include/linux/sysctl.h
85612+++ b/include/linux/sysctl.h
85613@@ -39,6 +39,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
85614
85615 extern int proc_dostring(struct ctl_table *, int,
85616 void __user *, size_t *, loff_t *);
85617+extern int proc_dostring_modpriv(struct ctl_table *, int,
85618+ void __user *, size_t *, loff_t *);
85619 extern int proc_dointvec(struct ctl_table *, int,
85620 void __user *, size_t *, loff_t *);
85621 extern int proc_dointvec_minmax(struct ctl_table *, int,
85622@@ -113,7 +115,8 @@ struct ctl_table
85623 struct ctl_table_poll *poll;
85624 void *extra1;
85625 void *extra2;
85626-};
85627+} __do_const __randomize_layout;
85628+typedef struct ctl_table __no_const ctl_table_no_const;
85629
85630 struct ctl_node {
85631 struct rb_node node;
85632diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
85633index f97d0db..c1187dc 100644
85634--- a/include/linux/sysfs.h
85635+++ b/include/linux/sysfs.h
85636@@ -34,7 +34,8 @@ struct attribute {
85637 struct lock_class_key *key;
85638 struct lock_class_key skey;
85639 #endif
85640-};
85641+} __do_const;
85642+typedef struct attribute __no_const attribute_no_const;
85643
85644 /**
85645 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
85646@@ -63,7 +64,8 @@ struct attribute_group {
85647 struct attribute *, int);
85648 struct attribute **attrs;
85649 struct bin_attribute **bin_attrs;
85650-};
85651+} __do_const;
85652+typedef struct attribute_group __no_const attribute_group_no_const;
85653
85654 /**
85655 * Use these macros to make defining attributes easier. See include/linux/device.h
85656@@ -128,7 +130,8 @@ struct bin_attribute {
85657 char *, loff_t, size_t);
85658 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
85659 struct vm_area_struct *vma);
85660-};
85661+} __do_const;
85662+typedef struct bin_attribute __no_const bin_attribute_no_const;
85663
85664 /**
85665 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
85666diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
85667index 387fa7d..3fcde6b 100644
85668--- a/include/linux/sysrq.h
85669+++ b/include/linux/sysrq.h
85670@@ -16,6 +16,7 @@
85671
85672 #include <linux/errno.h>
85673 #include <linux/types.h>
85674+#include <linux/compiler.h>
85675
85676 /* Possible values of bitmask for enabling sysrq functions */
85677 /* 0x0001 is reserved for enable everything */
85678@@ -33,7 +34,7 @@ struct sysrq_key_op {
85679 char *help_msg;
85680 char *action_msg;
85681 int enable_mask;
85682-};
85683+} __do_const;
85684
85685 #ifdef CONFIG_MAGIC_SYSRQ
85686
85687diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
85688index ff307b5..f1a4468 100644
85689--- a/include/linux/thread_info.h
85690+++ b/include/linux/thread_info.h
85691@@ -145,6 +145,13 @@ static inline bool test_and_clear_restore_sigmask(void)
85692 #error "no set_restore_sigmask() provided and default one won't work"
85693 #endif
85694
85695+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user, bool const_size);
85696+
85697+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
85698+{
85699+ __check_object_size(ptr, n, to_user, __builtin_constant_p(n));
85700+}
85701+
85702 #endif /* __KERNEL__ */
85703
85704 #endif /* _LINUX_THREAD_INFO_H */
85705diff --git a/include/linux/tty.h b/include/linux/tty.h
85706index 8413294..44391c7 100644
85707--- a/include/linux/tty.h
85708+++ b/include/linux/tty.h
85709@@ -202,7 +202,7 @@ struct tty_port {
85710 const struct tty_port_operations *ops; /* Port operations */
85711 spinlock_t lock; /* Lock protecting tty field */
85712 int blocked_open; /* Waiting to open */
85713- int count; /* Usage count */
85714+ atomic_t count; /* Usage count */
85715 wait_queue_head_t open_wait; /* Open waiters */
85716 wait_queue_head_t close_wait; /* Close waiters */
85717 wait_queue_head_t delta_msr_wait; /* Modem status change */
85718@@ -284,7 +284,7 @@ struct tty_struct {
85719 /* If the tty has a pending do_SAK, queue it here - akpm */
85720 struct work_struct SAK_work;
85721 struct tty_port *port;
85722-};
85723+} __randomize_layout;
85724
85725 /* Each of a tty's open files has private_data pointing to tty_file_private */
85726 struct tty_file_private {
85727@@ -548,7 +548,7 @@ extern int tty_port_open(struct tty_port *port,
85728 struct tty_struct *tty, struct file *filp);
85729 static inline int tty_port_users(struct tty_port *port)
85730 {
85731- return port->count + port->blocked_open;
85732+ return atomic_read(&port->count) + port->blocked_open;
85733 }
85734
85735 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
85736diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
85737index e48c608..6a19af2 100644
85738--- a/include/linux/tty_driver.h
85739+++ b/include/linux/tty_driver.h
85740@@ -287,7 +287,7 @@ struct tty_operations {
85741 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
85742 #endif
85743 const struct file_operations *proc_fops;
85744-};
85745+} __do_const __randomize_layout;
85746
85747 struct tty_driver {
85748 int magic; /* magic number for this structure */
85749@@ -321,7 +321,7 @@ struct tty_driver {
85750
85751 const struct tty_operations *ops;
85752 struct list_head tty_drivers;
85753-};
85754+} __randomize_layout;
85755
85756 extern struct list_head tty_drivers;
85757
85758diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
85759index 00c9d68..bc0188b 100644
85760--- a/include/linux/tty_ldisc.h
85761+++ b/include/linux/tty_ldisc.h
85762@@ -215,7 +215,7 @@ struct tty_ldisc_ops {
85763
85764 struct module *owner;
85765
85766- int refcount;
85767+ atomic_t refcount;
85768 };
85769
85770 struct tty_ldisc {
85771diff --git a/include/linux/types.h b/include/linux/types.h
85772index a0bb704..f511c77 100644
85773--- a/include/linux/types.h
85774+++ b/include/linux/types.h
85775@@ -177,10 +177,26 @@ typedef struct {
85776 int counter;
85777 } atomic_t;
85778
85779+#ifdef CONFIG_PAX_REFCOUNT
85780+typedef struct {
85781+ int counter;
85782+} atomic_unchecked_t;
85783+#else
85784+typedef atomic_t atomic_unchecked_t;
85785+#endif
85786+
85787 #ifdef CONFIG_64BIT
85788 typedef struct {
85789 long counter;
85790 } atomic64_t;
85791+
85792+#ifdef CONFIG_PAX_REFCOUNT
85793+typedef struct {
85794+ long counter;
85795+} atomic64_unchecked_t;
85796+#else
85797+typedef atomic64_t atomic64_unchecked_t;
85798+#endif
85799 #endif
85800
85801 struct list_head {
85802diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
85803index ecd3319..8a36ded 100644
85804--- a/include/linux/uaccess.h
85805+++ b/include/linux/uaccess.h
85806@@ -75,11 +75,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
85807 long ret; \
85808 mm_segment_t old_fs = get_fs(); \
85809 \
85810- set_fs(KERNEL_DS); \
85811 pagefault_disable(); \
85812- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
85813- pagefault_enable(); \
85814+ set_fs(KERNEL_DS); \
85815+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
85816 set_fs(old_fs); \
85817+ pagefault_enable(); \
85818 ret; \
85819 })
85820
85821diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
85822index 2d1f9b6..d7a9fce 100644
85823--- a/include/linux/uidgid.h
85824+++ b/include/linux/uidgid.h
85825@@ -175,4 +175,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
85826
85827 #endif /* CONFIG_USER_NS */
85828
85829+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
85830+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
85831+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
85832+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
85833+
85834 #endif /* _LINUX_UIDGID_H */
85835diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
85836index 99c1b4d..562e6f3 100644
85837--- a/include/linux/unaligned/access_ok.h
85838+++ b/include/linux/unaligned/access_ok.h
85839@@ -4,34 +4,34 @@
85840 #include <linux/kernel.h>
85841 #include <asm/byteorder.h>
85842
85843-static inline u16 get_unaligned_le16(const void *p)
85844+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
85845 {
85846- return le16_to_cpup((__le16 *)p);
85847+ return le16_to_cpup((const __le16 *)p);
85848 }
85849
85850-static inline u32 get_unaligned_le32(const void *p)
85851+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
85852 {
85853- return le32_to_cpup((__le32 *)p);
85854+ return le32_to_cpup((const __le32 *)p);
85855 }
85856
85857-static inline u64 get_unaligned_le64(const void *p)
85858+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
85859 {
85860- return le64_to_cpup((__le64 *)p);
85861+ return le64_to_cpup((const __le64 *)p);
85862 }
85863
85864-static inline u16 get_unaligned_be16(const void *p)
85865+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
85866 {
85867- return be16_to_cpup((__be16 *)p);
85868+ return be16_to_cpup((const __be16 *)p);
85869 }
85870
85871-static inline u32 get_unaligned_be32(const void *p)
85872+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
85873 {
85874- return be32_to_cpup((__be32 *)p);
85875+ return be32_to_cpup((const __be32 *)p);
85876 }
85877
85878-static inline u64 get_unaligned_be64(const void *p)
85879+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
85880 {
85881- return be64_to_cpup((__be64 *)p);
85882+ return be64_to_cpup((const __be64 *)p);
85883 }
85884
85885 static inline void put_unaligned_le16(u16 val, void *p)
85886diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
85887index 4f844c6..60beb5d 100644
85888--- a/include/linux/uprobes.h
85889+++ b/include/linux/uprobes.h
85890@@ -98,11 +98,11 @@ struct uprobes_state {
85891 struct xol_area *xol_area;
85892 };
85893
85894-extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
85895-extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
85896-extern bool __weak is_swbp_insn(uprobe_opcode_t *insn);
85897-extern bool __weak is_trap_insn(uprobe_opcode_t *insn);
85898-extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
85899+extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
85900+extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
85901+extern bool is_swbp_insn(uprobe_opcode_t *insn);
85902+extern bool is_trap_insn(uprobe_opcode_t *insn);
85903+extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
85904 extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
85905 extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
85906 extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
85907@@ -128,8 +128,8 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
85908 extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
85909 extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
85910 extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
85911-extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
85912-extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
85913+extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
85914+extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
85915 void *src, unsigned long len);
85916 #else /* !CONFIG_UPROBES */
85917 struct uprobes_state {
85918diff --git a/include/linux/usb.h b/include/linux/usb.h
85919index d2465bc..5256de4 100644
85920--- a/include/linux/usb.h
85921+++ b/include/linux/usb.h
85922@@ -571,7 +571,7 @@ struct usb_device {
85923 int maxchild;
85924
85925 u32 quirks;
85926- atomic_t urbnum;
85927+ atomic_unchecked_t urbnum;
85928
85929 unsigned long active_duration;
85930
85931@@ -1655,7 +1655,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
85932
85933 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
85934 __u8 request, __u8 requesttype, __u16 value, __u16 index,
85935- void *data, __u16 size, int timeout);
85936+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
85937 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
85938 void *data, int len, int *actual_length, int timeout);
85939 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
85940diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
85941index d5952bb..9a626d4 100644
85942--- a/include/linux/usb/renesas_usbhs.h
85943+++ b/include/linux/usb/renesas_usbhs.h
85944@@ -39,7 +39,7 @@ enum {
85945 */
85946 struct renesas_usbhs_driver_callback {
85947 int (*notify_hotplug)(struct platform_device *pdev);
85948-};
85949+} __no_const;
85950
85951 /*
85952 * callback functions for platform
85953diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
85954index e953726..8edb26a 100644
85955--- a/include/linux/user_namespace.h
85956+++ b/include/linux/user_namespace.h
85957@@ -33,7 +33,7 @@ struct user_namespace {
85958 struct key *persistent_keyring_register;
85959 struct rw_semaphore persistent_keyring_register_sem;
85960 #endif
85961-};
85962+} __randomize_layout;
85963
85964 extern struct user_namespace init_user_ns;
85965
85966diff --git a/include/linux/utsname.h b/include/linux/utsname.h
85967index 239e277..22a5cf5 100644
85968--- a/include/linux/utsname.h
85969+++ b/include/linux/utsname.h
85970@@ -24,7 +24,7 @@ struct uts_namespace {
85971 struct new_utsname name;
85972 struct user_namespace *user_ns;
85973 unsigned int proc_inum;
85974-};
85975+} __randomize_layout;
85976 extern struct uts_namespace init_uts_ns;
85977
85978 #ifdef CONFIG_UTS_NS
85979diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
85980index 6f8fbcf..4efc177 100644
85981--- a/include/linux/vermagic.h
85982+++ b/include/linux/vermagic.h
85983@@ -25,9 +25,42 @@
85984 #define MODULE_ARCH_VERMAGIC ""
85985 #endif
85986
85987+#ifdef CONFIG_PAX_REFCOUNT
85988+#define MODULE_PAX_REFCOUNT "REFCOUNT "
85989+#else
85990+#define MODULE_PAX_REFCOUNT ""
85991+#endif
85992+
85993+#ifdef CONSTIFY_PLUGIN
85994+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
85995+#else
85996+#define MODULE_CONSTIFY_PLUGIN ""
85997+#endif
85998+
85999+#ifdef STACKLEAK_PLUGIN
86000+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
86001+#else
86002+#define MODULE_STACKLEAK_PLUGIN ""
86003+#endif
86004+
86005+#ifdef RANDSTRUCT_PLUGIN
86006+#include <generated/randomize_layout_hash.h>
86007+#define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED
86008+#else
86009+#define MODULE_RANDSTRUCT_PLUGIN
86010+#endif
86011+
86012+#ifdef CONFIG_GRKERNSEC
86013+#define MODULE_GRSEC "GRSEC "
86014+#else
86015+#define MODULE_GRSEC ""
86016+#endif
86017+
86018 #define VERMAGIC_STRING \
86019 UTS_RELEASE " " \
86020 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
86021 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
86022- MODULE_ARCH_VERMAGIC
86023+ MODULE_ARCH_VERMAGIC \
86024+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
86025+ MODULE_GRSEC MODULE_RANDSTRUCT_PLUGIN
86026
86027diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
86028index b483abd..af305ad 100644
86029--- a/include/linux/vga_switcheroo.h
86030+++ b/include/linux/vga_switcheroo.h
86031@@ -63,9 +63,9 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
86032
86033 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
86034
86035-int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
86036+int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
86037 void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
86038-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
86039+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
86040 #else
86041
86042 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
86043@@ -82,9 +82,9 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
86044
86045 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
86046
86047-static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86048+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86049 static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
86050-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
86051+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
86052
86053 #endif
86054 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
86055diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
86056index b87696f..1d11de7 100644
86057--- a/include/linux/vmalloc.h
86058+++ b/include/linux/vmalloc.h
86059@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
86060 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
86061 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
86062 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
86063+
86064+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
86065+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
86066+#endif
86067+
86068 /* bits [20..32] reserved for arch specific ioremap internals */
86069
86070 /*
86071@@ -82,6 +87,10 @@ extern void *vmap(struct page **pages, unsigned int count,
86072 unsigned long flags, pgprot_t prot);
86073 extern void vunmap(const void *addr);
86074
86075+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
86076+extern void unmap_process_stacks(struct task_struct *task);
86077+#endif
86078+
86079 extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
86080 unsigned long uaddr, void *kaddr,
86081 unsigned long size);
86082@@ -142,7 +151,7 @@ extern void free_vm_area(struct vm_struct *area);
86083
86084 /* for /dev/kmem */
86085 extern long vread(char *buf, char *addr, unsigned long count);
86086-extern long vwrite(char *buf, char *addr, unsigned long count);
86087+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
86088
86089 /*
86090 * Internals. Dont't use..
86091diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
86092index 82e7db7..f8ce3d0 100644
86093--- a/include/linux/vmstat.h
86094+++ b/include/linux/vmstat.h
86095@@ -108,18 +108,18 @@ static inline void vm_events_fold_cpu(int cpu)
86096 /*
86097 * Zone based page accounting with per cpu differentials.
86098 */
86099-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86100+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86101
86102 static inline void zone_page_state_add(long x, struct zone *zone,
86103 enum zone_stat_item item)
86104 {
86105- atomic_long_add(x, &zone->vm_stat[item]);
86106- atomic_long_add(x, &vm_stat[item]);
86107+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
86108+ atomic_long_add_unchecked(x, &vm_stat[item]);
86109 }
86110
86111-static inline unsigned long global_page_state(enum zone_stat_item item)
86112+static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
86113 {
86114- long x = atomic_long_read(&vm_stat[item]);
86115+ long x = atomic_long_read_unchecked(&vm_stat[item]);
86116 #ifdef CONFIG_SMP
86117 if (x < 0)
86118 x = 0;
86119@@ -127,10 +127,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
86120 return x;
86121 }
86122
86123-static inline unsigned long zone_page_state(struct zone *zone,
86124+static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
86125 enum zone_stat_item item)
86126 {
86127- long x = atomic_long_read(&zone->vm_stat[item]);
86128+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86129 #ifdef CONFIG_SMP
86130 if (x < 0)
86131 x = 0;
86132@@ -147,7 +147,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
86133 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
86134 enum zone_stat_item item)
86135 {
86136- long x = atomic_long_read(&zone->vm_stat[item]);
86137+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
86138
86139 #ifdef CONFIG_SMP
86140 int cpu;
86141@@ -234,14 +234,14 @@ static inline void __mod_zone_page_state(struct zone *zone,
86142
86143 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
86144 {
86145- atomic_long_inc(&zone->vm_stat[item]);
86146- atomic_long_inc(&vm_stat[item]);
86147+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
86148+ atomic_long_inc_unchecked(&vm_stat[item]);
86149 }
86150
86151 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
86152 {
86153- atomic_long_dec(&zone->vm_stat[item]);
86154- atomic_long_dec(&vm_stat[item]);
86155+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
86156+ atomic_long_dec_unchecked(&vm_stat[item]);
86157 }
86158
86159 static inline void __inc_zone_page_state(struct page *page,
86160diff --git a/include/linux/xattr.h b/include/linux/xattr.h
86161index 91b0a68..0e9adf6 100644
86162--- a/include/linux/xattr.h
86163+++ b/include/linux/xattr.h
86164@@ -28,7 +28,7 @@ struct xattr_handler {
86165 size_t size, int handler_flags);
86166 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
86167 size_t size, int flags, int handler_flags);
86168-};
86169+} __do_const;
86170
86171 struct xattr {
86172 const char *name;
86173@@ -37,6 +37,9 @@ struct xattr {
86174 };
86175
86176 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
86177+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
86178+ssize_t pax_getxattr(struct dentry *, void *, size_t);
86179+#endif
86180 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
86181 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
86182 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
86183diff --git a/include/linux/zlib.h b/include/linux/zlib.h
86184index 92dbbd3..13ab0b3 100644
86185--- a/include/linux/zlib.h
86186+++ b/include/linux/zlib.h
86187@@ -31,6 +31,7 @@
86188 #define _ZLIB_H
86189
86190 #include <linux/zconf.h>
86191+#include <linux/compiler.h>
86192
86193 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
86194 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
86195@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
86196
86197 /* basic functions */
86198
86199-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
86200+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
86201 /*
86202 Returns the number of bytes that needs to be allocated for a per-
86203 stream workspace with the specified parameters. A pointer to this
86204diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
86205index eb76cfd..9fd0e7c 100644
86206--- a/include/media/v4l2-dev.h
86207+++ b/include/media/v4l2-dev.h
86208@@ -75,7 +75,7 @@ struct v4l2_file_operations {
86209 int (*mmap) (struct file *, struct vm_area_struct *);
86210 int (*open) (struct file *);
86211 int (*release) (struct file *);
86212-};
86213+} __do_const;
86214
86215 /*
86216 * Newer version of video_device, handled by videodev2.c
86217diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
86218index ffb69da..040393e 100644
86219--- a/include/media/v4l2-device.h
86220+++ b/include/media/v4l2-device.h
86221@@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
86222 this function returns 0. If the name ends with a digit (e.g. cx18),
86223 then the name will be set to cx18-0 since cx180 looks really odd. */
86224 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
86225- atomic_t *instance);
86226+ atomic_unchecked_t *instance);
86227
86228 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
86229 Since the parent disappears this ensures that v4l2_dev doesn't have an
86230diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
86231index d9fa68f..45c88d1 100644
86232--- a/include/net/9p/transport.h
86233+++ b/include/net/9p/transport.h
86234@@ -63,7 +63,7 @@ struct p9_trans_module {
86235 int (*cancelled)(struct p9_client *, struct p9_req_t *req);
86236 int (*zc_request)(struct p9_client *, struct p9_req_t *,
86237 char *, char *, int , int, int, int);
86238-};
86239+} __do_const;
86240
86241 void v9fs_register_trans(struct p9_trans_module *m);
86242 void v9fs_unregister_trans(struct p9_trans_module *m);
86243diff --git a/include/net/af_unix.h b/include/net/af_unix.h
86244index a175ba4..196eb8242 100644
86245--- a/include/net/af_unix.h
86246+++ b/include/net/af_unix.h
86247@@ -36,7 +36,7 @@ struct unix_skb_parms {
86248 u32 secid; /* Security ID */
86249 #endif
86250 u32 consumed;
86251-};
86252+} __randomize_layout;
86253
86254 #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
86255 #define UNIXSID(skb) (&UNIXCB((skb)).secid)
86256diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
86257index 8df15ad..837fbedd 100644
86258--- a/include/net/bluetooth/l2cap.h
86259+++ b/include/net/bluetooth/l2cap.h
86260@@ -608,7 +608,7 @@ struct l2cap_ops {
86261 unsigned char *kdata,
86262 struct iovec *iov,
86263 int len);
86264-};
86265+} __do_const;
86266
86267 struct l2cap_conn {
86268 struct hci_conn *hcon;
86269diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
86270index f2ae33d..c457cf0 100644
86271--- a/include/net/caif/cfctrl.h
86272+++ b/include/net/caif/cfctrl.h
86273@@ -52,7 +52,7 @@ struct cfctrl_rsp {
86274 void (*radioset_rsp)(void);
86275 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
86276 struct cflayer *client_layer);
86277-};
86278+} __no_const;
86279
86280 /* Link Setup Parameters for CAIF-Links. */
86281 struct cfctrl_link_param {
86282@@ -101,8 +101,8 @@ struct cfctrl_request_info {
86283 struct cfctrl {
86284 struct cfsrvl serv;
86285 struct cfctrl_rsp res;
86286- atomic_t req_seq_no;
86287- atomic_t rsp_seq_no;
86288+ atomic_unchecked_t req_seq_no;
86289+ atomic_unchecked_t rsp_seq_no;
86290 struct list_head list;
86291 /* Protects from simultaneous access to first_req list */
86292 spinlock_t info_list_lock;
86293diff --git a/include/net/flow.h b/include/net/flow.h
86294index 8109a15..504466d 100644
86295--- a/include/net/flow.h
86296+++ b/include/net/flow.h
86297@@ -231,6 +231,6 @@ void flow_cache_fini(struct net *net);
86298
86299 void flow_cache_flush(struct net *net);
86300 void flow_cache_flush_deferred(struct net *net);
86301-extern atomic_t flow_cache_genid;
86302+extern atomic_unchecked_t flow_cache_genid;
86303
86304 #endif
86305diff --git a/include/net/genetlink.h b/include/net/genetlink.h
86306index af10c2c..a431cc5 100644
86307--- a/include/net/genetlink.h
86308+++ b/include/net/genetlink.h
86309@@ -120,7 +120,7 @@ struct genl_ops {
86310 u8 cmd;
86311 u8 internal_flags;
86312 u8 flags;
86313-};
86314+} __do_const;
86315
86316 int __genl_register_family(struct genl_family *family);
86317
86318diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
86319index 734d9b5..48a9a4b 100644
86320--- a/include/net/gro_cells.h
86321+++ b/include/net/gro_cells.h
86322@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
86323 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
86324
86325 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
86326- atomic_long_inc(&dev->rx_dropped);
86327+ atomic_long_inc_unchecked(&dev->rx_dropped);
86328 kfree_skb(skb);
86329 return;
86330 }
86331diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
86332index 5fbe656..9ed3d8b 100644
86333--- a/include/net/inet_connection_sock.h
86334+++ b/include/net/inet_connection_sock.h
86335@@ -63,7 +63,7 @@ struct inet_connection_sock_af_ops {
86336 int (*bind_conflict)(const struct sock *sk,
86337 const struct inet_bind_bucket *tb, bool relax);
86338 void (*mtu_reduced)(struct sock *sk);
86339-};
86340+} __do_const;
86341
86342 /** inet_connection_sock - INET connection oriented sock
86343 *
86344diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
86345index 01d590e..f69c61d 100644
86346--- a/include/net/inetpeer.h
86347+++ b/include/net/inetpeer.h
86348@@ -47,7 +47,7 @@ struct inet_peer {
86349 */
86350 union {
86351 struct {
86352- atomic_t rid; /* Frag reception counter */
86353+ atomic_unchecked_t rid; /* Frag reception counter */
86354 };
86355 struct rcu_head rcu;
86356 struct inet_peer *gc_next;
86357diff --git a/include/net/ip.h b/include/net/ip.h
86358index db4a771..965a42a 100644
86359--- a/include/net/ip.h
86360+++ b/include/net/ip.h
86361@@ -316,7 +316,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
86362 }
86363 }
86364
86365-u32 ip_idents_reserve(u32 hash, int segs);
86366+u32 ip_idents_reserve(u32 hash, int segs) __intentional_overflow(-1);
86367 void __ip_select_ident(struct iphdr *iph, int segs);
86368
86369 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
86370diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
86371index 9922093..a1755d6 100644
86372--- a/include/net/ip_fib.h
86373+++ b/include/net/ip_fib.h
86374@@ -169,7 +169,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
86375
86376 #define FIB_RES_SADDR(net, res) \
86377 ((FIB_RES_NH(res).nh_saddr_genid == \
86378- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
86379+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
86380 FIB_RES_NH(res).nh_saddr : \
86381 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
86382 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
86383diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
86384index 624a8a5..b1e2a24 100644
86385--- a/include/net/ip_vs.h
86386+++ b/include/net/ip_vs.h
86387@@ -558,7 +558,7 @@ struct ip_vs_conn {
86388 struct ip_vs_conn *control; /* Master control connection */
86389 atomic_t n_control; /* Number of controlled ones */
86390 struct ip_vs_dest *dest; /* real server */
86391- atomic_t in_pkts; /* incoming packet counter */
86392+ atomic_unchecked_t in_pkts; /* incoming packet counter */
86393
86394 /* packet transmitter for different forwarding methods. If it
86395 mangles the packet, it must return NF_DROP or better NF_STOLEN,
86396@@ -705,7 +705,7 @@ struct ip_vs_dest {
86397 __be16 port; /* port number of the server */
86398 union nf_inet_addr addr; /* IP address of the server */
86399 volatile unsigned int flags; /* dest status flags */
86400- atomic_t conn_flags; /* flags to copy to conn */
86401+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
86402 atomic_t weight; /* server weight */
86403
86404 atomic_t refcnt; /* reference counter */
86405@@ -960,11 +960,11 @@ struct netns_ipvs {
86406 /* ip_vs_lblc */
86407 int sysctl_lblc_expiration;
86408 struct ctl_table_header *lblc_ctl_header;
86409- struct ctl_table *lblc_ctl_table;
86410+ ctl_table_no_const *lblc_ctl_table;
86411 /* ip_vs_lblcr */
86412 int sysctl_lblcr_expiration;
86413 struct ctl_table_header *lblcr_ctl_header;
86414- struct ctl_table *lblcr_ctl_table;
86415+ ctl_table_no_const *lblcr_ctl_table;
86416 /* ip_vs_est */
86417 struct list_head est_list; /* estimator list */
86418 spinlock_t est_lock;
86419diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
86420index 8d4f588..2e37ad2 100644
86421--- a/include/net/irda/ircomm_tty.h
86422+++ b/include/net/irda/ircomm_tty.h
86423@@ -33,6 +33,7 @@
86424 #include <linux/termios.h>
86425 #include <linux/timer.h>
86426 #include <linux/tty.h> /* struct tty_struct */
86427+#include <asm/local.h>
86428
86429 #include <net/irda/irias_object.h>
86430 #include <net/irda/ircomm_core.h>
86431diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
86432index 714cc9a..ea05f3e 100644
86433--- a/include/net/iucv/af_iucv.h
86434+++ b/include/net/iucv/af_iucv.h
86435@@ -149,7 +149,7 @@ struct iucv_skb_cb {
86436 struct iucv_sock_list {
86437 struct hlist_head head;
86438 rwlock_t lock;
86439- atomic_t autobind_name;
86440+ atomic_unchecked_t autobind_name;
86441 };
86442
86443 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
86444diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
86445index f3be818..bf46196 100644
86446--- a/include/net/llc_c_ac.h
86447+++ b/include/net/llc_c_ac.h
86448@@ -87,7 +87,7 @@
86449 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
86450 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
86451
86452-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86453+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
86454
86455 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
86456 int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
86457diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
86458index 3948cf1..83b28c4 100644
86459--- a/include/net/llc_c_ev.h
86460+++ b/include/net/llc_c_ev.h
86461@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
86462 return (struct llc_conn_state_ev *)skb->cb;
86463 }
86464
86465-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86466-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86467+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
86468+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
86469
86470 int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
86471 int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
86472diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
86473index 0e79cfb..f46db31 100644
86474--- a/include/net/llc_c_st.h
86475+++ b/include/net/llc_c_st.h
86476@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
86477 u8 next_state;
86478 llc_conn_ev_qfyr_t *ev_qualifiers;
86479 llc_conn_action_t *ev_actions;
86480-};
86481+} __do_const;
86482
86483 struct llc_conn_state {
86484 u8 current_state;
86485diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
86486index a61b98c..aade1eb 100644
86487--- a/include/net/llc_s_ac.h
86488+++ b/include/net/llc_s_ac.h
86489@@ -23,7 +23,7 @@
86490 #define SAP_ACT_TEST_IND 9
86491
86492 /* All action functions must look like this */
86493-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86494+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
86495
86496 int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
86497 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
86498diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
86499index 567c681..cd73ac02 100644
86500--- a/include/net/llc_s_st.h
86501+++ b/include/net/llc_s_st.h
86502@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
86503 llc_sap_ev_t ev;
86504 u8 next_state;
86505 llc_sap_action_t *ev_actions;
86506-};
86507+} __do_const;
86508
86509 struct llc_sap_state {
86510 u8 curr_state;
86511diff --git a/include/net/mac80211.h b/include/net/mac80211.h
86512index dae2e24..89336e6 100644
86513--- a/include/net/mac80211.h
86514+++ b/include/net/mac80211.h
86515@@ -4650,7 +4650,7 @@ struct rate_control_ops {
86516 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
86517
86518 u32 (*get_expected_throughput)(void *priv_sta);
86519-};
86520+} __do_const;
86521
86522 static inline int rate_supported(struct ieee80211_sta *sta,
86523 enum ieee80211_band band,
86524diff --git a/include/net/neighbour.h b/include/net/neighbour.h
86525index 47f4254..fd095bc 100644
86526--- a/include/net/neighbour.h
86527+++ b/include/net/neighbour.h
86528@@ -163,7 +163,7 @@ struct neigh_ops {
86529 void (*error_report)(struct neighbour *, struct sk_buff *);
86530 int (*output)(struct neighbour *, struct sk_buff *);
86531 int (*connected_output)(struct neighbour *, struct sk_buff *);
86532-};
86533+} __do_const;
86534
86535 struct pneigh_entry {
86536 struct pneigh_entry *next;
86537@@ -217,7 +217,7 @@ struct neigh_table {
86538 struct neigh_statistics __percpu *stats;
86539 struct neigh_hash_table __rcu *nht;
86540 struct pneigh_entry **phash_buckets;
86541-};
86542+} __randomize_layout;
86543
86544 static inline int neigh_parms_family(struct neigh_parms *p)
86545 {
86546diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
86547index e0d6466..e2f3003 100644
86548--- a/include/net/net_namespace.h
86549+++ b/include/net/net_namespace.h
86550@@ -129,8 +129,8 @@ struct net {
86551 struct netns_ipvs *ipvs;
86552 #endif
86553 struct sock *diag_nlsk;
86554- atomic_t fnhe_genid;
86555-};
86556+ atomic_unchecked_t fnhe_genid;
86557+} __randomize_layout;
86558
86559 #include <linux/seq_file_net.h>
86560
86561@@ -286,7 +286,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
86562 #define __net_init __init
86563 #define __net_exit __exit_refok
86564 #define __net_initdata __initdata
86565+#ifdef CONSTIFY_PLUGIN
86566 #define __net_initconst __initconst
86567+#else
86568+#define __net_initconst __initdata
86569+#endif
86570 #endif
86571
86572 struct pernet_operations {
86573@@ -296,7 +300,7 @@ struct pernet_operations {
86574 void (*exit_batch)(struct list_head *net_exit_list);
86575 int *id;
86576 size_t size;
86577-};
86578+} __do_const;
86579
86580 /*
86581 * Use these carefully. If you implement a network device and it
86582@@ -344,12 +348,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
86583
86584 static inline int rt_genid_ipv4(struct net *net)
86585 {
86586- return atomic_read(&net->ipv4.rt_genid);
86587+ return atomic_read_unchecked(&net->ipv4.rt_genid);
86588 }
86589
86590 static inline void rt_genid_bump_ipv4(struct net *net)
86591 {
86592- atomic_inc(&net->ipv4.rt_genid);
86593+ atomic_inc_unchecked(&net->ipv4.rt_genid);
86594 }
86595
86596 extern void (*__fib6_flush_trees)(struct net *net);
86597@@ -376,12 +380,12 @@ static inline void rt_genid_bump_all(struct net *net)
86598
86599 static inline int fnhe_genid(struct net *net)
86600 {
86601- return atomic_read(&net->fnhe_genid);
86602+ return atomic_read_unchecked(&net->fnhe_genid);
86603 }
86604
86605 static inline void fnhe_genid_bump(struct net *net)
86606 {
86607- atomic_inc(&net->fnhe_genid);
86608+ atomic_inc_unchecked(&net->fnhe_genid);
86609 }
86610
86611 #endif /* __NET_NET_NAMESPACE_H */
86612diff --git a/include/net/netdma.h b/include/net/netdma.h
86613index 8ba8ce2..99b7fff 100644
86614--- a/include/net/netdma.h
86615+++ b/include/net/netdma.h
86616@@ -24,7 +24,7 @@
86617 #include <linux/dmaengine.h>
86618 #include <linux/skbuff.h>
86619
86620-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
86621+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
86622 struct sk_buff *skb, int offset, struct iovec *to,
86623 size_t len, struct dma_pinned_list *pinned_list);
86624
86625diff --git a/include/net/netlink.h b/include/net/netlink.h
86626index 6c10762..3e5de0c 100644
86627--- a/include/net/netlink.h
86628+++ b/include/net/netlink.h
86629@@ -521,7 +521,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
86630 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
86631 {
86632 if (mark)
86633- skb_trim(skb, (unsigned char *) mark - skb->data);
86634+ skb_trim(skb, (const unsigned char *) mark - skb->data);
86635 }
86636
86637 /**
86638diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
86639index 29d6a94..235d3d8 100644
86640--- a/include/net/netns/conntrack.h
86641+++ b/include/net/netns/conntrack.h
86642@@ -14,10 +14,10 @@ struct nf_conntrack_ecache;
86643 struct nf_proto_net {
86644 #ifdef CONFIG_SYSCTL
86645 struct ctl_table_header *ctl_table_header;
86646- struct ctl_table *ctl_table;
86647+ ctl_table_no_const *ctl_table;
86648 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
86649 struct ctl_table_header *ctl_compat_header;
86650- struct ctl_table *ctl_compat_table;
86651+ ctl_table_no_const *ctl_compat_table;
86652 #endif
86653 #endif
86654 unsigned int users;
86655@@ -60,7 +60,7 @@ struct nf_ip_net {
86656 struct nf_icmp_net icmpv6;
86657 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
86658 struct ctl_table_header *ctl_table_header;
86659- struct ctl_table *ctl_table;
86660+ ctl_table_no_const *ctl_table;
86661 #endif
86662 };
86663
86664diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
86665index aec5e12..807233f 100644
86666--- a/include/net/netns/ipv4.h
86667+++ b/include/net/netns/ipv4.h
86668@@ -82,7 +82,7 @@ struct netns_ipv4 {
86669
86670 struct ping_group_range ping_group_range;
86671
86672- atomic_t dev_addr_genid;
86673+ atomic_unchecked_t dev_addr_genid;
86674
86675 #ifdef CONFIG_SYSCTL
86676 unsigned long *sysctl_local_reserved_ports;
86677@@ -96,6 +96,6 @@ struct netns_ipv4 {
86678 struct fib_rules_ops *mr_rules_ops;
86679 #endif
86680 #endif
86681- atomic_t rt_genid;
86682+ atomic_unchecked_t rt_genid;
86683 };
86684 #endif
86685diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
86686index eade27a..42894dd 100644
86687--- a/include/net/netns/ipv6.h
86688+++ b/include/net/netns/ipv6.h
86689@@ -75,8 +75,8 @@ struct netns_ipv6 {
86690 struct fib_rules_ops *mr6_rules_ops;
86691 #endif
86692 #endif
86693- atomic_t dev_addr_genid;
86694- atomic_t rt_genid;
86695+ atomic_unchecked_t dev_addr_genid;
86696+ atomic_unchecked_t rt_genid;
86697 };
86698
86699 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
86700diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
86701index 3492434..209f58c 100644
86702--- a/include/net/netns/xfrm.h
86703+++ b/include/net/netns/xfrm.h
86704@@ -64,7 +64,7 @@ struct netns_xfrm {
86705
86706 /* flow cache part */
86707 struct flow_cache flow_cache_global;
86708- atomic_t flow_cache_genid;
86709+ atomic_unchecked_t flow_cache_genid;
86710 struct list_head flow_cache_gc_list;
86711 spinlock_t flow_cache_gc_lock;
86712 struct work_struct flow_cache_gc_work;
86713diff --git a/include/net/ping.h b/include/net/ping.h
86714index 026479b..d9b2829 100644
86715--- a/include/net/ping.h
86716+++ b/include/net/ping.h
86717@@ -54,7 +54,7 @@ struct ping_iter_state {
86718
86719 extern struct proto ping_prot;
86720 #if IS_ENABLED(CONFIG_IPV6)
86721-extern struct pingv6_ops pingv6_ops;
86722+extern struct pingv6_ops *pingv6_ops;
86723 #endif
86724
86725 struct pingfakehdr {
86726diff --git a/include/net/protocol.h b/include/net/protocol.h
86727index d6fcc1f..ca277058 100644
86728--- a/include/net/protocol.h
86729+++ b/include/net/protocol.h
86730@@ -49,7 +49,7 @@ struct net_protocol {
86731 * socket lookup?
86732 */
86733 icmp_strict_tag_validation:1;
86734-};
86735+} __do_const;
86736
86737 #if IS_ENABLED(CONFIG_IPV6)
86738 struct inet6_protocol {
86739@@ -62,7 +62,7 @@ struct inet6_protocol {
86740 u8 type, u8 code, int offset,
86741 __be32 info);
86742 unsigned int flags; /* INET6_PROTO_xxx */
86743-};
86744+} __do_const;
86745
86746 #define INET6_PROTO_NOPOLICY 0x1
86747 #define INET6_PROTO_FINAL 0x2
86748diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
86749index e21b9f9..0191ef0 100644
86750--- a/include/net/rtnetlink.h
86751+++ b/include/net/rtnetlink.h
86752@@ -93,7 +93,7 @@ struct rtnl_link_ops {
86753 int (*fill_slave_info)(struct sk_buff *skb,
86754 const struct net_device *dev,
86755 const struct net_device *slave_dev);
86756-};
86757+} __do_const;
86758
86759 int __rtnl_link_register(struct rtnl_link_ops *ops);
86760 void __rtnl_link_unregister(struct rtnl_link_ops *ops);
86761diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
86762index 4a5b9a3..ca27d73 100644
86763--- a/include/net/sctp/checksum.h
86764+++ b/include/net/sctp/checksum.h
86765@@ -61,8 +61,8 @@ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
86766 unsigned int offset)
86767 {
86768 struct sctphdr *sh = sctp_hdr(skb);
86769- __le32 ret, old = sh->checksum;
86770- const struct skb_checksum_ops ops = {
86771+ __le32 ret, old = sh->checksum;
86772+ static const struct skb_checksum_ops ops = {
86773 .update = sctp_csum_update,
86774 .combine = sctp_csum_combine,
86775 };
86776diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
86777index 72a31db..aaa63d9 100644
86778--- a/include/net/sctp/sm.h
86779+++ b/include/net/sctp/sm.h
86780@@ -80,7 +80,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
86781 typedef struct {
86782 sctp_state_fn_t *fn;
86783 const char *name;
86784-} sctp_sm_table_entry_t;
86785+} __do_const sctp_sm_table_entry_t;
86786
86787 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
86788 * currently in use.
86789@@ -292,7 +292,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
86790 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
86791
86792 /* Extern declarations for major data structures. */
86793-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86794+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
86795
86796
86797 /* Get the size of a DATA chunk payload. */
86798diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
86799index 4ff3f67..89ae38e 100644
86800--- a/include/net/sctp/structs.h
86801+++ b/include/net/sctp/structs.h
86802@@ -509,7 +509,7 @@ struct sctp_pf {
86803 void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
86804 void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
86805 struct sctp_af *af;
86806-};
86807+} __do_const;
86808
86809
86810 /* Structure to track chunk fragments that have been acked, but peer
86811diff --git a/include/net/sock.h b/include/net/sock.h
86812index b9a5bd0..dcd5f3c 100644
86813--- a/include/net/sock.h
86814+++ b/include/net/sock.h
86815@@ -356,7 +356,7 @@ struct sock {
86816 unsigned int sk_napi_id;
86817 unsigned int sk_ll_usec;
86818 #endif
86819- atomic_t sk_drops;
86820+ atomic_unchecked_t sk_drops;
86821 int sk_rcvbuf;
86822
86823 struct sk_filter __rcu *sk_filter;
86824@@ -1053,7 +1053,7 @@ struct proto {
86825 void (*destroy_cgroup)(struct mem_cgroup *memcg);
86826 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
86827 #endif
86828-};
86829+} __randomize_layout;
86830
86831 /*
86832 * Bits in struct cg_proto.flags
86833@@ -1240,7 +1240,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
86834 return ret >> PAGE_SHIFT;
86835 }
86836
86837-static inline long
86838+static inline long __intentional_overflow(-1)
86839 sk_memory_allocated(const struct sock *sk)
86840 {
86841 struct proto *prot = sk->sk_prot;
86842@@ -1385,7 +1385,7 @@ struct sock_iocb {
86843 struct scm_cookie *scm;
86844 struct msghdr *msg, async_msg;
86845 struct kiocb *kiocb;
86846-};
86847+} __randomize_layout;
86848
86849 static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
86850 {
86851@@ -1820,7 +1820,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
86852 }
86853
86854 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
86855- char __user *from, char *to,
86856+ char __user *from, unsigned char *to,
86857 int copy, int offset)
86858 {
86859 if (skb->ip_summed == CHECKSUM_NONE) {
86860@@ -2091,7 +2091,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
86861 }
86862 }
86863
86864-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86865+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
86866
86867 /**
86868 * sk_page_frag - return an appropriate page_frag
86869diff --git a/include/net/tcp.h b/include/net/tcp.h
86870index 590e01a..76498f3 100644
86871--- a/include/net/tcp.h
86872+++ b/include/net/tcp.h
86873@@ -523,7 +523,7 @@ void tcp_retransmit_timer(struct sock *sk);
86874 void tcp_xmit_retransmit_queue(struct sock *);
86875 void tcp_simple_retransmit(struct sock *);
86876 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
86877-int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86878+int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
86879
86880 void tcp_send_probe0(struct sock *);
86881 void tcp_send_partial(struct sock *);
86882@@ -696,8 +696,8 @@ struct tcp_skb_cb {
86883 struct inet6_skb_parm h6;
86884 #endif
86885 } header; /* For incoming frames */
86886- __u32 seq; /* Starting sequence number */
86887- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
86888+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
86889+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
86890 __u32 when; /* used to compute rtt's */
86891 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
86892
86893@@ -713,7 +713,7 @@ struct tcp_skb_cb {
86894
86895 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
86896 /* 1 byte hole */
86897- __u32 ack_seq; /* Sequence number ACK'd */
86898+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
86899 };
86900
86901 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
86902diff --git a/include/net/xfrm.h b/include/net/xfrm.h
86903index 721e9c3b..3c81bbf 100644
86904--- a/include/net/xfrm.h
86905+++ b/include/net/xfrm.h
86906@@ -285,7 +285,6 @@ struct xfrm_dst;
86907 struct xfrm_policy_afinfo {
86908 unsigned short family;
86909 struct dst_ops *dst_ops;
86910- void (*garbage_collect)(struct net *net);
86911 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
86912 const xfrm_address_t *saddr,
86913 const xfrm_address_t *daddr);
86914@@ -303,7 +302,7 @@ struct xfrm_policy_afinfo {
86915 struct net_device *dev,
86916 const struct flowi *fl);
86917 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
86918-};
86919+} __do_const;
86920
86921 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
86922 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
86923@@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
86924 int (*transport_finish)(struct sk_buff *skb,
86925 int async);
86926 void (*local_error)(struct sk_buff *skb, u32 mtu);
86927-};
86928+} __do_const;
86929
86930 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
86931 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
86932@@ -437,7 +436,7 @@ struct xfrm_mode {
86933 struct module *owner;
86934 unsigned int encap;
86935 int flags;
86936-};
86937+} __do_const;
86938
86939 /* Flags for xfrm_mode. */
86940 enum {
86941@@ -534,7 +533,7 @@ struct xfrm_policy {
86942 struct timer_list timer;
86943
86944 struct flow_cache_object flo;
86945- atomic_t genid;
86946+ atomic_unchecked_t genid;
86947 u32 priority;
86948 u32 index;
86949 struct xfrm_mark mark;
86950@@ -1167,6 +1166,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
86951 }
86952
86953 void xfrm_garbage_collect(struct net *net);
86954+void xfrm_garbage_collect_deferred(struct net *net);
86955
86956 #else
86957
86958@@ -1205,6 +1205,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
86959 static inline void xfrm_garbage_collect(struct net *net)
86960 {
86961 }
86962+static inline void xfrm_garbage_collect_deferred(struct net *net)
86963+{
86964+}
86965 #endif
86966
86967 static __inline__
86968diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
86969index 1017e0b..227aa4d 100644
86970--- a/include/rdma/iw_cm.h
86971+++ b/include/rdma/iw_cm.h
86972@@ -122,7 +122,7 @@ struct iw_cm_verbs {
86973 int backlog);
86974
86975 int (*destroy_listen)(struct iw_cm_id *cm_id);
86976-};
86977+} __no_const;
86978
86979 /**
86980 * iw_create_cm_id - Create an IW CM identifier.
86981diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
86982index 52beadf..598734c 100644
86983--- a/include/scsi/libfc.h
86984+++ b/include/scsi/libfc.h
86985@@ -771,6 +771,7 @@ struct libfc_function_template {
86986 */
86987 void (*disc_stop_final) (struct fc_lport *);
86988 };
86989+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
86990
86991 /**
86992 * struct fc_disc - Discovery context
86993@@ -875,7 +876,7 @@ struct fc_lport {
86994 struct fc_vport *vport;
86995
86996 /* Operational Information */
86997- struct libfc_function_template tt;
86998+ libfc_function_template_no_const tt;
86999 u8 link_up;
87000 u8 qfull;
87001 enum fc_lport_state state;
87002diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
87003index 1a0d184..4fb841f 100644
87004--- a/include/scsi/scsi_device.h
87005+++ b/include/scsi/scsi_device.h
87006@@ -185,9 +185,9 @@ struct scsi_device {
87007 unsigned int max_device_blocked; /* what device_blocked counts down from */
87008 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
87009
87010- atomic_t iorequest_cnt;
87011- atomic_t iodone_cnt;
87012- atomic_t ioerr_cnt;
87013+ atomic_unchecked_t iorequest_cnt;
87014+ atomic_unchecked_t iodone_cnt;
87015+ atomic_unchecked_t ioerr_cnt;
87016
87017 struct device sdev_gendev,
87018 sdev_dev;
87019diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
87020index 007a0bc..7188db8 100644
87021--- a/include/scsi/scsi_transport_fc.h
87022+++ b/include/scsi/scsi_transport_fc.h
87023@@ -756,7 +756,8 @@ struct fc_function_template {
87024 unsigned long show_host_system_hostname:1;
87025
87026 unsigned long disable_target_scan:1;
87027-};
87028+} __do_const;
87029+typedef struct fc_function_template __no_const fc_function_template_no_const;
87030
87031
87032 /**
87033diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
87034index ae6c3b8..fd748ac 100644
87035--- a/include/sound/compress_driver.h
87036+++ b/include/sound/compress_driver.h
87037@@ -128,7 +128,7 @@ struct snd_compr_ops {
87038 struct snd_compr_caps *caps);
87039 int (*get_codec_caps) (struct snd_compr_stream *stream,
87040 struct snd_compr_codec_caps *codec);
87041-};
87042+} __no_const;
87043
87044 /**
87045 * struct snd_compr: Compressed device
87046diff --git a/include/sound/soc.h b/include/sound/soc.h
87047index c83a334..27c8038 100644
87048--- a/include/sound/soc.h
87049+++ b/include/sound/soc.h
87050@@ -817,7 +817,7 @@ struct snd_soc_codec_driver {
87051 /* probe ordering - for components with runtime dependencies */
87052 int probe_order;
87053 int remove_order;
87054-};
87055+} __do_const;
87056
87057 /* SoC platform interface */
87058 struct snd_soc_platform_driver {
87059@@ -861,7 +861,7 @@ struct snd_soc_platform_driver {
87060 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
87061 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
87062 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
87063-};
87064+} __do_const;
87065
87066 struct snd_soc_dai_link_component {
87067 const char *name;
87068diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
87069index 9ec9864..e2ee1ee 100644
87070--- a/include/target/target_core_base.h
87071+++ b/include/target/target_core_base.h
87072@@ -761,7 +761,7 @@ struct se_device {
87073 atomic_long_t write_bytes;
87074 /* Active commands on this virtual SE device */
87075 atomic_t simple_cmds;
87076- atomic_t dev_ordered_id;
87077+ atomic_unchecked_t dev_ordered_id;
87078 atomic_t dev_ordered_sync;
87079 atomic_t dev_qf_count;
87080 int export_count;
87081diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
87082new file mode 100644
87083index 0000000..fb634b7
87084--- /dev/null
87085+++ b/include/trace/events/fs.h
87086@@ -0,0 +1,53 @@
87087+#undef TRACE_SYSTEM
87088+#define TRACE_SYSTEM fs
87089+
87090+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
87091+#define _TRACE_FS_H
87092+
87093+#include <linux/fs.h>
87094+#include <linux/tracepoint.h>
87095+
87096+TRACE_EVENT(do_sys_open,
87097+
87098+ TP_PROTO(const char *filename, int flags, int mode),
87099+
87100+ TP_ARGS(filename, flags, mode),
87101+
87102+ TP_STRUCT__entry(
87103+ __string( filename, filename )
87104+ __field( int, flags )
87105+ __field( int, mode )
87106+ ),
87107+
87108+ TP_fast_assign(
87109+ __assign_str(filename, filename);
87110+ __entry->flags = flags;
87111+ __entry->mode = mode;
87112+ ),
87113+
87114+ TP_printk("\"%s\" %x %o",
87115+ __get_str(filename), __entry->flags, __entry->mode)
87116+);
87117+
87118+TRACE_EVENT(open_exec,
87119+
87120+ TP_PROTO(const char *filename),
87121+
87122+ TP_ARGS(filename),
87123+
87124+ TP_STRUCT__entry(
87125+ __string( filename, filename )
87126+ ),
87127+
87128+ TP_fast_assign(
87129+ __assign_str(filename, filename);
87130+ ),
87131+
87132+ TP_printk("\"%s\"",
87133+ __get_str(filename))
87134+);
87135+
87136+#endif /* _TRACE_FS_H */
87137+
87138+/* This part must be outside protection */
87139+#include <trace/define_trace.h>
87140diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
87141index 3608beb..df39d8a 100644
87142--- a/include/trace/events/irq.h
87143+++ b/include/trace/events/irq.h
87144@@ -36,7 +36,7 @@ struct softirq_action;
87145 */
87146 TRACE_EVENT(irq_handler_entry,
87147
87148- TP_PROTO(int irq, struct irqaction *action),
87149+ TP_PROTO(int irq, const struct irqaction *action),
87150
87151 TP_ARGS(irq, action),
87152
87153@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
87154 */
87155 TRACE_EVENT(irq_handler_exit,
87156
87157- TP_PROTO(int irq, struct irqaction *action, int ret),
87158+ TP_PROTO(int irq, const struct irqaction *action, int ret),
87159
87160 TP_ARGS(irq, action, ret),
87161
87162diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
87163index 7caf44c..23c6f27 100644
87164--- a/include/uapi/linux/a.out.h
87165+++ b/include/uapi/linux/a.out.h
87166@@ -39,6 +39,14 @@ enum machine_type {
87167 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
87168 };
87169
87170+/* Constants for the N_FLAGS field */
87171+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87172+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
87173+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
87174+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
87175+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87176+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87177+
87178 #if !defined (N_MAGIC)
87179 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
87180 #endif
87181diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
87182index 22b6ad3..aeba37e 100644
87183--- a/include/uapi/linux/bcache.h
87184+++ b/include/uapi/linux/bcache.h
87185@@ -5,6 +5,7 @@
87186 * Bcache on disk data structures
87187 */
87188
87189+#include <linux/compiler.h>
87190 #include <asm/types.h>
87191
87192 #define BITMASK(name, type, field, offset, size) \
87193@@ -20,8 +21,8 @@ static inline void SET_##name(type *k, __u64 v) \
87194 /* Btree keys - all units are in sectors */
87195
87196 struct bkey {
87197- __u64 high;
87198- __u64 low;
87199+ __u64 high __intentional_overflow(-1);
87200+ __u64 low __intentional_overflow(-1);
87201 __u64 ptr[];
87202 };
87203
87204diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
87205index d876736..ccce5c0 100644
87206--- a/include/uapi/linux/byteorder/little_endian.h
87207+++ b/include/uapi/linux/byteorder/little_endian.h
87208@@ -42,51 +42,51 @@
87209
87210 static inline __le64 __cpu_to_le64p(const __u64 *p)
87211 {
87212- return (__force __le64)*p;
87213+ return (__force const __le64)*p;
87214 }
87215-static inline __u64 __le64_to_cpup(const __le64 *p)
87216+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
87217 {
87218- return (__force __u64)*p;
87219+ return (__force const __u64)*p;
87220 }
87221 static inline __le32 __cpu_to_le32p(const __u32 *p)
87222 {
87223- return (__force __le32)*p;
87224+ return (__force const __le32)*p;
87225 }
87226 static inline __u32 __le32_to_cpup(const __le32 *p)
87227 {
87228- return (__force __u32)*p;
87229+ return (__force const __u32)*p;
87230 }
87231 static inline __le16 __cpu_to_le16p(const __u16 *p)
87232 {
87233- return (__force __le16)*p;
87234+ return (__force const __le16)*p;
87235 }
87236 static inline __u16 __le16_to_cpup(const __le16 *p)
87237 {
87238- return (__force __u16)*p;
87239+ return (__force const __u16)*p;
87240 }
87241 static inline __be64 __cpu_to_be64p(const __u64 *p)
87242 {
87243- return (__force __be64)__swab64p(p);
87244+ return (__force const __be64)__swab64p(p);
87245 }
87246 static inline __u64 __be64_to_cpup(const __be64 *p)
87247 {
87248- return __swab64p((__u64 *)p);
87249+ return __swab64p((const __u64 *)p);
87250 }
87251 static inline __be32 __cpu_to_be32p(const __u32 *p)
87252 {
87253- return (__force __be32)__swab32p(p);
87254+ return (__force const __be32)__swab32p(p);
87255 }
87256-static inline __u32 __be32_to_cpup(const __be32 *p)
87257+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
87258 {
87259- return __swab32p((__u32 *)p);
87260+ return __swab32p((const __u32 *)p);
87261 }
87262 static inline __be16 __cpu_to_be16p(const __u16 *p)
87263 {
87264- return (__force __be16)__swab16p(p);
87265+ return (__force const __be16)__swab16p(p);
87266 }
87267 static inline __u16 __be16_to_cpup(const __be16 *p)
87268 {
87269- return __swab16p((__u16 *)p);
87270+ return __swab16p((const __u16 *)p);
87271 }
87272 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
87273 #define __le64_to_cpus(x) do { (void)(x); } while (0)
87274diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
87275index ef6103b..d4e65dd 100644
87276--- a/include/uapi/linux/elf.h
87277+++ b/include/uapi/linux/elf.h
87278@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
87279 #define PT_GNU_EH_FRAME 0x6474e550
87280
87281 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
87282+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
87283+
87284+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
87285+
87286+/* Constants for the e_flags field */
87287+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
87288+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
87289+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
87290+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
87291+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
87292+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
87293
87294 /*
87295 * Extended Numbering
87296@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
87297 #define DT_DEBUG 21
87298 #define DT_TEXTREL 22
87299 #define DT_JMPREL 23
87300+#define DT_FLAGS 30
87301+ #define DF_TEXTREL 0x00000004
87302 #define DT_ENCODING 32
87303 #define OLD_DT_LOOS 0x60000000
87304 #define DT_LOOS 0x6000000d
87305@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
87306 #define PF_W 0x2
87307 #define PF_X 0x1
87308
87309+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
87310+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
87311+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
87312+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
87313+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
87314+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
87315+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
87316+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
87317+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
87318+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
87319+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
87320+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
87321+
87322 typedef struct elf32_phdr{
87323 Elf32_Word p_type;
87324 Elf32_Off p_offset;
87325@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
87326 #define EI_OSABI 7
87327 #define EI_PAD 8
87328
87329+#define EI_PAX 14
87330+
87331 #define ELFMAG0 0x7f /* EI_MAG */
87332 #define ELFMAG1 'E'
87333 #define ELFMAG2 'L'
87334diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
87335index aa169c4..6a2771d 100644
87336--- a/include/uapi/linux/personality.h
87337+++ b/include/uapi/linux/personality.h
87338@@ -30,6 +30,7 @@ enum {
87339 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
87340 ADDR_NO_RANDOMIZE | \
87341 ADDR_COMPAT_LAYOUT | \
87342+ ADDR_LIMIT_3GB | \
87343 MMAP_PAGE_ZERO)
87344
87345 /*
87346diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
87347index 7530e74..e714828 100644
87348--- a/include/uapi/linux/screen_info.h
87349+++ b/include/uapi/linux/screen_info.h
87350@@ -43,7 +43,8 @@ struct screen_info {
87351 __u16 pages; /* 0x32 */
87352 __u16 vesa_attributes; /* 0x34 */
87353 __u32 capabilities; /* 0x36 */
87354- __u8 _reserved[6]; /* 0x3a */
87355+ __u16 vesapm_size; /* 0x3a */
87356+ __u8 _reserved[4]; /* 0x3c */
87357 } __attribute__((packed));
87358
87359 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87360diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
87361index 0e011eb..82681b1 100644
87362--- a/include/uapi/linux/swab.h
87363+++ b/include/uapi/linux/swab.h
87364@@ -43,7 +43,7 @@
87365 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
87366 */
87367
87368-static inline __attribute_const__ __u16 __fswab16(__u16 val)
87369+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
87370 {
87371 #ifdef __HAVE_BUILTIN_BSWAP16__
87372 return __builtin_bswap16(val);
87373@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
87374 #endif
87375 }
87376
87377-static inline __attribute_const__ __u32 __fswab32(__u32 val)
87378+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
87379 {
87380 #ifdef __HAVE_BUILTIN_BSWAP32__
87381 return __builtin_bswap32(val);
87382@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
87383 #endif
87384 }
87385
87386-static inline __attribute_const__ __u64 __fswab64(__u64 val)
87387+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
87388 {
87389 #ifdef __HAVE_BUILTIN_BSWAP64__
87390 return __builtin_bswap64(val);
87391diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
87392index 43aaba1..1c30b48 100644
87393--- a/include/uapi/linux/sysctl.h
87394+++ b/include/uapi/linux/sysctl.h
87395@@ -155,8 +155,6 @@ enum
87396 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
87397 };
87398
87399-
87400-
87401 /* CTL_VM names: */
87402 enum
87403 {
87404diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
87405index 778a329..1416ffb 100644
87406--- a/include/uapi/linux/videodev2.h
87407+++ b/include/uapi/linux/videodev2.h
87408@@ -1285,7 +1285,7 @@ struct v4l2_ext_control {
87409 union {
87410 __s32 value;
87411 __s64 value64;
87412- char *string;
87413+ char __user *string;
87414 __u8 *p_u8;
87415 __u16 *p_u16;
87416 __u32 *p_u32;
87417diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
87418index 1590c49..5eab462 100644
87419--- a/include/uapi/linux/xattr.h
87420+++ b/include/uapi/linux/xattr.h
87421@@ -73,5 +73,9 @@
87422 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
87423 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
87424
87425+/* User namespace */
87426+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
87427+#define XATTR_PAX_FLAGS_SUFFIX "flags"
87428+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
87429
87430 #endif /* _UAPI_LINUX_XATTR_H */
87431diff --git a/include/video/udlfb.h b/include/video/udlfb.h
87432index f9466fa..f4e2b81 100644
87433--- a/include/video/udlfb.h
87434+++ b/include/video/udlfb.h
87435@@ -53,10 +53,10 @@ struct dlfb_data {
87436 u32 pseudo_palette[256];
87437 int blank_mode; /*one of FB_BLANK_ */
87438 /* blit-only rendering path metrics, exposed through sysfs */
87439- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87440- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
87441- atomic_t bytes_sent; /* to usb, after compression including overhead */
87442- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
87443+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
87444+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
87445+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
87446+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
87447 };
87448
87449 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
87450diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
87451index 30f5362..8ed8ac9 100644
87452--- a/include/video/uvesafb.h
87453+++ b/include/video/uvesafb.h
87454@@ -122,6 +122,7 @@ struct uvesafb_par {
87455 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
87456 u8 pmi_setpal; /* PMI for palette changes */
87457 u16 *pmi_base; /* protected mode interface location */
87458+ u8 *pmi_code; /* protected mode code location */
87459 void *pmi_start;
87460 void *pmi_pal;
87461 u8 *vbe_state_orig; /*
87462diff --git a/init/Kconfig b/init/Kconfig
87463index 80a6907..baf7d53 100644
87464--- a/init/Kconfig
87465+++ b/init/Kconfig
87466@@ -1150,6 +1150,7 @@ endif # CGROUPS
87467
87468 config CHECKPOINT_RESTORE
87469 bool "Checkpoint/restore support" if EXPERT
87470+ depends on !GRKERNSEC
87471 default n
87472 help
87473 Enables additional kernel features in a sake of checkpoint/restore.
87474@@ -1635,7 +1636,7 @@ config SLUB_DEBUG
87475
87476 config COMPAT_BRK
87477 bool "Disable heap randomization"
87478- default y
87479+ default n
87480 help
87481 Randomizing heap placement makes heap exploits harder, but it
87482 also breaks ancient binaries (including anything libc5 based).
87483@@ -1923,7 +1924,7 @@ config INIT_ALL_POSSIBLE
87484 config STOP_MACHINE
87485 bool
87486 default y
87487- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
87488+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
87489 help
87490 Need stop_machine() primitive.
87491
87492diff --git a/init/Makefile b/init/Makefile
87493index 7bc47ee..6da2dc7 100644
87494--- a/init/Makefile
87495+++ b/init/Makefile
87496@@ -2,6 +2,9 @@
87497 # Makefile for the linux kernel.
87498 #
87499
87500+ccflags-y := $(GCC_PLUGINS_CFLAGS)
87501+asflags-y := $(GCC_PLUGINS_AFLAGS)
87502+
87503 obj-y := main.o version.o mounts.o
87504 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
87505 obj-y += noinitramfs.o
87506diff --git a/init/do_mounts.c b/init/do_mounts.c
87507index 82f2288..ea1430a 100644
87508--- a/init/do_mounts.c
87509+++ b/init/do_mounts.c
87510@@ -359,11 +359,11 @@ static void __init get_fs_names(char *page)
87511 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
87512 {
87513 struct super_block *s;
87514- int err = sys_mount(name, "/root", fs, flags, data);
87515+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
87516 if (err)
87517 return err;
87518
87519- sys_chdir("/root");
87520+ sys_chdir((const char __force_user *)"/root");
87521 s = current->fs->pwd.dentry->d_sb;
87522 ROOT_DEV = s->s_dev;
87523 printk(KERN_INFO
87524@@ -484,18 +484,18 @@ void __init change_floppy(char *fmt, ...)
87525 va_start(args, fmt);
87526 vsprintf(buf, fmt, args);
87527 va_end(args);
87528- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
87529+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
87530 if (fd >= 0) {
87531 sys_ioctl(fd, FDEJECT, 0);
87532 sys_close(fd);
87533 }
87534 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
87535- fd = sys_open("/dev/console", O_RDWR, 0);
87536+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
87537 if (fd >= 0) {
87538 sys_ioctl(fd, TCGETS, (long)&termios);
87539 termios.c_lflag &= ~ICANON;
87540 sys_ioctl(fd, TCSETSF, (long)&termios);
87541- sys_read(fd, &c, 1);
87542+ sys_read(fd, (char __user *)&c, 1);
87543 termios.c_lflag |= ICANON;
87544 sys_ioctl(fd, TCSETSF, (long)&termios);
87545 sys_close(fd);
87546@@ -589,8 +589,8 @@ void __init prepare_namespace(void)
87547 mount_root();
87548 out:
87549 devtmpfs_mount("dev");
87550- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87551- sys_chroot(".");
87552+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87553+ sys_chroot((const char __force_user *)".");
87554 }
87555
87556 static bool is_tmpfs;
87557diff --git a/init/do_mounts.h b/init/do_mounts.h
87558index f5b978a..69dbfe8 100644
87559--- a/init/do_mounts.h
87560+++ b/init/do_mounts.h
87561@@ -15,15 +15,15 @@ extern int root_mountflags;
87562
87563 static inline int create_dev(char *name, dev_t dev)
87564 {
87565- sys_unlink(name);
87566- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
87567+ sys_unlink((char __force_user *)name);
87568+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
87569 }
87570
87571 #if BITS_PER_LONG == 32
87572 static inline u32 bstat(char *name)
87573 {
87574 struct stat64 stat;
87575- if (sys_stat64(name, &stat) != 0)
87576+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
87577 return 0;
87578 if (!S_ISBLK(stat.st_mode))
87579 return 0;
87580@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
87581 static inline u32 bstat(char *name)
87582 {
87583 struct stat stat;
87584- if (sys_newstat(name, &stat) != 0)
87585+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
87586 return 0;
87587 if (!S_ISBLK(stat.st_mode))
87588 return 0;
87589diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
87590index 3e0878e..8a9d7a0 100644
87591--- a/init/do_mounts_initrd.c
87592+++ b/init/do_mounts_initrd.c
87593@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
87594 {
87595 sys_unshare(CLONE_FS | CLONE_FILES);
87596 /* stdin/stdout/stderr for /linuxrc */
87597- sys_open("/dev/console", O_RDWR, 0);
87598+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
87599 sys_dup(0);
87600 sys_dup(0);
87601 /* move initrd over / and chdir/chroot in initrd root */
87602- sys_chdir("/root");
87603- sys_mount(".", "/", NULL, MS_MOVE, NULL);
87604- sys_chroot(".");
87605+ sys_chdir((const char __force_user *)"/root");
87606+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
87607+ sys_chroot((const char __force_user *)".");
87608 sys_setsid();
87609 return 0;
87610 }
87611@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
87612 create_dev("/dev/root.old", Root_RAM0);
87613 /* mount initrd on rootfs' /root */
87614 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
87615- sys_mkdir("/old", 0700);
87616- sys_chdir("/old");
87617+ sys_mkdir((const char __force_user *)"/old", 0700);
87618+ sys_chdir((const char __force_user *)"/old");
87619
87620 /* try loading default modules from initrd */
87621 load_default_modules();
87622@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
87623 current->flags &= ~PF_FREEZER_SKIP;
87624
87625 /* move initrd to rootfs' /old */
87626- sys_mount("..", ".", NULL, MS_MOVE, NULL);
87627+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
87628 /* switch root and cwd back to / of rootfs */
87629- sys_chroot("..");
87630+ sys_chroot((const char __force_user *)"..");
87631
87632 if (new_decode_dev(real_root_dev) == Root_RAM0) {
87633- sys_chdir("/old");
87634+ sys_chdir((const char __force_user *)"/old");
87635 return;
87636 }
87637
87638- sys_chdir("/");
87639+ sys_chdir((const char __force_user *)"/");
87640 ROOT_DEV = new_decode_dev(real_root_dev);
87641 mount_root();
87642
87643 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
87644- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
87645+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
87646 if (!error)
87647 printk("okay\n");
87648 else {
87649- int fd = sys_open("/dev/root.old", O_RDWR, 0);
87650+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
87651 if (error == -ENOENT)
87652 printk("/initrd does not exist. Ignored.\n");
87653 else
87654 printk("failed\n");
87655 printk(KERN_NOTICE "Unmounting old root\n");
87656- sys_umount("/old", MNT_DETACH);
87657+ sys_umount((char __force_user *)"/old", MNT_DETACH);
87658 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
87659 if (fd < 0) {
87660 error = fd;
87661@@ -127,11 +127,11 @@ int __init initrd_load(void)
87662 * mounted in the normal path.
87663 */
87664 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
87665- sys_unlink("/initrd.image");
87666+ sys_unlink((const char __force_user *)"/initrd.image");
87667 handle_initrd();
87668 return 1;
87669 }
87670 }
87671- sys_unlink("/initrd.image");
87672+ sys_unlink((const char __force_user *)"/initrd.image");
87673 return 0;
87674 }
87675diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
87676index 8cb6db5..d729f50 100644
87677--- a/init/do_mounts_md.c
87678+++ b/init/do_mounts_md.c
87679@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
87680 partitioned ? "_d" : "", minor,
87681 md_setup_args[ent].device_names);
87682
87683- fd = sys_open(name, 0, 0);
87684+ fd = sys_open((char __force_user *)name, 0, 0);
87685 if (fd < 0) {
87686 printk(KERN_ERR "md: open failed - cannot start "
87687 "array %s\n", name);
87688@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
87689 * array without it
87690 */
87691 sys_close(fd);
87692- fd = sys_open(name, 0, 0);
87693+ fd = sys_open((char __force_user *)name, 0, 0);
87694 sys_ioctl(fd, BLKRRPART, 0);
87695 }
87696 sys_close(fd);
87697@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
87698
87699 wait_for_device_probe();
87700
87701- fd = sys_open("/dev/md0", 0, 0);
87702+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
87703 if (fd >= 0) {
87704 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
87705 sys_close(fd);
87706diff --git a/init/init_task.c b/init/init_task.c
87707index ba0a7f36..2bcf1d5 100644
87708--- a/init/init_task.c
87709+++ b/init/init_task.c
87710@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
87711 * Initial thread structure. Alignment of this is handled by a special
87712 * linker map entry.
87713 */
87714+#ifdef CONFIG_X86
87715+union thread_union init_thread_union __init_task_data;
87716+#else
87717 union thread_union init_thread_union __init_task_data =
87718 { INIT_THREAD_INFO(init_task) };
87719+#endif
87720diff --git a/init/initramfs.c b/init/initramfs.c
87721index bece48c..e911bd8 100644
87722--- a/init/initramfs.c
87723+++ b/init/initramfs.c
87724@@ -25,7 +25,7 @@ static ssize_t __init xwrite(int fd, const char *p, size_t count)
87725
87726 /* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
87727 while (count) {
87728- ssize_t rv = sys_write(fd, p, count);
87729+ ssize_t rv = sys_write(fd, (char __force_user *)p, count);
87730
87731 if (rv < 0) {
87732 if (rv == -EINTR || rv == -EAGAIN)
87733@@ -107,7 +107,7 @@ static void __init free_hash(void)
87734 }
87735 }
87736
87737-static long __init do_utime(char *filename, time_t mtime)
87738+static long __init do_utime(char __force_user *filename, time_t mtime)
87739 {
87740 struct timespec t[2];
87741
87742@@ -142,7 +142,7 @@ static void __init dir_utime(void)
87743 struct dir_entry *de, *tmp;
87744 list_for_each_entry_safe(de, tmp, &dir_list, list) {
87745 list_del(&de->list);
87746- do_utime(de->name, de->mtime);
87747+ do_utime((char __force_user *)de->name, de->mtime);
87748 kfree(de->name);
87749 kfree(de);
87750 }
87751@@ -304,7 +304,7 @@ static int __init maybe_link(void)
87752 if (nlink >= 2) {
87753 char *old = find_link(major, minor, ino, mode, collected);
87754 if (old)
87755- return (sys_link(old, collected) < 0) ? -1 : 1;
87756+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
87757 }
87758 return 0;
87759 }
87760@@ -313,11 +313,11 @@ static void __init clean_path(char *path, umode_t mode)
87761 {
87762 struct stat st;
87763
87764- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
87765+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
87766 if (S_ISDIR(st.st_mode))
87767- sys_rmdir(path);
87768+ sys_rmdir((char __force_user *)path);
87769 else
87770- sys_unlink(path);
87771+ sys_unlink((char __force_user *)path);
87772 }
87773 }
87774
87775@@ -338,7 +338,7 @@ static int __init do_name(void)
87776 int openflags = O_WRONLY|O_CREAT;
87777 if (ml != 1)
87778 openflags |= O_TRUNC;
87779- wfd = sys_open(collected, openflags, mode);
87780+ wfd = sys_open((char __force_user *)collected, openflags, mode);
87781
87782 if (wfd >= 0) {
87783 sys_fchown(wfd, uid, gid);
87784@@ -350,17 +350,17 @@ static int __init do_name(void)
87785 }
87786 }
87787 } else if (S_ISDIR(mode)) {
87788- sys_mkdir(collected, mode);
87789- sys_chown(collected, uid, gid);
87790- sys_chmod(collected, mode);
87791+ sys_mkdir((char __force_user *)collected, mode);
87792+ sys_chown((char __force_user *)collected, uid, gid);
87793+ sys_chmod((char __force_user *)collected, mode);
87794 dir_add(collected, mtime);
87795 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
87796 S_ISFIFO(mode) || S_ISSOCK(mode)) {
87797 if (maybe_link() == 0) {
87798- sys_mknod(collected, mode, rdev);
87799- sys_chown(collected, uid, gid);
87800- sys_chmod(collected, mode);
87801- do_utime(collected, mtime);
87802+ sys_mknod((char __force_user *)collected, mode, rdev);
87803+ sys_chown((char __force_user *)collected, uid, gid);
87804+ sys_chmod((char __force_user *)collected, mode);
87805+ do_utime((char __force_user *)collected, mtime);
87806 }
87807 }
87808 return 0;
87809@@ -372,7 +372,7 @@ static int __init do_copy(void)
87810 if (xwrite(wfd, victim, body_len) != body_len)
87811 error("write error");
87812 sys_close(wfd);
87813- do_utime(vcollected, mtime);
87814+ do_utime((char __force_user *)vcollected, mtime);
87815 kfree(vcollected);
87816 eat(body_len);
87817 state = SkipIt;
87818@@ -390,9 +390,9 @@ static int __init do_symlink(void)
87819 {
87820 collected[N_ALIGN(name_len) + body_len] = '\0';
87821 clean_path(collected, 0);
87822- sys_symlink(collected + N_ALIGN(name_len), collected);
87823- sys_lchown(collected, uid, gid);
87824- do_utime(collected, mtime);
87825+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
87826+ sys_lchown((char __force_user *)collected, uid, gid);
87827+ do_utime((char __force_user *)collected, mtime);
87828 state = SkipIt;
87829 next_state = Reset;
87830 return 0;
87831diff --git a/init/main.c b/init/main.c
87832index d0f4b59..0c4b184 100644
87833--- a/init/main.c
87834+++ b/init/main.c
87835@@ -98,6 +98,8 @@ extern void radix_tree_init(void);
87836 static inline void mark_rodata_ro(void) { }
87837 #endif
87838
87839+extern void grsecurity_init(void);
87840+
87841 /*
87842 * Debug helper: via this flag we know that we are in 'early bootup code'
87843 * where only the boot processor is running with IRQ disabled. This means
87844@@ -159,6 +161,75 @@ static int __init set_reset_devices(char *str)
87845
87846 __setup("reset_devices", set_reset_devices);
87847
87848+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
87849+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
87850+static int __init setup_grsec_proc_gid(char *str)
87851+{
87852+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
87853+ return 1;
87854+}
87855+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
87856+#endif
87857+
87858+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
87859+unsigned long pax_user_shadow_base __read_only;
87860+EXPORT_SYMBOL(pax_user_shadow_base);
87861+extern char pax_enter_kernel_user[];
87862+extern char pax_exit_kernel_user[];
87863+#endif
87864+
87865+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
87866+static int __init setup_pax_nouderef(char *str)
87867+{
87868+#ifdef CONFIG_X86_32
87869+ unsigned int cpu;
87870+ struct desc_struct *gdt;
87871+
87872+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
87873+ gdt = get_cpu_gdt_table(cpu);
87874+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
87875+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
87876+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
87877+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
87878+ }
87879+ loadsegment(ds, __KERNEL_DS);
87880+ loadsegment(es, __KERNEL_DS);
87881+ loadsegment(ss, __KERNEL_DS);
87882+#else
87883+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
87884+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
87885+ clone_pgd_mask = ~(pgdval_t)0UL;
87886+ pax_user_shadow_base = 0UL;
87887+ setup_clear_cpu_cap(X86_FEATURE_PCID);
87888+ setup_clear_cpu_cap(X86_FEATURE_INVPCID);
87889+#endif
87890+
87891+ return 0;
87892+}
87893+early_param("pax_nouderef", setup_pax_nouderef);
87894+
87895+#ifdef CONFIG_X86_64
87896+static int __init setup_pax_weakuderef(char *str)
87897+{
87898+ if (clone_pgd_mask != ~(pgdval_t)0UL)
87899+ pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
87900+ return 1;
87901+}
87902+__setup("pax_weakuderef", setup_pax_weakuderef);
87903+#endif
87904+#endif
87905+
87906+#ifdef CONFIG_PAX_SOFTMODE
87907+int pax_softmode;
87908+
87909+static int __init setup_pax_softmode(char *str)
87910+{
87911+ get_option(&str, &pax_softmode);
87912+ return 1;
87913+}
87914+__setup("pax_softmode=", setup_pax_softmode);
87915+#endif
87916+
87917 static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
87918 const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
87919 static const char *panic_later, *panic_param;
87920@@ -728,7 +799,7 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
87921 struct blacklist_entry *entry;
87922 char *fn_name;
87923
87924- fn_name = kasprintf(GFP_KERNEL, "%pf", fn);
87925+ fn_name = kasprintf(GFP_KERNEL, "%pX", fn);
87926 if (!fn_name)
87927 return false;
87928
87929@@ -780,7 +851,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
87930 {
87931 int count = preempt_count();
87932 int ret;
87933- char msgbuf[64];
87934+ const char *msg1 = "", *msg2 = "";
87935
87936 if (initcall_blacklisted(fn))
87937 return -EPERM;
87938@@ -790,18 +861,17 @@ int __init_or_module do_one_initcall(initcall_t fn)
87939 else
87940 ret = fn();
87941
87942- msgbuf[0] = 0;
87943-
87944 if (preempt_count() != count) {
87945- sprintf(msgbuf, "preemption imbalance ");
87946+ msg1 = " preemption imbalance";
87947 preempt_count_set(count);
87948 }
87949 if (irqs_disabled()) {
87950- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
87951+ msg2 = " disabled interrupts";
87952 local_irq_enable();
87953 }
87954- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
87955+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
87956
87957+ add_latent_entropy();
87958 return ret;
87959 }
87960
87961@@ -908,8 +978,8 @@ static int run_init_process(const char *init_filename)
87962 {
87963 argv_init[0] = init_filename;
87964 return do_execve(getname_kernel(init_filename),
87965- (const char __user *const __user *)argv_init,
87966- (const char __user *const __user *)envp_init);
87967+ (const char __user *const __force_user *)argv_init,
87968+ (const char __user *const __force_user *)envp_init);
87969 }
87970
87971 static int try_to_run_init_process(const char *init_filename)
87972@@ -926,6 +996,10 @@ static int try_to_run_init_process(const char *init_filename)
87973 return ret;
87974 }
87975
87976+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87977+extern int gr_init_ran;
87978+#endif
87979+
87980 static noinline void __init kernel_init_freeable(void);
87981
87982 static int __ref kernel_init(void *unused)
87983@@ -950,6 +1024,11 @@ static int __ref kernel_init(void *unused)
87984 ramdisk_execute_command, ret);
87985 }
87986
87987+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
87988+ /* if no initrd was used, be extra sure we enforce chroot restrictions */
87989+ gr_init_ran = 1;
87990+#endif
87991+
87992 /*
87993 * We try each of these until one succeeds.
87994 *
87995@@ -1005,7 +1084,7 @@ static noinline void __init kernel_init_freeable(void)
87996 do_basic_setup();
87997
87998 /* Open the /dev/console on the rootfs, this should never fail */
87999- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
88000+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
88001 pr_err("Warning: unable to open an initial console.\n");
88002
88003 (void) sys_dup(0);
88004@@ -1018,11 +1097,13 @@ static noinline void __init kernel_init_freeable(void)
88005 if (!ramdisk_execute_command)
88006 ramdisk_execute_command = "/init";
88007
88008- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
88009+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
88010 ramdisk_execute_command = NULL;
88011 prepare_namespace();
88012 }
88013
88014+ grsecurity_init();
88015+
88016 /*
88017 * Ok, we have completed the initial bootup, and
88018 * we're essentially up and running. Get rid of the
88019diff --git a/ipc/compat.c b/ipc/compat.c
88020index b5ef4f7..ff31d87 100644
88021--- a/ipc/compat.c
88022+++ b/ipc/compat.c
88023@@ -396,7 +396,7 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
88024 COMPAT_SHMLBA);
88025 if (err < 0)
88026 return err;
88027- return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
88028+ return put_user(raddr, (compat_ulong_t __user *)compat_ptr(third));
88029 }
88030 case SHMDT:
88031 return sys_shmdt(compat_ptr(ptr));
88032diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
88033index e8075b2..76f2c6a 100644
88034--- a/ipc/ipc_sysctl.c
88035+++ b/ipc/ipc_sysctl.c
88036@@ -30,7 +30,7 @@ static void *get_ipc(struct ctl_table *table)
88037 static int proc_ipc_dointvec(struct ctl_table *table, int write,
88038 void __user *buffer, size_t *lenp, loff_t *ppos)
88039 {
88040- struct ctl_table ipc_table;
88041+ ctl_table_no_const ipc_table;
88042
88043 memcpy(&ipc_table, table, sizeof(ipc_table));
88044 ipc_table.data = get_ipc(table);
88045@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
88046 static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
88047 void __user *buffer, size_t *lenp, loff_t *ppos)
88048 {
88049- struct ctl_table ipc_table;
88050+ ctl_table_no_const ipc_table;
88051
88052 memcpy(&ipc_table, table, sizeof(ipc_table));
88053 ipc_table.data = get_ipc(table);
88054@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
88055 static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
88056 void __user *buffer, size_t *lenp, loff_t *ppos)
88057 {
88058- struct ctl_table ipc_table;
88059+ ctl_table_no_const ipc_table;
88060 size_t lenp_bef = *lenp;
88061 int rc;
88062
88063@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
88064 static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
88065 void __user *buffer, size_t *lenp, loff_t *ppos)
88066 {
88067- struct ctl_table ipc_table;
88068+ ctl_table_no_const ipc_table;
88069 memcpy(&ipc_table, table, sizeof(ipc_table));
88070 ipc_table.data = get_ipc(table);
88071
88072@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
88073 static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
88074 void __user *buffer, size_t *lenp, loff_t *ppos)
88075 {
88076- struct ctl_table ipc_table;
88077+ ctl_table_no_const ipc_table;
88078 int oldval;
88079 int rc;
88080
88081diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
88082index 68d4e95..1477ded 100644
88083--- a/ipc/mq_sysctl.c
88084+++ b/ipc/mq_sysctl.c
88085@@ -25,7 +25,7 @@ static void *get_mq(struct ctl_table *table)
88086 static int proc_mq_dointvec(struct ctl_table *table, int write,
88087 void __user *buffer, size_t *lenp, loff_t *ppos)
88088 {
88089- struct ctl_table mq_table;
88090+ ctl_table_no_const mq_table;
88091 memcpy(&mq_table, table, sizeof(mq_table));
88092 mq_table.data = get_mq(table);
88093
88094@@ -35,7 +35,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
88095 static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
88096 void __user *buffer, size_t *lenp, loff_t *ppos)
88097 {
88098- struct ctl_table mq_table;
88099+ ctl_table_no_const mq_table;
88100 memcpy(&mq_table, table, sizeof(mq_table));
88101 mq_table.data = get_mq(table);
88102
88103diff --git a/ipc/mqueue.c b/ipc/mqueue.c
88104index 4fcf39a..d3cc2ec 100644
88105--- a/ipc/mqueue.c
88106+++ b/ipc/mqueue.c
88107@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
88108 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
88109 info->attr.mq_msgsize);
88110
88111+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
88112 spin_lock(&mq_lock);
88113 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
88114 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
88115diff --git a/ipc/shm.c b/ipc/shm.c
88116index 7fc9f9f..95e201f 100644
88117--- a/ipc/shm.c
88118+++ b/ipc/shm.c
88119@@ -72,6 +72,14 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
88120 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
88121 #endif
88122
88123+#ifdef CONFIG_GRKERNSEC
88124+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88125+ const u64 shm_createtime, const kuid_t cuid,
88126+ const int shmid);
88127+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
88128+ const u64 shm_createtime);
88129+#endif
88130+
88131 void shm_init_ns(struct ipc_namespace *ns)
88132 {
88133 ns->shm_ctlmax = SHMMAX;
88134@@ -559,6 +567,9 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
88135 shp->shm_lprid = 0;
88136 shp->shm_atim = shp->shm_dtim = 0;
88137 shp->shm_ctim = get_seconds();
88138+#ifdef CONFIG_GRKERNSEC
88139+ shp->shm_createtime = ktime_get_ns();
88140+#endif
88141 shp->shm_segsz = size;
88142 shp->shm_nattch = 0;
88143 shp->shm_file = file;
88144@@ -1095,6 +1106,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88145 f_mode = FMODE_READ | FMODE_WRITE;
88146 }
88147 if (shmflg & SHM_EXEC) {
88148+
88149+#ifdef CONFIG_PAX_MPROTECT
88150+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
88151+ goto out;
88152+#endif
88153+
88154 prot |= PROT_EXEC;
88155 acc_mode |= S_IXUGO;
88156 }
88157@@ -1119,6 +1136,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88158 if (err)
88159 goto out_unlock;
88160
88161+#ifdef CONFIG_GRKERNSEC
88162+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
88163+ shp->shm_perm.cuid, shmid) ||
88164+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
88165+ err = -EACCES;
88166+ goto out_unlock;
88167+ }
88168+#endif
88169+
88170 ipc_lock_object(&shp->shm_perm);
88171
88172 /* check if shm_destroy() is tearing down shp */
88173@@ -1131,6 +1157,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
88174 path = shp->shm_file->f_path;
88175 path_get(&path);
88176 shp->shm_nattch++;
88177+#ifdef CONFIG_GRKERNSEC
88178+ shp->shm_lapid = current->pid;
88179+#endif
88180 size = i_size_read(path.dentry->d_inode);
88181 ipc_unlock_object(&shp->shm_perm);
88182 rcu_read_unlock();
88183diff --git a/ipc/util.c b/ipc/util.c
88184index 27d74e6..8be0be2 100644
88185--- a/ipc/util.c
88186+++ b/ipc/util.c
88187@@ -71,6 +71,8 @@ struct ipc_proc_iface {
88188 int (*show)(struct seq_file *, void *);
88189 };
88190
88191+extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
88192+
88193 static void ipc_memory_notifier(struct work_struct *work)
88194 {
88195 ipcns_notify(IPCNS_MEMCHANGED);
88196@@ -537,6 +539,10 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
88197 granted_mode >>= 6;
88198 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
88199 granted_mode >>= 3;
88200+
88201+ if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
88202+ return -1;
88203+
88204 /* is there some bit set in requested_mode but not in granted_mode? */
88205 if ((requested_mode & ~granted_mode & 0007) &&
88206 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
88207diff --git a/kernel/audit.c b/kernel/audit.c
88208index 6726aa6..bb864a9 100644
88209--- a/kernel/audit.c
88210+++ b/kernel/audit.c
88211@@ -122,7 +122,7 @@ u32 audit_sig_sid = 0;
88212 3) suppressed due to audit_rate_limit
88213 4) suppressed due to audit_backlog_limit
88214 */
88215-static atomic_t audit_lost = ATOMIC_INIT(0);
88216+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
88217
88218 /* The netlink socket. */
88219 static struct sock *audit_sock;
88220@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
88221 unsigned long now;
88222 int print;
88223
88224- atomic_inc(&audit_lost);
88225+ atomic_inc_unchecked(&audit_lost);
88226
88227 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
88228
88229@@ -273,7 +273,7 @@ void audit_log_lost(const char *message)
88230 if (print) {
88231 if (printk_ratelimit())
88232 pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
88233- atomic_read(&audit_lost),
88234+ atomic_read_unchecked(&audit_lost),
88235 audit_rate_limit,
88236 audit_backlog_limit);
88237 audit_panic(message);
88238@@ -840,7 +840,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
88239 s.pid = audit_pid;
88240 s.rate_limit = audit_rate_limit;
88241 s.backlog_limit = audit_backlog_limit;
88242- s.lost = atomic_read(&audit_lost);
88243+ s.lost = atomic_read_unchecked(&audit_lost);
88244 s.backlog = skb_queue_len(&audit_skb_queue);
88245 s.version = AUDIT_VERSION_LATEST;
88246 s.backlog_wait_time = audit_backlog_wait_time;
88247diff --git a/kernel/auditsc.c b/kernel/auditsc.c
88248index 21eae3c..66db239 100644
88249--- a/kernel/auditsc.c
88250+++ b/kernel/auditsc.c
88251@@ -2023,7 +2023,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
88252 }
88253
88254 /* global counter which is incremented every time something logs in */
88255-static atomic_t session_id = ATOMIC_INIT(0);
88256+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
88257
88258 static int audit_set_loginuid_perm(kuid_t loginuid)
88259 {
88260@@ -2090,7 +2090,7 @@ int audit_set_loginuid(kuid_t loginuid)
88261
88262 /* are we setting or clearing? */
88263 if (uid_valid(loginuid))
88264- sessionid = (unsigned int)atomic_inc_return(&session_id);
88265+ sessionid = (unsigned int)atomic_inc_return_unchecked(&session_id);
88266
88267 task->sessionid = sessionid;
88268 task->loginuid = loginuid;
88269diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
88270index 7f0dbcb..b54bb2c 100644
88271--- a/kernel/bpf/core.c
88272+++ b/kernel/bpf/core.c
88273@@ -22,6 +22,7 @@
88274 */
88275 #include <linux/filter.h>
88276 #include <linux/skbuff.h>
88277+#include <linux/vmalloc.h>
88278 #include <asm/unaligned.h>
88279
88280 /* Registers */
88281@@ -63,6 +64,67 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
88282 return NULL;
88283 }
88284
88285+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
88286+{
88287+ gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
88288+ gfp_extra_flags;
88289+ struct bpf_work_struct *ws;
88290+ struct bpf_prog *fp;
88291+
88292+ size = round_up(size, PAGE_SIZE);
88293+ fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
88294+ if (fp == NULL)
88295+ return NULL;
88296+
88297+ ws = kmalloc(sizeof(*ws), GFP_KERNEL | gfp_extra_flags);
88298+ if (ws == NULL) {
88299+ vfree(fp);
88300+ return NULL;
88301+ }
88302+
88303+ fp->pages = size / PAGE_SIZE;
88304+ fp->work = ws;
88305+
88306+ return fp;
88307+}
88308+EXPORT_SYMBOL_GPL(bpf_prog_alloc);
88309+
88310+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
88311+ gfp_t gfp_extra_flags)
88312+{
88313+ gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
88314+ gfp_extra_flags;
88315+ struct bpf_prog *fp;
88316+
88317+ BUG_ON(fp_old == NULL);
88318+
88319+ size = round_up(size, PAGE_SIZE);
88320+ if (size <= fp_old->pages * PAGE_SIZE)
88321+ return fp_old;
88322+
88323+ fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
88324+ if (fp != NULL) {
88325+ memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
88326+ fp->pages = size / PAGE_SIZE;
88327+
88328+ /* We keep fp->work from fp_old around in the new
88329+ * reallocated structure.
88330+ */
88331+ fp_old->work = NULL;
88332+ __bpf_prog_free(fp_old);
88333+ }
88334+
88335+ return fp;
88336+}
88337+EXPORT_SYMBOL_GPL(bpf_prog_realloc);
88338+
88339+void __bpf_prog_free(struct bpf_prog *fp)
88340+{
88341+ kfree(fp->work);
88342+ vfree(fp);
88343+}
88344+EXPORT_SYMBOL_GPL(__bpf_prog_free);
88345+
88346 /* Base function for offset calculation. Needs to go into .text section,
88347 * therefore keeping it non-static as well; will also be used by JITs
88348 * anyway later on, so do not let the compiler omit it.
88349@@ -523,12 +585,26 @@ void bpf_prog_select_runtime(struct bpf_prog *fp)
88350
88351 /* Probe if internal BPF can be JITed */
88352 bpf_int_jit_compile(fp);
88353+ /* Lock whole bpf_prog as read-only */
88354+ bpf_prog_lock_ro(fp);
88355 }
88356 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
88357
88358-/* free internal BPF program */
88359+static void bpf_prog_free_deferred(struct work_struct *work)
88360+{
88361+ struct bpf_work_struct *ws;
88362+
88363+ ws = container_of(work, struct bpf_work_struct, work);
88364+ bpf_jit_free(ws->prog);
88365+}
88366+
88367+/* Free internal BPF program */
88368 void bpf_prog_free(struct bpf_prog *fp)
88369 {
88370- bpf_jit_free(fp);
88371+ struct bpf_work_struct *ws = fp->work;
88372+
88373+ INIT_WORK(&ws->work, bpf_prog_free_deferred);
88374+ ws->prog = fp;
88375+ schedule_work(&ws->work);
88376 }
88377 EXPORT_SYMBOL_GPL(bpf_prog_free);
88378diff --git a/kernel/capability.c b/kernel/capability.c
88379index 989f5bf..d317ca0 100644
88380--- a/kernel/capability.c
88381+++ b/kernel/capability.c
88382@@ -192,6 +192,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
88383 * before modification is attempted and the application
88384 * fails.
88385 */
88386+ if (tocopy > ARRAY_SIZE(kdata))
88387+ return -EFAULT;
88388+
88389 if (copy_to_user(dataptr, kdata, tocopy
88390 * sizeof(struct __user_cap_data_struct))) {
88391 return -EFAULT;
88392@@ -297,10 +300,11 @@ bool has_ns_capability(struct task_struct *t,
88393 int ret;
88394
88395 rcu_read_lock();
88396- ret = security_capable(__task_cred(t), ns, cap);
88397+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
88398+ gr_task_is_capable(t, __task_cred(t), cap);
88399 rcu_read_unlock();
88400
88401- return (ret == 0);
88402+ return ret;
88403 }
88404
88405 /**
88406@@ -337,10 +341,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
88407 int ret;
88408
88409 rcu_read_lock();
88410- ret = security_capable_noaudit(__task_cred(t), ns, cap);
88411+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
88412 rcu_read_unlock();
88413
88414- return (ret == 0);
88415+ return ret;
88416 }
88417
88418 /**
88419@@ -378,7 +382,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
88420 BUG();
88421 }
88422
88423- if (security_capable(current_cred(), ns, cap) == 0) {
88424+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
88425 current->flags |= PF_SUPERPRIV;
88426 return true;
88427 }
88428@@ -386,6 +390,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
88429 }
88430 EXPORT_SYMBOL(ns_capable);
88431
88432+bool ns_capable_nolog(struct user_namespace *ns, int cap)
88433+{
88434+ if (unlikely(!cap_valid(cap))) {
88435+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
88436+ BUG();
88437+ }
88438+
88439+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
88440+ current->flags |= PF_SUPERPRIV;
88441+ return true;
88442+ }
88443+ return false;
88444+}
88445+EXPORT_SYMBOL(ns_capable_nolog);
88446+
88447 /**
88448 * file_ns_capable - Determine if the file's opener had a capability in effect
88449 * @file: The file we want to check
88450@@ -427,6 +446,12 @@ bool capable(int cap)
88451 }
88452 EXPORT_SYMBOL(capable);
88453
88454+bool capable_nolog(int cap)
88455+{
88456+ return ns_capable_nolog(&init_user_ns, cap);
88457+}
88458+EXPORT_SYMBOL(capable_nolog);
88459+
88460 /**
88461 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
88462 * @inode: The inode in question
88463@@ -444,3 +469,12 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
88464 kgid_has_mapping(ns, inode->i_gid);
88465 }
88466 EXPORT_SYMBOL(capable_wrt_inode_uidgid);
88467+
88468+bool capable_wrt_inode_uidgid_nolog(const struct inode *inode, int cap)
88469+{
88470+ struct user_namespace *ns = current_user_ns();
88471+
88472+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
88473+ kgid_has_mapping(ns, inode->i_gid);
88474+}
88475+EXPORT_SYMBOL(capable_wrt_inode_uidgid_nolog);
88476diff --git a/kernel/cgroup.c b/kernel/cgroup.c
88477index 3a73f99..4f29fea 100644
88478--- a/kernel/cgroup.c
88479+++ b/kernel/cgroup.c
88480@@ -5341,6 +5341,14 @@ static void cgroup_release_agent(struct work_struct *work)
88481 release_list);
88482 list_del_init(&cgrp->release_list);
88483 raw_spin_unlock(&release_list_lock);
88484+
88485+ /*
88486+ * don't bother calling call_usermodehelper if we haven't
88487+ * configured a binary to execute
88488+ */
88489+ if (cgrp->root->release_agent_path[0] == '\0')
88490+ goto continue_free;
88491+
88492 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
88493 if (!pathbuf)
88494 goto continue_free;
88495@@ -5539,7 +5547,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
88496 struct task_struct *task;
88497 int count = 0;
88498
88499- seq_printf(seq, "css_set %p\n", cset);
88500+ seq_printf(seq, "css_set %pK\n", cset);
88501
88502 list_for_each_entry(task, &cset->tasks, cg_list) {
88503 if (count++ > MAX_TASKS_SHOWN_PER_CSS)
88504diff --git a/kernel/compat.c b/kernel/compat.c
88505index ebb3c36..1df606e 100644
88506--- a/kernel/compat.c
88507+++ b/kernel/compat.c
88508@@ -13,6 +13,7 @@
88509
88510 #include <linux/linkage.h>
88511 #include <linux/compat.h>
88512+#include <linux/module.h>
88513 #include <linux/errno.h>
88514 #include <linux/time.h>
88515 #include <linux/signal.h>
88516@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
88517 mm_segment_t oldfs;
88518 long ret;
88519
88520- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
88521+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
88522 oldfs = get_fs();
88523 set_fs(KERNEL_DS);
88524 ret = hrtimer_nanosleep_restart(restart);
88525@@ -252,7 +253,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
88526 oldfs = get_fs();
88527 set_fs(KERNEL_DS);
88528 ret = hrtimer_nanosleep(&tu,
88529- rmtp ? (struct timespec __user *)&rmt : NULL,
88530+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
88531 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
88532 set_fs(oldfs);
88533
88534@@ -379,7 +380,7 @@ COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
88535 mm_segment_t old_fs = get_fs();
88536
88537 set_fs(KERNEL_DS);
88538- ret = sys_sigpending((old_sigset_t __user *) &s);
88539+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
88540 set_fs(old_fs);
88541 if (ret == 0)
88542 ret = put_user(s, set);
88543@@ -469,7 +470,7 @@ COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
88544 mm_segment_t old_fs = get_fs();
88545
88546 set_fs(KERNEL_DS);
88547- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
88548+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
88549 set_fs(old_fs);
88550
88551 if (!ret) {
88552@@ -551,8 +552,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
88553 set_fs (KERNEL_DS);
88554 ret = sys_wait4(pid,
88555 (stat_addr ?
88556- (unsigned int __user *) &status : NULL),
88557- options, (struct rusage __user *) &r);
88558+ (unsigned int __force_user *) &status : NULL),
88559+ options, (struct rusage __force_user *) &r);
88560 set_fs (old_fs);
88561
88562 if (ret > 0) {
88563@@ -578,8 +579,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
88564 memset(&info, 0, sizeof(info));
88565
88566 set_fs(KERNEL_DS);
88567- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
88568- uru ? (struct rusage __user *)&ru : NULL);
88569+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
88570+ uru ? (struct rusage __force_user *)&ru : NULL);
88571 set_fs(old_fs);
88572
88573 if ((ret < 0) || (info.si_signo == 0))
88574@@ -713,8 +714,8 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
88575 oldfs = get_fs();
88576 set_fs(KERNEL_DS);
88577 err = sys_timer_settime(timer_id, flags,
88578- (struct itimerspec __user *) &newts,
88579- (struct itimerspec __user *) &oldts);
88580+ (struct itimerspec __force_user *) &newts,
88581+ (struct itimerspec __force_user *) &oldts);
88582 set_fs(oldfs);
88583 if (!err && old && put_compat_itimerspec(old, &oldts))
88584 return -EFAULT;
88585@@ -731,7 +732,7 @@ COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
88586 oldfs = get_fs();
88587 set_fs(KERNEL_DS);
88588 err = sys_timer_gettime(timer_id,
88589- (struct itimerspec __user *) &ts);
88590+ (struct itimerspec __force_user *) &ts);
88591 set_fs(oldfs);
88592 if (!err && put_compat_itimerspec(setting, &ts))
88593 return -EFAULT;
88594@@ -750,7 +751,7 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
88595 oldfs = get_fs();
88596 set_fs(KERNEL_DS);
88597 err = sys_clock_settime(which_clock,
88598- (struct timespec __user *) &ts);
88599+ (struct timespec __force_user *) &ts);
88600 set_fs(oldfs);
88601 return err;
88602 }
88603@@ -765,7 +766,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
88604 oldfs = get_fs();
88605 set_fs(KERNEL_DS);
88606 err = sys_clock_gettime(which_clock,
88607- (struct timespec __user *) &ts);
88608+ (struct timespec __force_user *) &ts);
88609 set_fs(oldfs);
88610 if (!err && compat_put_timespec(&ts, tp))
88611 return -EFAULT;
88612@@ -785,7 +786,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
88613
88614 oldfs = get_fs();
88615 set_fs(KERNEL_DS);
88616- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
88617+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
88618 set_fs(oldfs);
88619
88620 err = compat_put_timex(utp, &txc);
88621@@ -805,7 +806,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
88622 oldfs = get_fs();
88623 set_fs(KERNEL_DS);
88624 err = sys_clock_getres(which_clock,
88625- (struct timespec __user *) &ts);
88626+ (struct timespec __force_user *) &ts);
88627 set_fs(oldfs);
88628 if (!err && tp && compat_put_timespec(&ts, tp))
88629 return -EFAULT;
88630@@ -819,7 +820,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
88631 struct timespec tu;
88632 struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
88633
88634- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
88635+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
88636 oldfs = get_fs();
88637 set_fs(KERNEL_DS);
88638 err = clock_nanosleep_restart(restart);
88639@@ -851,8 +852,8 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
88640 oldfs = get_fs();
88641 set_fs(KERNEL_DS);
88642 err = sys_clock_nanosleep(which_clock, flags,
88643- (struct timespec __user *) &in,
88644- (struct timespec __user *) &out);
88645+ (struct timespec __force_user *) &in,
88646+ (struct timespec __force_user *) &out);
88647 set_fs(oldfs);
88648
88649 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
88650@@ -1146,7 +1147,7 @@ COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
88651 mm_segment_t old_fs = get_fs();
88652
88653 set_fs(KERNEL_DS);
88654- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
88655+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
88656 set_fs(old_fs);
88657 if (compat_put_timespec(&t, interval))
88658 return -EFAULT;
88659diff --git a/kernel/configs.c b/kernel/configs.c
88660index c18b1f1..b9a0132 100644
88661--- a/kernel/configs.c
88662+++ b/kernel/configs.c
88663@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
88664 struct proc_dir_entry *entry;
88665
88666 /* create the current config file */
88667+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
88668+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
88669+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
88670+ &ikconfig_file_ops);
88671+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
88672+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
88673+ &ikconfig_file_ops);
88674+#endif
88675+#else
88676 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
88677 &ikconfig_file_ops);
88678+#endif
88679+
88680 if (!entry)
88681 return -ENOMEM;
88682
88683diff --git a/kernel/cred.c b/kernel/cred.c
88684index e0573a4..26c0fd3 100644
88685--- a/kernel/cred.c
88686+++ b/kernel/cred.c
88687@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
88688 validate_creds(cred);
88689 alter_cred_subscribers(cred, -1);
88690 put_cred(cred);
88691+
88692+#ifdef CONFIG_GRKERNSEC_SETXID
88693+ cred = (struct cred *) tsk->delayed_cred;
88694+ if (cred != NULL) {
88695+ tsk->delayed_cred = NULL;
88696+ validate_creds(cred);
88697+ alter_cred_subscribers(cred, -1);
88698+ put_cred(cred);
88699+ }
88700+#endif
88701 }
88702
88703 /**
88704@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
88705 * Always returns 0 thus allowing this function to be tail-called at the end
88706 * of, say, sys_setgid().
88707 */
88708-int commit_creds(struct cred *new)
88709+static int __commit_creds(struct cred *new)
88710 {
88711 struct task_struct *task = current;
88712 const struct cred *old = task->real_cred;
88713@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
88714
88715 get_cred(new); /* we will require a ref for the subj creds too */
88716
88717+ gr_set_role_label(task, new->uid, new->gid);
88718+
88719 /* dumpability changes */
88720 if (!uid_eq(old->euid, new->euid) ||
88721 !gid_eq(old->egid, new->egid) ||
88722@@ -479,6 +491,105 @@ int commit_creds(struct cred *new)
88723 put_cred(old);
88724 return 0;
88725 }
88726+#ifdef CONFIG_GRKERNSEC_SETXID
88727+extern int set_user(struct cred *new);
88728+
88729+void gr_delayed_cred_worker(void)
88730+{
88731+ const struct cred *new = current->delayed_cred;
88732+ struct cred *ncred;
88733+
88734+ current->delayed_cred = NULL;
88735+
88736+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
88737+ // from doing get_cred on it when queueing this
88738+ put_cred(new);
88739+ return;
88740+ } else if (new == NULL)
88741+ return;
88742+
88743+ ncred = prepare_creds();
88744+ if (!ncred)
88745+ goto die;
88746+ // uids
88747+ ncred->uid = new->uid;
88748+ ncred->euid = new->euid;
88749+ ncred->suid = new->suid;
88750+ ncred->fsuid = new->fsuid;
88751+ // gids
88752+ ncred->gid = new->gid;
88753+ ncred->egid = new->egid;
88754+ ncred->sgid = new->sgid;
88755+ ncred->fsgid = new->fsgid;
88756+ // groups
88757+ set_groups(ncred, new->group_info);
88758+ // caps
88759+ ncred->securebits = new->securebits;
88760+ ncred->cap_inheritable = new->cap_inheritable;
88761+ ncred->cap_permitted = new->cap_permitted;
88762+ ncred->cap_effective = new->cap_effective;
88763+ ncred->cap_bset = new->cap_bset;
88764+
88765+ if (set_user(ncred)) {
88766+ abort_creds(ncred);
88767+ goto die;
88768+ }
88769+
88770+ // from doing get_cred on it when queueing this
88771+ put_cred(new);
88772+
88773+ __commit_creds(ncred);
88774+ return;
88775+die:
88776+ // from doing get_cred on it when queueing this
88777+ put_cred(new);
88778+ do_group_exit(SIGKILL);
88779+}
88780+#endif
88781+
88782+int commit_creds(struct cred *new)
88783+{
88784+#ifdef CONFIG_GRKERNSEC_SETXID
88785+ int ret;
88786+ int schedule_it = 0;
88787+ struct task_struct *t;
88788+ unsigned oldsecurebits = current_cred()->securebits;
88789+
88790+ /* we won't get called with tasklist_lock held for writing
88791+ and interrupts disabled as the cred struct in that case is
88792+ init_cred
88793+ */
88794+ if (grsec_enable_setxid && !current_is_single_threaded() &&
88795+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
88796+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
88797+ schedule_it = 1;
88798+ }
88799+ ret = __commit_creds(new);
88800+ if (schedule_it) {
88801+ rcu_read_lock();
88802+ read_lock(&tasklist_lock);
88803+ for (t = next_thread(current); t != current;
88804+ t = next_thread(t)) {
88805+ /* we'll check if the thread has uid 0 in
88806+ * the delayed worker routine
88807+ */
88808+ if (task_securebits(t) == oldsecurebits &&
88809+ t->delayed_cred == NULL) {
88810+ t->delayed_cred = get_cred(new);
88811+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
88812+ set_tsk_need_resched(t);
88813+ }
88814+ }
88815+ read_unlock(&tasklist_lock);
88816+ rcu_read_unlock();
88817+ }
88818+
88819+ return ret;
88820+#else
88821+ return __commit_creds(new);
88822+#endif
88823+}
88824+
88825 EXPORT_SYMBOL(commit_creds);
88826
88827 /**
88828diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
88829index 1adf62b..7736e06 100644
88830--- a/kernel/debug/debug_core.c
88831+++ b/kernel/debug/debug_core.c
88832@@ -124,7 +124,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
88833 */
88834 static atomic_t masters_in_kgdb;
88835 static atomic_t slaves_in_kgdb;
88836-static atomic_t kgdb_break_tasklet_var;
88837+static atomic_unchecked_t kgdb_break_tasklet_var;
88838 atomic_t kgdb_setting_breakpoint;
88839
88840 struct task_struct *kgdb_usethread;
88841@@ -134,7 +134,7 @@ int kgdb_single_step;
88842 static pid_t kgdb_sstep_pid;
88843
88844 /* to keep track of the CPU which is doing the single stepping*/
88845-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88846+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
88847
88848 /*
88849 * If you are debugging a problem where roundup (the collection of
88850@@ -549,7 +549,7 @@ return_normal:
88851 * kernel will only try for the value of sstep_tries before
88852 * giving up and continuing on.
88853 */
88854- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
88855+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
88856 (kgdb_info[cpu].task &&
88857 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
88858 atomic_set(&kgdb_active, -1);
88859@@ -647,8 +647,8 @@ cpu_master_loop:
88860 }
88861
88862 kgdb_restore:
88863- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
88864- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
88865+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
88866+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
88867 if (kgdb_info[sstep_cpu].task)
88868 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
88869 else
88870@@ -925,18 +925,18 @@ static void kgdb_unregister_callbacks(void)
88871 static void kgdb_tasklet_bpt(unsigned long ing)
88872 {
88873 kgdb_breakpoint();
88874- atomic_set(&kgdb_break_tasklet_var, 0);
88875+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
88876 }
88877
88878 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
88879
88880 void kgdb_schedule_breakpoint(void)
88881 {
88882- if (atomic_read(&kgdb_break_tasklet_var) ||
88883+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
88884 atomic_read(&kgdb_active) != -1 ||
88885 atomic_read(&kgdb_setting_breakpoint))
88886 return;
88887- atomic_inc(&kgdb_break_tasklet_var);
88888+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
88889 tasklet_schedule(&kgdb_tasklet_breakpoint);
88890 }
88891 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
88892diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
88893index 379650b..30c5180 100644
88894--- a/kernel/debug/kdb/kdb_main.c
88895+++ b/kernel/debug/kdb/kdb_main.c
88896@@ -1977,7 +1977,7 @@ static int kdb_lsmod(int argc, const char **argv)
88897 continue;
88898
88899 kdb_printf("%-20s%8u 0x%p ", mod->name,
88900- mod->core_size, (void *)mod);
88901+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
88902 #ifdef CONFIG_MODULE_UNLOAD
88903 kdb_printf("%4ld ", module_refcount(mod));
88904 #endif
88905@@ -1987,7 +1987,7 @@ static int kdb_lsmod(int argc, const char **argv)
88906 kdb_printf(" (Loading)");
88907 else
88908 kdb_printf(" (Live)");
88909- kdb_printf(" 0x%p", mod->module_core);
88910+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
88911
88912 #ifdef CONFIG_MODULE_UNLOAD
88913 {
88914diff --git a/kernel/events/core.c b/kernel/events/core.c
88915index 658f232..32e9595 100644
88916--- a/kernel/events/core.c
88917+++ b/kernel/events/core.c
88918@@ -161,8 +161,15 @@ static struct srcu_struct pmus_srcu;
88919 * 0 - disallow raw tracepoint access for unpriv
88920 * 1 - disallow cpu events for unpriv
88921 * 2 - disallow kernel profiling for unpriv
88922+ * 3 - disallow all unpriv perf event use
88923 */
88924-int sysctl_perf_event_paranoid __read_mostly = 1;
88925+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
88926+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
88927+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
88928+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
88929+#else
88930+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
88931+#endif
88932
88933 /* Minimum for 512 kiB + 1 user control page */
88934 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
88935@@ -188,7 +195,7 @@ void update_perf_cpu_limits(void)
88936
88937 tmp *= sysctl_perf_cpu_time_max_percent;
88938 do_div(tmp, 100);
88939- ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
88940+ ACCESS_ONCE_RW(perf_sample_allowed_ns) = tmp;
88941 }
88942
88943 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
88944@@ -294,7 +301,7 @@ void perf_sample_event_took(u64 sample_len_ns)
88945 }
88946 }
88947
88948-static atomic64_t perf_event_id;
88949+static atomic64_unchecked_t perf_event_id;
88950
88951 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
88952 enum event_type_t event_type);
88953@@ -3051,7 +3058,7 @@ static void __perf_event_read(void *info)
88954
88955 static inline u64 perf_event_count(struct perf_event *event)
88956 {
88957- return local64_read(&event->count) + atomic64_read(&event->child_count);
88958+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
88959 }
88960
88961 static u64 perf_event_read(struct perf_event *event)
88962@@ -3430,9 +3437,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
88963 mutex_lock(&event->child_mutex);
88964 total += perf_event_read(event);
88965 *enabled += event->total_time_enabled +
88966- atomic64_read(&event->child_total_time_enabled);
88967+ atomic64_read_unchecked(&event->child_total_time_enabled);
88968 *running += event->total_time_running +
88969- atomic64_read(&event->child_total_time_running);
88970+ atomic64_read_unchecked(&event->child_total_time_running);
88971
88972 list_for_each_entry(child, &event->child_list, child_list) {
88973 total += perf_event_read(child);
88974@@ -3881,10 +3888,10 @@ void perf_event_update_userpage(struct perf_event *event)
88975 userpg->offset -= local64_read(&event->hw.prev_count);
88976
88977 userpg->time_enabled = enabled +
88978- atomic64_read(&event->child_total_time_enabled);
88979+ atomic64_read_unchecked(&event->child_total_time_enabled);
88980
88981 userpg->time_running = running +
88982- atomic64_read(&event->child_total_time_running);
88983+ atomic64_read_unchecked(&event->child_total_time_running);
88984
88985 arch_perf_update_userpage(userpg, now);
88986
88987@@ -4448,7 +4455,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
88988
88989 /* Data. */
88990 sp = perf_user_stack_pointer(regs);
88991- rem = __output_copy_user(handle, (void *) sp, dump_size);
88992+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
88993 dyn_size = dump_size - rem;
88994
88995 perf_output_skip(handle, rem);
88996@@ -4539,11 +4546,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
88997 values[n++] = perf_event_count(event);
88998 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
88999 values[n++] = enabled +
89000- atomic64_read(&event->child_total_time_enabled);
89001+ atomic64_read_unchecked(&event->child_total_time_enabled);
89002 }
89003 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
89004 values[n++] = running +
89005- atomic64_read(&event->child_total_time_running);
89006+ atomic64_read_unchecked(&event->child_total_time_running);
89007 }
89008 if (read_format & PERF_FORMAT_ID)
89009 values[n++] = primary_event_id(event);
89010@@ -6858,7 +6865,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
89011 event->parent = parent_event;
89012
89013 event->ns = get_pid_ns(task_active_pid_ns(current));
89014- event->id = atomic64_inc_return(&perf_event_id);
89015+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
89016
89017 event->state = PERF_EVENT_STATE_INACTIVE;
89018
89019@@ -7137,6 +7144,11 @@ SYSCALL_DEFINE5(perf_event_open,
89020 if (flags & ~PERF_FLAG_ALL)
89021 return -EINVAL;
89022
89023+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
89024+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
89025+ return -EACCES;
89026+#endif
89027+
89028 err = perf_copy_attr(attr_uptr, &attr);
89029 if (err)
89030 return err;
89031@@ -7489,10 +7501,10 @@ static void sync_child_event(struct perf_event *child_event,
89032 /*
89033 * Add back the child's count to the parent's count:
89034 */
89035- atomic64_add(child_val, &parent_event->child_count);
89036- atomic64_add(child_event->total_time_enabled,
89037+ atomic64_add_unchecked(child_val, &parent_event->child_count);
89038+ atomic64_add_unchecked(child_event->total_time_enabled,
89039 &parent_event->child_total_time_enabled);
89040- atomic64_add(child_event->total_time_running,
89041+ atomic64_add_unchecked(child_event->total_time_running,
89042 &parent_event->child_total_time_running);
89043
89044 /*
89045diff --git a/kernel/events/internal.h b/kernel/events/internal.h
89046index 569b2187..19940d9 100644
89047--- a/kernel/events/internal.h
89048+++ b/kernel/events/internal.h
89049@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
89050 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
89051 }
89052
89053-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
89054+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
89055 static inline unsigned long \
89056 func_name(struct perf_output_handle *handle, \
89057- const void *buf, unsigned long len) \
89058+ const void user *buf, unsigned long len) \
89059 { \
89060 unsigned long size, written; \
89061 \
89062@@ -117,7 +117,7 @@ memcpy_common(void *dst, const void *src, unsigned long n)
89063 return 0;
89064 }
89065
89066-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
89067+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
89068
89069 static inline unsigned long
89070 memcpy_skip(void *dst, const void *src, unsigned long n)
89071@@ -125,7 +125,7 @@ memcpy_skip(void *dst, const void *src, unsigned long n)
89072 return 0;
89073 }
89074
89075-DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
89076+DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip, )
89077
89078 #ifndef arch_perf_out_copy_user
89079 #define arch_perf_out_copy_user arch_perf_out_copy_user
89080@@ -143,7 +143,7 @@ arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
89081 }
89082 #endif
89083
89084-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
89085+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
89086
89087 /* Callchain handling */
89088 extern struct perf_callchain_entry *
89089diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
89090index ed8f2cd..fe8030c 100644
89091--- a/kernel/events/uprobes.c
89092+++ b/kernel/events/uprobes.c
89093@@ -1670,7 +1670,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
89094 {
89095 struct page *page;
89096 uprobe_opcode_t opcode;
89097- int result;
89098+ long result;
89099
89100 pagefault_disable();
89101 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
89102diff --git a/kernel/exit.c b/kernel/exit.c
89103index 32c58f7..9eb6907 100644
89104--- a/kernel/exit.c
89105+++ b/kernel/exit.c
89106@@ -173,6 +173,10 @@ void release_task(struct task_struct *p)
89107 struct task_struct *leader;
89108 int zap_leader;
89109 repeat:
89110+#ifdef CONFIG_NET
89111+ gr_del_task_from_ip_table(p);
89112+#endif
89113+
89114 /* don't need to get the RCU readlock here - the process is dead and
89115 * can't be modifying its own credentials. But shut RCU-lockdep up */
89116 rcu_read_lock();
89117@@ -668,6 +672,8 @@ void do_exit(long code)
89118 struct task_struct *tsk = current;
89119 int group_dead;
89120
89121+ set_fs(USER_DS);
89122+
89123 profile_task_exit(tsk);
89124
89125 WARN_ON(blk_needs_flush_plug(tsk));
89126@@ -684,7 +690,6 @@ void do_exit(long code)
89127 * mm_release()->clear_child_tid() from writing to a user-controlled
89128 * kernel address.
89129 */
89130- set_fs(USER_DS);
89131
89132 ptrace_event(PTRACE_EVENT_EXIT, code);
89133
89134@@ -742,6 +747,9 @@ void do_exit(long code)
89135 tsk->exit_code = code;
89136 taskstats_exit(tsk, group_dead);
89137
89138+ gr_acl_handle_psacct(tsk, code);
89139+ gr_acl_handle_exit();
89140+
89141 exit_mm(tsk);
89142
89143 if (group_dead)
89144@@ -859,7 +867,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
89145 * Take down every thread in the group. This is called by fatal signals
89146 * as well as by sys_exit_group (below).
89147 */
89148-void
89149+__noreturn void
89150 do_group_exit(int exit_code)
89151 {
89152 struct signal_struct *sig = current->signal;
89153diff --git a/kernel/fork.c b/kernel/fork.c
89154index a91e47d..71c9064 100644
89155--- a/kernel/fork.c
89156+++ b/kernel/fork.c
89157@@ -183,6 +183,48 @@ void thread_info_cache_init(void)
89158 # endif
89159 #endif
89160
89161+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89162+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89163+ int node, void **lowmem_stack)
89164+{
89165+ struct page *pages[THREAD_SIZE / PAGE_SIZE];
89166+ void *ret = NULL;
89167+ unsigned int i;
89168+
89169+ *lowmem_stack = alloc_thread_info_node(tsk, node);
89170+ if (*lowmem_stack == NULL)
89171+ goto out;
89172+
89173+ for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
89174+ pages[i] = virt_to_page(*lowmem_stack + (i * PAGE_SIZE));
89175+
89176+ /* use VM_IOREMAP to gain THREAD_SIZE alignment */
89177+ ret = vmap(pages, THREAD_SIZE / PAGE_SIZE, VM_IOREMAP, PAGE_KERNEL);
89178+ if (ret == NULL) {
89179+ free_thread_info(*lowmem_stack);
89180+ *lowmem_stack = NULL;
89181+ }
89182+
89183+out:
89184+ return ret;
89185+}
89186+
89187+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89188+{
89189+ unmap_process_stacks(tsk);
89190+}
89191+#else
89192+static inline struct thread_info *gr_alloc_thread_info_node(struct task_struct *tsk,
89193+ int node, void **lowmem_stack)
89194+{
89195+ return alloc_thread_info_node(tsk, node);
89196+}
89197+static inline void gr_free_thread_info(struct task_struct *tsk, struct thread_info *ti)
89198+{
89199+ free_thread_info(ti);
89200+}
89201+#endif
89202+
89203 /* SLAB cache for signal_struct structures (tsk->signal) */
89204 static struct kmem_cache *signal_cachep;
89205
89206@@ -201,18 +243,22 @@ struct kmem_cache *vm_area_cachep;
89207 /* SLAB cache for mm_struct structures (tsk->mm) */
89208 static struct kmem_cache *mm_cachep;
89209
89210-static void account_kernel_stack(struct thread_info *ti, int account)
89211+static void account_kernel_stack(struct task_struct *tsk, struct thread_info *ti, int account)
89212 {
89213+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89214+ struct zone *zone = page_zone(virt_to_page(tsk->lowmem_stack));
89215+#else
89216 struct zone *zone = page_zone(virt_to_page(ti));
89217+#endif
89218
89219 mod_zone_page_state(zone, NR_KERNEL_STACK, account);
89220 }
89221
89222 void free_task(struct task_struct *tsk)
89223 {
89224- account_kernel_stack(tsk->stack, -1);
89225+ account_kernel_stack(tsk, tsk->stack, -1);
89226 arch_release_thread_info(tsk->stack);
89227- free_thread_info(tsk->stack);
89228+ gr_free_thread_info(tsk, tsk->stack);
89229 rt_mutex_debug_task_free(tsk);
89230 ftrace_graph_exit_task(tsk);
89231 put_seccomp_filter(tsk);
89232@@ -299,6 +345,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89233 struct task_struct *tsk;
89234 struct thread_info *ti;
89235 unsigned long *stackend;
89236+ void *lowmem_stack;
89237 int node = tsk_fork_get_node(orig);
89238 int err;
89239
89240@@ -306,7 +353,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89241 if (!tsk)
89242 return NULL;
89243
89244- ti = alloc_thread_info_node(tsk, node);
89245+ ti = gr_alloc_thread_info_node(tsk, node, &lowmem_stack);
89246 if (!ti)
89247 goto free_tsk;
89248
89249@@ -315,6 +362,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89250 goto free_ti;
89251
89252 tsk->stack = ti;
89253+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
89254+ tsk->lowmem_stack = lowmem_stack;
89255+#endif
89256 #ifdef CONFIG_SECCOMP
89257 /*
89258 * We must handle setting up seccomp filters once we're under
89259@@ -332,7 +382,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89260 *stackend = STACK_END_MAGIC; /* for overflow detection */
89261
89262 #ifdef CONFIG_CC_STACKPROTECTOR
89263- tsk->stack_canary = get_random_int();
89264+ tsk->stack_canary = pax_get_random_long();
89265 #endif
89266
89267 /*
89268@@ -346,24 +396,92 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
89269 tsk->splice_pipe = NULL;
89270 tsk->task_frag.page = NULL;
89271
89272- account_kernel_stack(ti, 1);
89273+ account_kernel_stack(tsk, ti, 1);
89274
89275 return tsk;
89276
89277 free_ti:
89278- free_thread_info(ti);
89279+ gr_free_thread_info(tsk, ti);
89280 free_tsk:
89281 free_task_struct(tsk);
89282 return NULL;
89283 }
89284
89285 #ifdef CONFIG_MMU
89286-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89287+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
89288+{
89289+ struct vm_area_struct *tmp;
89290+ unsigned long charge;
89291+ struct file *file;
89292+ int retval;
89293+
89294+ charge = 0;
89295+ if (mpnt->vm_flags & VM_ACCOUNT) {
89296+ unsigned long len = vma_pages(mpnt);
89297+
89298+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89299+ goto fail_nomem;
89300+ charge = len;
89301+ }
89302+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89303+ if (!tmp)
89304+ goto fail_nomem;
89305+ *tmp = *mpnt;
89306+ tmp->vm_mm = mm;
89307+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
89308+ retval = vma_dup_policy(mpnt, tmp);
89309+ if (retval)
89310+ goto fail_nomem_policy;
89311+ if (anon_vma_fork(tmp, mpnt))
89312+ goto fail_nomem_anon_vma_fork;
89313+ tmp->vm_flags &= ~VM_LOCKED;
89314+ tmp->vm_next = tmp->vm_prev = NULL;
89315+ tmp->vm_mirror = NULL;
89316+ file = tmp->vm_file;
89317+ if (file) {
89318+ struct inode *inode = file_inode(file);
89319+ struct address_space *mapping = file->f_mapping;
89320+
89321+ get_file(file);
89322+ if (tmp->vm_flags & VM_DENYWRITE)
89323+ atomic_dec(&inode->i_writecount);
89324+ mutex_lock(&mapping->i_mmap_mutex);
89325+ if (tmp->vm_flags & VM_SHARED)
89326+ atomic_inc(&mapping->i_mmap_writable);
89327+ flush_dcache_mmap_lock(mapping);
89328+ /* insert tmp into the share list, just after mpnt */
89329+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89330+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
89331+ else
89332+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
89333+ flush_dcache_mmap_unlock(mapping);
89334+ mutex_unlock(&mapping->i_mmap_mutex);
89335+ }
89336+
89337+ /*
89338+ * Clear hugetlb-related page reserves for children. This only
89339+ * affects MAP_PRIVATE mappings. Faults generated by the child
89340+ * are not guaranteed to succeed, even if read-only
89341+ */
89342+ if (is_vm_hugetlb_page(tmp))
89343+ reset_vma_resv_huge_pages(tmp);
89344+
89345+ return tmp;
89346+
89347+fail_nomem_anon_vma_fork:
89348+ mpol_put(vma_policy(tmp));
89349+fail_nomem_policy:
89350+ kmem_cache_free(vm_area_cachep, tmp);
89351+fail_nomem:
89352+ vm_unacct_memory(charge);
89353+ return NULL;
89354+}
89355+
89356+static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89357 {
89358 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
89359 struct rb_node **rb_link, *rb_parent;
89360 int retval;
89361- unsigned long charge;
89362
89363 uprobe_start_dup_mmap();
89364 down_write(&oldmm->mmap_sem);
89365@@ -391,55 +509,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89366
89367 prev = NULL;
89368 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
89369- struct file *file;
89370-
89371 if (mpnt->vm_flags & VM_DONTCOPY) {
89372 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
89373 -vma_pages(mpnt));
89374 continue;
89375 }
89376- charge = 0;
89377- if (mpnt->vm_flags & VM_ACCOUNT) {
89378- unsigned long len = vma_pages(mpnt);
89379-
89380- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
89381- goto fail_nomem;
89382- charge = len;
89383- }
89384- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
89385- if (!tmp)
89386- goto fail_nomem;
89387- *tmp = *mpnt;
89388- INIT_LIST_HEAD(&tmp->anon_vma_chain);
89389- retval = vma_dup_policy(mpnt, tmp);
89390- if (retval)
89391- goto fail_nomem_policy;
89392- tmp->vm_mm = mm;
89393- if (anon_vma_fork(tmp, mpnt))
89394- goto fail_nomem_anon_vma_fork;
89395- tmp->vm_flags &= ~VM_LOCKED;
89396- tmp->vm_next = tmp->vm_prev = NULL;
89397- file = tmp->vm_file;
89398- if (file) {
89399- struct inode *inode = file_inode(file);
89400- struct address_space *mapping = file->f_mapping;
89401-
89402- get_file(file);
89403- if (tmp->vm_flags & VM_DENYWRITE)
89404- atomic_dec(&inode->i_writecount);
89405- mutex_lock(&mapping->i_mmap_mutex);
89406- if (tmp->vm_flags & VM_SHARED)
89407- atomic_inc(&mapping->i_mmap_writable);
89408- flush_dcache_mmap_lock(mapping);
89409- /* insert tmp into the share list, just after mpnt */
89410- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
89411- vma_nonlinear_insert(tmp,
89412- &mapping->i_mmap_nonlinear);
89413- else
89414- vma_interval_tree_insert_after(tmp, mpnt,
89415- &mapping->i_mmap);
89416- flush_dcache_mmap_unlock(mapping);
89417- mutex_unlock(&mapping->i_mmap_mutex);
89418+ tmp = dup_vma(mm, oldmm, mpnt);
89419+ if (!tmp) {
89420+ retval = -ENOMEM;
89421+ goto out;
89422 }
89423
89424 /*
89425@@ -471,6 +549,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
89426 if (retval)
89427 goto out;
89428 }
89429+
89430+#ifdef CONFIG_PAX_SEGMEXEC
89431+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
89432+ struct vm_area_struct *mpnt_m;
89433+
89434+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
89435+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
89436+
89437+ if (!mpnt->vm_mirror)
89438+ continue;
89439+
89440+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
89441+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
89442+ mpnt->vm_mirror = mpnt_m;
89443+ } else {
89444+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
89445+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
89446+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
89447+ mpnt->vm_mirror->vm_mirror = mpnt;
89448+ }
89449+ }
89450+ BUG_ON(mpnt_m);
89451+ }
89452+#endif
89453+
89454 /* a new mm has just been created */
89455 arch_dup_mmap(oldmm, mm);
89456 retval = 0;
89457@@ -480,14 +583,6 @@ out:
89458 up_write(&oldmm->mmap_sem);
89459 uprobe_end_dup_mmap();
89460 return retval;
89461-fail_nomem_anon_vma_fork:
89462- mpol_put(vma_policy(tmp));
89463-fail_nomem_policy:
89464- kmem_cache_free(vm_area_cachep, tmp);
89465-fail_nomem:
89466- retval = -ENOMEM;
89467- vm_unacct_memory(charge);
89468- goto out;
89469 }
89470
89471 static inline int mm_alloc_pgd(struct mm_struct *mm)
89472@@ -729,8 +824,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
89473 return ERR_PTR(err);
89474
89475 mm = get_task_mm(task);
89476- if (mm && mm != current->mm &&
89477- !ptrace_may_access(task, mode)) {
89478+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
89479+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
89480 mmput(mm);
89481 mm = ERR_PTR(-EACCES);
89482 }
89483@@ -933,13 +1028,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
89484 spin_unlock(&fs->lock);
89485 return -EAGAIN;
89486 }
89487- fs->users++;
89488+ atomic_inc(&fs->users);
89489 spin_unlock(&fs->lock);
89490 return 0;
89491 }
89492 tsk->fs = copy_fs_struct(fs);
89493 if (!tsk->fs)
89494 return -ENOMEM;
89495+ /* Carry through gr_chroot_dentry and is_chrooted instead
89496+ of recomputing it here. Already copied when the task struct
89497+ is duplicated. This allows pivot_root to not be treated as
89498+ a chroot
89499+ */
89500+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
89501+
89502 return 0;
89503 }
89504
89505@@ -1173,7 +1275,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
89506 * parts of the process environment (as per the clone
89507 * flags). The actual kick-off is left to the caller.
89508 */
89509-static struct task_struct *copy_process(unsigned long clone_flags,
89510+static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
89511 unsigned long stack_start,
89512 unsigned long stack_size,
89513 int __user *child_tidptr,
89514@@ -1244,6 +1346,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89515 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
89516 #endif
89517 retval = -EAGAIN;
89518+
89519+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
89520+
89521 if (atomic_read(&p->real_cred->user->processes) >=
89522 task_rlimit(p, RLIMIT_NPROC)) {
89523 if (p->real_cred->user != INIT_USER &&
89524@@ -1493,6 +1598,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
89525 goto bad_fork_free_pid;
89526 }
89527
89528+ /* synchronizes with gr_set_acls()
89529+ we need to call this past the point of no return for fork()
89530+ */
89531+ gr_copy_label(p);
89532+
89533 if (likely(p->pid)) {
89534 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
89535
89536@@ -1583,6 +1693,8 @@ bad_fork_cleanup_count:
89537 bad_fork_free:
89538 free_task(p);
89539 fork_out:
89540+ gr_log_forkfail(retval);
89541+
89542 return ERR_PTR(retval);
89543 }
89544
89545@@ -1644,6 +1756,7 @@ long do_fork(unsigned long clone_flags,
89546
89547 p = copy_process(clone_flags, stack_start, stack_size,
89548 child_tidptr, NULL, trace);
89549+ add_latent_entropy();
89550 /*
89551 * Do this prior waking up the new thread - the thread pointer
89552 * might get invalid after that point, if the thread exits quickly.
89553@@ -1660,6 +1773,8 @@ long do_fork(unsigned long clone_flags,
89554 if (clone_flags & CLONE_PARENT_SETTID)
89555 put_user(nr, parent_tidptr);
89556
89557+ gr_handle_brute_check();
89558+
89559 if (clone_flags & CLONE_VFORK) {
89560 p->vfork_done = &vfork;
89561 init_completion(&vfork);
89562@@ -1778,7 +1893,7 @@ void __init proc_caches_init(void)
89563 mm_cachep = kmem_cache_create("mm_struct",
89564 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
89565 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
89566- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
89567+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
89568 mmap_init();
89569 nsproxy_cache_init();
89570 }
89571@@ -1818,7 +1933,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
89572 return 0;
89573
89574 /* don't need lock here; in the worst case we'll do useless copy */
89575- if (fs->users == 1)
89576+ if (atomic_read(&fs->users) == 1)
89577 return 0;
89578
89579 *new_fsp = copy_fs_struct(fs);
89580@@ -1930,7 +2045,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
89581 fs = current->fs;
89582 spin_lock(&fs->lock);
89583 current->fs = new_fs;
89584- if (--fs->users)
89585+ gr_set_chroot_entries(current, &current->fs->root);
89586+ if (atomic_dec_return(&fs->users))
89587 new_fs = NULL;
89588 else
89589 new_fs = fs;
89590diff --git a/kernel/futex.c b/kernel/futex.c
89591index 22b3f1b..6820bc0 100644
89592--- a/kernel/futex.c
89593+++ b/kernel/futex.c
89594@@ -202,7 +202,7 @@ struct futex_pi_state {
89595 atomic_t refcount;
89596
89597 union futex_key key;
89598-};
89599+} __randomize_layout;
89600
89601 /**
89602 * struct futex_q - The hashed futex queue entry, one per waiting task
89603@@ -236,7 +236,7 @@ struct futex_q {
89604 struct rt_mutex_waiter *rt_waiter;
89605 union futex_key *requeue_pi_key;
89606 u32 bitset;
89607-};
89608+} __randomize_layout;
89609
89610 static const struct futex_q futex_q_init = {
89611 /* list gets initialized in queue_me()*/
89612@@ -396,6 +396,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
89613 struct page *page, *page_head;
89614 int err, ro = 0;
89615
89616+#ifdef CONFIG_PAX_SEGMEXEC
89617+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
89618+ return -EFAULT;
89619+#endif
89620+
89621 /*
89622 * The futex address must be "naturally" aligned.
89623 */
89624@@ -595,7 +600,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
89625
89626 static int get_futex_value_locked(u32 *dest, u32 __user *from)
89627 {
89628- int ret;
89629+ unsigned long ret;
89630
89631 pagefault_disable();
89632 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
89633@@ -3000,6 +3005,7 @@ static void __init futex_detect_cmpxchg(void)
89634 {
89635 #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
89636 u32 curval;
89637+ mm_segment_t oldfs;
89638
89639 /*
89640 * This will fail and we want it. Some arch implementations do
89641@@ -3011,8 +3017,11 @@ static void __init futex_detect_cmpxchg(void)
89642 * implementation, the non-functional ones will return
89643 * -ENOSYS.
89644 */
89645+ oldfs = get_fs();
89646+ set_fs(USER_DS);
89647 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
89648 futex_cmpxchg_enabled = 1;
89649+ set_fs(oldfs);
89650 #endif
89651 }
89652
89653diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
89654index 55c8c93..9ba7ad6 100644
89655--- a/kernel/futex_compat.c
89656+++ b/kernel/futex_compat.c
89657@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
89658 return 0;
89659 }
89660
89661-static void __user *futex_uaddr(struct robust_list __user *entry,
89662+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
89663 compat_long_t futex_offset)
89664 {
89665 compat_uptr_t base = ptr_to_compat(entry);
89666diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
89667index b358a80..fc25240 100644
89668--- a/kernel/gcov/base.c
89669+++ b/kernel/gcov/base.c
89670@@ -114,11 +114,6 @@ void gcov_enable_events(void)
89671 }
89672
89673 #ifdef CONFIG_MODULES
89674-static inline int within(void *addr, void *start, unsigned long size)
89675-{
89676- return ((addr >= start) && (addr < start + size));
89677-}
89678-
89679 /* Update list and generate events when modules are unloaded. */
89680 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89681 void *data)
89682@@ -133,7 +128,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
89683
89684 /* Remove entries located in module from linked list. */
89685 while ((info = gcov_info_next(info))) {
89686- if (within(info, mod->module_core, mod->core_size)) {
89687+ if (within_module_core_rw((unsigned long)info, mod)) {
89688 gcov_info_unlink(prev, info);
89689 if (gcov_events_enabled)
89690 gcov_event(GCOV_REMOVE, info);
89691diff --git a/kernel/jump_label.c b/kernel/jump_label.c
89692index 9019f15..9a3c42e 100644
89693--- a/kernel/jump_label.c
89694+++ b/kernel/jump_label.c
89695@@ -14,6 +14,7 @@
89696 #include <linux/err.h>
89697 #include <linux/static_key.h>
89698 #include <linux/jump_label_ratelimit.h>
89699+#include <linux/mm.h>
89700
89701 #ifdef HAVE_JUMP_LABEL
89702
89703@@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
89704
89705 size = (((unsigned long)stop - (unsigned long)start)
89706 / sizeof(struct jump_entry));
89707+ pax_open_kernel();
89708 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
89709+ pax_close_kernel();
89710 }
89711
89712 static void jump_label_update(struct static_key *key, int enable);
89713@@ -363,10 +366,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
89714 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
89715 struct jump_entry *iter;
89716
89717+ pax_open_kernel();
89718 for (iter = iter_start; iter < iter_stop; iter++) {
89719 if (within_module_init(iter->code, mod))
89720 iter->code = 0;
89721 }
89722+ pax_close_kernel();
89723 }
89724
89725 static int
89726diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
89727index ae51670..c1a9796 100644
89728--- a/kernel/kallsyms.c
89729+++ b/kernel/kallsyms.c
89730@@ -11,6 +11,9 @@
89731 * Changed the compression method from stem compression to "table lookup"
89732 * compression (see scripts/kallsyms.c for a more complete description)
89733 */
89734+#ifdef CONFIG_GRKERNSEC_HIDESYM
89735+#define __INCLUDED_BY_HIDESYM 1
89736+#endif
89737 #include <linux/kallsyms.h>
89738 #include <linux/module.h>
89739 #include <linux/init.h>
89740@@ -54,12 +57,33 @@ extern const unsigned long kallsyms_markers[] __weak;
89741
89742 static inline int is_kernel_inittext(unsigned long addr)
89743 {
89744+ if (system_state != SYSTEM_BOOTING)
89745+ return 0;
89746+
89747 if (addr >= (unsigned long)_sinittext
89748 && addr <= (unsigned long)_einittext)
89749 return 1;
89750 return 0;
89751 }
89752
89753+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89754+#ifdef CONFIG_MODULES
89755+static inline int is_module_text(unsigned long addr)
89756+{
89757+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
89758+ return 1;
89759+
89760+ addr = ktla_ktva(addr);
89761+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
89762+}
89763+#else
89764+static inline int is_module_text(unsigned long addr)
89765+{
89766+ return 0;
89767+}
89768+#endif
89769+#endif
89770+
89771 static inline int is_kernel_text(unsigned long addr)
89772 {
89773 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
89774@@ -70,13 +94,28 @@ static inline int is_kernel_text(unsigned long addr)
89775
89776 static inline int is_kernel(unsigned long addr)
89777 {
89778+
89779+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89780+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
89781+ return 1;
89782+
89783+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
89784+#else
89785 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
89786+#endif
89787+
89788 return 1;
89789 return in_gate_area_no_mm(addr);
89790 }
89791
89792 static int is_ksym_addr(unsigned long addr)
89793 {
89794+
89795+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
89796+ if (is_module_text(addr))
89797+ return 0;
89798+#endif
89799+
89800 if (all_var)
89801 return is_kernel(addr);
89802
89803@@ -481,7 +520,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
89804
89805 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
89806 {
89807- iter->name[0] = '\0';
89808 iter->nameoff = get_symbol_offset(new_pos);
89809 iter->pos = new_pos;
89810 }
89811@@ -529,6 +567,11 @@ static int s_show(struct seq_file *m, void *p)
89812 {
89813 struct kallsym_iter *iter = m->private;
89814
89815+#ifdef CONFIG_GRKERNSEC_HIDESYM
89816+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
89817+ return 0;
89818+#endif
89819+
89820 /* Some debugging symbols have no name. Ignore them. */
89821 if (!iter->name[0])
89822 return 0;
89823@@ -542,6 +585,7 @@ static int s_show(struct seq_file *m, void *p)
89824 */
89825 type = iter->exported ? toupper(iter->type) :
89826 tolower(iter->type);
89827+
89828 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
89829 type, iter->name, iter->module_name);
89830 } else
89831@@ -567,7 +611,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
89832 struct kallsym_iter *iter;
89833 int ret;
89834
89835- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
89836+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
89837 if (!iter)
89838 return -ENOMEM;
89839 reset_iter(iter, 0);
89840diff --git a/kernel/kcmp.c b/kernel/kcmp.c
89841index 0aa69ea..a7fcafb 100644
89842--- a/kernel/kcmp.c
89843+++ b/kernel/kcmp.c
89844@@ -100,6 +100,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
89845 struct task_struct *task1, *task2;
89846 int ret;
89847
89848+#ifdef CONFIG_GRKERNSEC
89849+ return -ENOSYS;
89850+#endif
89851+
89852 rcu_read_lock();
89853
89854 /*
89855diff --git a/kernel/kexec.c b/kernel/kexec.c
89856index 2bee072..8979af8 100644
89857--- a/kernel/kexec.c
89858+++ b/kernel/kexec.c
89859@@ -1349,7 +1349,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
89860 compat_ulong_t, flags)
89861 {
89862 struct compat_kexec_segment in;
89863- struct kexec_segment out, __user *ksegments;
89864+ struct kexec_segment out;
89865+ struct kexec_segment __user *ksegments;
89866 unsigned long i, result;
89867
89868 /* Don't allow clients that don't understand the native
89869diff --git a/kernel/kmod.c b/kernel/kmod.c
89870index 8637e04..8b1d0d8 100644
89871--- a/kernel/kmod.c
89872+++ b/kernel/kmod.c
89873@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
89874 kfree(info->argv);
89875 }
89876
89877-static int call_modprobe(char *module_name, int wait)
89878+static int call_modprobe(char *module_name, char *module_param, int wait)
89879 {
89880 struct subprocess_info *info;
89881 static char *envp[] = {
89882@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
89883 NULL
89884 };
89885
89886- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
89887+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
89888 if (!argv)
89889 goto out;
89890
89891@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
89892 argv[1] = "-q";
89893 argv[2] = "--";
89894 argv[3] = module_name; /* check free_modprobe_argv() */
89895- argv[4] = NULL;
89896+ argv[4] = module_param;
89897+ argv[5] = NULL;
89898
89899 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
89900 NULL, free_modprobe_argv, NULL);
89901@@ -129,9 +130,8 @@ out:
89902 * If module auto-loading support is disabled then this function
89903 * becomes a no-operation.
89904 */
89905-int __request_module(bool wait, const char *fmt, ...)
89906+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
89907 {
89908- va_list args;
89909 char module_name[MODULE_NAME_LEN];
89910 unsigned int max_modprobes;
89911 int ret;
89912@@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
89913 if (!modprobe_path[0])
89914 return 0;
89915
89916- va_start(args, fmt);
89917- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89918- va_end(args);
89919+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
89920 if (ret >= MODULE_NAME_LEN)
89921 return -ENAMETOOLONG;
89922
89923@@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
89924 if (ret)
89925 return ret;
89926
89927+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89928+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89929+ /* hack to workaround consolekit/udisks stupidity */
89930+ read_lock(&tasklist_lock);
89931+ if (!strcmp(current->comm, "mount") &&
89932+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
89933+ read_unlock(&tasklist_lock);
89934+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
89935+ return -EPERM;
89936+ }
89937+ read_unlock(&tasklist_lock);
89938+ }
89939+#endif
89940+
89941 /* If modprobe needs a service that is in a module, we get a recursive
89942 * loop. Limit the number of running kmod threads to max_threads/2 or
89943 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
89944@@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
89945
89946 trace_module_request(module_name, wait, _RET_IP_);
89947
89948- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89949+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
89950
89951 atomic_dec(&kmod_concurrent);
89952 return ret;
89953 }
89954+
89955+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
89956+{
89957+ va_list args;
89958+ int ret;
89959+
89960+ va_start(args, fmt);
89961+ ret = ____request_module(wait, module_param, fmt, args);
89962+ va_end(args);
89963+
89964+ return ret;
89965+}
89966+
89967+int __request_module(bool wait, const char *fmt, ...)
89968+{
89969+ va_list args;
89970+ int ret;
89971+
89972+#ifdef CONFIG_GRKERNSEC_MODHARDEN
89973+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
89974+ char module_param[MODULE_NAME_LEN];
89975+
89976+ memset(module_param, 0, sizeof(module_param));
89977+
89978+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
89979+
89980+ va_start(args, fmt);
89981+ ret = ____request_module(wait, module_param, fmt, args);
89982+ va_end(args);
89983+
89984+ return ret;
89985+ }
89986+#endif
89987+
89988+ va_start(args, fmt);
89989+ ret = ____request_module(wait, NULL, fmt, args);
89990+ va_end(args);
89991+
89992+ return ret;
89993+}
89994+
89995 EXPORT_SYMBOL(__request_module);
89996 #endif /* CONFIG_MODULES */
89997
89998@@ -218,6 +271,20 @@ static int ____call_usermodehelper(void *data)
89999 */
90000 set_user_nice(current, 0);
90001
90002+#ifdef CONFIG_GRKERNSEC
90003+ /* this is race-free as far as userland is concerned as we copied
90004+ out the path to be used prior to this point and are now operating
90005+ on that copy
90006+ */
90007+ if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
90008+ strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7) &&
90009+ strcmp(sub_info->path, "/usr/share/apport/apport")) || strstr(sub_info->path, "..")) {
90010+ printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
90011+ retval = -EPERM;
90012+ goto fail;
90013+ }
90014+#endif
90015+
90016 retval = -ENOMEM;
90017 new = prepare_kernel_cred(current);
90018 if (!new)
90019@@ -240,8 +307,8 @@ static int ____call_usermodehelper(void *data)
90020 commit_creds(new);
90021
90022 retval = do_execve(getname_kernel(sub_info->path),
90023- (const char __user *const __user *)sub_info->argv,
90024- (const char __user *const __user *)sub_info->envp);
90025+ (const char __user *const __force_user *)sub_info->argv,
90026+ (const char __user *const __force_user *)sub_info->envp);
90027 if (!retval)
90028 return 0;
90029
90030@@ -260,6 +327,10 @@ static int call_helper(void *data)
90031
90032 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
90033 {
90034+#ifdef CONFIG_GRKERNSEC
90035+ kfree(info->path);
90036+ info->path = info->origpath;
90037+#endif
90038 if (info->cleanup)
90039 (*info->cleanup)(info);
90040 kfree(info);
90041@@ -300,7 +371,7 @@ static int wait_for_helper(void *data)
90042 *
90043 * Thus the __user pointer cast is valid here.
90044 */
90045- sys_wait4(pid, (int __user *)&ret, 0, NULL);
90046+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
90047
90048 /*
90049 * If ret is 0, either ____call_usermodehelper failed and the
90050@@ -539,7 +610,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
90051 goto out;
90052
90053 INIT_WORK(&sub_info->work, __call_usermodehelper);
90054+#ifdef CONFIG_GRKERNSEC
90055+ sub_info->origpath = path;
90056+ sub_info->path = kstrdup(path, gfp_mask);
90057+#else
90058 sub_info->path = path;
90059+#endif
90060 sub_info->argv = argv;
90061 sub_info->envp = envp;
90062
90063@@ -647,7 +723,7 @@ EXPORT_SYMBOL(call_usermodehelper);
90064 static int proc_cap_handler(struct ctl_table *table, int write,
90065 void __user *buffer, size_t *lenp, loff_t *ppos)
90066 {
90067- struct ctl_table t;
90068+ ctl_table_no_const t;
90069 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
90070 kernel_cap_t new_cap;
90071 int err, i;
90072diff --git a/kernel/kprobes.c b/kernel/kprobes.c
90073index 3995f54..e247879 100644
90074--- a/kernel/kprobes.c
90075+++ b/kernel/kprobes.c
90076@@ -31,6 +31,9 @@
90077 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
90078 * <prasanna@in.ibm.com> added function-return probes.
90079 */
90080+#ifdef CONFIG_GRKERNSEC_HIDESYM
90081+#define __INCLUDED_BY_HIDESYM 1
90082+#endif
90083 #include <linux/kprobes.h>
90084 #include <linux/hash.h>
90085 #include <linux/init.h>
90086@@ -122,12 +125,12 @@ enum kprobe_slot_state {
90087
90088 static void *alloc_insn_page(void)
90089 {
90090- return module_alloc(PAGE_SIZE);
90091+ return module_alloc_exec(PAGE_SIZE);
90092 }
90093
90094 static void free_insn_page(void *page)
90095 {
90096- module_free(NULL, page);
90097+ module_free_exec(NULL, page);
90098 }
90099
90100 struct kprobe_insn_cache kprobe_insn_slots = {
90101@@ -2187,11 +2190,11 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
90102 kprobe_type = "k";
90103
90104 if (sym)
90105- seq_printf(pi, "%p %s %s+0x%x %s ",
90106+ seq_printf(pi, "%pK %s %s+0x%x %s ",
90107 p->addr, kprobe_type, sym, offset,
90108 (modname ? modname : " "));
90109 else
90110- seq_printf(pi, "%p %s %p ",
90111+ seq_printf(pi, "%pK %s %pK ",
90112 p->addr, kprobe_type, p->addr);
90113
90114 if (!pp)
90115diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
90116index 6683cce..daf8999 100644
90117--- a/kernel/ksysfs.c
90118+++ b/kernel/ksysfs.c
90119@@ -50,6 +50,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
90120 {
90121 if (count+1 > UEVENT_HELPER_PATH_LEN)
90122 return -ENOENT;
90123+ if (!capable(CAP_SYS_ADMIN))
90124+ return -EPERM;
90125 memcpy(uevent_helper, buf, count);
90126 uevent_helper[count] = '\0';
90127 if (count && uevent_helper[count-1] == '\n')
90128@@ -176,7 +178,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
90129 return count;
90130 }
90131
90132-static struct bin_attribute notes_attr = {
90133+static bin_attribute_no_const notes_attr __read_only = {
90134 .attr = {
90135 .name = "notes",
90136 .mode = S_IRUGO,
90137diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
90138index 88d0d44..e9ce0ee 100644
90139--- a/kernel/locking/lockdep.c
90140+++ b/kernel/locking/lockdep.c
90141@@ -599,6 +599,10 @@ static int static_obj(void *obj)
90142 end = (unsigned long) &_end,
90143 addr = (unsigned long) obj;
90144
90145+#ifdef CONFIG_PAX_KERNEXEC
90146+ start = ktla_ktva(start);
90147+#endif
90148+
90149 /*
90150 * static variable?
90151 */
90152@@ -740,6 +744,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
90153 if (!static_obj(lock->key)) {
90154 debug_locks_off();
90155 printk("INFO: trying to register non-static key.\n");
90156+ printk("lock:%pS key:%pS.\n", lock, lock->key);
90157 printk("the code is fine but needs lockdep annotation.\n");
90158 printk("turning off the locking correctness validator.\n");
90159 dump_stack();
90160@@ -3081,7 +3086,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
90161 if (!class)
90162 return 0;
90163 }
90164- atomic_inc((atomic_t *)&class->ops);
90165+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)&class->ops);
90166 if (very_verbose(class)) {
90167 printk("\nacquire class [%p] %s", class->key, class->name);
90168 if (class->name_version > 1)
90169diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
90170index ef43ac4..2720dfa 100644
90171--- a/kernel/locking/lockdep_proc.c
90172+++ b/kernel/locking/lockdep_proc.c
90173@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
90174 return 0;
90175 }
90176
90177- seq_printf(m, "%p", class->key);
90178+ seq_printf(m, "%pK", class->key);
90179 #ifdef CONFIG_DEBUG_LOCKDEP
90180 seq_printf(m, " OPS:%8ld", class->ops);
90181 #endif
90182@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
90183
90184 list_for_each_entry(entry, &class->locks_after, entry) {
90185 if (entry->distance == 1) {
90186- seq_printf(m, " -> [%p] ", entry->class->key);
90187+ seq_printf(m, " -> [%pK] ", entry->class->key);
90188 print_name(m, entry->class);
90189 seq_puts(m, "\n");
90190 }
90191@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
90192 if (!class->key)
90193 continue;
90194
90195- seq_printf(m, "[%p] ", class->key);
90196+ seq_printf(m, "[%pK] ", class->key);
90197 print_name(m, class);
90198 seq_puts(m, "\n");
90199 }
90200@@ -496,7 +496,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90201 if (!i)
90202 seq_line(m, '-', 40-namelen, namelen);
90203
90204- snprintf(ip, sizeof(ip), "[<%p>]",
90205+ snprintf(ip, sizeof(ip), "[<%pK>]",
90206 (void *)class->contention_point[i]);
90207 seq_printf(m, "%40s %14lu %29s %pS\n",
90208 name, stats->contention_point[i],
90209@@ -511,7 +511,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
90210 if (!i)
90211 seq_line(m, '-', 40-namelen, namelen);
90212
90213- snprintf(ip, sizeof(ip), "[<%p>]",
90214+ snprintf(ip, sizeof(ip), "[<%pK>]",
90215 (void *)class->contending_point[i]);
90216 seq_printf(m, "%40s %14lu %29s %pS\n",
90217 name, stats->contending_point[i],
90218diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
90219index 9887a90..0cd2b1d 100644
90220--- a/kernel/locking/mcs_spinlock.c
90221+++ b/kernel/locking/mcs_spinlock.c
90222@@ -100,7 +100,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
90223
90224 prev = decode_cpu(old);
90225 node->prev = prev;
90226- ACCESS_ONCE(prev->next) = node;
90227+ ACCESS_ONCE_RW(prev->next) = node;
90228
90229 /*
90230 * Normally @prev is untouchable after the above store; because at that
90231@@ -172,8 +172,8 @@ unqueue:
90232 * it will wait in Step-A.
90233 */
90234
90235- ACCESS_ONCE(next->prev) = prev;
90236- ACCESS_ONCE(prev->next) = next;
90237+ ACCESS_ONCE_RW(next->prev) = prev;
90238+ ACCESS_ONCE_RW(prev->next) = next;
90239
90240 return false;
90241 }
90242@@ -195,13 +195,13 @@ void osq_unlock(struct optimistic_spin_queue *lock)
90243 node = this_cpu_ptr(&osq_node);
90244 next = xchg(&node->next, NULL);
90245 if (next) {
90246- ACCESS_ONCE(next->locked) = 1;
90247+ ACCESS_ONCE_RW(next->locked) = 1;
90248 return;
90249 }
90250
90251 next = osq_wait_next(lock, node, NULL);
90252 if (next)
90253- ACCESS_ONCE(next->locked) = 1;
90254+ ACCESS_ONCE_RW(next->locked) = 1;
90255 }
90256
90257 #endif
90258diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
90259index 23e89c5..8558eac 100644
90260--- a/kernel/locking/mcs_spinlock.h
90261+++ b/kernel/locking/mcs_spinlock.h
90262@@ -81,7 +81,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
90263 */
90264 return;
90265 }
90266- ACCESS_ONCE(prev->next) = node;
90267+ ACCESS_ONCE_RW(prev->next) = node;
90268
90269 /* Wait until the lock holder passes the lock down. */
90270 arch_mcs_spin_lock_contended(&node->locked);
90271diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
90272index 5cf6731..ce3bc5a 100644
90273--- a/kernel/locking/mutex-debug.c
90274+++ b/kernel/locking/mutex-debug.c
90275@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
90276 }
90277
90278 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90279- struct thread_info *ti)
90280+ struct task_struct *task)
90281 {
90282 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
90283
90284 /* Mark the current thread as blocked on the lock: */
90285- ti->task->blocked_on = waiter;
90286+ task->blocked_on = waiter;
90287 }
90288
90289 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90290- struct thread_info *ti)
90291+ struct task_struct *task)
90292 {
90293 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
90294- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
90295- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
90296- ti->task->blocked_on = NULL;
90297+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
90298+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
90299+ task->blocked_on = NULL;
90300
90301 list_del_init(&waiter->list);
90302 waiter->task = NULL;
90303diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
90304index 0799fd3..d06ae3b 100644
90305--- a/kernel/locking/mutex-debug.h
90306+++ b/kernel/locking/mutex-debug.h
90307@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
90308 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
90309 extern void debug_mutex_add_waiter(struct mutex *lock,
90310 struct mutex_waiter *waiter,
90311- struct thread_info *ti);
90312+ struct task_struct *task);
90313 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
90314- struct thread_info *ti);
90315+ struct task_struct *task);
90316 extern void debug_mutex_unlock(struct mutex *lock);
90317 extern void debug_mutex_init(struct mutex *lock, const char *name,
90318 struct lock_class_key *key);
90319diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
90320index ae712b2..d0d4a41 100644
90321--- a/kernel/locking/mutex.c
90322+++ b/kernel/locking/mutex.c
90323@@ -486,7 +486,7 @@ slowpath:
90324 goto skip_wait;
90325
90326 debug_mutex_lock_common(lock, &waiter);
90327- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
90328+ debug_mutex_add_waiter(lock, &waiter, task);
90329
90330 /* add waiting tasks to the end of the waitqueue (FIFO): */
90331 list_add_tail(&waiter.list, &lock->wait_list);
90332@@ -531,7 +531,7 @@ slowpath:
90333 schedule_preempt_disabled();
90334 spin_lock_mutex(&lock->wait_lock, flags);
90335 }
90336- mutex_remove_waiter(lock, &waiter, current_thread_info());
90337+ mutex_remove_waiter(lock, &waiter, task);
90338 /* set it to 0 if there are no waiters left: */
90339 if (likely(list_empty(&lock->wait_list)))
90340 atomic_set(&lock->count, 0);
90341@@ -568,7 +568,7 @@ skip_wait:
90342 return 0;
90343
90344 err:
90345- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
90346+ mutex_remove_waiter(lock, &waiter, task);
90347 spin_unlock_mutex(&lock->wait_lock, flags);
90348 debug_mutex_free_waiter(&waiter);
90349 mutex_release(&lock->dep_map, 1, ip);
90350diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
90351index 1d96dd0..994ff19 100644
90352--- a/kernel/locking/rtmutex-tester.c
90353+++ b/kernel/locking/rtmutex-tester.c
90354@@ -22,7 +22,7 @@
90355 #define MAX_RT_TEST_MUTEXES 8
90356
90357 static spinlock_t rttest_lock;
90358-static atomic_t rttest_event;
90359+static atomic_unchecked_t rttest_event;
90360
90361 struct test_thread_data {
90362 int opcode;
90363@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90364
90365 case RTTEST_LOCKCONT:
90366 td->mutexes[td->opdata] = 1;
90367- td->event = atomic_add_return(1, &rttest_event);
90368+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90369 return 0;
90370
90371 case RTTEST_RESET:
90372@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90373 return 0;
90374
90375 case RTTEST_RESETEVENT:
90376- atomic_set(&rttest_event, 0);
90377+ atomic_set_unchecked(&rttest_event, 0);
90378 return 0;
90379
90380 default:
90381@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90382 return ret;
90383
90384 td->mutexes[id] = 1;
90385- td->event = atomic_add_return(1, &rttest_event);
90386+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90387 rt_mutex_lock(&mutexes[id]);
90388- td->event = atomic_add_return(1, &rttest_event);
90389+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90390 td->mutexes[id] = 4;
90391 return 0;
90392
90393@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90394 return ret;
90395
90396 td->mutexes[id] = 1;
90397- td->event = atomic_add_return(1, &rttest_event);
90398+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90399 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
90400- td->event = atomic_add_return(1, &rttest_event);
90401+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90402 td->mutexes[id] = ret ? 0 : 4;
90403 return ret ? -EINTR : 0;
90404
90405@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
90406 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
90407 return ret;
90408
90409- td->event = atomic_add_return(1, &rttest_event);
90410+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90411 rt_mutex_unlock(&mutexes[id]);
90412- td->event = atomic_add_return(1, &rttest_event);
90413+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90414 td->mutexes[id] = 0;
90415 return 0;
90416
90417@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90418 break;
90419
90420 td->mutexes[dat] = 2;
90421- td->event = atomic_add_return(1, &rttest_event);
90422+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90423 break;
90424
90425 default:
90426@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90427 return;
90428
90429 td->mutexes[dat] = 3;
90430- td->event = atomic_add_return(1, &rttest_event);
90431+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90432 break;
90433
90434 case RTTEST_LOCKNOWAIT:
90435@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
90436 return;
90437
90438 td->mutexes[dat] = 1;
90439- td->event = atomic_add_return(1, &rttest_event);
90440+ td->event = atomic_add_return_unchecked(1, &rttest_event);
90441 return;
90442
90443 default:
90444diff --git a/kernel/module.c b/kernel/module.c
90445index 1c47139..6242887 100644
90446--- a/kernel/module.c
90447+++ b/kernel/module.c
90448@@ -60,6 +60,7 @@
90449 #include <linux/jump_label.h>
90450 #include <linux/pfn.h>
90451 #include <linux/bsearch.h>
90452+#include <linux/grsecurity.h>
90453 #include <uapi/linux/module.h>
90454 #include "module-internal.h"
90455
90456@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
90457
90458 /* Bounds of module allocation, for speeding __module_address.
90459 * Protected by module_mutex. */
90460-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
90461+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
90462+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
90463
90464 int register_module_notifier(struct notifier_block * nb)
90465 {
90466@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90467 return true;
90468
90469 list_for_each_entry_rcu(mod, &modules, list) {
90470- struct symsearch arr[] = {
90471+ struct symsearch modarr[] = {
90472 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
90473 NOT_GPL_ONLY, false },
90474 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
90475@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
90476 if (mod->state == MODULE_STATE_UNFORMED)
90477 continue;
90478
90479- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
90480+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
90481 return true;
90482 }
90483 return false;
90484@@ -488,7 +490,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
90485 if (!pcpusec->sh_size)
90486 return 0;
90487
90488- if (align > PAGE_SIZE) {
90489+ if (align-1 >= PAGE_SIZE) {
90490 pr_warn("%s: per-cpu alignment %li > %li\n",
90491 mod->name, align, PAGE_SIZE);
90492 align = PAGE_SIZE;
90493@@ -1060,7 +1062,7 @@ struct module_attribute module_uevent =
90494 static ssize_t show_coresize(struct module_attribute *mattr,
90495 struct module_kobject *mk, char *buffer)
90496 {
90497- return sprintf(buffer, "%u\n", mk->mod->core_size);
90498+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
90499 }
90500
90501 static struct module_attribute modinfo_coresize =
90502@@ -1069,7 +1071,7 @@ static struct module_attribute modinfo_coresize =
90503 static ssize_t show_initsize(struct module_attribute *mattr,
90504 struct module_kobject *mk, char *buffer)
90505 {
90506- return sprintf(buffer, "%u\n", mk->mod->init_size);
90507+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
90508 }
90509
90510 static struct module_attribute modinfo_initsize =
90511@@ -1161,12 +1163,29 @@ static int check_version(Elf_Shdr *sechdrs,
90512 goto bad_version;
90513 }
90514
90515+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90516+ /*
90517+ * avoid potentially printing jibberish on attempted load
90518+ * of a module randomized with a different seed
90519+ */
90520+ pr_warn("no symbol version for %s\n", symname);
90521+#else
90522 pr_warn("%s: no symbol version for %s\n", mod->name, symname);
90523+#endif
90524 return 0;
90525
90526 bad_version:
90527+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90528+ /*
90529+ * avoid potentially printing jibberish on attempted load
90530+ * of a module randomized with a different seed
90531+ */
90532+ printk("attempted module disagrees about version of symbol %s\n",
90533+ symname);
90534+#else
90535 printk("%s: disagrees about version of symbol %s\n",
90536 mod->name, symname);
90537+#endif
90538 return 0;
90539 }
90540
90541@@ -1282,7 +1301,7 @@ resolve_symbol_wait(struct module *mod,
90542 */
90543 #ifdef CONFIG_SYSFS
90544
90545-#ifdef CONFIG_KALLSYMS
90546+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
90547 static inline bool sect_empty(const Elf_Shdr *sect)
90548 {
90549 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
90550@@ -1422,7 +1441,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
90551 {
90552 unsigned int notes, loaded, i;
90553 struct module_notes_attrs *notes_attrs;
90554- struct bin_attribute *nattr;
90555+ bin_attribute_no_const *nattr;
90556
90557 /* failed to create section attributes, so can't create notes */
90558 if (!mod->sect_attrs)
90559@@ -1534,7 +1553,7 @@ static void del_usage_links(struct module *mod)
90560 static int module_add_modinfo_attrs(struct module *mod)
90561 {
90562 struct module_attribute *attr;
90563- struct module_attribute *temp_attr;
90564+ module_attribute_no_const *temp_attr;
90565 int error = 0;
90566 int i;
90567
90568@@ -1755,21 +1774,21 @@ static void set_section_ro_nx(void *base,
90569
90570 static void unset_module_core_ro_nx(struct module *mod)
90571 {
90572- set_page_attributes(mod->module_core + mod->core_text_size,
90573- mod->module_core + mod->core_size,
90574+ set_page_attributes(mod->module_core_rw,
90575+ mod->module_core_rw + mod->core_size_rw,
90576 set_memory_x);
90577- set_page_attributes(mod->module_core,
90578- mod->module_core + mod->core_ro_size,
90579+ set_page_attributes(mod->module_core_rx,
90580+ mod->module_core_rx + mod->core_size_rx,
90581 set_memory_rw);
90582 }
90583
90584 static void unset_module_init_ro_nx(struct module *mod)
90585 {
90586- set_page_attributes(mod->module_init + mod->init_text_size,
90587- mod->module_init + mod->init_size,
90588+ set_page_attributes(mod->module_init_rw,
90589+ mod->module_init_rw + mod->init_size_rw,
90590 set_memory_x);
90591- set_page_attributes(mod->module_init,
90592- mod->module_init + mod->init_ro_size,
90593+ set_page_attributes(mod->module_init_rx,
90594+ mod->module_init_rx + mod->init_size_rx,
90595 set_memory_rw);
90596 }
90597
90598@@ -1782,14 +1801,14 @@ void set_all_modules_text_rw(void)
90599 list_for_each_entry_rcu(mod, &modules, list) {
90600 if (mod->state == MODULE_STATE_UNFORMED)
90601 continue;
90602- if ((mod->module_core) && (mod->core_text_size)) {
90603- set_page_attributes(mod->module_core,
90604- mod->module_core + mod->core_text_size,
90605+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90606+ set_page_attributes(mod->module_core_rx,
90607+ mod->module_core_rx + mod->core_size_rx,
90608 set_memory_rw);
90609 }
90610- if ((mod->module_init) && (mod->init_text_size)) {
90611- set_page_attributes(mod->module_init,
90612- mod->module_init + mod->init_text_size,
90613+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90614+ set_page_attributes(mod->module_init_rx,
90615+ mod->module_init_rx + mod->init_size_rx,
90616 set_memory_rw);
90617 }
90618 }
90619@@ -1805,14 +1824,14 @@ void set_all_modules_text_ro(void)
90620 list_for_each_entry_rcu(mod, &modules, list) {
90621 if (mod->state == MODULE_STATE_UNFORMED)
90622 continue;
90623- if ((mod->module_core) && (mod->core_text_size)) {
90624- set_page_attributes(mod->module_core,
90625- mod->module_core + mod->core_text_size,
90626+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
90627+ set_page_attributes(mod->module_core_rx,
90628+ mod->module_core_rx + mod->core_size_rx,
90629 set_memory_ro);
90630 }
90631- if ((mod->module_init) && (mod->init_text_size)) {
90632- set_page_attributes(mod->module_init,
90633- mod->module_init + mod->init_text_size,
90634+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
90635+ set_page_attributes(mod->module_init_rx,
90636+ mod->module_init_rx + mod->init_size_rx,
90637 set_memory_ro);
90638 }
90639 }
90640@@ -1865,16 +1884,19 @@ static void free_module(struct module *mod)
90641
90642 /* This may be NULL, but that's OK */
90643 unset_module_init_ro_nx(mod);
90644- module_free(mod, mod->module_init);
90645+ module_free(mod, mod->module_init_rw);
90646+ module_free_exec(mod, mod->module_init_rx);
90647 kfree(mod->args);
90648 percpu_modfree(mod);
90649
90650 /* Free lock-classes: */
90651- lockdep_free_key_range(mod->module_core, mod->core_size);
90652+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
90653+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
90654
90655 /* Finally, free the core (containing the module structure) */
90656 unset_module_core_ro_nx(mod);
90657- module_free(mod, mod->module_core);
90658+ module_free_exec(mod, mod->module_core_rx);
90659+ module_free(mod, mod->module_core_rw);
90660
90661 #ifdef CONFIG_MPU
90662 update_protections(current->mm);
90663@@ -1943,9 +1965,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90664 int ret = 0;
90665 const struct kernel_symbol *ksym;
90666
90667+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90668+ int is_fs_load = 0;
90669+ int register_filesystem_found = 0;
90670+ char *p;
90671+
90672+ p = strstr(mod->args, "grsec_modharden_fs");
90673+ if (p) {
90674+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
90675+ /* copy \0 as well */
90676+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
90677+ is_fs_load = 1;
90678+ }
90679+#endif
90680+
90681 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
90682 const char *name = info->strtab + sym[i].st_name;
90683
90684+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90685+ /* it's a real shame this will never get ripped and copied
90686+ upstream! ;(
90687+ */
90688+ if (is_fs_load && !strcmp(name, "register_filesystem"))
90689+ register_filesystem_found = 1;
90690+#endif
90691+
90692 switch (sym[i].st_shndx) {
90693 case SHN_COMMON:
90694 /* Ignore common symbols */
90695@@ -1970,7 +2014,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90696 ksym = resolve_symbol_wait(mod, info, name);
90697 /* Ok if resolved. */
90698 if (ksym && !IS_ERR(ksym)) {
90699+ pax_open_kernel();
90700 sym[i].st_value = ksym->value;
90701+ pax_close_kernel();
90702 break;
90703 }
90704
90705@@ -1989,11 +2035,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
90706 secbase = (unsigned long)mod_percpu(mod);
90707 else
90708 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
90709+ pax_open_kernel();
90710 sym[i].st_value += secbase;
90711+ pax_close_kernel();
90712 break;
90713 }
90714 }
90715
90716+#ifdef CONFIG_GRKERNSEC_MODHARDEN
90717+ if (is_fs_load && !register_filesystem_found) {
90718+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
90719+ ret = -EPERM;
90720+ }
90721+#endif
90722+
90723 return ret;
90724 }
90725
90726@@ -2077,22 +2132,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
90727 || s->sh_entsize != ~0UL
90728 || strstarts(sname, ".init"))
90729 continue;
90730- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
90731+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90732+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
90733+ else
90734+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
90735 pr_debug("\t%s\n", sname);
90736 }
90737- switch (m) {
90738- case 0: /* executable */
90739- mod->core_size = debug_align(mod->core_size);
90740- mod->core_text_size = mod->core_size;
90741- break;
90742- case 1: /* RO: text and ro-data */
90743- mod->core_size = debug_align(mod->core_size);
90744- mod->core_ro_size = mod->core_size;
90745- break;
90746- case 3: /* whole core */
90747- mod->core_size = debug_align(mod->core_size);
90748- break;
90749- }
90750 }
90751
90752 pr_debug("Init section allocation order:\n");
90753@@ -2106,23 +2151,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
90754 || s->sh_entsize != ~0UL
90755 || !strstarts(sname, ".init"))
90756 continue;
90757- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
90758- | INIT_OFFSET_MASK);
90759+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
90760+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
90761+ else
90762+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
90763+ s->sh_entsize |= INIT_OFFSET_MASK;
90764 pr_debug("\t%s\n", sname);
90765 }
90766- switch (m) {
90767- case 0: /* executable */
90768- mod->init_size = debug_align(mod->init_size);
90769- mod->init_text_size = mod->init_size;
90770- break;
90771- case 1: /* RO: text and ro-data */
90772- mod->init_size = debug_align(mod->init_size);
90773- mod->init_ro_size = mod->init_size;
90774- break;
90775- case 3: /* whole init */
90776- mod->init_size = debug_align(mod->init_size);
90777- break;
90778- }
90779 }
90780 }
90781
90782@@ -2295,7 +2330,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90783
90784 /* Put symbol section at end of init part of module. */
90785 symsect->sh_flags |= SHF_ALLOC;
90786- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
90787+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
90788 info->index.sym) | INIT_OFFSET_MASK;
90789 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
90790
90791@@ -2312,13 +2347,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
90792 }
90793
90794 /* Append room for core symbols at end of core part. */
90795- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
90796- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
90797- mod->core_size += strtab_size;
90798+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
90799+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
90800+ mod->core_size_rx += strtab_size;
90801
90802 /* Put string table section at end of init part of module. */
90803 strsect->sh_flags |= SHF_ALLOC;
90804- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
90805+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
90806 info->index.str) | INIT_OFFSET_MASK;
90807 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
90808 }
90809@@ -2336,12 +2371,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90810 /* Make sure we get permanent strtab: don't use info->strtab. */
90811 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
90812
90813+ pax_open_kernel();
90814+
90815 /* Set types up while we still have access to sections. */
90816 for (i = 0; i < mod->num_symtab; i++)
90817 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
90818
90819- mod->core_symtab = dst = mod->module_core + info->symoffs;
90820- mod->core_strtab = s = mod->module_core + info->stroffs;
90821+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
90822+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
90823 src = mod->symtab;
90824 for (ndst = i = 0; i < mod->num_symtab; i++) {
90825 if (i == 0 ||
90826@@ -2353,6 +2390,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
90827 }
90828 }
90829 mod->core_num_syms = ndst;
90830+
90831+ pax_close_kernel();
90832 }
90833 #else
90834 static inline void layout_symtab(struct module *mod, struct load_info *info)
90835@@ -2386,17 +2425,33 @@ void * __weak module_alloc(unsigned long size)
90836 return vmalloc_exec(size);
90837 }
90838
90839-static void *module_alloc_update_bounds(unsigned long size)
90840+static void *module_alloc_update_bounds_rw(unsigned long size)
90841 {
90842 void *ret = module_alloc(size);
90843
90844 if (ret) {
90845 mutex_lock(&module_mutex);
90846 /* Update module bounds. */
90847- if ((unsigned long)ret < module_addr_min)
90848- module_addr_min = (unsigned long)ret;
90849- if ((unsigned long)ret + size > module_addr_max)
90850- module_addr_max = (unsigned long)ret + size;
90851+ if ((unsigned long)ret < module_addr_min_rw)
90852+ module_addr_min_rw = (unsigned long)ret;
90853+ if ((unsigned long)ret + size > module_addr_max_rw)
90854+ module_addr_max_rw = (unsigned long)ret + size;
90855+ mutex_unlock(&module_mutex);
90856+ }
90857+ return ret;
90858+}
90859+
90860+static void *module_alloc_update_bounds_rx(unsigned long size)
90861+{
90862+ void *ret = module_alloc_exec(size);
90863+
90864+ if (ret) {
90865+ mutex_lock(&module_mutex);
90866+ /* Update module bounds. */
90867+ if ((unsigned long)ret < module_addr_min_rx)
90868+ module_addr_min_rx = (unsigned long)ret;
90869+ if ((unsigned long)ret + size > module_addr_max_rx)
90870+ module_addr_max_rx = (unsigned long)ret + size;
90871 mutex_unlock(&module_mutex);
90872 }
90873 return ret;
90874@@ -2650,7 +2705,15 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90875 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
90876
90877 if (info->index.sym == 0) {
90878+#ifdef CONFIG_GRKERNSEC_RANDSTRUCT
90879+ /*
90880+ * avoid potentially printing jibberish on attempted load
90881+ * of a module randomized with a different seed
90882+ */
90883+ pr_warn("module has no symbols (stripped?)\n");
90884+#else
90885 pr_warn("%s: module has no symbols (stripped?)\n", mod->name);
90886+#endif
90887 return ERR_PTR(-ENOEXEC);
90888 }
90889
90890@@ -2666,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
90891 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90892 {
90893 const char *modmagic = get_modinfo(info, "vermagic");
90894+ const char *license = get_modinfo(info, "license");
90895 int err;
90896
90897+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
90898+ if (!license || !license_is_gpl_compatible(license))
90899+ return -ENOEXEC;
90900+#endif
90901+
90902 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
90903 modmagic = NULL;
90904
90905@@ -2692,7 +2761,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
90906 }
90907
90908 /* Set up license info based on the info section */
90909- set_license(mod, get_modinfo(info, "license"));
90910+ set_license(mod, license);
90911
90912 return 0;
90913 }
90914@@ -2786,7 +2855,7 @@ static int move_module(struct module *mod, struct load_info *info)
90915 void *ptr;
90916
90917 /* Do the allocs. */
90918- ptr = module_alloc_update_bounds(mod->core_size);
90919+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
90920 /*
90921 * The pointer to this block is stored in the module structure
90922 * which is inside the block. Just mark it as not being a
90923@@ -2796,11 +2865,11 @@ static int move_module(struct module *mod, struct load_info *info)
90924 if (!ptr)
90925 return -ENOMEM;
90926
90927- memset(ptr, 0, mod->core_size);
90928- mod->module_core = ptr;
90929+ memset(ptr, 0, mod->core_size_rw);
90930+ mod->module_core_rw = ptr;
90931
90932- if (mod->init_size) {
90933- ptr = module_alloc_update_bounds(mod->init_size);
90934+ if (mod->init_size_rw) {
90935+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
90936 /*
90937 * The pointer to this block is stored in the module structure
90938 * which is inside the block. This block doesn't need to be
90939@@ -2809,13 +2878,45 @@ static int move_module(struct module *mod, struct load_info *info)
90940 */
90941 kmemleak_ignore(ptr);
90942 if (!ptr) {
90943- module_free(mod, mod->module_core);
90944+ module_free(mod, mod->module_core_rw);
90945 return -ENOMEM;
90946 }
90947- memset(ptr, 0, mod->init_size);
90948- mod->module_init = ptr;
90949+ memset(ptr, 0, mod->init_size_rw);
90950+ mod->module_init_rw = ptr;
90951 } else
90952- mod->module_init = NULL;
90953+ mod->module_init_rw = NULL;
90954+
90955+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
90956+ kmemleak_not_leak(ptr);
90957+ if (!ptr) {
90958+ if (mod->module_init_rw)
90959+ module_free(mod, mod->module_init_rw);
90960+ module_free(mod, mod->module_core_rw);
90961+ return -ENOMEM;
90962+ }
90963+
90964+ pax_open_kernel();
90965+ memset(ptr, 0, mod->core_size_rx);
90966+ pax_close_kernel();
90967+ mod->module_core_rx = ptr;
90968+
90969+ if (mod->init_size_rx) {
90970+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
90971+ kmemleak_ignore(ptr);
90972+ if (!ptr && mod->init_size_rx) {
90973+ module_free_exec(mod, mod->module_core_rx);
90974+ if (mod->module_init_rw)
90975+ module_free(mod, mod->module_init_rw);
90976+ module_free(mod, mod->module_core_rw);
90977+ return -ENOMEM;
90978+ }
90979+
90980+ pax_open_kernel();
90981+ memset(ptr, 0, mod->init_size_rx);
90982+ pax_close_kernel();
90983+ mod->module_init_rx = ptr;
90984+ } else
90985+ mod->module_init_rx = NULL;
90986
90987 /* Transfer each section which specifies SHF_ALLOC */
90988 pr_debug("final section addresses:\n");
90989@@ -2826,16 +2927,45 @@ static int move_module(struct module *mod, struct load_info *info)
90990 if (!(shdr->sh_flags & SHF_ALLOC))
90991 continue;
90992
90993- if (shdr->sh_entsize & INIT_OFFSET_MASK)
90994- dest = mod->module_init
90995- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
90996- else
90997- dest = mod->module_core + shdr->sh_entsize;
90998+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
90999+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91000+ dest = mod->module_init_rw
91001+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91002+ else
91003+ dest = mod->module_init_rx
91004+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
91005+ } else {
91006+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
91007+ dest = mod->module_core_rw + shdr->sh_entsize;
91008+ else
91009+ dest = mod->module_core_rx + shdr->sh_entsize;
91010+ }
91011+
91012+ if (shdr->sh_type != SHT_NOBITS) {
91013+
91014+#ifdef CONFIG_PAX_KERNEXEC
91015+#ifdef CONFIG_X86_64
91016+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
91017+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
91018+#endif
91019+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
91020+ pax_open_kernel();
91021+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91022+ pax_close_kernel();
91023+ } else
91024+#endif
91025
91026- if (shdr->sh_type != SHT_NOBITS)
91027 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
91028+ }
91029 /* Update sh_addr to point to copy in image. */
91030- shdr->sh_addr = (unsigned long)dest;
91031+
91032+#ifdef CONFIG_PAX_KERNEXEC
91033+ if (shdr->sh_flags & SHF_EXECINSTR)
91034+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
91035+ else
91036+#endif
91037+
91038+ shdr->sh_addr = (unsigned long)dest;
91039 pr_debug("\t0x%lx %s\n",
91040 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
91041 }
91042@@ -2892,12 +3022,12 @@ static void flush_module_icache(const struct module *mod)
91043 * Do it before processing of module parameters, so the module
91044 * can provide parameter accessor functions of its own.
91045 */
91046- if (mod->module_init)
91047- flush_icache_range((unsigned long)mod->module_init,
91048- (unsigned long)mod->module_init
91049- + mod->init_size);
91050- flush_icache_range((unsigned long)mod->module_core,
91051- (unsigned long)mod->module_core + mod->core_size);
91052+ if (mod->module_init_rx)
91053+ flush_icache_range((unsigned long)mod->module_init_rx,
91054+ (unsigned long)mod->module_init_rx
91055+ + mod->init_size_rx);
91056+ flush_icache_range((unsigned long)mod->module_core_rx,
91057+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
91058
91059 set_fs(old_fs);
91060 }
91061@@ -2954,8 +3084,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
91062 static void module_deallocate(struct module *mod, struct load_info *info)
91063 {
91064 percpu_modfree(mod);
91065- module_free(mod, mod->module_init);
91066- module_free(mod, mod->module_core);
91067+ module_free_exec(mod, mod->module_init_rx);
91068+ module_free_exec(mod, mod->module_core_rx);
91069+ module_free(mod, mod->module_init_rw);
91070+ module_free(mod, mod->module_core_rw);
91071 }
91072
91073 int __weak module_finalize(const Elf_Ehdr *hdr,
91074@@ -2968,7 +3100,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
91075 static int post_relocation(struct module *mod, const struct load_info *info)
91076 {
91077 /* Sort exception table now relocations are done. */
91078+ pax_open_kernel();
91079 sort_extable(mod->extable, mod->extable + mod->num_exentries);
91080+ pax_close_kernel();
91081
91082 /* Copy relocated percpu area over. */
91083 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
91084@@ -3077,11 +3211,12 @@ static int do_init_module(struct module *mod)
91085 mod->strtab = mod->core_strtab;
91086 #endif
91087 unset_module_init_ro_nx(mod);
91088- module_free(mod, mod->module_init);
91089- mod->module_init = NULL;
91090- mod->init_size = 0;
91091- mod->init_ro_size = 0;
91092- mod->init_text_size = 0;
91093+ module_free(mod, mod->module_init_rw);
91094+ module_free_exec(mod, mod->module_init_rx);
91095+ mod->module_init_rw = NULL;
91096+ mod->module_init_rx = NULL;
91097+ mod->init_size_rw = 0;
91098+ mod->init_size_rx = 0;
91099 mutex_unlock(&module_mutex);
91100 wake_up_all(&module_wq);
91101
91102@@ -3149,16 +3284,16 @@ static int complete_formation(struct module *mod, struct load_info *info)
91103 module_bug_finalize(info->hdr, info->sechdrs, mod);
91104
91105 /* Set RO and NX regions for core */
91106- set_section_ro_nx(mod->module_core,
91107- mod->core_text_size,
91108- mod->core_ro_size,
91109- mod->core_size);
91110+ set_section_ro_nx(mod->module_core_rx,
91111+ mod->core_size_rx,
91112+ mod->core_size_rx,
91113+ mod->core_size_rx);
91114
91115 /* Set RO and NX regions for init */
91116- set_section_ro_nx(mod->module_init,
91117- mod->init_text_size,
91118- mod->init_ro_size,
91119- mod->init_size);
91120+ set_section_ro_nx(mod->module_init_rx,
91121+ mod->init_size_rx,
91122+ mod->init_size_rx,
91123+ mod->init_size_rx);
91124
91125 /* Mark state as coming so strong_try_module_get() ignores us,
91126 * but kallsyms etc. can see us. */
91127@@ -3242,9 +3377,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
91128 if (err)
91129 goto free_unload;
91130
91131+ /* Now copy in args */
91132+ mod->args = strndup_user(uargs, ~0UL >> 1);
91133+ if (IS_ERR(mod->args)) {
91134+ err = PTR_ERR(mod->args);
91135+ goto free_unload;
91136+ }
91137+
91138 /* Set up MODINFO_ATTR fields */
91139 setup_modinfo(mod, info);
91140
91141+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91142+ {
91143+ char *p, *p2;
91144+
91145+ if (strstr(mod->args, "grsec_modharden_netdev")) {
91146+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
91147+ err = -EPERM;
91148+ goto free_modinfo;
91149+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
91150+ p += sizeof("grsec_modharden_normal") - 1;
91151+ p2 = strstr(p, "_");
91152+ if (p2) {
91153+ *p2 = '\0';
91154+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
91155+ *p2 = '_';
91156+ }
91157+ err = -EPERM;
91158+ goto free_modinfo;
91159+ }
91160+ }
91161+#endif
91162+
91163 /* Fix up syms, so that st_value is a pointer to location. */
91164 err = simplify_symbols(mod, info);
91165 if (err < 0)
91166@@ -3260,13 +3424,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
91167
91168 flush_module_icache(mod);
91169
91170- /* Now copy in args */
91171- mod->args = strndup_user(uargs, ~0UL >> 1);
91172- if (IS_ERR(mod->args)) {
91173- err = PTR_ERR(mod->args);
91174- goto free_arch_cleanup;
91175- }
91176-
91177 dynamic_debug_setup(info->debug, info->num_debug);
91178
91179 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
91180@@ -3314,11 +3471,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
91181 ddebug_cleanup:
91182 dynamic_debug_remove(info->debug);
91183 synchronize_sched();
91184- kfree(mod->args);
91185- free_arch_cleanup:
91186 module_arch_cleanup(mod);
91187 free_modinfo:
91188 free_modinfo(mod);
91189+ kfree(mod->args);
91190 free_unload:
91191 module_unload_free(mod);
91192 unlink_mod:
91193@@ -3403,10 +3559,16 @@ static const char *get_ksymbol(struct module *mod,
91194 unsigned long nextval;
91195
91196 /* At worse, next value is at end of module */
91197- if (within_module_init(addr, mod))
91198- nextval = (unsigned long)mod->module_init+mod->init_text_size;
91199+ if (within_module_init_rx(addr, mod))
91200+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
91201+ else if (within_module_init_rw(addr, mod))
91202+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
91203+ else if (within_module_core_rx(addr, mod))
91204+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
91205+ else if (within_module_core_rw(addr, mod))
91206+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
91207 else
91208- nextval = (unsigned long)mod->module_core+mod->core_text_size;
91209+ return NULL;
91210
91211 /* Scan for closest preceding symbol, and next symbol. (ELF
91212 starts real symbols at 1). */
91213@@ -3654,7 +3816,7 @@ static int m_show(struct seq_file *m, void *p)
91214 return 0;
91215
91216 seq_printf(m, "%s %u",
91217- mod->name, mod->init_size + mod->core_size);
91218+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
91219 print_unload_info(m, mod);
91220
91221 /* Informative for users. */
91222@@ -3663,7 +3825,7 @@ static int m_show(struct seq_file *m, void *p)
91223 mod->state == MODULE_STATE_COMING ? "Loading":
91224 "Live");
91225 /* Used by oprofile and other similar tools. */
91226- seq_printf(m, " 0x%pK", mod->module_core);
91227+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
91228
91229 /* Taints info */
91230 if (mod->taints)
91231@@ -3699,7 +3861,17 @@ static const struct file_operations proc_modules_operations = {
91232
91233 static int __init proc_modules_init(void)
91234 {
91235+#ifndef CONFIG_GRKERNSEC_HIDESYM
91236+#ifdef CONFIG_GRKERNSEC_PROC_USER
91237+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91238+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91239+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91240+#else
91241 proc_create("modules", 0, NULL, &proc_modules_operations);
91242+#endif
91243+#else
91244+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91245+#endif
91246 return 0;
91247 }
91248 module_init(proc_modules_init);
91249@@ -3760,7 +3932,8 @@ struct module *__module_address(unsigned long addr)
91250 {
91251 struct module *mod;
91252
91253- if (addr < module_addr_min || addr > module_addr_max)
91254+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91255+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91256 return NULL;
91257
91258 list_for_each_entry_rcu(mod, &modules, list) {
91259@@ -3801,11 +3974,20 @@ bool is_module_text_address(unsigned long addr)
91260 */
91261 struct module *__module_text_address(unsigned long addr)
91262 {
91263- struct module *mod = __module_address(addr);
91264+ struct module *mod;
91265+
91266+#ifdef CONFIG_X86_32
91267+ addr = ktla_ktva(addr);
91268+#endif
91269+
91270+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91271+ return NULL;
91272+
91273+ mod = __module_address(addr);
91274+
91275 if (mod) {
91276 /* Make sure it's within the text section. */
91277- if (!within(addr, mod->module_init, mod->init_text_size)
91278- && !within(addr, mod->module_core, mod->core_text_size))
91279+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91280 mod = NULL;
91281 }
91282 return mod;
91283diff --git a/kernel/notifier.c b/kernel/notifier.c
91284index 4803da6..1c5eea6 100644
91285--- a/kernel/notifier.c
91286+++ b/kernel/notifier.c
91287@@ -5,6 +5,7 @@
91288 #include <linux/rcupdate.h>
91289 #include <linux/vmalloc.h>
91290 #include <linux/reboot.h>
91291+#include <linux/mm.h>
91292
91293 /*
91294 * Notifier list for kernel code which wants to be called
91295@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
91296 while ((*nl) != NULL) {
91297 if (n->priority > (*nl)->priority)
91298 break;
91299- nl = &((*nl)->next);
91300+ nl = (struct notifier_block **)&((*nl)->next);
91301 }
91302- n->next = *nl;
91303+ pax_open_kernel();
91304+ *(const void **)&n->next = *nl;
91305 rcu_assign_pointer(*nl, n);
91306+ pax_close_kernel();
91307 return 0;
91308 }
91309
91310@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
91311 return 0;
91312 if (n->priority > (*nl)->priority)
91313 break;
91314- nl = &((*nl)->next);
91315+ nl = (struct notifier_block **)&((*nl)->next);
91316 }
91317- n->next = *nl;
91318+ pax_open_kernel();
91319+ *(const void **)&n->next = *nl;
91320 rcu_assign_pointer(*nl, n);
91321+ pax_close_kernel();
91322 return 0;
91323 }
91324
91325@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
91326 {
91327 while ((*nl) != NULL) {
91328 if ((*nl) == n) {
91329+ pax_open_kernel();
91330 rcu_assign_pointer(*nl, n->next);
91331+ pax_close_kernel();
91332 return 0;
91333 }
91334- nl = &((*nl)->next);
91335+ nl = (struct notifier_block **)&((*nl)->next);
91336 }
91337 return -ENOENT;
91338 }
91339diff --git a/kernel/padata.c b/kernel/padata.c
91340index 161402f..598814c 100644
91341--- a/kernel/padata.c
91342+++ b/kernel/padata.c
91343@@ -54,7 +54,7 @@ static int padata_cpu_hash(struct parallel_data *pd)
91344 * seq_nr mod. number of cpus in use.
91345 */
91346
91347- seq_nr = atomic_inc_return(&pd->seq_nr);
91348+ seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
91349 cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
91350
91351 return padata_index_to_cpu(pd, cpu_index);
91352@@ -428,7 +428,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
91353 padata_init_pqueues(pd);
91354 padata_init_squeues(pd);
91355 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
91356- atomic_set(&pd->seq_nr, -1);
91357+ atomic_set_unchecked(&pd->seq_nr, -1);
91358 atomic_set(&pd->reorder_objects, 0);
91359 atomic_set(&pd->refcnt, 0);
91360 pd->pinst = pinst;
91361diff --git a/kernel/panic.c b/kernel/panic.c
91362index d09dc5c..9abbdff 100644
91363--- a/kernel/panic.c
91364+++ b/kernel/panic.c
91365@@ -53,7 +53,7 @@ EXPORT_SYMBOL(panic_blink);
91366 /*
91367 * Stop ourself in panic -- architecture code may override this
91368 */
91369-void __weak panic_smp_self_stop(void)
91370+void __weak __noreturn panic_smp_self_stop(void)
91371 {
91372 while (1)
91373 cpu_relax();
91374@@ -421,7 +421,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
91375 disable_trace_on_warning();
91376
91377 pr_warn("------------[ cut here ]------------\n");
91378- pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
91379+ pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
91380 raw_smp_processor_id(), current->pid, file, line, caller);
91381
91382 if (args)
91383@@ -475,7 +475,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
91384 */
91385 __visible void __stack_chk_fail(void)
91386 {
91387- panic("stack-protector: Kernel stack is corrupted in: %p\n",
91388+ dump_stack();
91389+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
91390 __builtin_return_address(0));
91391 }
91392 EXPORT_SYMBOL(__stack_chk_fail);
91393diff --git a/kernel/pid.c b/kernel/pid.c
91394index 9b9a266..c20ef80 100644
91395--- a/kernel/pid.c
91396+++ b/kernel/pid.c
91397@@ -33,6 +33,7 @@
91398 #include <linux/rculist.h>
91399 #include <linux/bootmem.h>
91400 #include <linux/hash.h>
91401+#include <linux/security.h>
91402 #include <linux/pid_namespace.h>
91403 #include <linux/init_task.h>
91404 #include <linux/syscalls.h>
91405@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
91406
91407 int pid_max = PID_MAX_DEFAULT;
91408
91409-#define RESERVED_PIDS 300
91410+#define RESERVED_PIDS 500
91411
91412 int pid_max_min = RESERVED_PIDS + 1;
91413 int pid_max_max = PID_MAX_LIMIT;
91414@@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
91415 */
91416 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
91417 {
91418+ struct task_struct *task;
91419+
91420 rcu_lockdep_assert(rcu_read_lock_held(),
91421 "find_task_by_pid_ns() needs rcu_read_lock()"
91422 " protection");
91423- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91424+
91425+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
91426+
91427+ if (gr_pid_is_chrooted(task))
91428+ return NULL;
91429+
91430+ return task;
91431 }
91432
91433 struct task_struct *find_task_by_vpid(pid_t vnr)
91434@@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
91435 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
91436 }
91437
91438+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
91439+{
91440+ rcu_lockdep_assert(rcu_read_lock_held(),
91441+ "find_task_by_pid_ns() needs rcu_read_lock()"
91442+ " protection");
91443+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
91444+}
91445+
91446 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
91447 {
91448 struct pid *pid;
91449diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
91450index db95d8e..a0ca23f 100644
91451--- a/kernel/pid_namespace.c
91452+++ b/kernel/pid_namespace.c
91453@@ -253,7 +253,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
91454 void __user *buffer, size_t *lenp, loff_t *ppos)
91455 {
91456 struct pid_namespace *pid_ns = task_active_pid_ns(current);
91457- struct ctl_table tmp = *table;
91458+ ctl_table_no_const tmp = *table;
91459
91460 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
91461 return -EPERM;
91462diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
91463index e4e4121..71faf14 100644
91464--- a/kernel/power/Kconfig
91465+++ b/kernel/power/Kconfig
91466@@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
91467 config HIBERNATION
91468 bool "Hibernation (aka 'suspend to disk')"
91469 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
91470+ depends on !GRKERNSEC_KMEM
91471+ depends on !PAX_MEMORY_SANITIZE
91472 select HIBERNATE_CALLBACKS
91473 select LZO_COMPRESS
91474 select LZO_DECOMPRESS
91475diff --git a/kernel/power/process.c b/kernel/power/process.c
91476index 7a37cf3..3e4c1c8 100644
91477--- a/kernel/power/process.c
91478+++ b/kernel/power/process.c
91479@@ -35,6 +35,7 @@ static int try_to_freeze_tasks(bool user_only)
91480 unsigned int elapsed_msecs;
91481 bool wakeup = false;
91482 int sleep_usecs = USEC_PER_MSEC;
91483+ bool timedout = false;
91484
91485 do_gettimeofday(&start);
91486
91487@@ -45,13 +46,20 @@ static int try_to_freeze_tasks(bool user_only)
91488
91489 while (true) {
91490 todo = 0;
91491+ if (time_after(jiffies, end_time))
91492+ timedout = true;
91493 read_lock(&tasklist_lock);
91494 do_each_thread(g, p) {
91495 if (p == current || !freeze_task(p))
91496 continue;
91497
91498- if (!freezer_should_skip(p))
91499+ if (!freezer_should_skip(p)) {
91500 todo++;
91501+ if (timedout) {
91502+ printk(KERN_ERR "Task refusing to freeze:\n");
91503+ sched_show_task(p);
91504+ }
91505+ }
91506 } while_each_thread(g, p);
91507 read_unlock(&tasklist_lock);
91508
91509@@ -60,7 +68,7 @@ static int try_to_freeze_tasks(bool user_only)
91510 todo += wq_busy;
91511 }
91512
91513- if (!todo || time_after(jiffies, end_time))
91514+ if (!todo || timedout)
91515 break;
91516
91517 if (pm_wakeup_pending()) {
91518diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
91519index 1ce7706..3b07c49 100644
91520--- a/kernel/printk/printk.c
91521+++ b/kernel/printk/printk.c
91522@@ -490,6 +490,11 @@ static int check_syslog_permissions(int type, bool from_file)
91523 if (from_file && type != SYSLOG_ACTION_OPEN)
91524 return 0;
91525
91526+#ifdef CONFIG_GRKERNSEC_DMESG
91527+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
91528+ return -EPERM;
91529+#endif
91530+
91531 if (syslog_action_restricted(type)) {
91532 if (capable(CAP_SYSLOG))
91533 return 0;
91534diff --git a/kernel/profile.c b/kernel/profile.c
91535index 54bf5ba..df6e0a2 100644
91536--- a/kernel/profile.c
91537+++ b/kernel/profile.c
91538@@ -37,7 +37,7 @@ struct profile_hit {
91539 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
91540 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
91541
91542-static atomic_t *prof_buffer;
91543+static atomic_unchecked_t *prof_buffer;
91544 static unsigned long prof_len, prof_shift;
91545
91546 int prof_on __read_mostly;
91547@@ -256,7 +256,7 @@ static void profile_flip_buffers(void)
91548 hits[i].pc = 0;
91549 continue;
91550 }
91551- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91552+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91553 hits[i].hits = hits[i].pc = 0;
91554 }
91555 }
91556@@ -317,9 +317,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91557 * Add the current hit(s) and flush the write-queue out
91558 * to the global buffer:
91559 */
91560- atomic_add(nr_hits, &prof_buffer[pc]);
91561+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
91562 for (i = 0; i < NR_PROFILE_HIT; ++i) {
91563- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
91564+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
91565 hits[i].pc = hits[i].hits = 0;
91566 }
91567 out:
91568@@ -394,7 +394,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
91569 {
91570 unsigned long pc;
91571 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
91572- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91573+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
91574 }
91575 #endif /* !CONFIG_SMP */
91576
91577@@ -490,7 +490,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
91578 return -EFAULT;
91579 buf++; p++; count--; read++;
91580 }
91581- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
91582+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
91583 if (copy_to_user(buf, (void *)pnt, count))
91584 return -EFAULT;
91585 read += count;
91586@@ -521,7 +521,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
91587 }
91588 #endif
91589 profile_discard_flip_buffers();
91590- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
91591+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
91592 return count;
91593 }
91594
91595diff --git a/kernel/ptrace.c b/kernel/ptrace.c
91596index 54e7522..5b82dd6 100644
91597--- a/kernel/ptrace.c
91598+++ b/kernel/ptrace.c
91599@@ -321,7 +321,7 @@ static int ptrace_attach(struct task_struct *task, long request,
91600 if (seize)
91601 flags |= PT_SEIZED;
91602 rcu_read_lock();
91603- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91604+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
91605 flags |= PT_PTRACE_CAP;
91606 rcu_read_unlock();
91607 task->ptrace = flags;
91608@@ -532,7 +532,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
91609 break;
91610 return -EIO;
91611 }
91612- if (copy_to_user(dst, buf, retval))
91613+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
91614 return -EFAULT;
91615 copied += retval;
91616 src += retval;
91617@@ -800,7 +800,7 @@ int ptrace_request(struct task_struct *child, long request,
91618 bool seized = child->ptrace & PT_SEIZED;
91619 int ret = -EIO;
91620 siginfo_t siginfo, *si;
91621- void __user *datavp = (void __user *) data;
91622+ void __user *datavp = (__force void __user *) data;
91623 unsigned long __user *datalp = datavp;
91624 unsigned long flags;
91625
91626@@ -1046,14 +1046,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
91627 goto out;
91628 }
91629
91630+ if (gr_handle_ptrace(child, request)) {
91631+ ret = -EPERM;
91632+ goto out_put_task_struct;
91633+ }
91634+
91635 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91636 ret = ptrace_attach(child, request, addr, data);
91637 /*
91638 * Some architectures need to do book-keeping after
91639 * a ptrace attach.
91640 */
91641- if (!ret)
91642+ if (!ret) {
91643 arch_ptrace_attach(child);
91644+ gr_audit_ptrace(child);
91645+ }
91646 goto out_put_task_struct;
91647 }
91648
91649@@ -1081,7 +1088,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
91650 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
91651 if (copied != sizeof(tmp))
91652 return -EIO;
91653- return put_user(tmp, (unsigned long __user *)data);
91654+ return put_user(tmp, (__force unsigned long __user *)data);
91655 }
91656
91657 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
91658@@ -1175,7 +1182,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
91659 }
91660
91661 COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91662- compat_long_t, addr, compat_long_t, data)
91663+ compat_ulong_t, addr, compat_ulong_t, data)
91664 {
91665 struct task_struct *child;
91666 long ret;
91667@@ -1191,14 +1198,21 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
91668 goto out;
91669 }
91670
91671+ if (gr_handle_ptrace(child, request)) {
91672+ ret = -EPERM;
91673+ goto out_put_task_struct;
91674+ }
91675+
91676 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
91677 ret = ptrace_attach(child, request, addr, data);
91678 /*
91679 * Some architectures need to do book-keeping after
91680 * a ptrace attach.
91681 */
91682- if (!ret)
91683+ if (!ret) {
91684 arch_ptrace_attach(child);
91685+ gr_audit_ptrace(child);
91686+ }
91687 goto out_put_task_struct;
91688 }
91689
91690diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
91691index 948a769..5ca842b 100644
91692--- a/kernel/rcu/rcutorture.c
91693+++ b/kernel/rcu/rcutorture.c
91694@@ -124,12 +124,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91695 rcu_torture_count) = { 0 };
91696 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
91697 rcu_torture_batch) = { 0 };
91698-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91699-static atomic_t n_rcu_torture_alloc;
91700-static atomic_t n_rcu_torture_alloc_fail;
91701-static atomic_t n_rcu_torture_free;
91702-static atomic_t n_rcu_torture_mberror;
91703-static atomic_t n_rcu_torture_error;
91704+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
91705+static atomic_unchecked_t n_rcu_torture_alloc;
91706+static atomic_unchecked_t n_rcu_torture_alloc_fail;
91707+static atomic_unchecked_t n_rcu_torture_free;
91708+static atomic_unchecked_t n_rcu_torture_mberror;
91709+static atomic_unchecked_t n_rcu_torture_error;
91710 static long n_rcu_torture_barrier_error;
91711 static long n_rcu_torture_boost_ktrerror;
91712 static long n_rcu_torture_boost_rterror;
91713@@ -200,11 +200,11 @@ rcu_torture_alloc(void)
91714
91715 spin_lock_bh(&rcu_torture_lock);
91716 if (list_empty(&rcu_torture_freelist)) {
91717- atomic_inc(&n_rcu_torture_alloc_fail);
91718+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
91719 spin_unlock_bh(&rcu_torture_lock);
91720 return NULL;
91721 }
91722- atomic_inc(&n_rcu_torture_alloc);
91723+ atomic_inc_unchecked(&n_rcu_torture_alloc);
91724 p = rcu_torture_freelist.next;
91725 list_del_init(p);
91726 spin_unlock_bh(&rcu_torture_lock);
91727@@ -217,7 +217,7 @@ rcu_torture_alloc(void)
91728 static void
91729 rcu_torture_free(struct rcu_torture *p)
91730 {
91731- atomic_inc(&n_rcu_torture_free);
91732+ atomic_inc_unchecked(&n_rcu_torture_free);
91733 spin_lock_bh(&rcu_torture_lock);
91734 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
91735 spin_unlock_bh(&rcu_torture_lock);
91736@@ -301,7 +301,7 @@ rcu_torture_pipe_update_one(struct rcu_torture *rp)
91737 i = rp->rtort_pipe_count;
91738 if (i > RCU_TORTURE_PIPE_LEN)
91739 i = RCU_TORTURE_PIPE_LEN;
91740- atomic_inc(&rcu_torture_wcount[i]);
91741+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91742 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
91743 rp->rtort_mbtest = 0;
91744 return true;
91745@@ -808,7 +808,7 @@ rcu_torture_writer(void *arg)
91746 i = old_rp->rtort_pipe_count;
91747 if (i > RCU_TORTURE_PIPE_LEN)
91748 i = RCU_TORTURE_PIPE_LEN;
91749- atomic_inc(&rcu_torture_wcount[i]);
91750+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
91751 old_rp->rtort_pipe_count++;
91752 switch (synctype[torture_random(&rand) % nsynctypes]) {
91753 case RTWS_DEF_FREE:
91754@@ -926,7 +926,7 @@ static void rcu_torture_timer(unsigned long unused)
91755 return;
91756 }
91757 if (p->rtort_mbtest == 0)
91758- atomic_inc(&n_rcu_torture_mberror);
91759+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91760 spin_lock(&rand_lock);
91761 cur_ops->read_delay(&rand);
91762 n_rcu_torture_timers++;
91763@@ -996,7 +996,7 @@ rcu_torture_reader(void *arg)
91764 continue;
91765 }
91766 if (p->rtort_mbtest == 0)
91767- atomic_inc(&n_rcu_torture_mberror);
91768+ atomic_inc_unchecked(&n_rcu_torture_mberror);
91769 cur_ops->read_delay(&rand);
91770 preempt_disable();
91771 pipe_count = p->rtort_pipe_count;
91772@@ -1054,15 +1054,15 @@ rcu_torture_printk(char *page)
91773 }
91774 page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
91775 page += sprintf(page,
91776- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
91777+ "rtc: %pP ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
91778 rcu_torture_current,
91779 rcu_torture_current_version,
91780 list_empty(&rcu_torture_freelist),
91781- atomic_read(&n_rcu_torture_alloc),
91782- atomic_read(&n_rcu_torture_alloc_fail),
91783- atomic_read(&n_rcu_torture_free));
91784+ atomic_read_unchecked(&n_rcu_torture_alloc),
91785+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
91786+ atomic_read_unchecked(&n_rcu_torture_free));
91787 page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
91788- atomic_read(&n_rcu_torture_mberror),
91789+ atomic_read_unchecked(&n_rcu_torture_mberror),
91790 n_rcu_torture_boost_ktrerror,
91791 n_rcu_torture_boost_rterror);
91792 page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
91793@@ -1075,14 +1075,14 @@ rcu_torture_printk(char *page)
91794 n_barrier_attempts,
91795 n_rcu_torture_barrier_error);
91796 page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
91797- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
91798+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
91799 n_rcu_torture_barrier_error != 0 ||
91800 n_rcu_torture_boost_ktrerror != 0 ||
91801 n_rcu_torture_boost_rterror != 0 ||
91802 n_rcu_torture_boost_failure != 0 ||
91803 i > 1) {
91804 page += sprintf(page, "!!! ");
91805- atomic_inc(&n_rcu_torture_error);
91806+ atomic_inc_unchecked(&n_rcu_torture_error);
91807 WARN_ON_ONCE(1);
91808 }
91809 page += sprintf(page, "Reader Pipe: ");
91810@@ -1096,7 +1096,7 @@ rcu_torture_printk(char *page)
91811 page += sprintf(page, "Free-Block Circulation: ");
91812 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91813 page += sprintf(page, " %d",
91814- atomic_read(&rcu_torture_wcount[i]));
91815+ atomic_read_unchecked(&rcu_torture_wcount[i]));
91816 }
91817 page += sprintf(page, "\n");
91818 if (cur_ops->stats)
91819@@ -1461,7 +1461,7 @@ rcu_torture_cleanup(void)
91820
91821 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
91822
91823- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91824+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
91825 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
91826 else if (torture_onoff_failures())
91827 rcu_torture_print_module_parms(cur_ops,
91828@@ -1584,18 +1584,18 @@ rcu_torture_init(void)
91829
91830 rcu_torture_current = NULL;
91831 rcu_torture_current_version = 0;
91832- atomic_set(&n_rcu_torture_alloc, 0);
91833- atomic_set(&n_rcu_torture_alloc_fail, 0);
91834- atomic_set(&n_rcu_torture_free, 0);
91835- atomic_set(&n_rcu_torture_mberror, 0);
91836- atomic_set(&n_rcu_torture_error, 0);
91837+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
91838+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
91839+ atomic_set_unchecked(&n_rcu_torture_free, 0);
91840+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
91841+ atomic_set_unchecked(&n_rcu_torture_error, 0);
91842 n_rcu_torture_barrier_error = 0;
91843 n_rcu_torture_boost_ktrerror = 0;
91844 n_rcu_torture_boost_rterror = 0;
91845 n_rcu_torture_boost_failure = 0;
91846 n_rcu_torture_boosts = 0;
91847 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
91848- atomic_set(&rcu_torture_wcount[i], 0);
91849+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
91850 for_each_possible_cpu(cpu) {
91851 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
91852 per_cpu(rcu_torture_count, cpu)[i] = 0;
91853diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
91854index d9efcc1..ea543e9 100644
91855--- a/kernel/rcu/tiny.c
91856+++ b/kernel/rcu/tiny.c
91857@@ -42,7 +42,7 @@
91858 /* Forward declarations for tiny_plugin.h. */
91859 struct rcu_ctrlblk;
91860 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
91861-static void rcu_process_callbacks(struct softirq_action *unused);
91862+static void rcu_process_callbacks(void);
91863 static void __call_rcu(struct rcu_head *head,
91864 void (*func)(struct rcu_head *rcu),
91865 struct rcu_ctrlblk *rcp);
91866@@ -308,7 +308,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
91867 false));
91868 }
91869
91870-static void rcu_process_callbacks(struct softirq_action *unused)
91871+static __latent_entropy void rcu_process_callbacks(void)
91872 {
91873 __rcu_process_callbacks(&rcu_sched_ctrlblk);
91874 __rcu_process_callbacks(&rcu_bh_ctrlblk);
91875diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
91876index 858c565..7efd915 100644
91877--- a/kernel/rcu/tiny_plugin.h
91878+++ b/kernel/rcu/tiny_plugin.h
91879@@ -152,17 +152,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
91880 dump_stack();
91881 }
91882 if (*rcp->curtail && ULONG_CMP_GE(j, js))
91883- ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
91884+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies +
91885 3 * rcu_jiffies_till_stall_check() + 3;
91886 else if (ULONG_CMP_GE(j, js))
91887- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91888+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91889 }
91890
91891 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
91892 {
91893 rcp->ticks_this_gp = 0;
91894 rcp->gp_start = jiffies;
91895- ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91896+ ACCESS_ONCE_RW(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
91897 }
91898
91899 static void check_cpu_stalls(void)
91900diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
91901index 89a404a..f42a019 100644
91902--- a/kernel/rcu/tree.c
91903+++ b/kernel/rcu/tree.c
91904@@ -263,7 +263,7 @@ static void rcu_momentary_dyntick_idle(void)
91905 */
91906 rdtp = this_cpu_ptr(&rcu_dynticks);
91907 smp_mb__before_atomic(); /* Earlier stuff before QS. */
91908- atomic_add(2, &rdtp->dynticks); /* QS. */
91909+ atomic_add_unchecked(2, &rdtp->dynticks); /* QS. */
91910 smp_mb__after_atomic(); /* Later stuff after QS. */
91911 break;
91912 }
91913@@ -523,9 +523,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
91914 rcu_prepare_for_idle(smp_processor_id());
91915 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91916 smp_mb__before_atomic(); /* See above. */
91917- atomic_inc(&rdtp->dynticks);
91918+ atomic_inc_unchecked(&rdtp->dynticks);
91919 smp_mb__after_atomic(); /* Force ordering with next sojourn. */
91920- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91921+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91922
91923 /*
91924 * It is illegal to enter an extended quiescent state while
91925@@ -643,10 +643,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
91926 int user)
91927 {
91928 smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
91929- atomic_inc(&rdtp->dynticks);
91930+ atomic_inc_unchecked(&rdtp->dynticks);
91931 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91932 smp_mb__after_atomic(); /* See above. */
91933- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91934+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91935 rcu_cleanup_after_idle(smp_processor_id());
91936 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
91937 if (!user && !is_idle_task(current)) {
91938@@ -767,14 +767,14 @@ void rcu_nmi_enter(void)
91939 struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
91940
91941 if (rdtp->dynticks_nmi_nesting == 0 &&
91942- (atomic_read(&rdtp->dynticks) & 0x1))
91943+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
91944 return;
91945 rdtp->dynticks_nmi_nesting++;
91946 smp_mb__before_atomic(); /* Force delay from prior write. */
91947- atomic_inc(&rdtp->dynticks);
91948+ atomic_inc_unchecked(&rdtp->dynticks);
91949 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
91950 smp_mb__after_atomic(); /* See above. */
91951- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
91952+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
91953 }
91954
91955 /**
91956@@ -793,9 +793,9 @@ void rcu_nmi_exit(void)
91957 return;
91958 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
91959 smp_mb__before_atomic(); /* See above. */
91960- atomic_inc(&rdtp->dynticks);
91961+ atomic_inc_unchecked(&rdtp->dynticks);
91962 smp_mb__after_atomic(); /* Force delay to next write. */
91963- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
91964+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
91965 }
91966
91967 /**
91968@@ -808,7 +808,7 @@ void rcu_nmi_exit(void)
91969 */
91970 bool notrace __rcu_is_watching(void)
91971 {
91972- return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91973+ return atomic_read_unchecked(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
91974 }
91975
91976 /**
91977@@ -891,7 +891,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
91978 static int dyntick_save_progress_counter(struct rcu_data *rdp,
91979 bool *isidle, unsigned long *maxj)
91980 {
91981- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
91982+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91983 rcu_sysidle_check_cpu(rdp, isidle, maxj);
91984 if ((rdp->dynticks_snap & 0x1) == 0) {
91985 trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
91986@@ -920,7 +920,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91987 int *rcrmp;
91988 unsigned int snap;
91989
91990- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
91991+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
91992 snap = (unsigned int)rdp->dynticks_snap;
91993
91994 /*
91995@@ -983,10 +983,10 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
91996 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
91997 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
91998 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
91999- ACCESS_ONCE(rdp->cond_resched_completed) =
92000+ ACCESS_ONCE_RW(rdp->cond_resched_completed) =
92001 ACCESS_ONCE(rdp->mynode->completed);
92002 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
92003- ACCESS_ONCE(*rcrmp) =
92004+ ACCESS_ONCE_RW(*rcrmp) =
92005 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
92006 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
92007 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
92008@@ -1008,7 +1008,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
92009 rsp->gp_start = j;
92010 smp_wmb(); /* Record start time before stall time. */
92011 j1 = rcu_jiffies_till_stall_check();
92012- ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
92013+ ACCESS_ONCE_RW(rsp->jiffies_stall) = j + j1;
92014 rsp->jiffies_resched = j + j1 / 2;
92015 }
92016
92017@@ -1049,7 +1049,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
92018 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92019 return;
92020 }
92021- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92022+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
92023 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92024
92025 /*
92026@@ -1126,7 +1126,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
92027
92028 raw_spin_lock_irqsave(&rnp->lock, flags);
92029 if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
92030- ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
92031+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies +
92032 3 * rcu_jiffies_till_stall_check() + 3;
92033 raw_spin_unlock_irqrestore(&rnp->lock, flags);
92034
92035@@ -1210,7 +1210,7 @@ void rcu_cpu_stall_reset(void)
92036 struct rcu_state *rsp;
92037
92038 for_each_rcu_flavor(rsp)
92039- ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92040+ ACCESS_ONCE_RW(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
92041 }
92042
92043 /*
92044@@ -1596,7 +1596,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
92045 raw_spin_unlock_irq(&rnp->lock);
92046 return 0;
92047 }
92048- ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92049+ ACCESS_ONCE_RW(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
92050
92051 if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
92052 /*
92053@@ -1637,9 +1637,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
92054 rdp = this_cpu_ptr(rsp->rda);
92055 rcu_preempt_check_blocked_tasks(rnp);
92056 rnp->qsmask = rnp->qsmaskinit;
92057- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
92058+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
92059 WARN_ON_ONCE(rnp->completed != rsp->completed);
92060- ACCESS_ONCE(rnp->completed) = rsp->completed;
92061+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
92062 if (rnp == rdp->mynode)
92063 (void)__note_gp_changes(rsp, rnp, rdp);
92064 rcu_preempt_boost_start_gp(rnp);
92065@@ -1684,7 +1684,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
92066 if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
92067 raw_spin_lock_irq(&rnp->lock);
92068 smp_mb__after_unlock_lock();
92069- ACCESS_ONCE(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
92070+ ACCESS_ONCE_RW(rsp->gp_flags) &= ~RCU_GP_FLAG_FQS;
92071 raw_spin_unlock_irq(&rnp->lock);
92072 }
92073 return fqs_state;
92074@@ -1729,7 +1729,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92075 rcu_for_each_node_breadth_first(rsp, rnp) {
92076 raw_spin_lock_irq(&rnp->lock);
92077 smp_mb__after_unlock_lock();
92078- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
92079+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
92080 rdp = this_cpu_ptr(rsp->rda);
92081 if (rnp == rdp->mynode)
92082 needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
92083@@ -1744,14 +1744,14 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
92084 rcu_nocb_gp_set(rnp, nocb);
92085
92086 /* Declare grace period done. */
92087- ACCESS_ONCE(rsp->completed) = rsp->gpnum;
92088+ ACCESS_ONCE_RW(rsp->completed) = rsp->gpnum;
92089 trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
92090 rsp->fqs_state = RCU_GP_IDLE;
92091 rdp = this_cpu_ptr(rsp->rda);
92092 /* Advance CBs to reduce false positives below. */
92093 needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
92094 if (needgp || cpu_needs_another_gp(rsp, rdp)) {
92095- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92096+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92097 trace_rcu_grace_period(rsp->name,
92098 ACCESS_ONCE(rsp->gpnum),
92099 TPS("newreq"));
92100@@ -1876,7 +1876,7 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
92101 */
92102 return false;
92103 }
92104- ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92105+ ACCESS_ONCE_RW(rsp->gp_flags) = RCU_GP_FLAG_INIT;
92106 trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
92107 TPS("newreq"));
92108
92109@@ -2097,7 +2097,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
92110 rsp->qlen += rdp->qlen;
92111 rdp->n_cbs_orphaned += rdp->qlen;
92112 rdp->qlen_lazy = 0;
92113- ACCESS_ONCE(rdp->qlen) = 0;
92114+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92115 }
92116
92117 /*
92118@@ -2344,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
92119 }
92120 smp_mb(); /* List handling before counting for rcu_barrier(). */
92121 rdp->qlen_lazy -= count_lazy;
92122- ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
92123+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen - count;
92124 rdp->n_cbs_invoked += count;
92125
92126 /* Reinstate batch limit if we have worked down the excess. */
92127@@ -2505,7 +2505,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
92128 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92129 return; /* Someone beat us to it. */
92130 }
92131- ACCESS_ONCE(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
92132+ ACCESS_ONCE_RW(rsp->gp_flags) |= RCU_GP_FLAG_FQS;
92133 raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
92134 rcu_gp_kthread_wake(rsp);
92135 }
92136@@ -2550,7 +2550,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
92137 /*
92138 * Do RCU core processing for the current CPU.
92139 */
92140-static void rcu_process_callbacks(struct softirq_action *unused)
92141+static void rcu_process_callbacks(void)
92142 {
92143 struct rcu_state *rsp;
92144
92145@@ -2662,7 +2662,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92146 WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
92147 if (debug_rcu_head_queue(head)) {
92148 /* Probable double call_rcu(), so leak the callback. */
92149- ACCESS_ONCE(head->func) = rcu_leak_callback;
92150+ ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
92151 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
92152 return;
92153 }
92154@@ -2690,7 +2690,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
92155 local_irq_restore(flags);
92156 return;
92157 }
92158- ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
92159+ ACCESS_ONCE_RW(rdp->qlen) = rdp->qlen + 1;
92160 if (lazy)
92161 rdp->qlen_lazy++;
92162 else
92163@@ -2965,11 +2965,11 @@ void synchronize_sched_expedited(void)
92164 * counter wrap on a 32-bit system. Quite a few more CPUs would of
92165 * course be required on a 64-bit system.
92166 */
92167- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
92168+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
92169 (ulong)atomic_long_read(&rsp->expedited_done) +
92170 ULONG_MAX / 8)) {
92171 synchronize_sched();
92172- atomic_long_inc(&rsp->expedited_wrap);
92173+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
92174 return;
92175 }
92176
92177@@ -2977,7 +2977,7 @@ void synchronize_sched_expedited(void)
92178 * Take a ticket. Note that atomic_inc_return() implies a
92179 * full memory barrier.
92180 */
92181- snap = atomic_long_inc_return(&rsp->expedited_start);
92182+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
92183 firstsnap = snap;
92184 get_online_cpus();
92185 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
92186@@ -2990,14 +2990,14 @@ void synchronize_sched_expedited(void)
92187 synchronize_sched_expedited_cpu_stop,
92188 NULL) == -EAGAIN) {
92189 put_online_cpus();
92190- atomic_long_inc(&rsp->expedited_tryfail);
92191+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
92192
92193 /* Check to see if someone else did our work for us. */
92194 s = atomic_long_read(&rsp->expedited_done);
92195 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92196 /* ensure test happens before caller kfree */
92197 smp_mb__before_atomic(); /* ^^^ */
92198- atomic_long_inc(&rsp->expedited_workdone1);
92199+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
92200 return;
92201 }
92202
92203@@ -3006,7 +3006,7 @@ void synchronize_sched_expedited(void)
92204 udelay(trycount * num_online_cpus());
92205 } else {
92206 wait_rcu_gp(call_rcu_sched);
92207- atomic_long_inc(&rsp->expedited_normal);
92208+ atomic_long_inc_unchecked(&rsp->expedited_normal);
92209 return;
92210 }
92211
92212@@ -3015,7 +3015,7 @@ void synchronize_sched_expedited(void)
92213 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
92214 /* ensure test happens before caller kfree */
92215 smp_mb__before_atomic(); /* ^^^ */
92216- atomic_long_inc(&rsp->expedited_workdone2);
92217+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
92218 return;
92219 }
92220
92221@@ -3027,10 +3027,10 @@ void synchronize_sched_expedited(void)
92222 * period works for us.
92223 */
92224 get_online_cpus();
92225- snap = atomic_long_read(&rsp->expedited_start);
92226+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
92227 smp_mb(); /* ensure read is before try_stop_cpus(). */
92228 }
92229- atomic_long_inc(&rsp->expedited_stoppedcpus);
92230+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
92231
92232 /*
92233 * Everyone up to our most recent fetch is covered by our grace
92234@@ -3039,16 +3039,16 @@ void synchronize_sched_expedited(void)
92235 * than we did already did their update.
92236 */
92237 do {
92238- atomic_long_inc(&rsp->expedited_done_tries);
92239+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
92240 s = atomic_long_read(&rsp->expedited_done);
92241 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
92242 /* ensure test happens before caller kfree */
92243 smp_mb__before_atomic(); /* ^^^ */
92244- atomic_long_inc(&rsp->expedited_done_lost);
92245+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
92246 break;
92247 }
92248 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
92249- atomic_long_inc(&rsp->expedited_done_exit);
92250+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
92251
92252 put_online_cpus();
92253 }
92254@@ -3254,7 +3254,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92255 * ACCESS_ONCE() to prevent the compiler from speculating
92256 * the increment to precede the early-exit check.
92257 */
92258- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92259+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92260 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
92261 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
92262 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
92263@@ -3304,7 +3304,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
92264
92265 /* Increment ->n_barrier_done to prevent duplicate work. */
92266 smp_mb(); /* Keep increment after above mechanism. */
92267- ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92268+ ACCESS_ONCE_RW(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
92269 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
92270 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
92271 smp_mb(); /* Keep increment before caller's subsequent code. */
92272@@ -3349,10 +3349,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
92273 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
92274 init_callback_list(rdp);
92275 rdp->qlen_lazy = 0;
92276- ACCESS_ONCE(rdp->qlen) = 0;
92277+ ACCESS_ONCE_RW(rdp->qlen) = 0;
92278 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
92279 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
92280- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
92281+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
92282 rdp->cpu = cpu;
92283 rdp->rsp = rsp;
92284 rcu_boot_init_nocb_percpu_data(rdp);
92285@@ -3385,8 +3385,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
92286 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
92287 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
92288 rcu_sysidle_init_percpu_data(rdp->dynticks);
92289- atomic_set(&rdp->dynticks->dynticks,
92290- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
92291+ atomic_set_unchecked(&rdp->dynticks->dynticks,
92292+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
92293 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
92294
92295 /* Add CPU to rcu_node bitmasks. */
92296diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
92297index 6a86eb7..022b506 100644
92298--- a/kernel/rcu/tree.h
92299+++ b/kernel/rcu/tree.h
92300@@ -87,11 +87,11 @@ struct rcu_dynticks {
92301 long long dynticks_nesting; /* Track irq/process nesting level. */
92302 /* Process level is worth LLONG_MAX/2. */
92303 int dynticks_nmi_nesting; /* Track NMI nesting level. */
92304- atomic_t dynticks; /* Even value for idle, else odd. */
92305+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
92306 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
92307 long long dynticks_idle_nesting;
92308 /* irq/process nesting level from idle. */
92309- atomic_t dynticks_idle; /* Even value for idle, else odd. */
92310+ atomic_unchecked_t dynticks_idle;/* Even value for idle, else odd. */
92311 /* "Idle" excludes userspace execution. */
92312 unsigned long dynticks_idle_jiffies;
92313 /* End of last non-NMI non-idle period. */
92314@@ -461,17 +461,17 @@ struct rcu_state {
92315 /* _rcu_barrier(). */
92316 /* End of fields guarded by barrier_mutex. */
92317
92318- atomic_long_t expedited_start; /* Starting ticket. */
92319- atomic_long_t expedited_done; /* Done ticket. */
92320- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
92321- atomic_long_t expedited_tryfail; /* # acquisition failures. */
92322- atomic_long_t expedited_workdone1; /* # done by others #1. */
92323- atomic_long_t expedited_workdone2; /* # done by others #2. */
92324- atomic_long_t expedited_normal; /* # fallbacks to normal. */
92325- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
92326- atomic_long_t expedited_done_tries; /* # tries to update _done. */
92327- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
92328- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
92329+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
92330+ atomic_long_t expedited_done; /* Done ticket. */
92331+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
92332+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
92333+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
92334+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
92335+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
92336+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
92337+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
92338+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
92339+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
92340
92341 unsigned long jiffies_force_qs; /* Time at which to invoke */
92342 /* force_quiescent_state(). */
92343diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
92344index a7997e2..9787c9e 100644
92345--- a/kernel/rcu/tree_plugin.h
92346+++ b/kernel/rcu/tree_plugin.h
92347@@ -735,7 +735,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
92348 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
92349 {
92350 return !rcu_preempted_readers_exp(rnp) &&
92351- ACCESS_ONCE(rnp->expmask) == 0;
92352+ ACCESS_ONCE_RW(rnp->expmask) == 0;
92353 }
92354
92355 /*
92356@@ -897,7 +897,7 @@ void synchronize_rcu_expedited(void)
92357
92358 /* Clean up and exit. */
92359 smp_mb(); /* ensure expedited GP seen before counter increment. */
92360- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
92361+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
92362 unlock_mb_ret:
92363 mutex_unlock(&sync_rcu_preempt_exp_mutex);
92364 mb_ret:
92365@@ -1452,7 +1452,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
92366 free_cpumask_var(cm);
92367 }
92368
92369-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
92370+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
92371 .store = &rcu_cpu_kthread_task,
92372 .thread_should_run = rcu_cpu_kthread_should_run,
92373 .thread_fn = rcu_cpu_kthread,
92374@@ -1932,7 +1932,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
92375 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
92376 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
92377 cpu, ticks_value, ticks_title,
92378- atomic_read(&rdtp->dynticks) & 0xfff,
92379+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
92380 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
92381 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
92382 fast_no_hz);
92383@@ -2076,7 +2076,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
92384 return;
92385 if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
92386 /* Prior xchg orders against prior callback enqueue. */
92387- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
92388+ ACCESS_ONCE_RW(rdp_leader->nocb_leader_sleep) = false;
92389 wake_up(&rdp_leader->nocb_wq);
92390 }
92391 }
92392@@ -2101,7 +2101,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
92393
92394 /* Enqueue the callback on the nocb list and update counts. */
92395 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
92396- ACCESS_ONCE(*old_rhpp) = rhp;
92397+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
92398 atomic_long_add(rhcount, &rdp->nocb_q_count);
92399 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
92400
92401@@ -2272,7 +2272,7 @@ wait_again:
92402 continue; /* No CBs here, try next follower. */
92403
92404 /* Move callbacks to wait-for-GP list, which is empty. */
92405- ACCESS_ONCE(rdp->nocb_head) = NULL;
92406+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
92407 rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
92408 rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
92409 rdp->nocb_gp_count_lazy =
92410@@ -2398,7 +2398,7 @@ static int rcu_nocb_kthread(void *arg)
92411 list = ACCESS_ONCE(rdp->nocb_follower_head);
92412 BUG_ON(!list);
92413 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
92414- ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
92415+ ACCESS_ONCE_RW(rdp->nocb_follower_head) = NULL;
92416 tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
92417 c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
92418 cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
92419@@ -2428,8 +2428,8 @@ static int rcu_nocb_kthread(void *arg)
92420 list = next;
92421 }
92422 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
92423- ACCESS_ONCE(rdp->nocb_p_count) -= c;
92424- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
92425+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
92426+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
92427 rdp->n_nocbs_invoked += c;
92428 }
92429 return 0;
92430@@ -2446,7 +2446,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
92431 {
92432 if (!rcu_nocb_need_deferred_wakeup(rdp))
92433 return;
92434- ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
92435+ ACCESS_ONCE_RW(rdp->nocb_defer_wakeup) = false;
92436 wake_nocb_leader(rdp, false);
92437 trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
92438 }
92439@@ -2510,7 +2510,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
92440 t = kthread_run(rcu_nocb_kthread, rdp,
92441 "rcuo%c/%d", rsp->abbr, cpu);
92442 BUG_ON(IS_ERR(t));
92443- ACCESS_ONCE(rdp->nocb_kthread) = t;
92444+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
92445 }
92446 }
92447
92448@@ -2641,11 +2641,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
92449
92450 /* Record start of fully idle period. */
92451 j = jiffies;
92452- ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
92453+ ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
92454 smp_mb__before_atomic();
92455- atomic_inc(&rdtp->dynticks_idle);
92456+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92457 smp_mb__after_atomic();
92458- WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
92459+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
92460 }
92461
92462 /*
92463@@ -2710,9 +2710,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
92464
92465 /* Record end of idle period. */
92466 smp_mb__before_atomic();
92467- atomic_inc(&rdtp->dynticks_idle);
92468+ atomic_inc_unchecked(&rdtp->dynticks_idle);
92469 smp_mb__after_atomic();
92470- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
92471+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
92472
92473 /*
92474 * If we are the timekeeping CPU, we are permitted to be non-idle
92475@@ -2753,7 +2753,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
92476 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
92477
92478 /* Pick up current idle and NMI-nesting counter and check. */
92479- cur = atomic_read(&rdtp->dynticks_idle);
92480+ cur = atomic_read_unchecked(&rdtp->dynticks_idle);
92481 if (cur & 0x1) {
92482 *isidle = false; /* We are not idle! */
92483 return;
92484@@ -2802,7 +2802,7 @@ static void rcu_sysidle(unsigned long j)
92485 case RCU_SYSIDLE_NOT:
92486
92487 /* First time all are idle, so note a short idle period. */
92488- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92489+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
92490 break;
92491
92492 case RCU_SYSIDLE_SHORT:
92493@@ -2840,7 +2840,7 @@ static void rcu_sysidle_cancel(void)
92494 {
92495 smp_mb();
92496 if (full_sysidle_state > RCU_SYSIDLE_SHORT)
92497- ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
92498+ ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
92499 }
92500
92501 /*
92502@@ -2888,7 +2888,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
92503 smp_mb(); /* grace period precedes setting inuse. */
92504
92505 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
92506- ACCESS_ONCE(rshp->inuse) = 0;
92507+ ACCESS_ONCE_RW(rshp->inuse) = 0;
92508 }
92509
92510 /*
92511diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
92512index 5cdc62e..cc52e88 100644
92513--- a/kernel/rcu/tree_trace.c
92514+++ b/kernel/rcu/tree_trace.c
92515@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
92516 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
92517 rdp->passed_quiesce, rdp->qs_pending);
92518 seq_printf(m, " dt=%d/%llx/%d df=%lu",
92519- atomic_read(&rdp->dynticks->dynticks),
92520+ atomic_read_unchecked(&rdp->dynticks->dynticks),
92521 rdp->dynticks->dynticks_nesting,
92522 rdp->dynticks->dynticks_nmi_nesting,
92523 rdp->dynticks_fqs);
92524@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
92525 struct rcu_state *rsp = (struct rcu_state *)m->private;
92526
92527 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
92528- atomic_long_read(&rsp->expedited_start),
92529+ atomic_long_read_unchecked(&rsp->expedited_start),
92530 atomic_long_read(&rsp->expedited_done),
92531- atomic_long_read(&rsp->expedited_wrap),
92532- atomic_long_read(&rsp->expedited_tryfail),
92533- atomic_long_read(&rsp->expedited_workdone1),
92534- atomic_long_read(&rsp->expedited_workdone2),
92535- atomic_long_read(&rsp->expedited_normal),
92536- atomic_long_read(&rsp->expedited_stoppedcpus),
92537- atomic_long_read(&rsp->expedited_done_tries),
92538- atomic_long_read(&rsp->expedited_done_lost),
92539- atomic_long_read(&rsp->expedited_done_exit));
92540+ atomic_long_read_unchecked(&rsp->expedited_wrap),
92541+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
92542+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
92543+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
92544+ atomic_long_read_unchecked(&rsp->expedited_normal),
92545+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
92546+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
92547+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
92548+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
92549 return 0;
92550 }
92551
92552diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
92553index 4056d79..c11741a 100644
92554--- a/kernel/rcu/update.c
92555+++ b/kernel/rcu/update.c
92556@@ -308,10 +308,10 @@ int rcu_jiffies_till_stall_check(void)
92557 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
92558 */
92559 if (till_stall_check < 3) {
92560- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
92561+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
92562 till_stall_check = 3;
92563 } else if (till_stall_check > 300) {
92564- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
92565+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
92566 till_stall_check = 300;
92567 }
92568 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
92569diff --git a/kernel/resource.c b/kernel/resource.c
92570index 60c5a38..ed77193 100644
92571--- a/kernel/resource.c
92572+++ b/kernel/resource.c
92573@@ -161,8 +161,18 @@ static const struct file_operations proc_iomem_operations = {
92574
92575 static int __init ioresources_init(void)
92576 {
92577+#ifdef CONFIG_GRKERNSEC_PROC_ADD
92578+#ifdef CONFIG_GRKERNSEC_PROC_USER
92579+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
92580+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
92581+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
92582+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
92583+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
92584+#endif
92585+#else
92586 proc_create("ioports", 0, NULL, &proc_ioports_operations);
92587 proc_create("iomem", 0, NULL, &proc_iomem_operations);
92588+#endif
92589 return 0;
92590 }
92591 __initcall(ioresources_init);
92592diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
92593index e73efba..c9bfbd4 100644
92594--- a/kernel/sched/auto_group.c
92595+++ b/kernel/sched/auto_group.c
92596@@ -11,7 +11,7 @@
92597
92598 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
92599 static struct autogroup autogroup_default;
92600-static atomic_t autogroup_seq_nr;
92601+static atomic_unchecked_t autogroup_seq_nr;
92602
92603 void __init autogroup_init(struct task_struct *init_task)
92604 {
92605@@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
92606
92607 kref_init(&ag->kref);
92608 init_rwsem(&ag->lock);
92609- ag->id = atomic_inc_return(&autogroup_seq_nr);
92610+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
92611 ag->tg = tg;
92612 #ifdef CONFIG_RT_GROUP_SCHED
92613 /*
92614diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
92615index a63f4dc..349bbb0 100644
92616--- a/kernel/sched/completion.c
92617+++ b/kernel/sched/completion.c
92618@@ -204,7 +204,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
92619 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92620 * or number of jiffies left till timeout) if completed.
92621 */
92622-long __sched
92623+long __sched __intentional_overflow(-1)
92624 wait_for_completion_interruptible_timeout(struct completion *x,
92625 unsigned long timeout)
92626 {
92627@@ -221,7 +221,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
92628 *
92629 * Return: -ERESTARTSYS if interrupted, 0 if completed.
92630 */
92631-int __sched wait_for_completion_killable(struct completion *x)
92632+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
92633 {
92634 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
92635 if (t == -ERESTARTSYS)
92636@@ -242,7 +242,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
92637 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
92638 * or number of jiffies left till timeout) if completed.
92639 */
92640-long __sched
92641+long __sched __intentional_overflow(-1)
92642 wait_for_completion_killable_timeout(struct completion *x,
92643 unsigned long timeout)
92644 {
92645diff --git a/kernel/sched/core.c b/kernel/sched/core.c
92646index 6d7cb91..420f2d2 100644
92647--- a/kernel/sched/core.c
92648+++ b/kernel/sched/core.c
92649@@ -1857,7 +1857,7 @@ void set_numabalancing_state(bool enabled)
92650 int sysctl_numa_balancing(struct ctl_table *table, int write,
92651 void __user *buffer, size_t *lenp, loff_t *ppos)
92652 {
92653- struct ctl_table t;
92654+ ctl_table_no_const t;
92655 int err;
92656 int state = numabalancing_enabled;
92657
92658@@ -2324,8 +2324,10 @@ context_switch(struct rq *rq, struct task_struct *prev,
92659 next->active_mm = oldmm;
92660 atomic_inc(&oldmm->mm_count);
92661 enter_lazy_tlb(oldmm, next);
92662- } else
92663+ } else {
92664 switch_mm(oldmm, mm, next);
92665+ populate_stack();
92666+ }
92667
92668 if (!prev->mm) {
92669 prev->active_mm = NULL;
92670@@ -3107,6 +3109,8 @@ int can_nice(const struct task_struct *p, const int nice)
92671 /* convert nice value [19,-20] to rlimit style value [1,40] */
92672 int nice_rlim = nice_to_rlimit(nice);
92673
92674+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
92675+
92676 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
92677 capable(CAP_SYS_NICE));
92678 }
92679@@ -3133,7 +3137,8 @@ SYSCALL_DEFINE1(nice, int, increment)
92680 nice = task_nice(current) + increment;
92681
92682 nice = clamp_val(nice, MIN_NICE, MAX_NICE);
92683- if (increment < 0 && !can_nice(current, nice))
92684+ if (increment < 0 && (!can_nice(current, nice) ||
92685+ gr_handle_chroot_nice()))
92686 return -EPERM;
92687
92688 retval = security_task_setnice(current, nice);
92689@@ -3412,6 +3417,7 @@ recheck:
92690 if (policy != p->policy && !rlim_rtprio)
92691 return -EPERM;
92692
92693+ gr_learn_resource(p, RLIMIT_RTPRIO, attr->sched_priority, 1);
92694 /* can't increase priority */
92695 if (attr->sched_priority > p->rt_priority &&
92696 attr->sched_priority > rlim_rtprio)
92697@@ -4802,6 +4808,7 @@ void idle_task_exit(void)
92698
92699 if (mm != &init_mm) {
92700 switch_mm(mm, &init_mm, current);
92701+ populate_stack();
92702 finish_arch_post_lock_switch();
92703 }
92704 mmdrop(mm);
92705@@ -4897,7 +4904,7 @@ static void migrate_tasks(unsigned int dead_cpu)
92706
92707 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
92708
92709-static struct ctl_table sd_ctl_dir[] = {
92710+static ctl_table_no_const sd_ctl_dir[] __read_only = {
92711 {
92712 .procname = "sched_domain",
92713 .mode = 0555,
92714@@ -4914,17 +4921,17 @@ static struct ctl_table sd_ctl_root[] = {
92715 {}
92716 };
92717
92718-static struct ctl_table *sd_alloc_ctl_entry(int n)
92719+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
92720 {
92721- struct ctl_table *entry =
92722+ ctl_table_no_const *entry =
92723 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
92724
92725 return entry;
92726 }
92727
92728-static void sd_free_ctl_entry(struct ctl_table **tablep)
92729+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
92730 {
92731- struct ctl_table *entry;
92732+ ctl_table_no_const *entry;
92733
92734 /*
92735 * In the intermediate directories, both the child directory and
92736@@ -4932,22 +4939,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
92737 * will always be set. In the lowest directory the names are
92738 * static strings and all have proc handlers.
92739 */
92740- for (entry = *tablep; entry->mode; entry++) {
92741- if (entry->child)
92742- sd_free_ctl_entry(&entry->child);
92743+ for (entry = tablep; entry->mode; entry++) {
92744+ if (entry->child) {
92745+ sd_free_ctl_entry(entry->child);
92746+ pax_open_kernel();
92747+ entry->child = NULL;
92748+ pax_close_kernel();
92749+ }
92750 if (entry->proc_handler == NULL)
92751 kfree(entry->procname);
92752 }
92753
92754- kfree(*tablep);
92755- *tablep = NULL;
92756+ kfree(tablep);
92757 }
92758
92759 static int min_load_idx = 0;
92760 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
92761
92762 static void
92763-set_table_entry(struct ctl_table *entry,
92764+set_table_entry(ctl_table_no_const *entry,
92765 const char *procname, void *data, int maxlen,
92766 umode_t mode, proc_handler *proc_handler,
92767 bool load_idx)
92768@@ -4967,7 +4977,7 @@ set_table_entry(struct ctl_table *entry,
92769 static struct ctl_table *
92770 sd_alloc_ctl_domain_table(struct sched_domain *sd)
92771 {
92772- struct ctl_table *table = sd_alloc_ctl_entry(14);
92773+ ctl_table_no_const *table = sd_alloc_ctl_entry(14);
92774
92775 if (table == NULL)
92776 return NULL;
92777@@ -5005,9 +5015,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
92778 return table;
92779 }
92780
92781-static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
92782+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
92783 {
92784- struct ctl_table *entry, *table;
92785+ ctl_table_no_const *entry, *table;
92786 struct sched_domain *sd;
92787 int domain_num = 0, i;
92788 char buf[32];
92789@@ -5034,11 +5044,13 @@ static struct ctl_table_header *sd_sysctl_header;
92790 static void register_sched_domain_sysctl(void)
92791 {
92792 int i, cpu_num = num_possible_cpus();
92793- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
92794+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
92795 char buf[32];
92796
92797 WARN_ON(sd_ctl_dir[0].child);
92798+ pax_open_kernel();
92799 sd_ctl_dir[0].child = entry;
92800+ pax_close_kernel();
92801
92802 if (entry == NULL)
92803 return;
92804@@ -5061,8 +5073,12 @@ static void unregister_sched_domain_sysctl(void)
92805 if (sd_sysctl_header)
92806 unregister_sysctl_table(sd_sysctl_header);
92807 sd_sysctl_header = NULL;
92808- if (sd_ctl_dir[0].child)
92809- sd_free_ctl_entry(&sd_ctl_dir[0].child);
92810+ if (sd_ctl_dir[0].child) {
92811+ sd_free_ctl_entry(sd_ctl_dir[0].child);
92812+ pax_open_kernel();
92813+ sd_ctl_dir[0].child = NULL;
92814+ pax_close_kernel();
92815+ }
92816 }
92817 #else
92818 static void register_sched_domain_sysctl(void)
92819diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
92820index bfa3c86..e58767c 100644
92821--- a/kernel/sched/fair.c
92822+++ b/kernel/sched/fair.c
92823@@ -1873,7 +1873,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
92824
92825 static void reset_ptenuma_scan(struct task_struct *p)
92826 {
92827- ACCESS_ONCE(p->mm->numa_scan_seq)++;
92828+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
92829 p->mm->numa_scan_offset = 0;
92830 }
92831
92832@@ -7339,7 +7339,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
92833 * run_rebalance_domains is triggered when needed from the scheduler tick.
92834 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
92835 */
92836-static void run_rebalance_domains(struct softirq_action *h)
92837+static __latent_entropy void run_rebalance_domains(void)
92838 {
92839 struct rq *this_rq = this_rq();
92840 enum cpu_idle_type idle = this_rq->idle_balance ?
92841diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
92842index 579712f..a338a9d 100644
92843--- a/kernel/sched/sched.h
92844+++ b/kernel/sched/sched.h
92845@@ -1146,7 +1146,7 @@ struct sched_class {
92846 #ifdef CONFIG_FAIR_GROUP_SCHED
92847 void (*task_move_group) (struct task_struct *p, int on_rq);
92848 #endif
92849-};
92850+} __do_const;
92851
92852 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
92853 {
92854diff --git a/kernel/seccomp.c b/kernel/seccomp.c
92855index 44eb005..84922be 100644
92856--- a/kernel/seccomp.c
92857+++ b/kernel/seccomp.c
92858@@ -395,16 +395,15 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
92859 if (!filter)
92860 goto free_prog;
92861
92862- filter->prog = kzalloc(bpf_prog_size(new_len),
92863- GFP_KERNEL|__GFP_NOWARN);
92864+ filter->prog = bpf_prog_alloc(bpf_prog_size(new_len), __GFP_NOWARN);
92865 if (!filter->prog)
92866 goto free_filter;
92867
92868 ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
92869 if (ret)
92870 goto free_filter_prog;
92871- kfree(fp);
92872
92873+ kfree(fp);
92874 atomic_set(&filter->usage, 1);
92875 filter->prog->len = new_len;
92876
92877@@ -413,7 +412,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
92878 return filter;
92879
92880 free_filter_prog:
92881- kfree(filter->prog);
92882+ __bpf_prog_free(filter->prog);
92883 free_filter:
92884 kfree(filter);
92885 free_prog:
92886diff --git a/kernel/signal.c b/kernel/signal.c
92887index 8f0876f..1153a5a 100644
92888--- a/kernel/signal.c
92889+++ b/kernel/signal.c
92890@@ -53,12 +53,12 @@ static struct kmem_cache *sigqueue_cachep;
92891
92892 int print_fatal_signals __read_mostly;
92893
92894-static void __user *sig_handler(struct task_struct *t, int sig)
92895+static __sighandler_t sig_handler(struct task_struct *t, int sig)
92896 {
92897 return t->sighand->action[sig - 1].sa.sa_handler;
92898 }
92899
92900-static int sig_handler_ignored(void __user *handler, int sig)
92901+static int sig_handler_ignored(__sighandler_t handler, int sig)
92902 {
92903 /* Is it explicitly or implicitly ignored? */
92904 return handler == SIG_IGN ||
92905@@ -67,7 +67,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
92906
92907 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
92908 {
92909- void __user *handler;
92910+ __sighandler_t handler;
92911
92912 handler = sig_handler(t, sig);
92913
92914@@ -372,6 +372,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
92915 atomic_inc(&user->sigpending);
92916 rcu_read_unlock();
92917
92918+ if (!override_rlimit)
92919+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
92920+
92921 if (override_rlimit ||
92922 atomic_read(&user->sigpending) <=
92923 task_rlimit(t, RLIMIT_SIGPENDING)) {
92924@@ -499,7 +502,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
92925
92926 int unhandled_signal(struct task_struct *tsk, int sig)
92927 {
92928- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
92929+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
92930 if (is_global_init(tsk))
92931 return 1;
92932 if (handler != SIG_IGN && handler != SIG_DFL)
92933@@ -793,6 +796,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
92934 }
92935 }
92936
92937+ /* allow glibc communication via tgkill to other threads in our
92938+ thread group */
92939+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
92940+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
92941+ && gr_handle_signal(t, sig))
92942+ return -EPERM;
92943+
92944 return security_task_kill(t, info, sig, 0);
92945 }
92946
92947@@ -1176,7 +1186,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92948 return send_signal(sig, info, p, 1);
92949 }
92950
92951-static int
92952+int
92953 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92954 {
92955 return send_signal(sig, info, t, 0);
92956@@ -1213,6 +1223,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92957 unsigned long int flags;
92958 int ret, blocked, ignored;
92959 struct k_sigaction *action;
92960+ int is_unhandled = 0;
92961
92962 spin_lock_irqsave(&t->sighand->siglock, flags);
92963 action = &t->sighand->action[sig-1];
92964@@ -1227,9 +1238,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
92965 }
92966 if (action->sa.sa_handler == SIG_DFL)
92967 t->signal->flags &= ~SIGNAL_UNKILLABLE;
92968+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
92969+ is_unhandled = 1;
92970 ret = specific_send_sig_info(sig, info, t);
92971 spin_unlock_irqrestore(&t->sighand->siglock, flags);
92972
92973+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
92974+ normal operation */
92975+ if (is_unhandled) {
92976+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
92977+ gr_handle_crash(t, sig);
92978+ }
92979+
92980 return ret;
92981 }
92982
92983@@ -1300,8 +1320,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
92984 ret = check_kill_permission(sig, info, p);
92985 rcu_read_unlock();
92986
92987- if (!ret && sig)
92988+ if (!ret && sig) {
92989 ret = do_send_sig_info(sig, info, p, true);
92990+ if (!ret)
92991+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
92992+ }
92993
92994 return ret;
92995 }
92996@@ -2903,7 +2926,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
92997 int error = -ESRCH;
92998
92999 rcu_read_lock();
93000- p = find_task_by_vpid(pid);
93001+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
93002+ /* allow glibc communication via tgkill to other threads in our
93003+ thread group */
93004+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
93005+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
93006+ p = find_task_by_vpid_unrestricted(pid);
93007+ else
93008+#endif
93009+ p = find_task_by_vpid(pid);
93010 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
93011 error = check_kill_permission(sig, info, p);
93012 /*
93013@@ -3236,8 +3267,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
93014 }
93015 seg = get_fs();
93016 set_fs(KERNEL_DS);
93017- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
93018- (stack_t __force __user *) &uoss,
93019+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
93020+ (stack_t __force_user *) &uoss,
93021 compat_user_stack_pointer());
93022 set_fs(seg);
93023 if (ret >= 0 && uoss_ptr) {
93024diff --git a/kernel/smpboot.c b/kernel/smpboot.c
93025index eb89e18..a4e6792 100644
93026--- a/kernel/smpboot.c
93027+++ b/kernel/smpboot.c
93028@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
93029 }
93030 smpboot_unpark_thread(plug_thread, cpu);
93031 }
93032- list_add(&plug_thread->list, &hotplug_threads);
93033+ pax_list_add(&plug_thread->list, &hotplug_threads);
93034 out:
93035 mutex_unlock(&smpboot_threads_lock);
93036 return ret;
93037@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
93038 {
93039 get_online_cpus();
93040 mutex_lock(&smpboot_threads_lock);
93041- list_del(&plug_thread->list);
93042+ pax_list_del(&plug_thread->list);
93043 smpboot_destroy_threads(plug_thread);
93044 mutex_unlock(&smpboot_threads_lock);
93045 put_online_cpus();
93046diff --git a/kernel/softirq.c b/kernel/softirq.c
93047index 5918d22..e95d1926 100644
93048--- a/kernel/softirq.c
93049+++ b/kernel/softirq.c
93050@@ -53,7 +53,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
93051 EXPORT_SYMBOL(irq_stat);
93052 #endif
93053
93054-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
93055+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
93056
93057 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
93058
93059@@ -266,7 +266,7 @@ restart:
93060 kstat_incr_softirqs_this_cpu(vec_nr);
93061
93062 trace_softirq_entry(vec_nr);
93063- h->action(h);
93064+ h->action();
93065 trace_softirq_exit(vec_nr);
93066 if (unlikely(prev_count != preempt_count())) {
93067 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
93068@@ -426,7 +426,7 @@ void __raise_softirq_irqoff(unsigned int nr)
93069 or_softirq_pending(1UL << nr);
93070 }
93071
93072-void open_softirq(int nr, void (*action)(struct softirq_action *))
93073+void __init open_softirq(int nr, void (*action)(void))
93074 {
93075 softirq_vec[nr].action = action;
93076 }
93077@@ -478,7 +478,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
93078 }
93079 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
93080
93081-static void tasklet_action(struct softirq_action *a)
93082+static void tasklet_action(void)
93083 {
93084 struct tasklet_struct *list;
93085
93086@@ -514,7 +514,7 @@ static void tasklet_action(struct softirq_action *a)
93087 }
93088 }
93089
93090-static void tasklet_hi_action(struct softirq_action *a)
93091+static __latent_entropy void tasklet_hi_action(void)
93092 {
93093 struct tasklet_struct *list;
93094
93095@@ -741,7 +741,7 @@ static struct notifier_block cpu_nfb = {
93096 .notifier_call = cpu_callback
93097 };
93098
93099-static struct smp_hotplug_thread softirq_threads = {
93100+static struct smp_hotplug_thread softirq_threads __read_only = {
93101 .store = &ksoftirqd,
93102 .thread_should_run = ksoftirqd_should_run,
93103 .thread_fn = run_ksoftirqd,
93104diff --git a/kernel/sys.c b/kernel/sys.c
93105index ce81291..df2ca85 100644
93106--- a/kernel/sys.c
93107+++ b/kernel/sys.c
93108@@ -148,6 +148,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
93109 error = -EACCES;
93110 goto out;
93111 }
93112+
93113+ if (gr_handle_chroot_setpriority(p, niceval)) {
93114+ error = -EACCES;
93115+ goto out;
93116+ }
93117+
93118 no_nice = security_task_setnice(p, niceval);
93119 if (no_nice) {
93120 error = no_nice;
93121@@ -351,6 +357,20 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
93122 goto error;
93123 }
93124
93125+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
93126+ goto error;
93127+
93128+ if (!gid_eq(new->gid, old->gid)) {
93129+ /* make sure we generate a learn log for what will
93130+ end up being a role transition after a full-learning
93131+ policy is generated
93132+ CAP_SETGID is required to perform a transition
93133+ we may not log a CAP_SETGID check above, e.g.
93134+ in the case where new rgid = old egid
93135+ */
93136+ gr_learn_cap(current, new, CAP_SETGID);
93137+ }
93138+
93139 if (rgid != (gid_t) -1 ||
93140 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
93141 new->sgid = new->egid;
93142@@ -386,6 +406,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93143 old = current_cred();
93144
93145 retval = -EPERM;
93146+
93147+ if (gr_check_group_change(kgid, kgid, kgid))
93148+ goto error;
93149+
93150 if (ns_capable(old->user_ns, CAP_SETGID))
93151 new->gid = new->egid = new->sgid = new->fsgid = kgid;
93152 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
93153@@ -403,7 +427,7 @@ error:
93154 /*
93155 * change the user struct in a credentials set to match the new UID
93156 */
93157-static int set_user(struct cred *new)
93158+int set_user(struct cred *new)
93159 {
93160 struct user_struct *new_user;
93161
93162@@ -483,7 +507,18 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93163 goto error;
93164 }
93165
93166+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
93167+ goto error;
93168+
93169 if (!uid_eq(new->uid, old->uid)) {
93170+ /* make sure we generate a learn log for what will
93171+ end up being a role transition after a full-learning
93172+ policy is generated
93173+ CAP_SETUID is required to perform a transition
93174+ we may not log a CAP_SETUID check above, e.g.
93175+ in the case where new ruid = old euid
93176+ */
93177+ gr_learn_cap(current, new, CAP_SETUID);
93178 retval = set_user(new);
93179 if (retval < 0)
93180 goto error;
93181@@ -533,6 +568,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93182 old = current_cred();
93183
93184 retval = -EPERM;
93185+
93186+ if (gr_check_crash_uid(kuid))
93187+ goto error;
93188+ if (gr_check_user_change(kuid, kuid, kuid))
93189+ goto error;
93190+
93191 if (ns_capable(old->user_ns, CAP_SETUID)) {
93192 new->suid = new->uid = kuid;
93193 if (!uid_eq(kuid, old->uid)) {
93194@@ -602,6 +643,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93195 goto error;
93196 }
93197
93198+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
93199+ goto error;
93200+
93201 if (ruid != (uid_t) -1) {
93202 new->uid = kruid;
93203 if (!uid_eq(kruid, old->uid)) {
93204@@ -684,6 +728,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93205 goto error;
93206 }
93207
93208+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
93209+ goto error;
93210+
93211 if (rgid != (gid_t) -1)
93212 new->gid = krgid;
93213 if (egid != (gid_t) -1)
93214@@ -745,12 +792,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93215 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
93216 ns_capable(old->user_ns, CAP_SETUID)) {
93217 if (!uid_eq(kuid, old->fsuid)) {
93218+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
93219+ goto error;
93220+
93221 new->fsuid = kuid;
93222 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
93223 goto change_okay;
93224 }
93225 }
93226
93227+error:
93228 abort_creds(new);
93229 return old_fsuid;
93230
93231@@ -783,12 +834,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93232 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
93233 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
93234 ns_capable(old->user_ns, CAP_SETGID)) {
93235+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
93236+ goto error;
93237+
93238 if (!gid_eq(kgid, old->fsgid)) {
93239 new->fsgid = kgid;
93240 goto change_okay;
93241 }
93242 }
93243
93244+error:
93245 abort_creds(new);
93246 return old_fsgid;
93247
93248@@ -1167,19 +1222,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
93249 return -EFAULT;
93250
93251 down_read(&uts_sem);
93252- error = __copy_to_user(&name->sysname, &utsname()->sysname,
93253+ error = __copy_to_user(name->sysname, &utsname()->sysname,
93254 __OLD_UTS_LEN);
93255 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
93256- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
93257+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
93258 __OLD_UTS_LEN);
93259 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
93260- error |= __copy_to_user(&name->release, &utsname()->release,
93261+ error |= __copy_to_user(name->release, &utsname()->release,
93262 __OLD_UTS_LEN);
93263 error |= __put_user(0, name->release + __OLD_UTS_LEN);
93264- error |= __copy_to_user(&name->version, &utsname()->version,
93265+ error |= __copy_to_user(name->version, &utsname()->version,
93266 __OLD_UTS_LEN);
93267 error |= __put_user(0, name->version + __OLD_UTS_LEN);
93268- error |= __copy_to_user(&name->machine, &utsname()->machine,
93269+ error |= __copy_to_user(name->machine, &utsname()->machine,
93270 __OLD_UTS_LEN);
93271 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
93272 up_read(&uts_sem);
93273@@ -1381,6 +1436,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
93274 */
93275 new_rlim->rlim_cur = 1;
93276 }
93277+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
93278+ is changed to a lower value. Since tasks can be created by the same
93279+ user in between this limit change and an execve by this task, force
93280+ a recheck only for this task by setting PF_NPROC_EXCEEDED
93281+ */
93282+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
93283+ tsk->flags |= PF_NPROC_EXCEEDED;
93284 }
93285 if (!retval) {
93286 if (old_rlim)
93287diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93288index 75875a7..cd8e838 100644
93289--- a/kernel/sysctl.c
93290+++ b/kernel/sysctl.c
93291@@ -94,7 +94,6 @@
93292
93293
93294 #if defined(CONFIG_SYSCTL)
93295-
93296 /* External variables not in a header file. */
93297 extern int max_threads;
93298 extern int suid_dumpable;
93299@@ -115,19 +114,20 @@ extern int sysctl_nr_trim_pages;
93300
93301 /* Constants used for minimum and maximum */
93302 #ifdef CONFIG_LOCKUP_DETECTOR
93303-static int sixty = 60;
93304+static int sixty __read_only = 60;
93305 #endif
93306
93307-static int __maybe_unused neg_one = -1;
93308+static int __maybe_unused neg_one __read_only = -1;
93309
93310-static int zero;
93311-static int __maybe_unused one = 1;
93312-static int __maybe_unused two = 2;
93313-static int __maybe_unused four = 4;
93314-static unsigned long one_ul = 1;
93315-static int one_hundred = 100;
93316+static int zero __read_only = 0;
93317+static int __maybe_unused one __read_only = 1;
93318+static int __maybe_unused two __read_only = 2;
93319+static int __maybe_unused three __read_only = 3;
93320+static int __maybe_unused four __read_only = 4;
93321+static unsigned long one_ul __read_only = 1;
93322+static int one_hundred __read_only = 100;
93323 #ifdef CONFIG_PRINTK
93324-static int ten_thousand = 10000;
93325+static int ten_thousand __read_only = 10000;
93326 #endif
93327
93328 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
93329@@ -181,10 +181,8 @@ static int proc_taint(struct ctl_table *table, int write,
93330 void __user *buffer, size_t *lenp, loff_t *ppos);
93331 #endif
93332
93333-#ifdef CONFIG_PRINTK
93334 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93335 void __user *buffer, size_t *lenp, loff_t *ppos);
93336-#endif
93337
93338 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
93339 void __user *buffer, size_t *lenp, loff_t *ppos);
93340@@ -215,6 +213,8 @@ static int sysrq_sysctl_handler(struct ctl_table *table, int write,
93341
93342 #endif
93343
93344+extern struct ctl_table grsecurity_table[];
93345+
93346 static struct ctl_table kern_table[];
93347 static struct ctl_table vm_table[];
93348 static struct ctl_table fs_table[];
93349@@ -229,6 +229,20 @@ extern struct ctl_table epoll_table[];
93350 int sysctl_legacy_va_layout;
93351 #endif
93352
93353+#ifdef CONFIG_PAX_SOFTMODE
93354+static struct ctl_table pax_table[] = {
93355+ {
93356+ .procname = "softmode",
93357+ .data = &pax_softmode,
93358+ .maxlen = sizeof(unsigned int),
93359+ .mode = 0600,
93360+ .proc_handler = &proc_dointvec,
93361+ },
93362+
93363+ { }
93364+};
93365+#endif
93366+
93367 /* The default sysctl tables: */
93368
93369 static struct ctl_table sysctl_base_table[] = {
93370@@ -277,6 +291,22 @@ static int max_extfrag_threshold = 1000;
93371 #endif
93372
93373 static struct ctl_table kern_table[] = {
93374+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93375+ {
93376+ .procname = "grsecurity",
93377+ .mode = 0500,
93378+ .child = grsecurity_table,
93379+ },
93380+#endif
93381+
93382+#ifdef CONFIG_PAX_SOFTMODE
93383+ {
93384+ .procname = "pax",
93385+ .mode = 0500,
93386+ .child = pax_table,
93387+ },
93388+#endif
93389+
93390 {
93391 .procname = "sched_child_runs_first",
93392 .data = &sysctl_sched_child_runs_first,
93393@@ -641,7 +671,7 @@ static struct ctl_table kern_table[] = {
93394 .data = &modprobe_path,
93395 .maxlen = KMOD_PATH_LEN,
93396 .mode = 0644,
93397- .proc_handler = proc_dostring,
93398+ .proc_handler = proc_dostring_modpriv,
93399 },
93400 {
93401 .procname = "modules_disabled",
93402@@ -808,16 +838,20 @@ static struct ctl_table kern_table[] = {
93403 .extra1 = &zero,
93404 .extra2 = &one,
93405 },
93406+#endif
93407 {
93408 .procname = "kptr_restrict",
93409 .data = &kptr_restrict,
93410 .maxlen = sizeof(int),
93411 .mode = 0644,
93412 .proc_handler = proc_dointvec_minmax_sysadmin,
93413+#ifdef CONFIG_GRKERNSEC_HIDESYM
93414+ .extra1 = &two,
93415+#else
93416 .extra1 = &zero,
93417+#endif
93418 .extra2 = &two,
93419 },
93420-#endif
93421 {
93422 .procname = "ngroups_max",
93423 .data = &ngroups_max,
93424@@ -1073,10 +1107,17 @@ static struct ctl_table kern_table[] = {
93425 */
93426 {
93427 .procname = "perf_event_paranoid",
93428- .data = &sysctl_perf_event_paranoid,
93429- .maxlen = sizeof(sysctl_perf_event_paranoid),
93430+ .data = &sysctl_perf_event_legitimately_concerned,
93431+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
93432 .mode = 0644,
93433- .proc_handler = proc_dointvec,
93434+ /* go ahead, be a hero */
93435+ .proc_handler = proc_dointvec_minmax_sysadmin,
93436+ .extra1 = &neg_one,
93437+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
93438+ .extra2 = &three,
93439+#else
93440+ .extra2 = &two,
93441+#endif
93442 },
93443 {
93444 .procname = "perf_event_mlock_kb",
93445@@ -1335,6 +1376,13 @@ static struct ctl_table vm_table[] = {
93446 .proc_handler = proc_dointvec_minmax,
93447 .extra1 = &zero,
93448 },
93449+ {
93450+ .procname = "heap_stack_gap",
93451+ .data = &sysctl_heap_stack_gap,
93452+ .maxlen = sizeof(sysctl_heap_stack_gap),
93453+ .mode = 0644,
93454+ .proc_handler = proc_doulongvec_minmax,
93455+ },
93456 #else
93457 {
93458 .procname = "nr_trim_pages",
93459@@ -1824,6 +1872,16 @@ int proc_dostring(struct ctl_table *table, int write,
93460 (char __user *)buffer, lenp, ppos);
93461 }
93462
93463+int proc_dostring_modpriv(struct ctl_table *table, int write,
93464+ void __user *buffer, size_t *lenp, loff_t *ppos)
93465+{
93466+ if (write && !capable(CAP_SYS_MODULE))
93467+ return -EPERM;
93468+
93469+ return _proc_do_string(table->data, table->maxlen, write,
93470+ buffer, lenp, ppos);
93471+}
93472+
93473 static size_t proc_skip_spaces(char **buf)
93474 {
93475 size_t ret;
93476@@ -1929,6 +1987,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
93477 len = strlen(tmp);
93478 if (len > *size)
93479 len = *size;
93480+ if (len > sizeof(tmp))
93481+ len = sizeof(tmp);
93482 if (copy_to_user(*buf, tmp, len))
93483 return -EFAULT;
93484 *size -= len;
93485@@ -2106,7 +2166,7 @@ int proc_dointvec(struct ctl_table *table, int write,
93486 static int proc_taint(struct ctl_table *table, int write,
93487 void __user *buffer, size_t *lenp, loff_t *ppos)
93488 {
93489- struct ctl_table t;
93490+ ctl_table_no_const t;
93491 unsigned long tmptaint = get_taint();
93492 int err;
93493
93494@@ -2134,7 +2194,6 @@ static int proc_taint(struct ctl_table *table, int write,
93495 return err;
93496 }
93497
93498-#ifdef CONFIG_PRINTK
93499 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93500 void __user *buffer, size_t *lenp, loff_t *ppos)
93501 {
93502@@ -2143,7 +2202,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
93503
93504 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
93505 }
93506-#endif
93507
93508 struct do_proc_dointvec_minmax_conv_param {
93509 int *min;
93510@@ -2703,6 +2761,12 @@ int proc_dostring(struct ctl_table *table, int write,
93511 return -ENOSYS;
93512 }
93513
93514+int proc_dostring_modpriv(struct ctl_table *table, int write,
93515+ void __user *buffer, size_t *lenp, loff_t *ppos)
93516+{
93517+ return -ENOSYS;
93518+}
93519+
93520 int proc_dointvec(struct ctl_table *table, int write,
93521 void __user *buffer, size_t *lenp, loff_t *ppos)
93522 {
93523@@ -2759,5 +2823,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
93524 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
93525 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
93526 EXPORT_SYMBOL(proc_dostring);
93527+EXPORT_SYMBOL(proc_dostring_modpriv);
93528 EXPORT_SYMBOL(proc_doulongvec_minmax);
93529 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
93530diff --git a/kernel/taskstats.c b/kernel/taskstats.c
93531index 13d2f7c..c93d0b0 100644
93532--- a/kernel/taskstats.c
93533+++ b/kernel/taskstats.c
93534@@ -28,9 +28,12 @@
93535 #include <linux/fs.h>
93536 #include <linux/file.h>
93537 #include <linux/pid_namespace.h>
93538+#include <linux/grsecurity.h>
93539 #include <net/genetlink.h>
93540 #include <linux/atomic.h>
93541
93542+extern int gr_is_taskstats_denied(int pid);
93543+
93544 /*
93545 * Maximum length of a cpumask that can be specified in
93546 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
93547@@ -576,6 +579,9 @@ err:
93548
93549 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
93550 {
93551+ if (gr_is_taskstats_denied(current->pid))
93552+ return -EACCES;
93553+
93554 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
93555 return cmd_attr_register_cpumask(info);
93556 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
93557diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
93558index a7077d3..dd48a49 100644
93559--- a/kernel/time/alarmtimer.c
93560+++ b/kernel/time/alarmtimer.c
93561@@ -823,7 +823,7 @@ static int __init alarmtimer_init(void)
93562 struct platform_device *pdev;
93563 int error = 0;
93564 int i;
93565- struct k_clock alarm_clock = {
93566+ static struct k_clock alarm_clock = {
93567 .clock_getres = alarm_clock_getres,
93568 .clock_get = alarm_clock_get,
93569 .timer_create = alarm_timer_create,
93570diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
93571index 1c2fe7d..ce7483d 100644
93572--- a/kernel/time/hrtimer.c
93573+++ b/kernel/time/hrtimer.c
93574@@ -1399,7 +1399,7 @@ void hrtimer_peek_ahead_timers(void)
93575 local_irq_restore(flags);
93576 }
93577
93578-static void run_hrtimer_softirq(struct softirq_action *h)
93579+static __latent_entropy void run_hrtimer_softirq(void)
93580 {
93581 hrtimer_peek_ahead_timers();
93582 }
93583diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
93584index 3b89464..5e38379 100644
93585--- a/kernel/time/posix-cpu-timers.c
93586+++ b/kernel/time/posix-cpu-timers.c
93587@@ -1464,14 +1464,14 @@ struct k_clock clock_posix_cpu = {
93588
93589 static __init int init_posix_cpu_timers(void)
93590 {
93591- struct k_clock process = {
93592+ static struct k_clock process = {
93593 .clock_getres = process_cpu_clock_getres,
93594 .clock_get = process_cpu_clock_get,
93595 .timer_create = process_cpu_timer_create,
93596 .nsleep = process_cpu_nsleep,
93597 .nsleep_restart = process_cpu_nsleep_restart,
93598 };
93599- struct k_clock thread = {
93600+ static struct k_clock thread = {
93601 .clock_getres = thread_cpu_clock_getres,
93602 .clock_get = thread_cpu_clock_get,
93603 .timer_create = thread_cpu_timer_create,
93604diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
93605index 31ea01f..7fc61ef 100644
93606--- a/kernel/time/posix-timers.c
93607+++ b/kernel/time/posix-timers.c
93608@@ -43,6 +43,7 @@
93609 #include <linux/hash.h>
93610 #include <linux/posix-clock.h>
93611 #include <linux/posix-timers.h>
93612+#include <linux/grsecurity.h>
93613 #include <linux/syscalls.h>
93614 #include <linux/wait.h>
93615 #include <linux/workqueue.h>
93616@@ -124,7 +125,7 @@ static DEFINE_SPINLOCK(hash_lock);
93617 * which we beg off on and pass to do_sys_settimeofday().
93618 */
93619
93620-static struct k_clock posix_clocks[MAX_CLOCKS];
93621+static struct k_clock *posix_clocks[MAX_CLOCKS];
93622
93623 /*
93624 * These ones are defined below.
93625@@ -277,7 +278,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
93626 */
93627 static __init int init_posix_timers(void)
93628 {
93629- struct k_clock clock_realtime = {
93630+ static struct k_clock clock_realtime = {
93631 .clock_getres = hrtimer_get_res,
93632 .clock_get = posix_clock_realtime_get,
93633 .clock_set = posix_clock_realtime_set,
93634@@ -289,7 +290,7 @@ static __init int init_posix_timers(void)
93635 .timer_get = common_timer_get,
93636 .timer_del = common_timer_del,
93637 };
93638- struct k_clock clock_monotonic = {
93639+ static struct k_clock clock_monotonic = {
93640 .clock_getres = hrtimer_get_res,
93641 .clock_get = posix_ktime_get_ts,
93642 .nsleep = common_nsleep,
93643@@ -299,19 +300,19 @@ static __init int init_posix_timers(void)
93644 .timer_get = common_timer_get,
93645 .timer_del = common_timer_del,
93646 };
93647- struct k_clock clock_monotonic_raw = {
93648+ static struct k_clock clock_monotonic_raw = {
93649 .clock_getres = hrtimer_get_res,
93650 .clock_get = posix_get_monotonic_raw,
93651 };
93652- struct k_clock clock_realtime_coarse = {
93653+ static struct k_clock clock_realtime_coarse = {
93654 .clock_getres = posix_get_coarse_res,
93655 .clock_get = posix_get_realtime_coarse,
93656 };
93657- struct k_clock clock_monotonic_coarse = {
93658+ static struct k_clock clock_monotonic_coarse = {
93659 .clock_getres = posix_get_coarse_res,
93660 .clock_get = posix_get_monotonic_coarse,
93661 };
93662- struct k_clock clock_tai = {
93663+ static struct k_clock clock_tai = {
93664 .clock_getres = hrtimer_get_res,
93665 .clock_get = posix_get_tai,
93666 .nsleep = common_nsleep,
93667@@ -321,7 +322,7 @@ static __init int init_posix_timers(void)
93668 .timer_get = common_timer_get,
93669 .timer_del = common_timer_del,
93670 };
93671- struct k_clock clock_boottime = {
93672+ static struct k_clock clock_boottime = {
93673 .clock_getres = hrtimer_get_res,
93674 .clock_get = posix_get_boottime,
93675 .nsleep = common_nsleep,
93676@@ -533,7 +534,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
93677 return;
93678 }
93679
93680- posix_clocks[clock_id] = *new_clock;
93681+ posix_clocks[clock_id] = new_clock;
93682 }
93683 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
93684
93685@@ -579,9 +580,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
93686 return (id & CLOCKFD_MASK) == CLOCKFD ?
93687 &clock_posix_dynamic : &clock_posix_cpu;
93688
93689- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
93690+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
93691 return NULL;
93692- return &posix_clocks[id];
93693+ return posix_clocks[id];
93694 }
93695
93696 static int common_timer_create(struct k_itimer *new_timer)
93697@@ -599,7 +600,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
93698 struct k_clock *kc = clockid_to_kclock(which_clock);
93699 struct k_itimer *new_timer;
93700 int error, new_timer_id;
93701- sigevent_t event;
93702+ sigevent_t event = { };
93703 int it_id_set = IT_ID_NOT_SET;
93704
93705 if (!kc)
93706@@ -1014,6 +1015,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
93707 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
93708 return -EFAULT;
93709
93710+ /* only the CLOCK_REALTIME clock can be set, all other clocks
93711+ have their clock_set fptr set to a nosettime dummy function
93712+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
93713+ call common_clock_set, which calls do_sys_settimeofday, which
93714+ we hook
93715+ */
93716+
93717 return kc->clock_set(which_clock, &new_tp);
93718 }
93719
93720diff --git a/kernel/time/time.c b/kernel/time/time.c
93721index a9ae20f..d3fbde7 100644
93722--- a/kernel/time/time.c
93723+++ b/kernel/time/time.c
93724@@ -173,6 +173,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
93725 return error;
93726
93727 if (tz) {
93728+ /* we log in do_settimeofday called below, so don't log twice
93729+ */
93730+ if (!tv)
93731+ gr_log_timechange();
93732+
93733 sys_tz = *tz;
93734 update_vsyscall_tz();
93735 if (firsttime) {
93736diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
93737index ec1791f..6a086cd 100644
93738--- a/kernel/time/timekeeping.c
93739+++ b/kernel/time/timekeeping.c
93740@@ -15,6 +15,7 @@
93741 #include <linux/init.h>
93742 #include <linux/mm.h>
93743 #include <linux/sched.h>
93744+#include <linux/grsecurity.h>
93745 #include <linux/syscore_ops.h>
93746 #include <linux/clocksource.h>
93747 #include <linux/jiffies.h>
93748@@ -717,6 +718,8 @@ int do_settimeofday(const struct timespec *tv)
93749 if (!timespec_valid_strict(tv))
93750 return -EINVAL;
93751
93752+ gr_log_timechange();
93753+
93754 raw_spin_lock_irqsave(&timekeeper_lock, flags);
93755 write_seqcount_begin(&tk_core.seq);
93756
93757diff --git a/kernel/time/timer.c b/kernel/time/timer.c
93758index 9bbb834..3caa8ed 100644
93759--- a/kernel/time/timer.c
93760+++ b/kernel/time/timer.c
93761@@ -1394,7 +1394,7 @@ void update_process_times(int user_tick)
93762 /*
93763 * This function runs timers and the timer-tq in bottom half context.
93764 */
93765-static void run_timer_softirq(struct softirq_action *h)
93766+static __latent_entropy void run_timer_softirq(void)
93767 {
93768 struct tvec_base *base = __this_cpu_read(tvec_bases);
93769
93770@@ -1457,7 +1457,7 @@ static void process_timeout(unsigned long __data)
93771 *
93772 * In all cases the return value is guaranteed to be non-negative.
93773 */
93774-signed long __sched schedule_timeout(signed long timeout)
93775+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
93776 {
93777 struct timer_list timer;
93778 unsigned long expire;
93779diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
93780index 61ed862..3b52c65 100644
93781--- a/kernel/time/timer_list.c
93782+++ b/kernel/time/timer_list.c
93783@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
93784
93785 static void print_name_offset(struct seq_file *m, void *sym)
93786 {
93787+#ifdef CONFIG_GRKERNSEC_HIDESYM
93788+ SEQ_printf(m, "<%p>", NULL);
93789+#else
93790 char symname[KSYM_NAME_LEN];
93791
93792 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
93793 SEQ_printf(m, "<%pK>", sym);
93794 else
93795 SEQ_printf(m, "%s", symname);
93796+#endif
93797 }
93798
93799 static void
93800@@ -119,7 +123,11 @@ next_one:
93801 static void
93802 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
93803 {
93804+#ifdef CONFIG_GRKERNSEC_HIDESYM
93805+ SEQ_printf(m, " .base: %p\n", NULL);
93806+#else
93807 SEQ_printf(m, " .base: %pK\n", base);
93808+#endif
93809 SEQ_printf(m, " .index: %d\n",
93810 base->index);
93811 SEQ_printf(m, " .resolution: %Lu nsecs\n",
93812@@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
93813 {
93814 struct proc_dir_entry *pe;
93815
93816+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93817+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
93818+#else
93819 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
93820+#endif
93821 if (!pe)
93822 return -ENOMEM;
93823 return 0;
93824diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
93825index 1fb08f2..ca4bb1e 100644
93826--- a/kernel/time/timer_stats.c
93827+++ b/kernel/time/timer_stats.c
93828@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
93829 static unsigned long nr_entries;
93830 static struct entry entries[MAX_ENTRIES];
93831
93832-static atomic_t overflow_count;
93833+static atomic_unchecked_t overflow_count;
93834
93835 /*
93836 * The entries are in a hash-table, for fast lookup:
93837@@ -140,7 +140,7 @@ static void reset_entries(void)
93838 nr_entries = 0;
93839 memset(entries, 0, sizeof(entries));
93840 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
93841- atomic_set(&overflow_count, 0);
93842+ atomic_set_unchecked(&overflow_count, 0);
93843 }
93844
93845 static struct entry *alloc_entry(void)
93846@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93847 if (likely(entry))
93848 entry->count++;
93849 else
93850- atomic_inc(&overflow_count);
93851+ atomic_inc_unchecked(&overflow_count);
93852
93853 out_unlock:
93854 raw_spin_unlock_irqrestore(lock, flags);
93855@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
93856
93857 static void print_name_offset(struct seq_file *m, unsigned long addr)
93858 {
93859+#ifdef CONFIG_GRKERNSEC_HIDESYM
93860+ seq_printf(m, "<%p>", NULL);
93861+#else
93862 char symname[KSYM_NAME_LEN];
93863
93864 if (lookup_symbol_name(addr, symname) < 0)
93865- seq_printf(m, "<%p>", (void *)addr);
93866+ seq_printf(m, "<%pK>", (void *)addr);
93867 else
93868 seq_printf(m, "%s", symname);
93869+#endif
93870 }
93871
93872 static int tstats_show(struct seq_file *m, void *v)
93873@@ -300,8 +304,8 @@ static int tstats_show(struct seq_file *m, void *v)
93874
93875 seq_puts(m, "Timer Stats Version: v0.3\n");
93876 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
93877- if (atomic_read(&overflow_count))
93878- seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
93879+ if (atomic_read_unchecked(&overflow_count))
93880+ seq_printf(m, "Overflow: %d entries\n", atomic_read_unchecked(&overflow_count));
93881 seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
93882
93883 for (i = 0; i < nr_entries; i++) {
93884@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
93885 {
93886 struct proc_dir_entry *pe;
93887
93888+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93889+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
93890+#else
93891 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
93892+#endif
93893 if (!pe)
93894 return -ENOMEM;
93895 return 0;
93896diff --git a/kernel/torture.c b/kernel/torture.c
93897index d600af2..27a4e9d 100644
93898--- a/kernel/torture.c
93899+++ b/kernel/torture.c
93900@@ -484,7 +484,7 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
93901 mutex_lock(&fullstop_mutex);
93902 if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
93903 VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
93904- ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
93905+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_SHUTDOWN;
93906 } else {
93907 pr_warn("Concurrent rmmod and shutdown illegal!\n");
93908 }
93909@@ -551,14 +551,14 @@ static int torture_stutter(void *arg)
93910 if (!torture_must_stop()) {
93911 if (stutter > 1) {
93912 schedule_timeout_interruptible(stutter - 1);
93913- ACCESS_ONCE(stutter_pause_test) = 2;
93914+ ACCESS_ONCE_RW(stutter_pause_test) = 2;
93915 }
93916 schedule_timeout_interruptible(1);
93917- ACCESS_ONCE(stutter_pause_test) = 1;
93918+ ACCESS_ONCE_RW(stutter_pause_test) = 1;
93919 }
93920 if (!torture_must_stop())
93921 schedule_timeout_interruptible(stutter);
93922- ACCESS_ONCE(stutter_pause_test) = 0;
93923+ ACCESS_ONCE_RW(stutter_pause_test) = 0;
93924 torture_shutdown_absorb("torture_stutter");
93925 } while (!torture_must_stop());
93926 torture_kthread_stopping("torture_stutter");
93927@@ -645,7 +645,7 @@ bool torture_cleanup(void)
93928 schedule_timeout_uninterruptible(10);
93929 return true;
93930 }
93931- ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
93932+ ACCESS_ONCE_RW(fullstop) = FULLSTOP_RMMOD;
93933 mutex_unlock(&fullstop_mutex);
93934 torture_shutdown_cleanup();
93935 torture_shuffle_cleanup();
93936diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
93937index c1bd4ad..4b861dc 100644
93938--- a/kernel/trace/blktrace.c
93939+++ b/kernel/trace/blktrace.c
93940@@ -328,7 +328,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
93941 struct blk_trace *bt = filp->private_data;
93942 char buf[16];
93943
93944- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
93945+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
93946
93947 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
93948 }
93949@@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
93950 return 1;
93951
93952 bt = buf->chan->private_data;
93953- atomic_inc(&bt->dropped);
93954+ atomic_inc_unchecked(&bt->dropped);
93955 return 0;
93956 }
93957
93958@@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
93959
93960 bt->dir = dir;
93961 bt->dev = dev;
93962- atomic_set(&bt->dropped, 0);
93963+ atomic_set_unchecked(&bt->dropped, 0);
93964 INIT_LIST_HEAD(&bt->running_list);
93965
93966 ret = -EIO;
93967diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
93968index 5916a8e..5cd3b1f 100644
93969--- a/kernel/trace/ftrace.c
93970+++ b/kernel/trace/ftrace.c
93971@@ -2128,12 +2128,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
93972 if (unlikely(ftrace_disabled))
93973 return 0;
93974
93975+ ret = ftrace_arch_code_modify_prepare();
93976+ FTRACE_WARN_ON(ret);
93977+ if (ret)
93978+ return 0;
93979+
93980 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
93981+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
93982 if (ret) {
93983 ftrace_bug(ret, ip);
93984- return 0;
93985 }
93986- return 1;
93987+ return ret ? 0 : 1;
93988 }
93989
93990 /*
93991@@ -4458,8 +4463,10 @@ static int ftrace_process_locs(struct module *mod,
93992 if (!count)
93993 return 0;
93994
93995+ pax_open_kernel();
93996 sort(start, count, sizeof(*start),
93997 ftrace_cmp_ips, ftrace_swap_ips);
93998+ pax_close_kernel();
93999
94000 start_pg = ftrace_allocate_pages(count);
94001 if (!start_pg)
94002diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
94003index a56e07c..d46f0ba 100644
94004--- a/kernel/trace/ring_buffer.c
94005+++ b/kernel/trace/ring_buffer.c
94006@@ -352,9 +352,9 @@ struct buffer_data_page {
94007 */
94008 struct buffer_page {
94009 struct list_head list; /* list of buffer pages */
94010- local_t write; /* index for next write */
94011+ local_unchecked_t write; /* index for next write */
94012 unsigned read; /* index for next read */
94013- local_t entries; /* entries on this page */
94014+ local_unchecked_t entries; /* entries on this page */
94015 unsigned long real_end; /* real end of data */
94016 struct buffer_data_page *page; /* Actual data page */
94017 };
94018@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
94019 unsigned long last_overrun;
94020 local_t entries_bytes;
94021 local_t entries;
94022- local_t overrun;
94023- local_t commit_overrun;
94024+ local_unchecked_t overrun;
94025+ local_unchecked_t commit_overrun;
94026 local_t dropped_events;
94027 local_t committing;
94028 local_t commits;
94029@@ -1032,8 +1032,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94030 *
94031 * We add a counter to the write field to denote this.
94032 */
94033- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
94034- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
94035+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
94036+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
94037
94038 /*
94039 * Just make sure we have seen our old_write and synchronize
94040@@ -1061,8 +1061,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
94041 * cmpxchg to only update if an interrupt did not already
94042 * do it for us. If the cmpxchg fails, we don't care.
94043 */
94044- (void)local_cmpxchg(&next_page->write, old_write, val);
94045- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
94046+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
94047+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
94048
94049 /*
94050 * No need to worry about races with clearing out the commit.
94051@@ -1429,12 +1429,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
94052
94053 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
94054 {
94055- return local_read(&bpage->entries) & RB_WRITE_MASK;
94056+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
94057 }
94058
94059 static inline unsigned long rb_page_write(struct buffer_page *bpage)
94060 {
94061- return local_read(&bpage->write) & RB_WRITE_MASK;
94062+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
94063 }
94064
94065 static int
94066@@ -1529,7 +1529,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
94067 * bytes consumed in ring buffer from here.
94068 * Increment overrun to account for the lost events.
94069 */
94070- local_add(page_entries, &cpu_buffer->overrun);
94071+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
94072 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94073 }
94074
94075@@ -2091,7 +2091,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
94076 * it is our responsibility to update
94077 * the counters.
94078 */
94079- local_add(entries, &cpu_buffer->overrun);
94080+ local_add_unchecked(entries, &cpu_buffer->overrun);
94081 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
94082
94083 /*
94084@@ -2241,7 +2241,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94085 if (tail == BUF_PAGE_SIZE)
94086 tail_page->real_end = 0;
94087
94088- local_sub(length, &tail_page->write);
94089+ local_sub_unchecked(length, &tail_page->write);
94090 return;
94091 }
94092
94093@@ -2276,7 +2276,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94094 rb_event_set_padding(event);
94095
94096 /* Set the write back to the previous setting */
94097- local_sub(length, &tail_page->write);
94098+ local_sub_unchecked(length, &tail_page->write);
94099 return;
94100 }
94101
94102@@ -2288,7 +2288,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
94103
94104 /* Set write to end of buffer */
94105 length = (tail + length) - BUF_PAGE_SIZE;
94106- local_sub(length, &tail_page->write);
94107+ local_sub_unchecked(length, &tail_page->write);
94108 }
94109
94110 /*
94111@@ -2314,7 +2314,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94112 * about it.
94113 */
94114 if (unlikely(next_page == commit_page)) {
94115- local_inc(&cpu_buffer->commit_overrun);
94116+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94117 goto out_reset;
94118 }
94119
94120@@ -2370,7 +2370,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
94121 cpu_buffer->tail_page) &&
94122 (cpu_buffer->commit_page ==
94123 cpu_buffer->reader_page))) {
94124- local_inc(&cpu_buffer->commit_overrun);
94125+ local_inc_unchecked(&cpu_buffer->commit_overrun);
94126 goto out_reset;
94127 }
94128 }
94129@@ -2418,7 +2418,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94130 length += RB_LEN_TIME_EXTEND;
94131
94132 tail_page = cpu_buffer->tail_page;
94133- write = local_add_return(length, &tail_page->write);
94134+ write = local_add_return_unchecked(length, &tail_page->write);
94135
94136 /* set write to only the index of the write */
94137 write &= RB_WRITE_MASK;
94138@@ -2442,7 +2442,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
94139 kmemcheck_annotate_bitfield(event, bitfield);
94140 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
94141
94142- local_inc(&tail_page->entries);
94143+ local_inc_unchecked(&tail_page->entries);
94144
94145 /*
94146 * If this is the first commit on the page, then update
94147@@ -2475,7 +2475,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94148
94149 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
94150 unsigned long write_mask =
94151- local_read(&bpage->write) & ~RB_WRITE_MASK;
94152+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
94153 unsigned long event_length = rb_event_length(event);
94154 /*
94155 * This is on the tail page. It is possible that
94156@@ -2485,7 +2485,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
94157 */
94158 old_index += write_mask;
94159 new_index += write_mask;
94160- index = local_cmpxchg(&bpage->write, old_index, new_index);
94161+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
94162 if (index == old_index) {
94163 /* update counters */
94164 local_sub(event_length, &cpu_buffer->entries_bytes);
94165@@ -2877,7 +2877,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94166
94167 /* Do the likely case first */
94168 if (likely(bpage->page == (void *)addr)) {
94169- local_dec(&bpage->entries);
94170+ local_dec_unchecked(&bpage->entries);
94171 return;
94172 }
94173
94174@@ -2889,7 +2889,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
94175 start = bpage;
94176 do {
94177 if (bpage->page == (void *)addr) {
94178- local_dec(&bpage->entries);
94179+ local_dec_unchecked(&bpage->entries);
94180 return;
94181 }
94182 rb_inc_page(cpu_buffer, &bpage);
94183@@ -3173,7 +3173,7 @@ static inline unsigned long
94184 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
94185 {
94186 return local_read(&cpu_buffer->entries) -
94187- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
94188+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
94189 }
94190
94191 /**
94192@@ -3262,7 +3262,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
94193 return 0;
94194
94195 cpu_buffer = buffer->buffers[cpu];
94196- ret = local_read(&cpu_buffer->overrun);
94197+ ret = local_read_unchecked(&cpu_buffer->overrun);
94198
94199 return ret;
94200 }
94201@@ -3285,7 +3285,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
94202 return 0;
94203
94204 cpu_buffer = buffer->buffers[cpu];
94205- ret = local_read(&cpu_buffer->commit_overrun);
94206+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
94207
94208 return ret;
94209 }
94210@@ -3370,7 +3370,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
94211 /* if you care about this being correct, lock the buffer */
94212 for_each_buffer_cpu(buffer, cpu) {
94213 cpu_buffer = buffer->buffers[cpu];
94214- overruns += local_read(&cpu_buffer->overrun);
94215+ overruns += local_read_unchecked(&cpu_buffer->overrun);
94216 }
94217
94218 return overruns;
94219@@ -3541,8 +3541,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94220 /*
94221 * Reset the reader page to size zero.
94222 */
94223- local_set(&cpu_buffer->reader_page->write, 0);
94224- local_set(&cpu_buffer->reader_page->entries, 0);
94225+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94226+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94227 local_set(&cpu_buffer->reader_page->page->commit, 0);
94228 cpu_buffer->reader_page->real_end = 0;
94229
94230@@ -3576,7 +3576,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
94231 * want to compare with the last_overrun.
94232 */
94233 smp_mb();
94234- overwrite = local_read(&(cpu_buffer->overrun));
94235+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
94236
94237 /*
94238 * Here's the tricky part.
94239@@ -4148,8 +4148,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94240
94241 cpu_buffer->head_page
94242 = list_entry(cpu_buffer->pages, struct buffer_page, list);
94243- local_set(&cpu_buffer->head_page->write, 0);
94244- local_set(&cpu_buffer->head_page->entries, 0);
94245+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
94246+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
94247 local_set(&cpu_buffer->head_page->page->commit, 0);
94248
94249 cpu_buffer->head_page->read = 0;
94250@@ -4159,14 +4159,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
94251
94252 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
94253 INIT_LIST_HEAD(&cpu_buffer->new_pages);
94254- local_set(&cpu_buffer->reader_page->write, 0);
94255- local_set(&cpu_buffer->reader_page->entries, 0);
94256+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
94257+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
94258 local_set(&cpu_buffer->reader_page->page->commit, 0);
94259 cpu_buffer->reader_page->read = 0;
94260
94261 local_set(&cpu_buffer->entries_bytes, 0);
94262- local_set(&cpu_buffer->overrun, 0);
94263- local_set(&cpu_buffer->commit_overrun, 0);
94264+ local_set_unchecked(&cpu_buffer->overrun, 0);
94265+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
94266 local_set(&cpu_buffer->dropped_events, 0);
94267 local_set(&cpu_buffer->entries, 0);
94268 local_set(&cpu_buffer->committing, 0);
94269@@ -4571,8 +4571,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
94270 rb_init_page(bpage);
94271 bpage = reader->page;
94272 reader->page = *data_page;
94273- local_set(&reader->write, 0);
94274- local_set(&reader->entries, 0);
94275+ local_set_unchecked(&reader->write, 0);
94276+ local_set_unchecked(&reader->entries, 0);
94277 reader->read = 0;
94278 *data_page = bpage;
94279
94280diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94281index 1520933..c651ebc 100644
94282--- a/kernel/trace/trace.c
94283+++ b/kernel/trace/trace.c
94284@@ -3488,7 +3488,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
94285 return 0;
94286 }
94287
94288-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
94289+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
94290 {
94291 /* do nothing if flag is already set */
94292 if (!!(trace_flags & mask) == !!enabled)
94293diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
94294index 385391f..8d2250f 100644
94295--- a/kernel/trace/trace.h
94296+++ b/kernel/trace/trace.h
94297@@ -1280,7 +1280,7 @@ extern const char *__stop___tracepoint_str[];
94298 void trace_printk_init_buffers(void);
94299 void trace_printk_start_comm(void);
94300 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
94301-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
94302+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
94303
94304 /*
94305 * Normal trace_printk() and friends allocates special buffers
94306diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
94307index 57b67b1..66082a9 100644
94308--- a/kernel/trace/trace_clock.c
94309+++ b/kernel/trace/trace_clock.c
94310@@ -124,7 +124,7 @@ u64 notrace trace_clock_global(void)
94311 return now;
94312 }
94313
94314-static atomic64_t trace_counter;
94315+static atomic64_unchecked_t trace_counter;
94316
94317 /*
94318 * trace_clock_counter(): simply an atomic counter.
94319@@ -133,5 +133,5 @@ static atomic64_t trace_counter;
94320 */
94321 u64 notrace trace_clock_counter(void)
94322 {
94323- return atomic64_add_return(1, &trace_counter);
94324+ return atomic64_inc_return_unchecked(&trace_counter);
94325 }
94326diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94327index ef06ce7..3ea161d 100644
94328--- a/kernel/trace/trace_events.c
94329+++ b/kernel/trace/trace_events.c
94330@@ -1720,7 +1720,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
94331 return 0;
94332 }
94333
94334-struct ftrace_module_file_ops;
94335 static void __add_event_to_tracers(struct ftrace_event_call *call);
94336
94337 /* Add an additional event_call dynamically */
94338diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
94339index 0abd9b8..6a663a2 100644
94340--- a/kernel/trace/trace_mmiotrace.c
94341+++ b/kernel/trace/trace_mmiotrace.c
94342@@ -24,7 +24,7 @@ struct header_iter {
94343 static struct trace_array *mmio_trace_array;
94344 static bool overrun_detected;
94345 static unsigned long prev_overruns;
94346-static atomic_t dropped_count;
94347+static atomic_unchecked_t dropped_count;
94348
94349 static void mmio_reset_data(struct trace_array *tr)
94350 {
94351@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
94352
94353 static unsigned long count_overruns(struct trace_iterator *iter)
94354 {
94355- unsigned long cnt = atomic_xchg(&dropped_count, 0);
94356+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
94357 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
94358
94359 if (over > prev_overruns)
94360@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
94361 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
94362 sizeof(*entry), 0, pc);
94363 if (!event) {
94364- atomic_inc(&dropped_count);
94365+ atomic_inc_unchecked(&dropped_count);
94366 return;
94367 }
94368 entry = ring_buffer_event_data(event);
94369@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
94370 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
94371 sizeof(*entry), 0, pc);
94372 if (!event) {
94373- atomic_inc(&dropped_count);
94374+ atomic_inc_unchecked(&dropped_count);
94375 return;
94376 }
94377 entry = ring_buffer_event_data(event);
94378diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
94379index c6977d5..d243785 100644
94380--- a/kernel/trace/trace_output.c
94381+++ b/kernel/trace/trace_output.c
94382@@ -712,14 +712,16 @@ int register_ftrace_event(struct trace_event *event)
94383 goto out;
94384 }
94385
94386+ pax_open_kernel();
94387 if (event->funcs->trace == NULL)
94388- event->funcs->trace = trace_nop_print;
94389+ *(void **)&event->funcs->trace = trace_nop_print;
94390 if (event->funcs->raw == NULL)
94391- event->funcs->raw = trace_nop_print;
94392+ *(void **)&event->funcs->raw = trace_nop_print;
94393 if (event->funcs->hex == NULL)
94394- event->funcs->hex = trace_nop_print;
94395+ *(void **)&event->funcs->hex = trace_nop_print;
94396 if (event->funcs->binary == NULL)
94397- event->funcs->binary = trace_nop_print;
94398+ *(void **)&event->funcs->binary = trace_nop_print;
94399+ pax_close_kernel();
94400
94401 key = event->type & (EVENT_HASHSIZE - 1);
94402
94403diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
94404index 1f24ed9..10407ec 100644
94405--- a/kernel/trace/trace_seq.c
94406+++ b/kernel/trace/trace_seq.c
94407@@ -367,7 +367,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
94408
94409 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
94410 if (!IS_ERR(p)) {
94411- p = mangle_path(s->buffer + s->len, p, "\n");
94412+ p = mangle_path(s->buffer + s->len, p, "\n\\");
94413 if (p) {
94414 s->len = p - s->buffer;
94415 return 1;
94416diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
94417index 8a4e5cb..64f270d 100644
94418--- a/kernel/trace/trace_stack.c
94419+++ b/kernel/trace/trace_stack.c
94420@@ -91,7 +91,7 @@ check_stack(unsigned long ip, unsigned long *stack)
94421 return;
94422
94423 /* we do not handle interrupt stacks yet */
94424- if (!object_is_on_stack(stack))
94425+ if (!object_starts_on_stack(stack))
94426 return;
94427
94428 local_irq_save(flags);
94429diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
94430index 7e3cd7a..5156a5fe 100644
94431--- a/kernel/trace/trace_syscalls.c
94432+++ b/kernel/trace/trace_syscalls.c
94433@@ -602,6 +602,8 @@ static int perf_sysenter_enable(struct ftrace_event_call *call)
94434 int num;
94435
94436 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94437+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94438+ return -EINVAL;
94439
94440 mutex_lock(&syscall_trace_lock);
94441 if (!sys_perf_refcount_enter)
94442@@ -622,6 +624,8 @@ static void perf_sysenter_disable(struct ftrace_event_call *call)
94443 int num;
94444
94445 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94446+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94447+ return;
94448
94449 mutex_lock(&syscall_trace_lock);
94450 sys_perf_refcount_enter--;
94451@@ -674,6 +678,8 @@ static int perf_sysexit_enable(struct ftrace_event_call *call)
94452 int num;
94453
94454 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94455+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94456+ return -EINVAL;
94457
94458 mutex_lock(&syscall_trace_lock);
94459 if (!sys_perf_refcount_exit)
94460@@ -694,6 +700,8 @@ static void perf_sysexit_disable(struct ftrace_event_call *call)
94461 int num;
94462
94463 num = ((struct syscall_metadata *)call->data)->syscall_nr;
94464+ if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
94465+ return;
94466
94467 mutex_lock(&syscall_trace_lock);
94468 sys_perf_refcount_exit--;
94469diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
94470index aa312b0..395f343 100644
94471--- a/kernel/user_namespace.c
94472+++ b/kernel/user_namespace.c
94473@@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
94474 !kgid_has_mapping(parent_ns, group))
94475 return -EPERM;
94476
94477+#ifdef CONFIG_GRKERNSEC
94478+ /*
94479+ * This doesn't really inspire confidence:
94480+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
94481+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
94482+ * Increases kernel attack surface in areas developers
94483+ * previously cared little about ("low importance due
94484+ * to requiring "root" capability")
94485+ * To be removed when this code receives *proper* review
94486+ */
94487+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
94488+ !capable(CAP_SETGID))
94489+ return -EPERM;
94490+#endif
94491+
94492 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
94493 if (!ns)
94494 return -ENOMEM;
94495@@ -872,7 +887,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
94496 if (atomic_read(&current->mm->mm_users) > 1)
94497 return -EINVAL;
94498
94499- if (current->fs->users != 1)
94500+ if (atomic_read(&current->fs->users) != 1)
94501 return -EINVAL;
94502
94503 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
94504diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
94505index c8eac43..4b5f08f 100644
94506--- a/kernel/utsname_sysctl.c
94507+++ b/kernel/utsname_sysctl.c
94508@@ -47,7 +47,7 @@ static void put_uts(struct ctl_table *table, int write, void *which)
94509 static int proc_do_uts_string(struct ctl_table *table, int write,
94510 void __user *buffer, size_t *lenp, loff_t *ppos)
94511 {
94512- struct ctl_table uts_table;
94513+ ctl_table_no_const uts_table;
94514 int r;
94515 memcpy(&uts_table, table, sizeof(uts_table));
94516 uts_table.data = get_uts(table, write);
94517diff --git a/kernel/watchdog.c b/kernel/watchdog.c
94518index a8d6914..8fbdb13 100644
94519--- a/kernel/watchdog.c
94520+++ b/kernel/watchdog.c
94521@@ -521,7 +521,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
94522 static void watchdog_nmi_disable(unsigned int cpu) { return; }
94523 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
94524
94525-static struct smp_hotplug_thread watchdog_threads = {
94526+static struct smp_hotplug_thread watchdog_threads __read_only = {
94527 .store = &softlockup_watchdog,
94528 .thread_should_run = watchdog_should_run,
94529 .thread_fn = watchdog,
94530diff --git a/kernel/workqueue.c b/kernel/workqueue.c
94531index 5dbe22a..872413c 100644
94532--- a/kernel/workqueue.c
94533+++ b/kernel/workqueue.c
94534@@ -4507,7 +4507,7 @@ static void rebind_workers(struct worker_pool *pool)
94535 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
94536 worker_flags |= WORKER_REBOUND;
94537 worker_flags &= ~WORKER_UNBOUND;
94538- ACCESS_ONCE(worker->flags) = worker_flags;
94539+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
94540 }
94541
94542 spin_unlock_irq(&pool->lock);
94543diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
94544index a285900..5e3b26b 100644
94545--- a/lib/Kconfig.debug
94546+++ b/lib/Kconfig.debug
94547@@ -882,7 +882,7 @@ config DEBUG_MUTEXES
94548
94549 config DEBUG_WW_MUTEX_SLOWPATH
94550 bool "Wait/wound mutex debugging: Slowpath testing"
94551- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94552+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94553 select DEBUG_LOCK_ALLOC
94554 select DEBUG_SPINLOCK
94555 select DEBUG_MUTEXES
94556@@ -899,7 +899,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
94557
94558 config DEBUG_LOCK_ALLOC
94559 bool "Lock debugging: detect incorrect freeing of live locks"
94560- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94561+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94562 select DEBUG_SPINLOCK
94563 select DEBUG_MUTEXES
94564 select LOCKDEP
94565@@ -913,7 +913,7 @@ config DEBUG_LOCK_ALLOC
94566
94567 config PROVE_LOCKING
94568 bool "Lock debugging: prove locking correctness"
94569- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94570+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94571 select LOCKDEP
94572 select DEBUG_SPINLOCK
94573 select DEBUG_MUTEXES
94574@@ -964,7 +964,7 @@ config LOCKDEP
94575
94576 config LOCK_STAT
94577 bool "Lock usage statistics"
94578- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
94579+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
94580 select LOCKDEP
94581 select DEBUG_SPINLOCK
94582 select DEBUG_MUTEXES
94583@@ -1437,6 +1437,7 @@ config LATENCYTOP
94584 depends on DEBUG_KERNEL
94585 depends on STACKTRACE_SUPPORT
94586 depends on PROC_FS
94587+ depends on !GRKERNSEC_HIDESYM
94588 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
94589 select KALLSYMS
94590 select KALLSYMS_ALL
94591@@ -1453,7 +1454,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94592 config DEBUG_STRICT_USER_COPY_CHECKS
94593 bool "Strict user copy size checks"
94594 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
94595- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
94596+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
94597 help
94598 Enabling this option turns a certain set of sanity checks for user
94599 copy operations into compile time failures.
94600@@ -1581,7 +1582,7 @@ endmenu # runtime tests
94601
94602 config PROVIDE_OHCI1394_DMA_INIT
94603 bool "Remote debugging over FireWire early on boot"
94604- depends on PCI && X86
94605+ depends on PCI && X86 && !GRKERNSEC
94606 help
94607 If you want to debug problems which hang or crash the kernel early
94608 on boot and the crashing machine has a FireWire port, you can use
94609diff --git a/lib/Makefile b/lib/Makefile
94610index d6b4bc4..a3724eb 100644
94611--- a/lib/Makefile
94612+++ b/lib/Makefile
94613@@ -55,7 +55,7 @@ obj-$(CONFIG_BTREE) += btree.o
94614 obj-$(CONFIG_INTERVAL_TREE) += interval_tree.o
94615 obj-$(CONFIG_ASSOCIATIVE_ARRAY) += assoc_array.o
94616 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
94617-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
94618+obj-y += list_debug.o
94619 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
94620
94621 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
94622diff --git a/lib/average.c b/lib/average.c
94623index 114d1be..ab0350c 100644
94624--- a/lib/average.c
94625+++ b/lib/average.c
94626@@ -55,7 +55,7 @@ struct ewma *ewma_add(struct ewma *avg, unsigned long val)
94627 {
94628 unsigned long internal = ACCESS_ONCE(avg->internal);
94629
94630- ACCESS_ONCE(avg->internal) = internal ?
94631+ ACCESS_ONCE_RW(avg->internal) = internal ?
94632 (((internal << avg->weight) - internal) +
94633 (val << avg->factor)) >> avg->weight :
94634 (val << avg->factor);
94635diff --git a/lib/bitmap.c b/lib/bitmap.c
94636index 33ce011..89e3d6f 100644
94637--- a/lib/bitmap.c
94638+++ b/lib/bitmap.c
94639@@ -433,7 +433,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
94640 {
94641 int c, old_c, totaldigits, ndigits, nchunks, nbits;
94642 u32 chunk;
94643- const char __user __force *ubuf = (const char __user __force *)buf;
94644+ const char __user *ubuf = (const char __force_user *)buf;
94645
94646 bitmap_zero(maskp, nmaskbits);
94647
94648@@ -518,7 +518,7 @@ int bitmap_parse_user(const char __user *ubuf,
94649 {
94650 if (!access_ok(VERIFY_READ, ubuf, ulen))
94651 return -EFAULT;
94652- return __bitmap_parse((const char __force *)ubuf,
94653+ return __bitmap_parse((const char __force_kernel *)ubuf,
94654 ulen, 1, maskp, nmaskbits);
94655
94656 }
94657@@ -609,7 +609,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
94658 {
94659 unsigned a, b;
94660 int c, old_c, totaldigits;
94661- const char __user __force *ubuf = (const char __user __force *)buf;
94662+ const char __user *ubuf = (const char __force_user *)buf;
94663 int exp_digit, in_range;
94664
94665 totaldigits = c = 0;
94666@@ -704,7 +704,7 @@ int bitmap_parselist_user(const char __user *ubuf,
94667 {
94668 if (!access_ok(VERIFY_READ, ubuf, ulen))
94669 return -EFAULT;
94670- return __bitmap_parselist((const char __force *)ubuf,
94671+ return __bitmap_parselist((const char __force_kernel *)ubuf,
94672 ulen, 1, maskp, nmaskbits);
94673 }
94674 EXPORT_SYMBOL(bitmap_parselist_user);
94675diff --git a/lib/bug.c b/lib/bug.c
94676index d1d7c78..b354235 100644
94677--- a/lib/bug.c
94678+++ b/lib/bug.c
94679@@ -137,6 +137,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
94680 return BUG_TRAP_TYPE_NONE;
94681
94682 bug = find_bug(bugaddr);
94683+ if (!bug)
94684+ return BUG_TRAP_TYPE_NONE;
94685
94686 file = NULL;
94687 line = 0;
94688diff --git a/lib/debugobjects.c b/lib/debugobjects.c
94689index 547f7f9..a6d4ba0 100644
94690--- a/lib/debugobjects.c
94691+++ b/lib/debugobjects.c
94692@@ -289,7 +289,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
94693 if (limit > 4)
94694 return;
94695
94696- is_on_stack = object_is_on_stack(addr);
94697+ is_on_stack = object_starts_on_stack(addr);
94698 if (is_on_stack == onstack)
94699 return;
94700
94701diff --git a/lib/div64.c b/lib/div64.c
94702index 4382ad7..08aa558 100644
94703--- a/lib/div64.c
94704+++ b/lib/div64.c
94705@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
94706 EXPORT_SYMBOL(__div64_32);
94707
94708 #ifndef div_s64_rem
94709-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94710+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
94711 {
94712 u64 quotient;
94713
94714@@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
94715 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
94716 */
94717 #ifndef div64_u64
94718-u64 div64_u64(u64 dividend, u64 divisor)
94719+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
94720 {
94721 u32 high = divisor >> 32;
94722 u64 quot;
94723diff --git a/lib/dma-debug.c b/lib/dma-debug.c
94724index 98f2d7e..899da5c 100644
94725--- a/lib/dma-debug.c
94726+++ b/lib/dma-debug.c
94727@@ -971,7 +971,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
94728
94729 void dma_debug_add_bus(struct bus_type *bus)
94730 {
94731- struct notifier_block *nb;
94732+ notifier_block_no_const *nb;
94733
94734 if (global_disable)
94735 return;
94736@@ -1148,7 +1148,7 @@ static void check_unmap(struct dma_debug_entry *ref)
94737
94738 static void check_for_stack(struct device *dev, void *addr)
94739 {
94740- if (object_is_on_stack(addr))
94741+ if (object_starts_on_stack(addr))
94742 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
94743 "stack [addr=%p]\n", addr);
94744 }
94745diff --git a/lib/hash.c b/lib/hash.c
94746index fea973f..386626f 100644
94747--- a/lib/hash.c
94748+++ b/lib/hash.c
94749@@ -14,7 +14,7 @@
94750 #include <linux/hash.h>
94751 #include <linux/cache.h>
94752
94753-static struct fast_hash_ops arch_hash_ops __read_mostly = {
94754+static struct fast_hash_ops arch_hash_ops __read_only = {
94755 .hash = jhash,
94756 .hash2 = jhash2,
94757 };
94758diff --git a/lib/inflate.c b/lib/inflate.c
94759index 013a761..c28f3fc 100644
94760--- a/lib/inflate.c
94761+++ b/lib/inflate.c
94762@@ -269,7 +269,7 @@ static void free(void *where)
94763 malloc_ptr = free_mem_ptr;
94764 }
94765 #else
94766-#define malloc(a) kmalloc(a, GFP_KERNEL)
94767+#define malloc(a) kmalloc((a), GFP_KERNEL)
94768 #define free(a) kfree(a)
94769 #endif
94770
94771diff --git a/lib/ioremap.c b/lib/ioremap.c
94772index 0c9216c..863bd89 100644
94773--- a/lib/ioremap.c
94774+++ b/lib/ioremap.c
94775@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
94776 unsigned long next;
94777
94778 phys_addr -= addr;
94779- pmd = pmd_alloc(&init_mm, pud, addr);
94780+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
94781 if (!pmd)
94782 return -ENOMEM;
94783 do {
94784@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
94785 unsigned long next;
94786
94787 phys_addr -= addr;
94788- pud = pud_alloc(&init_mm, pgd, addr);
94789+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
94790 if (!pud)
94791 return -ENOMEM;
94792 do {
94793diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
94794index bd2bea9..6b3c95e 100644
94795--- a/lib/is_single_threaded.c
94796+++ b/lib/is_single_threaded.c
94797@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
94798 struct task_struct *p, *t;
94799 bool ret;
94800
94801+ if (!mm)
94802+ return true;
94803+
94804 if (atomic_read(&task->signal->live) != 1)
94805 return false;
94806
94807diff --git a/lib/kobject.c b/lib/kobject.c
94808index 58751bb..93a1853 100644
94809--- a/lib/kobject.c
94810+++ b/lib/kobject.c
94811@@ -931,9 +931,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
94812
94813
94814 static DEFINE_SPINLOCK(kobj_ns_type_lock);
94815-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
94816+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
94817
94818-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94819+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
94820 {
94821 enum kobj_ns_type type = ops->type;
94822 int error;
94823diff --git a/lib/list_debug.c b/lib/list_debug.c
94824index c24c2f7..f0296f4 100644
94825--- a/lib/list_debug.c
94826+++ b/lib/list_debug.c
94827@@ -11,7 +11,9 @@
94828 #include <linux/bug.h>
94829 #include <linux/kernel.h>
94830 #include <linux/rculist.h>
94831+#include <linux/mm.h>
94832
94833+#ifdef CONFIG_DEBUG_LIST
94834 /*
94835 * Insert a new entry between two known consecutive entries.
94836 *
94837@@ -19,21 +21,40 @@
94838 * the prev/next entries already!
94839 */
94840
94841+static bool __list_add_debug(struct list_head *new,
94842+ struct list_head *prev,
94843+ struct list_head *next)
94844+{
94845+ if (unlikely(next->prev != prev)) {
94846+ printk(KERN_ERR "list_add corruption. next->prev should be "
94847+ "prev (%p), but was %p. (next=%p).\n",
94848+ prev, next->prev, next);
94849+ BUG();
94850+ return false;
94851+ }
94852+ if (unlikely(prev->next != next)) {
94853+ printk(KERN_ERR "list_add corruption. prev->next should be "
94854+ "next (%p), but was %p. (prev=%p).\n",
94855+ next, prev->next, prev);
94856+ BUG();
94857+ return false;
94858+ }
94859+ if (unlikely(new == prev || new == next)) {
94860+ printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
94861+ new, prev, next);
94862+ BUG();
94863+ return false;
94864+ }
94865+ return true;
94866+}
94867+
94868 void __list_add(struct list_head *new,
94869- struct list_head *prev,
94870- struct list_head *next)
94871+ struct list_head *prev,
94872+ struct list_head *next)
94873 {
94874- WARN(next->prev != prev,
94875- "list_add corruption. next->prev should be "
94876- "prev (%p), but was %p. (next=%p).\n",
94877- prev, next->prev, next);
94878- WARN(prev->next != next,
94879- "list_add corruption. prev->next should be "
94880- "next (%p), but was %p. (prev=%p).\n",
94881- next, prev->next, prev);
94882- WARN(new == prev || new == next,
94883- "list_add double add: new=%p, prev=%p, next=%p.\n",
94884- new, prev, next);
94885+ if (!__list_add_debug(new, prev, next))
94886+ return;
94887+
94888 next->prev = new;
94889 new->next = next;
94890 new->prev = prev;
94891@@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
94892 }
94893 EXPORT_SYMBOL(__list_add);
94894
94895-void __list_del_entry(struct list_head *entry)
94896+static bool __list_del_entry_debug(struct list_head *entry)
94897 {
94898 struct list_head *prev, *next;
94899
94900 prev = entry->prev;
94901 next = entry->next;
94902
94903- if (WARN(next == LIST_POISON1,
94904- "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94905- entry, LIST_POISON1) ||
94906- WARN(prev == LIST_POISON2,
94907- "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94908- entry, LIST_POISON2) ||
94909- WARN(prev->next != entry,
94910- "list_del corruption. prev->next should be %p, "
94911- "but was %p\n", entry, prev->next) ||
94912- WARN(next->prev != entry,
94913- "list_del corruption. next->prev should be %p, "
94914- "but was %p\n", entry, next->prev))
94915+ if (unlikely(next == LIST_POISON1)) {
94916+ printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
94917+ entry, LIST_POISON1);
94918+ BUG();
94919+ return false;
94920+ }
94921+ if (unlikely(prev == LIST_POISON2)) {
94922+ printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
94923+ entry, LIST_POISON2);
94924+ BUG();
94925+ return false;
94926+ }
94927+ if (unlikely(entry->prev->next != entry)) {
94928+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
94929+ "but was %p\n", entry, prev->next);
94930+ BUG();
94931+ return false;
94932+ }
94933+ if (unlikely(entry->next->prev != entry)) {
94934+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
94935+ "but was %p\n", entry, next->prev);
94936+ BUG();
94937+ return false;
94938+ }
94939+ return true;
94940+}
94941+
94942+void __list_del_entry(struct list_head *entry)
94943+{
94944+ if (!__list_del_entry_debug(entry))
94945 return;
94946
94947- __list_del(prev, next);
94948+ __list_del(entry->prev, entry->next);
94949 }
94950 EXPORT_SYMBOL(__list_del_entry);
94951
94952@@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
94953 void __list_add_rcu(struct list_head *new,
94954 struct list_head *prev, struct list_head *next)
94955 {
94956- WARN(next->prev != prev,
94957- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
94958- prev, next->prev, next);
94959- WARN(prev->next != next,
94960- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
94961- next, prev->next, prev);
94962+ if (!__list_add_debug(new, prev, next))
94963+ return;
94964+
94965 new->next = next;
94966 new->prev = prev;
94967 rcu_assign_pointer(list_next_rcu(prev), new);
94968 next->prev = new;
94969 }
94970 EXPORT_SYMBOL(__list_add_rcu);
94971+#endif
94972+
94973+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
94974+{
94975+#ifdef CONFIG_DEBUG_LIST
94976+ if (!__list_add_debug(new, prev, next))
94977+ return;
94978+#endif
94979+
94980+ pax_open_kernel();
94981+ next->prev = new;
94982+ new->next = next;
94983+ new->prev = prev;
94984+ prev->next = new;
94985+ pax_close_kernel();
94986+}
94987+EXPORT_SYMBOL(__pax_list_add);
94988+
94989+void pax_list_del(struct list_head *entry)
94990+{
94991+#ifdef CONFIG_DEBUG_LIST
94992+ if (!__list_del_entry_debug(entry))
94993+ return;
94994+#endif
94995+
94996+ pax_open_kernel();
94997+ __list_del(entry->prev, entry->next);
94998+ entry->next = LIST_POISON1;
94999+ entry->prev = LIST_POISON2;
95000+ pax_close_kernel();
95001+}
95002+EXPORT_SYMBOL(pax_list_del);
95003+
95004+void pax_list_del_init(struct list_head *entry)
95005+{
95006+ pax_open_kernel();
95007+ __list_del(entry->prev, entry->next);
95008+ INIT_LIST_HEAD(entry);
95009+ pax_close_kernel();
95010+}
95011+EXPORT_SYMBOL(pax_list_del_init);
95012+
95013+void __pax_list_add_rcu(struct list_head *new,
95014+ struct list_head *prev, struct list_head *next)
95015+{
95016+#ifdef CONFIG_DEBUG_LIST
95017+ if (!__list_add_debug(new, prev, next))
95018+ return;
95019+#endif
95020+
95021+ pax_open_kernel();
95022+ new->next = next;
95023+ new->prev = prev;
95024+ rcu_assign_pointer(list_next_rcu(prev), new);
95025+ next->prev = new;
95026+ pax_close_kernel();
95027+}
95028+EXPORT_SYMBOL(__pax_list_add_rcu);
95029+
95030+void pax_list_del_rcu(struct list_head *entry)
95031+{
95032+#ifdef CONFIG_DEBUG_LIST
95033+ if (!__list_del_entry_debug(entry))
95034+ return;
95035+#endif
95036+
95037+ pax_open_kernel();
95038+ __list_del(entry->prev, entry->next);
95039+ entry->next = LIST_POISON1;
95040+ entry->prev = LIST_POISON2;
95041+ pax_close_kernel();
95042+}
95043+EXPORT_SYMBOL(pax_list_del_rcu);
95044diff --git a/lib/lockref.c b/lib/lockref.c
95045index d2233de..fa1a2f6 100644
95046--- a/lib/lockref.c
95047+++ b/lib/lockref.c
95048@@ -48,13 +48,13 @@
95049 void lockref_get(struct lockref *lockref)
95050 {
95051 CMPXCHG_LOOP(
95052- new.count++;
95053+ __lockref_inc(&new);
95054 ,
95055 return;
95056 );
95057
95058 spin_lock(&lockref->lock);
95059- lockref->count++;
95060+ __lockref_inc(lockref);
95061 spin_unlock(&lockref->lock);
95062 }
95063 EXPORT_SYMBOL(lockref_get);
95064@@ -69,7 +69,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95065 int retval;
95066
95067 CMPXCHG_LOOP(
95068- new.count++;
95069+ __lockref_inc(&new);
95070 if (!old.count)
95071 return 0;
95072 ,
95073@@ -79,7 +79,7 @@ int lockref_get_not_zero(struct lockref *lockref)
95074 spin_lock(&lockref->lock);
95075 retval = 0;
95076 if (lockref->count) {
95077- lockref->count++;
95078+ __lockref_inc(lockref);
95079 retval = 1;
95080 }
95081 spin_unlock(&lockref->lock);
95082@@ -96,7 +96,7 @@ EXPORT_SYMBOL(lockref_get_not_zero);
95083 int lockref_get_or_lock(struct lockref *lockref)
95084 {
95085 CMPXCHG_LOOP(
95086- new.count++;
95087+ __lockref_inc(&new);
95088 if (!old.count)
95089 break;
95090 ,
95091@@ -106,7 +106,7 @@ int lockref_get_or_lock(struct lockref *lockref)
95092 spin_lock(&lockref->lock);
95093 if (!lockref->count)
95094 return 0;
95095- lockref->count++;
95096+ __lockref_inc(lockref);
95097 spin_unlock(&lockref->lock);
95098 return 1;
95099 }
95100@@ -120,7 +120,7 @@ EXPORT_SYMBOL(lockref_get_or_lock);
95101 int lockref_put_or_lock(struct lockref *lockref)
95102 {
95103 CMPXCHG_LOOP(
95104- new.count--;
95105+ __lockref_dec(&new);
95106 if (old.count <= 1)
95107 break;
95108 ,
95109@@ -130,7 +130,7 @@ int lockref_put_or_lock(struct lockref *lockref)
95110 spin_lock(&lockref->lock);
95111 if (lockref->count <= 1)
95112 return 0;
95113- lockref->count--;
95114+ __lockref_dec(lockref);
95115 spin_unlock(&lockref->lock);
95116 return 1;
95117 }
95118@@ -157,7 +157,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95119 int retval;
95120
95121 CMPXCHG_LOOP(
95122- new.count++;
95123+ __lockref_inc(&new);
95124 if ((int)old.count < 0)
95125 return 0;
95126 ,
95127@@ -167,7 +167,7 @@ int lockref_get_not_dead(struct lockref *lockref)
95128 spin_lock(&lockref->lock);
95129 retval = 0;
95130 if ((int) lockref->count >= 0) {
95131- lockref->count++;
95132+ __lockref_inc(lockref);
95133 retval = 1;
95134 }
95135 spin_unlock(&lockref->lock);
95136diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
95137index a89cf09..1a42c2d 100644
95138--- a/lib/percpu-refcount.c
95139+++ b/lib/percpu-refcount.c
95140@@ -29,7 +29,7 @@
95141 * can't hit 0 before we've added up all the percpu refs.
95142 */
95143
95144-#define PCPU_COUNT_BIAS (1U << 31)
95145+#define PCPU_COUNT_BIAS (1U << 30)
95146
95147 static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
95148 {
95149diff --git a/lib/radix-tree.c b/lib/radix-tree.c
95150index 3291a8e..346a91e 100644
95151--- a/lib/radix-tree.c
95152+++ b/lib/radix-tree.c
95153@@ -67,7 +67,7 @@ struct radix_tree_preload {
95154 int nr;
95155 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
95156 };
95157-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
95158+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
95159
95160 static inline void *ptr_to_indirect(void *ptr)
95161 {
95162diff --git a/lib/random32.c b/lib/random32.c
95163index c9b6bf3..4752c6d4 100644
95164--- a/lib/random32.c
95165+++ b/lib/random32.c
95166@@ -46,7 +46,7 @@ static inline void prandom_state_selftest(void)
95167 }
95168 #endif
95169
95170-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
95171+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
95172
95173 /**
95174 * prandom_u32_state - seeded pseudo-random number generator.
95175diff --git a/lib/rbtree.c b/lib/rbtree.c
95176index c16c81a..4dcbda1 100644
95177--- a/lib/rbtree.c
95178+++ b/lib/rbtree.c
95179@@ -380,7 +380,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
95180 static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
95181
95182 static const struct rb_augment_callbacks dummy_callbacks = {
95183- dummy_propagate, dummy_copy, dummy_rotate
95184+ .propagate = dummy_propagate,
95185+ .copy = dummy_copy,
95186+ .rotate = dummy_rotate
95187 };
95188
95189 void rb_insert_color(struct rb_node *node, struct rb_root *root)
95190diff --git a/lib/show_mem.c b/lib/show_mem.c
95191index 0922579..9d7adb9 100644
95192--- a/lib/show_mem.c
95193+++ b/lib/show_mem.c
95194@@ -44,6 +44,6 @@ void show_mem(unsigned int filter)
95195 quicklist_total_size());
95196 #endif
95197 #ifdef CONFIG_MEMORY_FAILURE
95198- printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
95199+ printk("%lu pages hwpoisoned\n", atomic_long_read_unchecked(&num_poisoned_pages));
95200 #endif
95201 }
95202diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
95203index bb2b201..46abaf9 100644
95204--- a/lib/strncpy_from_user.c
95205+++ b/lib/strncpy_from_user.c
95206@@ -21,7 +21,7 @@
95207 */
95208 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
95209 {
95210- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95211+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95212 long res = 0;
95213
95214 /*
95215diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
95216index a28df52..3d55877 100644
95217--- a/lib/strnlen_user.c
95218+++ b/lib/strnlen_user.c
95219@@ -26,7 +26,7 @@
95220 */
95221 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
95222 {
95223- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95224+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
95225 long align, res = 0;
95226 unsigned long c;
95227
95228diff --git a/lib/swiotlb.c b/lib/swiotlb.c
95229index 4abda07..b9d3765 100644
95230--- a/lib/swiotlb.c
95231+++ b/lib/swiotlb.c
95232@@ -682,7 +682,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
95233
95234 void
95235 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
95236- dma_addr_t dev_addr)
95237+ dma_addr_t dev_addr, struct dma_attrs *attrs)
95238 {
95239 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
95240
95241diff --git a/lib/test_bpf.c b/lib/test_bpf.c
95242index 89e0345..3347efe 100644
95243--- a/lib/test_bpf.c
95244+++ b/lib/test_bpf.c
95245@@ -1798,7 +1798,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
95246 break;
95247
95248 case INTERNAL:
95249- fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
95250+ fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
95251 if (fp == NULL) {
95252 pr_cont("UNEXPECTED_FAIL no memory left\n");
95253 *err = -ENOMEM;
95254diff --git a/lib/usercopy.c b/lib/usercopy.c
95255index 4f5b1dd..7cab418 100644
95256--- a/lib/usercopy.c
95257+++ b/lib/usercopy.c
95258@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
95259 WARN(1, "Buffer overflow detected!\n");
95260 }
95261 EXPORT_SYMBOL(copy_from_user_overflow);
95262+
95263+void copy_to_user_overflow(void)
95264+{
95265+ WARN(1, "Buffer overflow detected!\n");
95266+}
95267+EXPORT_SYMBOL(copy_to_user_overflow);
95268diff --git a/lib/vsprintf.c b/lib/vsprintf.c
95269index 6fe2c84..2fe5ec6 100644
95270--- a/lib/vsprintf.c
95271+++ b/lib/vsprintf.c
95272@@ -16,6 +16,9 @@
95273 * - scnprintf and vscnprintf
95274 */
95275
95276+#ifdef CONFIG_GRKERNSEC_HIDESYM
95277+#define __INCLUDED_BY_HIDESYM 1
95278+#endif
95279 #include <stdarg.h>
95280 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
95281 #include <linux/types.h>
95282@@ -624,7 +627,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
95283 #ifdef CONFIG_KALLSYMS
95284 if (*fmt == 'B')
95285 sprint_backtrace(sym, value);
95286- else if (*fmt != 'f' && *fmt != 's')
95287+ else if (*fmt != 'f' && *fmt != 's' && *fmt != 'X')
95288 sprint_symbol(sym, value);
95289 else
95290 sprint_symbol_no_offset(sym, value);
95291@@ -1183,7 +1186,11 @@ char *address_val(char *buf, char *end, const void *addr,
95292 return number(buf, end, num, spec);
95293 }
95294
95295+#ifdef CONFIG_GRKERNSEC_HIDESYM
95296+int kptr_restrict __read_mostly = 2;
95297+#else
95298 int kptr_restrict __read_mostly;
95299+#endif
95300
95301 /*
95302 * Show a '%p' thing. A kernel extension is that the '%p' is followed
95303@@ -1194,8 +1201,10 @@ int kptr_restrict __read_mostly;
95304 *
95305 * - 'F' For symbolic function descriptor pointers with offset
95306 * - 'f' For simple symbolic function names without offset
95307+ * - 'X' For simple symbolic function names without offset approved for use with GRKERNSEC_HIDESYM
95308 * - 'S' For symbolic direct pointers with offset
95309 * - 's' For symbolic direct pointers without offset
95310+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
95311 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
95312 * - 'B' For backtraced symbolic direct pointers with offset
95313 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
95314@@ -1263,12 +1272,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95315
95316 if (!ptr && *fmt != 'K') {
95317 /*
95318- * Print (null) with the same width as a pointer so it makes
95319+ * Print (nil) with the same width as a pointer so it makes
95320 * tabular output look nice.
95321 */
95322 if (spec.field_width == -1)
95323 spec.field_width = default_width;
95324- return string(buf, end, "(null)", spec);
95325+ return string(buf, end, "(nil)", spec);
95326 }
95327
95328 switch (*fmt) {
95329@@ -1278,6 +1287,14 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95330 /* Fallthrough */
95331 case 'S':
95332 case 's':
95333+#ifdef CONFIG_GRKERNSEC_HIDESYM
95334+ break;
95335+#else
95336+ return symbol_string(buf, end, ptr, spec, fmt);
95337+#endif
95338+ case 'X':
95339+ ptr = dereference_function_descriptor(ptr);
95340+ case 'A':
95341 case 'B':
95342 return symbol_string(buf, end, ptr, spec, fmt);
95343 case 'R':
95344@@ -1333,6 +1350,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95345 va_end(va);
95346 return buf;
95347 }
95348+ case 'P':
95349+ break;
95350 case 'K':
95351 /*
95352 * %pK cannot be used in IRQ context because its test
95353@@ -1390,6 +1409,22 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
95354 ((const struct file *)ptr)->f_path.dentry,
95355 spec, fmt);
95356 }
95357+
95358+#ifdef CONFIG_GRKERNSEC_HIDESYM
95359+ /* 'P' = approved pointers to copy to userland,
95360+ as in the /proc/kallsyms case, as we make it display nothing
95361+ for non-root users, and the real contents for root users
95362+ 'X' = approved simple symbols
95363+ Also ignore 'K' pointers, since we force their NULLing for non-root users
95364+ above
95365+ */
95366+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'X' && *fmt != 'K' && is_usercopy_object(buf)) {
95367+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
95368+ dump_stack();
95369+ ptr = NULL;
95370+ }
95371+#endif
95372+
95373 spec.flags |= SMALL;
95374 if (spec.field_width == -1) {
95375 spec.field_width = default_width;
95376@@ -2089,11 +2124,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95377 typeof(type) value; \
95378 if (sizeof(type) == 8) { \
95379 args = PTR_ALIGN(args, sizeof(u32)); \
95380- *(u32 *)&value = *(u32 *)args; \
95381- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
95382+ *(u32 *)&value = *(const u32 *)args; \
95383+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
95384 } else { \
95385 args = PTR_ALIGN(args, sizeof(type)); \
95386- value = *(typeof(type) *)args; \
95387+ value = *(const typeof(type) *)args; \
95388 } \
95389 args += sizeof(type); \
95390 value; \
95391@@ -2156,7 +2191,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
95392 case FORMAT_TYPE_STR: {
95393 const char *str_arg = args;
95394 args += strlen(str_arg) + 1;
95395- str = string(str, end, (char *)str_arg, spec);
95396+ str = string(str, end, str_arg, spec);
95397 break;
95398 }
95399
95400diff --git a/localversion-grsec b/localversion-grsec
95401new file mode 100644
95402index 0000000..7cd6065
95403--- /dev/null
95404+++ b/localversion-grsec
95405@@ -0,0 +1 @@
95406+-grsec
95407diff --git a/mm/Kconfig b/mm/Kconfig
95408index 886db21..f514de2 100644
95409--- a/mm/Kconfig
95410+++ b/mm/Kconfig
95411@@ -333,10 +333,11 @@ config KSM
95412 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
95413
95414 config DEFAULT_MMAP_MIN_ADDR
95415- int "Low address space to protect from user allocation"
95416+ int "Low address space to protect from user allocation"
95417 depends on MMU
95418- default 4096
95419- help
95420+ default 32768 if ALPHA || ARM || PARISC || SPARC32
95421+ default 65536
95422+ help
95423 This is the portion of low virtual memory which should be protected
95424 from userspace allocation. Keeping a user from writing to low pages
95425 can help reduce the impact of kernel NULL pointer bugs.
95426@@ -367,7 +368,7 @@ config MEMORY_FAILURE
95427
95428 config HWPOISON_INJECT
95429 tristate "HWPoison pages injector"
95430- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
95431+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
95432 select PROC_PAGE_MONITOR
95433
95434 config NOMMU_INITIAL_TRIM_EXCESS
95435diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95436index 1706cbb..f89dbca 100644
95437--- a/mm/backing-dev.c
95438+++ b/mm/backing-dev.c
95439@@ -12,7 +12,7 @@
95440 #include <linux/device.h>
95441 #include <trace/events/writeback.h>
95442
95443-static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
95444+static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
95445
95446 struct backing_dev_info default_backing_dev_info = {
95447 .name = "default",
95448@@ -533,7 +533,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
95449 return err;
95450
95451 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
95452- atomic_long_inc_return(&bdi_seq));
95453+ atomic_long_inc_return_unchecked(&bdi_seq));
95454 if (err) {
95455 bdi_destroy(bdi);
95456 return err;
95457diff --git a/mm/filemap.c b/mm/filemap.c
95458index 90effcd..539aa64 100644
95459--- a/mm/filemap.c
95460+++ b/mm/filemap.c
95461@@ -2092,7 +2092,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95462 struct address_space *mapping = file->f_mapping;
95463
95464 if (!mapping->a_ops->readpage)
95465- return -ENOEXEC;
95466+ return -ENODEV;
95467 file_accessed(file);
95468 vma->vm_ops = &generic_file_vm_ops;
95469 return 0;
95470@@ -2270,6 +2270,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95471 *pos = i_size_read(inode);
95472
95473 if (limit != RLIM_INFINITY) {
95474+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95475 if (*pos >= limit) {
95476 send_sig(SIGXFSZ, current, 0);
95477 return -EFBIG;
95478diff --git a/mm/fremap.c b/mm/fremap.c
95479index 72b8fa3..c5b39f1 100644
95480--- a/mm/fremap.c
95481+++ b/mm/fremap.c
95482@@ -180,6 +180,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95483 retry:
95484 vma = find_vma(mm, start);
95485
95486+#ifdef CONFIG_PAX_SEGMEXEC
95487+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95488+ goto out;
95489+#endif
95490+
95491 /*
95492 * Make sure the vma is shared, that it supports prefaulting,
95493 * and that the remapped range is valid and fully within
95494diff --git a/mm/gup.c b/mm/gup.c
95495index 91d044b..a58ecf6 100644
95496--- a/mm/gup.c
95497+++ b/mm/gup.c
95498@@ -270,11 +270,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
95499 unsigned int fault_flags = 0;
95500 int ret;
95501
95502- /* For mlock, just skip the stack guard page. */
95503- if ((*flags & FOLL_MLOCK) &&
95504- (stack_guard_page_start(vma, address) ||
95505- stack_guard_page_end(vma, address + PAGE_SIZE)))
95506- return -ENOENT;
95507 if (*flags & FOLL_WRITE)
95508 fault_flags |= FAULT_FLAG_WRITE;
95509 if (nonblocking)
95510@@ -436,14 +431,14 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95511 if (!(gup_flags & FOLL_FORCE))
95512 gup_flags |= FOLL_NUMA;
95513
95514- do {
95515+ while (nr_pages) {
95516 struct page *page;
95517 unsigned int foll_flags = gup_flags;
95518 unsigned int page_increm;
95519
95520 /* first iteration or cross vma bound */
95521 if (!vma || start >= vma->vm_end) {
95522- vma = find_extend_vma(mm, start);
95523+ vma = find_vma(mm, start);
95524 if (!vma && in_gate_area(mm, start)) {
95525 int ret;
95526 ret = get_gate_page(mm, start & PAGE_MASK,
95527@@ -455,7 +450,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95528 goto next_page;
95529 }
95530
95531- if (!vma || check_vma_flags(vma, gup_flags))
95532+ if (!vma || start < vma->vm_start || check_vma_flags(vma, gup_flags))
95533 return i ? : -EFAULT;
95534 if (is_vm_hugetlb_page(vma)) {
95535 i = follow_hugetlb_page(mm, vma, pages, vmas,
95536@@ -510,7 +505,7 @@ next_page:
95537 i += page_increm;
95538 start += page_increm * PAGE_SIZE;
95539 nr_pages -= page_increm;
95540- } while (nr_pages);
95541+ }
95542 return i;
95543 }
95544 EXPORT_SYMBOL(__get_user_pages);
95545diff --git a/mm/highmem.c b/mm/highmem.c
95546index 123bcd3..0de52ba 100644
95547--- a/mm/highmem.c
95548+++ b/mm/highmem.c
95549@@ -195,8 +195,9 @@ static void flush_all_zero_pkmaps(void)
95550 * So no dangers, even with speculative execution.
95551 */
95552 page = pte_page(pkmap_page_table[i]);
95553+ pax_open_kernel();
95554 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
95555-
95556+ pax_close_kernel();
95557 set_page_address(page, NULL);
95558 need_flush = 1;
95559 }
95560@@ -259,9 +260,11 @@ start:
95561 }
95562 }
95563 vaddr = PKMAP_ADDR(last_pkmap_nr);
95564+
95565+ pax_open_kernel();
95566 set_pte_at(&init_mm, vaddr,
95567 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95568-
95569+ pax_close_kernel();
95570 pkmap_count[last_pkmap_nr] = 1;
95571 set_page_address(page, (void *)vaddr);
95572
95573diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95574index eeceeeb..a209d58 100644
95575--- a/mm/hugetlb.c
95576+++ b/mm/hugetlb.c
95577@@ -2258,6 +2258,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95578 struct ctl_table *table, int write,
95579 void __user *buffer, size_t *length, loff_t *ppos)
95580 {
95581+ ctl_table_no_const t;
95582 struct hstate *h = &default_hstate;
95583 unsigned long tmp = h->max_huge_pages;
95584 int ret;
95585@@ -2265,9 +2266,10 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
95586 if (!hugepages_supported())
95587 return -ENOTSUPP;
95588
95589- table->data = &tmp;
95590- table->maxlen = sizeof(unsigned long);
95591- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95592+ t = *table;
95593+ t.data = &tmp;
95594+ t.maxlen = sizeof(unsigned long);
95595+ ret = proc_doulongvec_minmax(&t, write, buffer, length, ppos);
95596 if (ret)
95597 goto out;
95598
95599@@ -2302,6 +2304,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95600 struct hstate *h = &default_hstate;
95601 unsigned long tmp;
95602 int ret;
95603+ ctl_table_no_const hugetlb_table;
95604
95605 if (!hugepages_supported())
95606 return -ENOTSUPP;
95607@@ -2311,9 +2314,10 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
95608 if (write && hstate_is_gigantic(h))
95609 return -EINVAL;
95610
95611- table->data = &tmp;
95612- table->maxlen = sizeof(unsigned long);
95613- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
95614+ hugetlb_table = *table;
95615+ hugetlb_table.data = &tmp;
95616+ hugetlb_table.maxlen = sizeof(unsigned long);
95617+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
95618 if (ret)
95619 goto out;
95620
95621@@ -2792,6 +2796,27 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95622 mutex_unlock(&mapping->i_mmap_mutex);
95623 }
95624
95625+#ifdef CONFIG_PAX_SEGMEXEC
95626+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95627+{
95628+ struct mm_struct *mm = vma->vm_mm;
95629+ struct vm_area_struct *vma_m;
95630+ unsigned long address_m;
95631+ pte_t *ptep_m;
95632+
95633+ vma_m = pax_find_mirror_vma(vma);
95634+ if (!vma_m)
95635+ return;
95636+
95637+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95638+ address_m = address + SEGMEXEC_TASK_SIZE;
95639+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95640+ get_page(page_m);
95641+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
95642+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95643+}
95644+#endif
95645+
95646 /*
95647 * Hugetlb_cow() should be called with page lock of the original hugepage held.
95648 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
95649@@ -2903,6 +2928,11 @@ retry_avoidcopy:
95650 make_huge_pte(vma, new_page, 1));
95651 page_remove_rmap(old_page);
95652 hugepage_add_new_anon_rmap(new_page, vma, address);
95653+
95654+#ifdef CONFIG_PAX_SEGMEXEC
95655+ pax_mirror_huge_pte(vma, address, new_page);
95656+#endif
95657+
95658 /* Make the old page be freed below */
95659 new_page = old_page;
95660 }
95661@@ -3063,6 +3093,10 @@ retry:
95662 && (vma->vm_flags & VM_SHARED)));
95663 set_huge_pte_at(mm, address, ptep, new_pte);
95664
95665+#ifdef CONFIG_PAX_SEGMEXEC
95666+ pax_mirror_huge_pte(vma, address, page);
95667+#endif
95668+
95669 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95670 /* Optimization, do the COW without a second fault */
95671 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
95672@@ -3129,6 +3163,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95673 struct hstate *h = hstate_vma(vma);
95674 struct address_space *mapping;
95675
95676+#ifdef CONFIG_PAX_SEGMEXEC
95677+ struct vm_area_struct *vma_m;
95678+#endif
95679+
95680 address &= huge_page_mask(h);
95681
95682 ptep = huge_pte_offset(mm, address);
95683@@ -3142,6 +3180,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95684 VM_FAULT_SET_HINDEX(hstate_index(h));
95685 }
95686
95687+#ifdef CONFIG_PAX_SEGMEXEC
95688+ vma_m = pax_find_mirror_vma(vma);
95689+ if (vma_m) {
95690+ unsigned long address_m;
95691+
95692+ if (vma->vm_start > vma_m->vm_start) {
95693+ address_m = address;
95694+ address -= SEGMEXEC_TASK_SIZE;
95695+ vma = vma_m;
95696+ h = hstate_vma(vma);
95697+ } else
95698+ address_m = address + SEGMEXEC_TASK_SIZE;
95699+
95700+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95701+ return VM_FAULT_OOM;
95702+ address_m &= HPAGE_MASK;
95703+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95704+ }
95705+#endif
95706+
95707 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95708 if (!ptep)
95709 return VM_FAULT_OOM;
95710diff --git a/mm/internal.h b/mm/internal.h
95711index 5f2772f..4c3882c 100644
95712--- a/mm/internal.h
95713+++ b/mm/internal.h
95714@@ -134,6 +134,7 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
95715
95716 extern int __isolate_free_page(struct page *page, unsigned int order);
95717 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95718+extern void free_compound_page(struct page *page);
95719 extern void prep_compound_page(struct page *page, unsigned long order);
95720 #ifdef CONFIG_MEMORY_FAILURE
95721 extern bool is_free_buddy_page(struct page *page);
95722@@ -376,7 +377,7 @@ extern u32 hwpoison_filter_enable;
95723
95724 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
95725 unsigned long, unsigned long,
95726- unsigned long, unsigned long);
95727+ unsigned long, unsigned long) __intentional_overflow(-1);
95728
95729 extern void set_pageblock_order(void);
95730 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
95731diff --git a/mm/iov_iter.c b/mm/iov_iter.c
95732index 141dcf7..7327fd3 100644
95733--- a/mm/iov_iter.c
95734+++ b/mm/iov_iter.c
95735@@ -173,7 +173,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
95736
95737 while (bytes) {
95738 char __user *buf = iov->iov_base + base;
95739- int copy = min(bytes, iov->iov_len - base);
95740+ size_t copy = min(bytes, iov->iov_len - base);
95741
95742 base = 0;
95743 left = __copy_from_user_inatomic(vaddr, buf, copy);
95744@@ -201,7 +201,7 @@ static size_t copy_from_user_atomic_iovec(struct page *page,
95745
95746 kaddr = kmap_atomic(page);
95747 if (likely(i->nr_segs == 1)) {
95748- int left;
95749+ size_t left;
95750 char __user *buf = i->iov->iov_base + i->iov_offset;
95751 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
95752 copied = bytes - left;
95753@@ -231,7 +231,7 @@ static void advance_iovec(struct iov_iter *i, size_t bytes)
95754 * zero-length segments (without overruning the iovec).
95755 */
95756 while (bytes || unlikely(i->count && !iov->iov_len)) {
95757- int copy;
95758+ size_t copy;
95759
95760 copy = min(bytes, iov->iov_len - base);
95761 BUG_ON(!i->count || i->count < copy);
95762diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95763index 3cda50c..032ba634 100644
95764--- a/mm/kmemleak.c
95765+++ b/mm/kmemleak.c
95766@@ -364,7 +364,7 @@ static void print_unreferenced(struct seq_file *seq,
95767
95768 for (i = 0; i < object->trace_len; i++) {
95769 void *ptr = (void *)object->trace[i];
95770- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95771+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
95772 }
95773 }
95774
95775@@ -1905,7 +1905,7 @@ static int __init kmemleak_late_init(void)
95776 return -ENOMEM;
95777 }
95778
95779- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
95780+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
95781 &kmemleak_fops);
95782 if (!dentry)
95783 pr_warning("Failed to create the debugfs kmemleak file\n");
95784diff --git a/mm/maccess.c b/mm/maccess.c
95785index d53adf9..03a24bf 100644
95786--- a/mm/maccess.c
95787+++ b/mm/maccess.c
95788@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
95789 set_fs(KERNEL_DS);
95790 pagefault_disable();
95791 ret = __copy_from_user_inatomic(dst,
95792- (__force const void __user *)src, size);
95793+ (const void __force_user *)src, size);
95794 pagefault_enable();
95795 set_fs(old_fs);
95796
95797@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
95798
95799 set_fs(KERNEL_DS);
95800 pagefault_disable();
95801- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95802+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95803 pagefault_enable();
95804 set_fs(old_fs);
95805
95806diff --git a/mm/madvise.c b/mm/madvise.c
95807index 0938b30..199abe8 100644
95808--- a/mm/madvise.c
95809+++ b/mm/madvise.c
95810@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
95811 pgoff_t pgoff;
95812 unsigned long new_flags = vma->vm_flags;
95813
95814+#ifdef CONFIG_PAX_SEGMEXEC
95815+ struct vm_area_struct *vma_m;
95816+#endif
95817+
95818 switch (behavior) {
95819 case MADV_NORMAL:
95820 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95821@@ -126,6 +130,13 @@ success:
95822 /*
95823 * vm_flags is protected by the mmap_sem held in write mode.
95824 */
95825+
95826+#ifdef CONFIG_PAX_SEGMEXEC
95827+ vma_m = pax_find_mirror_vma(vma);
95828+ if (vma_m)
95829+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95830+#endif
95831+
95832 vma->vm_flags = new_flags;
95833
95834 out:
95835@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95836 struct vm_area_struct **prev,
95837 unsigned long start, unsigned long end)
95838 {
95839+
95840+#ifdef CONFIG_PAX_SEGMEXEC
95841+ struct vm_area_struct *vma_m;
95842+#endif
95843+
95844 *prev = vma;
95845 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95846 return -EINVAL;
95847@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
95848 zap_page_range(vma, start, end - start, &details);
95849 } else
95850 zap_page_range(vma, start, end - start, NULL);
95851+
95852+#ifdef CONFIG_PAX_SEGMEXEC
95853+ vma_m = pax_find_mirror_vma(vma);
95854+ if (vma_m) {
95855+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95856+ struct zap_details details = {
95857+ .nonlinear_vma = vma_m,
95858+ .last_index = ULONG_MAX,
95859+ };
95860+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95861+ } else
95862+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95863+ }
95864+#endif
95865+
95866 return 0;
95867 }
95868
95869@@ -488,6 +519,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95870 if (end < start)
95871 return error;
95872
95873+#ifdef CONFIG_PAX_SEGMEXEC
95874+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95875+ if (end > SEGMEXEC_TASK_SIZE)
95876+ return error;
95877+ } else
95878+#endif
95879+
95880+ if (end > TASK_SIZE)
95881+ return error;
95882+
95883 error = 0;
95884 if (end == start)
95885 return error;
95886diff --git a/mm/memory-failure.c b/mm/memory-failure.c
95887index 44c6bd2..60369dc3 100644
95888--- a/mm/memory-failure.c
95889+++ b/mm/memory-failure.c
95890@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
95891
95892 int sysctl_memory_failure_recovery __read_mostly = 1;
95893
95894-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95895+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
95896
95897 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
95898
95899@@ -198,7 +198,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
95900 pfn, t->comm, t->pid);
95901 si.si_signo = SIGBUS;
95902 si.si_errno = 0;
95903- si.si_addr = (void *)addr;
95904+ si.si_addr = (void __user *)addr;
95905 #ifdef __ARCH_SI_TRAPNO
95906 si.si_trapno = trapno;
95907 #endif
95908@@ -791,7 +791,7 @@ static struct page_state {
95909 unsigned long res;
95910 char *msg;
95911 int (*action)(struct page *p, unsigned long pfn);
95912-} error_states[] = {
95913+} __do_const error_states[] = {
95914 { reserved, reserved, "reserved kernel", me_kernel },
95915 /*
95916 * free pages are specially detected outside this table:
95917@@ -1099,7 +1099,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95918 nr_pages = 1 << compound_order(hpage);
95919 else /* normal page or thp */
95920 nr_pages = 1;
95921- atomic_long_add(nr_pages, &num_poisoned_pages);
95922+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
95923
95924 /*
95925 * We need/can do nothing about count=0 pages.
95926@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95927 if (PageHWPoison(hpage)) {
95928 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
95929 || (p != hpage && TestSetPageHWPoison(hpage))) {
95930- atomic_long_sub(nr_pages, &num_poisoned_pages);
95931+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95932 unlock_page(hpage);
95933 return 0;
95934 }
95935@@ -1196,14 +1196,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
95936 */
95937 if (!PageHWPoison(p)) {
95938 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
95939- atomic_long_sub(nr_pages, &num_poisoned_pages);
95940+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95941 put_page(hpage);
95942 res = 0;
95943 goto out;
95944 }
95945 if (hwpoison_filter(p)) {
95946 if (TestClearPageHWPoison(p))
95947- atomic_long_sub(nr_pages, &num_poisoned_pages);
95948+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95949 unlock_page(hpage);
95950 put_page(hpage);
95951 return 0;
95952@@ -1433,7 +1433,7 @@ int unpoison_memory(unsigned long pfn)
95953 return 0;
95954 }
95955 if (TestClearPageHWPoison(p))
95956- atomic_long_dec(&num_poisoned_pages);
95957+ atomic_long_dec_unchecked(&num_poisoned_pages);
95958 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
95959 return 0;
95960 }
95961@@ -1447,7 +1447,7 @@ int unpoison_memory(unsigned long pfn)
95962 */
95963 if (TestClearPageHWPoison(page)) {
95964 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
95965- atomic_long_sub(nr_pages, &num_poisoned_pages);
95966+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
95967 freeit = 1;
95968 if (PageHuge(page))
95969 clear_page_hwpoison_huge_page(page);
95970@@ -1572,11 +1572,11 @@ static int soft_offline_huge_page(struct page *page, int flags)
95971 if (PageHuge(page)) {
95972 set_page_hwpoison_huge_page(hpage);
95973 dequeue_hwpoisoned_huge_page(hpage);
95974- atomic_long_add(1 << compound_order(hpage),
95975+ atomic_long_add_unchecked(1 << compound_order(hpage),
95976 &num_poisoned_pages);
95977 } else {
95978 SetPageHWPoison(page);
95979- atomic_long_inc(&num_poisoned_pages);
95980+ atomic_long_inc_unchecked(&num_poisoned_pages);
95981 }
95982 }
95983 return ret;
95984@@ -1615,7 +1615,7 @@ static int __soft_offline_page(struct page *page, int flags)
95985 put_page(page);
95986 pr_info("soft_offline: %#lx: invalidated\n", pfn);
95987 SetPageHWPoison(page);
95988- atomic_long_inc(&num_poisoned_pages);
95989+ atomic_long_inc_unchecked(&num_poisoned_pages);
95990 return 0;
95991 }
95992
95993@@ -1666,7 +1666,7 @@ static int __soft_offline_page(struct page *page, int flags)
95994 if (!is_free_buddy_page(page))
95995 pr_info("soft offline: %#lx: page leaked\n",
95996 pfn);
95997- atomic_long_inc(&num_poisoned_pages);
95998+ atomic_long_inc_unchecked(&num_poisoned_pages);
95999 }
96000 } else {
96001 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
96002@@ -1736,11 +1736,11 @@ int soft_offline_page(struct page *page, int flags)
96003 if (PageHuge(page)) {
96004 set_page_hwpoison_huge_page(hpage);
96005 dequeue_hwpoisoned_huge_page(hpage);
96006- atomic_long_add(1 << compound_order(hpage),
96007+ atomic_long_add_unchecked(1 << compound_order(hpage),
96008 &num_poisoned_pages);
96009 } else {
96010 SetPageHWPoison(page);
96011- atomic_long_inc(&num_poisoned_pages);
96012+ atomic_long_inc_unchecked(&num_poisoned_pages);
96013 }
96014 }
96015 unset_migratetype_isolate(page, MIGRATE_MOVABLE);
96016diff --git a/mm/memory.c b/mm/memory.c
96017index 37b80fc..9cdef79 100644
96018--- a/mm/memory.c
96019+++ b/mm/memory.c
96020@@ -415,6 +415,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96021 free_pte_range(tlb, pmd, addr);
96022 } while (pmd++, addr = next, addr != end);
96023
96024+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
96025 start &= PUD_MASK;
96026 if (start < floor)
96027 return;
96028@@ -429,6 +430,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
96029 pmd = pmd_offset(pud, start);
96030 pud_clear(pud);
96031 pmd_free_tlb(tlb, pmd, start);
96032+#endif
96033+
96034 }
96035
96036 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96037@@ -448,6 +451,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96038 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
96039 } while (pud++, addr = next, addr != end);
96040
96041+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
96042 start &= PGDIR_MASK;
96043 if (start < floor)
96044 return;
96045@@ -462,6 +466,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
96046 pud = pud_offset(pgd, start);
96047 pgd_clear(pgd);
96048 pud_free_tlb(tlb, pud, start);
96049+#endif
96050+
96051 }
96052
96053 /*
96054@@ -691,10 +697,10 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
96055 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
96056 */
96057 if (vma->vm_ops)
96058- printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
96059+ printk(KERN_ALERT "vma->vm_ops->fault: %pAR\n",
96060 vma->vm_ops->fault);
96061 if (vma->vm_file)
96062- printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
96063+ printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pAR\n",
96064 vma->vm_file->f_op->mmap);
96065 dump_stack();
96066 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
96067@@ -815,20 +821,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
96068 if (!pte_file(pte)) {
96069 swp_entry_t entry = pte_to_swp_entry(pte);
96070
96071- if (swap_duplicate(entry) < 0)
96072- return entry.val;
96073+ if (likely(!non_swap_entry(entry))) {
96074+ if (swap_duplicate(entry) < 0)
96075+ return entry.val;
96076
96077- /* make sure dst_mm is on swapoff's mmlist. */
96078- if (unlikely(list_empty(&dst_mm->mmlist))) {
96079- spin_lock(&mmlist_lock);
96080- if (list_empty(&dst_mm->mmlist))
96081- list_add(&dst_mm->mmlist,
96082- &src_mm->mmlist);
96083- spin_unlock(&mmlist_lock);
96084- }
96085- if (likely(!non_swap_entry(entry)))
96086+ /* make sure dst_mm is on swapoff's mmlist. */
96087+ if (unlikely(list_empty(&dst_mm->mmlist))) {
96088+ spin_lock(&mmlist_lock);
96089+ if (list_empty(&dst_mm->mmlist))
96090+ list_add(&dst_mm->mmlist,
96091+ &src_mm->mmlist);
96092+ spin_unlock(&mmlist_lock);
96093+ }
96094 rss[MM_SWAPENTS]++;
96095- else if (is_migration_entry(entry)) {
96096+ } else if (is_migration_entry(entry)) {
96097 page = migration_entry_to_page(entry);
96098
96099 if (PageAnon(page))
96100@@ -1501,6 +1507,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
96101 page_add_file_rmap(page);
96102 set_pte_at(mm, addr, pte, mk_pte(page, prot));
96103
96104+#ifdef CONFIG_PAX_SEGMEXEC
96105+ pax_mirror_file_pte(vma, addr, page, ptl);
96106+#endif
96107+
96108 retval = 0;
96109 pte_unmap_unlock(pte, ptl);
96110 return retval;
96111@@ -1545,9 +1555,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
96112 if (!page_count(page))
96113 return -EINVAL;
96114 if (!(vma->vm_flags & VM_MIXEDMAP)) {
96115+
96116+#ifdef CONFIG_PAX_SEGMEXEC
96117+ struct vm_area_struct *vma_m;
96118+#endif
96119+
96120 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
96121 BUG_ON(vma->vm_flags & VM_PFNMAP);
96122 vma->vm_flags |= VM_MIXEDMAP;
96123+
96124+#ifdef CONFIG_PAX_SEGMEXEC
96125+ vma_m = pax_find_mirror_vma(vma);
96126+ if (vma_m)
96127+ vma_m->vm_flags |= VM_MIXEDMAP;
96128+#endif
96129+
96130 }
96131 return insert_page(vma, addr, page, vma->vm_page_prot);
96132 }
96133@@ -1630,6 +1652,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
96134 unsigned long pfn)
96135 {
96136 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
96137+ BUG_ON(vma->vm_mirror);
96138
96139 if (addr < vma->vm_start || addr >= vma->vm_end)
96140 return -EFAULT;
96141@@ -1877,7 +1900,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
96142
96143 BUG_ON(pud_huge(*pud));
96144
96145- pmd = pmd_alloc(mm, pud, addr);
96146+ pmd = (mm == &init_mm) ?
96147+ pmd_alloc_kernel(mm, pud, addr) :
96148+ pmd_alloc(mm, pud, addr);
96149 if (!pmd)
96150 return -ENOMEM;
96151 do {
96152@@ -1897,7 +1922,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
96153 unsigned long next;
96154 int err;
96155
96156- pud = pud_alloc(mm, pgd, addr);
96157+ pud = (mm == &init_mm) ?
96158+ pud_alloc_kernel(mm, pgd, addr) :
96159+ pud_alloc(mm, pgd, addr);
96160 if (!pud)
96161 return -ENOMEM;
96162 do {
96163@@ -2019,6 +2046,186 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
96164 return ret;
96165 }
96166
96167+#ifdef CONFIG_PAX_SEGMEXEC
96168+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
96169+{
96170+ struct mm_struct *mm = vma->vm_mm;
96171+ spinlock_t *ptl;
96172+ pte_t *pte, entry;
96173+
96174+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
96175+ entry = *pte;
96176+ if (!pte_present(entry)) {
96177+ if (!pte_none(entry)) {
96178+ BUG_ON(pte_file(entry));
96179+ free_swap_and_cache(pte_to_swp_entry(entry));
96180+ pte_clear_not_present_full(mm, address, pte, 0);
96181+ }
96182+ } else {
96183+ struct page *page;
96184+
96185+ flush_cache_page(vma, address, pte_pfn(entry));
96186+ entry = ptep_clear_flush(vma, address, pte);
96187+ BUG_ON(pte_dirty(entry));
96188+ page = vm_normal_page(vma, address, entry);
96189+ if (page) {
96190+ update_hiwater_rss(mm);
96191+ if (PageAnon(page))
96192+ dec_mm_counter_fast(mm, MM_ANONPAGES);
96193+ else
96194+ dec_mm_counter_fast(mm, MM_FILEPAGES);
96195+ page_remove_rmap(page);
96196+ page_cache_release(page);
96197+ }
96198+ }
96199+ pte_unmap_unlock(pte, ptl);
96200+}
96201+
96202+/* PaX: if vma is mirrored, synchronize the mirror's PTE
96203+ *
96204+ * the ptl of the lower mapped page is held on entry and is not released on exit
96205+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
96206+ */
96207+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96208+{
96209+ struct mm_struct *mm = vma->vm_mm;
96210+ unsigned long address_m;
96211+ spinlock_t *ptl_m;
96212+ struct vm_area_struct *vma_m;
96213+ pmd_t *pmd_m;
96214+ pte_t *pte_m, entry_m;
96215+
96216+ BUG_ON(!page_m || !PageAnon(page_m));
96217+
96218+ vma_m = pax_find_mirror_vma(vma);
96219+ if (!vma_m)
96220+ return;
96221+
96222+ BUG_ON(!PageLocked(page_m));
96223+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96224+ address_m = address + SEGMEXEC_TASK_SIZE;
96225+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96226+ pte_m = pte_offset_map(pmd_m, address_m);
96227+ ptl_m = pte_lockptr(mm, pmd_m);
96228+ if (ptl != ptl_m) {
96229+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96230+ if (!pte_none(*pte_m))
96231+ goto out;
96232+ }
96233+
96234+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96235+ page_cache_get(page_m);
96236+ page_add_anon_rmap(page_m, vma_m, address_m);
96237+ inc_mm_counter_fast(mm, MM_ANONPAGES);
96238+ set_pte_at(mm, address_m, pte_m, entry_m);
96239+ update_mmu_cache(vma_m, address_m, pte_m);
96240+out:
96241+ if (ptl != ptl_m)
96242+ spin_unlock(ptl_m);
96243+ pte_unmap(pte_m);
96244+ unlock_page(page_m);
96245+}
96246+
96247+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
96248+{
96249+ struct mm_struct *mm = vma->vm_mm;
96250+ unsigned long address_m;
96251+ spinlock_t *ptl_m;
96252+ struct vm_area_struct *vma_m;
96253+ pmd_t *pmd_m;
96254+ pte_t *pte_m, entry_m;
96255+
96256+ BUG_ON(!page_m || PageAnon(page_m));
96257+
96258+ vma_m = pax_find_mirror_vma(vma);
96259+ if (!vma_m)
96260+ return;
96261+
96262+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96263+ address_m = address + SEGMEXEC_TASK_SIZE;
96264+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96265+ pte_m = pte_offset_map(pmd_m, address_m);
96266+ ptl_m = pte_lockptr(mm, pmd_m);
96267+ if (ptl != ptl_m) {
96268+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96269+ if (!pte_none(*pte_m))
96270+ goto out;
96271+ }
96272+
96273+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
96274+ page_cache_get(page_m);
96275+ page_add_file_rmap(page_m);
96276+ inc_mm_counter_fast(mm, MM_FILEPAGES);
96277+ set_pte_at(mm, address_m, pte_m, entry_m);
96278+ update_mmu_cache(vma_m, address_m, pte_m);
96279+out:
96280+ if (ptl != ptl_m)
96281+ spin_unlock(ptl_m);
96282+ pte_unmap(pte_m);
96283+}
96284+
96285+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
96286+{
96287+ struct mm_struct *mm = vma->vm_mm;
96288+ unsigned long address_m;
96289+ spinlock_t *ptl_m;
96290+ struct vm_area_struct *vma_m;
96291+ pmd_t *pmd_m;
96292+ pte_t *pte_m, entry_m;
96293+
96294+ vma_m = pax_find_mirror_vma(vma);
96295+ if (!vma_m)
96296+ return;
96297+
96298+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
96299+ address_m = address + SEGMEXEC_TASK_SIZE;
96300+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
96301+ pte_m = pte_offset_map(pmd_m, address_m);
96302+ ptl_m = pte_lockptr(mm, pmd_m);
96303+ if (ptl != ptl_m) {
96304+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
96305+ if (!pte_none(*pte_m))
96306+ goto out;
96307+ }
96308+
96309+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
96310+ set_pte_at(mm, address_m, pte_m, entry_m);
96311+out:
96312+ if (ptl != ptl_m)
96313+ spin_unlock(ptl_m);
96314+ pte_unmap(pte_m);
96315+}
96316+
96317+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
96318+{
96319+ struct page *page_m;
96320+ pte_t entry;
96321+
96322+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
96323+ goto out;
96324+
96325+ entry = *pte;
96326+ page_m = vm_normal_page(vma, address, entry);
96327+ if (!page_m)
96328+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
96329+ else if (PageAnon(page_m)) {
96330+ if (pax_find_mirror_vma(vma)) {
96331+ pte_unmap_unlock(pte, ptl);
96332+ lock_page(page_m);
96333+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
96334+ if (pte_same(entry, *pte))
96335+ pax_mirror_anon_pte(vma, address, page_m, ptl);
96336+ else
96337+ unlock_page(page_m);
96338+ }
96339+ } else
96340+ pax_mirror_file_pte(vma, address, page_m, ptl);
96341+
96342+out:
96343+ pte_unmap_unlock(pte, ptl);
96344+}
96345+#endif
96346+
96347 /*
96348 * This routine handles present pages, when users try to write
96349 * to a shared page. It is done by copying the page to a new address
96350@@ -2217,6 +2424,12 @@ gotten:
96351 */
96352 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96353 if (likely(pte_same(*page_table, orig_pte))) {
96354+
96355+#ifdef CONFIG_PAX_SEGMEXEC
96356+ if (pax_find_mirror_vma(vma))
96357+ BUG_ON(!trylock_page(new_page));
96358+#endif
96359+
96360 if (old_page) {
96361 if (!PageAnon(old_page)) {
96362 dec_mm_counter_fast(mm, MM_FILEPAGES);
96363@@ -2270,6 +2483,10 @@ gotten:
96364 page_remove_rmap(old_page);
96365 }
96366
96367+#ifdef CONFIG_PAX_SEGMEXEC
96368+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96369+#endif
96370+
96371 /* Free the old page.. */
96372 new_page = old_page;
96373 ret |= VM_FAULT_WRITE;
96374@@ -2544,6 +2761,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96375 swap_free(entry);
96376 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
96377 try_to_free_swap(page);
96378+
96379+#ifdef CONFIG_PAX_SEGMEXEC
96380+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
96381+#endif
96382+
96383 unlock_page(page);
96384 if (page != swapcache) {
96385 /*
96386@@ -2567,6 +2789,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
96387
96388 /* No need to invalidate - it was non-present before */
96389 update_mmu_cache(vma, address, page_table);
96390+
96391+#ifdef CONFIG_PAX_SEGMEXEC
96392+ pax_mirror_anon_pte(vma, address, page, ptl);
96393+#endif
96394+
96395 unlock:
96396 pte_unmap_unlock(page_table, ptl);
96397 out:
96398@@ -2586,40 +2813,6 @@ out_release:
96399 }
96400
96401 /*
96402- * This is like a special single-page "expand_{down|up}wards()",
96403- * except we must first make sure that 'address{-|+}PAGE_SIZE'
96404- * doesn't hit another vma.
96405- */
96406-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
96407-{
96408- address &= PAGE_MASK;
96409- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
96410- struct vm_area_struct *prev = vma->vm_prev;
96411-
96412- /*
96413- * Is there a mapping abutting this one below?
96414- *
96415- * That's only ok if it's the same stack mapping
96416- * that has gotten split..
96417- */
96418- if (prev && prev->vm_end == address)
96419- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
96420-
96421- expand_downwards(vma, address - PAGE_SIZE);
96422- }
96423- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
96424- struct vm_area_struct *next = vma->vm_next;
96425-
96426- /* As VM_GROWSDOWN but s/below/above/ */
96427- if (next && next->vm_start == address + PAGE_SIZE)
96428- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
96429-
96430- expand_upwards(vma, address + PAGE_SIZE);
96431- }
96432- return 0;
96433-}
96434-
96435-/*
96436 * We enter with non-exclusive mmap_sem (to exclude vma changes,
96437 * but allow concurrent faults), and pte mapped but not yet locked.
96438 * We return with mmap_sem still held, but pte unmapped and unlocked.
96439@@ -2629,27 +2822,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96440 unsigned int flags)
96441 {
96442 struct mem_cgroup *memcg;
96443- struct page *page;
96444+ struct page *page = NULL;
96445 spinlock_t *ptl;
96446 pte_t entry;
96447
96448- pte_unmap(page_table);
96449-
96450- /* Check if we need to add a guard page to the stack */
96451- if (check_stack_guard_page(vma, address) < 0)
96452- return VM_FAULT_SIGBUS;
96453-
96454- /* Use the zero-page for reads */
96455 if (!(flags & FAULT_FLAG_WRITE)) {
96456 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
96457 vma->vm_page_prot));
96458- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
96459+ ptl = pte_lockptr(mm, pmd);
96460+ spin_lock(ptl);
96461 if (!pte_none(*page_table))
96462 goto unlock;
96463 goto setpte;
96464 }
96465
96466 /* Allocate our own private page. */
96467+ pte_unmap(page_table);
96468+
96469 if (unlikely(anon_vma_prepare(vma)))
96470 goto oom;
96471 page = alloc_zeroed_user_highpage_movable(vma, address);
96472@@ -2673,6 +2862,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
96473 if (!pte_none(*page_table))
96474 goto release;
96475
96476+#ifdef CONFIG_PAX_SEGMEXEC
96477+ if (pax_find_mirror_vma(vma))
96478+ BUG_ON(!trylock_page(page));
96479+#endif
96480+
96481 inc_mm_counter_fast(mm, MM_ANONPAGES);
96482 page_add_new_anon_rmap(page, vma, address);
96483 mem_cgroup_commit_charge(page, memcg, false);
96484@@ -2682,6 +2876,12 @@ setpte:
96485
96486 /* No need to invalidate - it was non-present before */
96487 update_mmu_cache(vma, address, page_table);
96488+
96489+#ifdef CONFIG_PAX_SEGMEXEC
96490+ if (page)
96491+ pax_mirror_anon_pte(vma, address, page, ptl);
96492+#endif
96493+
96494 unlock:
96495 pte_unmap_unlock(page_table, ptl);
96496 return 0;
96497@@ -2912,6 +3112,11 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96498 return ret;
96499 }
96500 do_set_pte(vma, address, fault_page, pte, false, false);
96501+
96502+#ifdef CONFIG_PAX_SEGMEXEC
96503+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96504+#endif
96505+
96506 unlock_page(fault_page);
96507 unlock_out:
96508 pte_unmap_unlock(pte, ptl);
96509@@ -2954,7 +3159,18 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96510 page_cache_release(fault_page);
96511 goto uncharge_out;
96512 }
96513+
96514+#ifdef CONFIG_PAX_SEGMEXEC
96515+ if (pax_find_mirror_vma(vma))
96516+ BUG_ON(!trylock_page(new_page));
96517+#endif
96518+
96519 do_set_pte(vma, address, new_page, pte, true, true);
96520+
96521+#ifdef CONFIG_PAX_SEGMEXEC
96522+ pax_mirror_anon_pte(vma, address, new_page, ptl);
96523+#endif
96524+
96525 mem_cgroup_commit_charge(new_page, memcg, false);
96526 lru_cache_add_active_or_unevictable(new_page, vma);
96527 pte_unmap_unlock(pte, ptl);
96528@@ -3004,6 +3220,11 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96529 return ret;
96530 }
96531 do_set_pte(vma, address, fault_page, pte, true, false);
96532+
96533+#ifdef CONFIG_PAX_SEGMEXEC
96534+ pax_mirror_file_pte(vma, address, fault_page, ptl);
96535+#endif
96536+
96537 pte_unmap_unlock(pte, ptl);
96538
96539 if (set_page_dirty(fault_page))
96540@@ -3245,6 +3466,12 @@ static int handle_pte_fault(struct mm_struct *mm,
96541 if (flags & FAULT_FLAG_WRITE)
96542 flush_tlb_fix_spurious_fault(vma, address);
96543 }
96544+
96545+#ifdef CONFIG_PAX_SEGMEXEC
96546+ pax_mirror_pte(vma, address, pte, pmd, ptl);
96547+ return 0;
96548+#endif
96549+
96550 unlock:
96551 pte_unmap_unlock(pte, ptl);
96552 return 0;
96553@@ -3264,9 +3491,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
96554 pmd_t *pmd;
96555 pte_t *pte;
96556
96557+#ifdef CONFIG_PAX_SEGMEXEC
96558+ struct vm_area_struct *vma_m;
96559+#endif
96560+
96561 if (unlikely(is_vm_hugetlb_page(vma)))
96562 return hugetlb_fault(mm, vma, address, flags);
96563
96564+#ifdef CONFIG_PAX_SEGMEXEC
96565+ vma_m = pax_find_mirror_vma(vma);
96566+ if (vma_m) {
96567+ unsigned long address_m;
96568+ pgd_t *pgd_m;
96569+ pud_t *pud_m;
96570+ pmd_t *pmd_m;
96571+
96572+ if (vma->vm_start > vma_m->vm_start) {
96573+ address_m = address;
96574+ address -= SEGMEXEC_TASK_SIZE;
96575+ vma = vma_m;
96576+ } else
96577+ address_m = address + SEGMEXEC_TASK_SIZE;
96578+
96579+ pgd_m = pgd_offset(mm, address_m);
96580+ pud_m = pud_alloc(mm, pgd_m, address_m);
96581+ if (!pud_m)
96582+ return VM_FAULT_OOM;
96583+ pmd_m = pmd_alloc(mm, pud_m, address_m);
96584+ if (!pmd_m)
96585+ return VM_FAULT_OOM;
96586+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
96587+ return VM_FAULT_OOM;
96588+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
96589+ }
96590+#endif
96591+
96592 pgd = pgd_offset(mm, address);
96593 pud = pud_alloc(mm, pgd, address);
96594 if (!pud)
96595@@ -3400,6 +3659,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96596 spin_unlock(&mm->page_table_lock);
96597 return 0;
96598 }
96599+
96600+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
96601+{
96602+ pud_t *new = pud_alloc_one(mm, address);
96603+ if (!new)
96604+ return -ENOMEM;
96605+
96606+ smp_wmb(); /* See comment in __pte_alloc */
96607+
96608+ spin_lock(&mm->page_table_lock);
96609+ if (pgd_present(*pgd)) /* Another has populated it */
96610+ pud_free(mm, new);
96611+ else
96612+ pgd_populate_kernel(mm, pgd, new);
96613+ spin_unlock(&mm->page_table_lock);
96614+ return 0;
96615+}
96616 #endif /* __PAGETABLE_PUD_FOLDED */
96617
96618 #ifndef __PAGETABLE_PMD_FOLDED
96619@@ -3430,6 +3706,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
96620 spin_unlock(&mm->page_table_lock);
96621 return 0;
96622 }
96623+
96624+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
96625+{
96626+ pmd_t *new = pmd_alloc_one(mm, address);
96627+ if (!new)
96628+ return -ENOMEM;
96629+
96630+ smp_wmb(); /* See comment in __pte_alloc */
96631+
96632+ spin_lock(&mm->page_table_lock);
96633+#ifndef __ARCH_HAS_4LEVEL_HACK
96634+ if (pud_present(*pud)) /* Another has populated it */
96635+ pmd_free(mm, new);
96636+ else
96637+ pud_populate_kernel(mm, pud, new);
96638+#else
96639+ if (pgd_present(*pud)) /* Another has populated it */
96640+ pmd_free(mm, new);
96641+ else
96642+ pgd_populate_kernel(mm, pud, new);
96643+#endif /* __ARCH_HAS_4LEVEL_HACK */
96644+ spin_unlock(&mm->page_table_lock);
96645+ return 0;
96646+}
96647 #endif /* __PAGETABLE_PMD_FOLDED */
96648
96649 static int __follow_pte(struct mm_struct *mm, unsigned long address,
96650@@ -3539,8 +3839,8 @@ out:
96651 return ret;
96652 }
96653
96654-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96655- void *buf, int len, int write)
96656+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
96657+ void *buf, size_t len, int write)
96658 {
96659 resource_size_t phys_addr;
96660 unsigned long prot = 0;
96661@@ -3566,8 +3866,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
96662 * Access another process' address space as given in mm. If non-NULL, use the
96663 * given task for page fault accounting.
96664 */
96665-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96666- unsigned long addr, void *buf, int len, int write)
96667+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96668+ unsigned long addr, void *buf, size_t len, int write)
96669 {
96670 struct vm_area_struct *vma;
96671 void *old_buf = buf;
96672@@ -3575,7 +3875,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96673 down_read(&mm->mmap_sem);
96674 /* ignore errors, just check how much was successfully transferred */
96675 while (len) {
96676- int bytes, ret, offset;
96677+ ssize_t bytes, ret, offset;
96678 void *maddr;
96679 struct page *page = NULL;
96680
96681@@ -3636,8 +3936,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
96682 *
96683 * The caller must hold a reference on @mm.
96684 */
96685-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96686- void *buf, int len, int write)
96687+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
96688+ void *buf, size_t len, int write)
96689 {
96690 return __access_remote_vm(NULL, mm, addr, buf, len, write);
96691 }
96692@@ -3647,11 +3947,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
96693 * Source/target buffer must be kernel space,
96694 * Do not walk the page table directly, use get_user_pages
96695 */
96696-int access_process_vm(struct task_struct *tsk, unsigned long addr,
96697- void *buf, int len, int write)
96698+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
96699+ void *buf, size_t len, int write)
96700 {
96701 struct mm_struct *mm;
96702- int ret;
96703+ ssize_t ret;
96704
96705 mm = get_task_mm(tsk);
96706 if (!mm)
96707diff --git a/mm/mempolicy.c b/mm/mempolicy.c
96708index 8f5330d..b41914b 100644
96709--- a/mm/mempolicy.c
96710+++ b/mm/mempolicy.c
96711@@ -750,6 +750,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96712 unsigned long vmstart;
96713 unsigned long vmend;
96714
96715+#ifdef CONFIG_PAX_SEGMEXEC
96716+ struct vm_area_struct *vma_m;
96717+#endif
96718+
96719 vma = find_vma(mm, start);
96720 if (!vma || vma->vm_start > start)
96721 return -EFAULT;
96722@@ -793,6 +797,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
96723 err = vma_replace_policy(vma, new_pol);
96724 if (err)
96725 goto out;
96726+
96727+#ifdef CONFIG_PAX_SEGMEXEC
96728+ vma_m = pax_find_mirror_vma(vma);
96729+ if (vma_m) {
96730+ err = vma_replace_policy(vma_m, new_pol);
96731+ if (err)
96732+ goto out;
96733+ }
96734+#endif
96735+
96736 }
96737
96738 out:
96739@@ -1225,6 +1239,17 @@ static long do_mbind(unsigned long start, unsigned long len,
96740
96741 if (end < start)
96742 return -EINVAL;
96743+
96744+#ifdef CONFIG_PAX_SEGMEXEC
96745+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
96746+ if (end > SEGMEXEC_TASK_SIZE)
96747+ return -EINVAL;
96748+ } else
96749+#endif
96750+
96751+ if (end > TASK_SIZE)
96752+ return -EINVAL;
96753+
96754 if (end == start)
96755 return 0;
96756
96757@@ -1450,8 +1475,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96758 */
96759 tcred = __task_cred(task);
96760 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96761- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96762- !capable(CAP_SYS_NICE)) {
96763+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96764 rcu_read_unlock();
96765 err = -EPERM;
96766 goto out_put;
96767@@ -1482,6 +1506,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
96768 goto out;
96769 }
96770
96771+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96772+ if (mm != current->mm &&
96773+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96774+ mmput(mm);
96775+ err = -EPERM;
96776+ goto out;
96777+ }
96778+#endif
96779+
96780 err = do_migrate_pages(mm, old, new,
96781 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
96782
96783diff --git a/mm/migrate.c b/mm/migrate.c
96784index 0143995..b294728 100644
96785--- a/mm/migrate.c
96786+++ b/mm/migrate.c
96787@@ -1495,8 +1495,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96788 */
96789 tcred = __task_cred(task);
96790 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
96791- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
96792- !capable(CAP_SYS_NICE)) {
96793+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
96794 rcu_read_unlock();
96795 err = -EPERM;
96796 goto out;
96797diff --git a/mm/mlock.c b/mm/mlock.c
96798index ce84cb0..6d5a9aa 100644
96799--- a/mm/mlock.c
96800+++ b/mm/mlock.c
96801@@ -14,6 +14,7 @@
96802 #include <linux/pagevec.h>
96803 #include <linux/mempolicy.h>
96804 #include <linux/syscalls.h>
96805+#include <linux/security.h>
96806 #include <linux/sched.h>
96807 #include <linux/export.h>
96808 #include <linux/rmap.h>
96809@@ -613,7 +614,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96810 {
96811 unsigned long nstart, end, tmp;
96812 struct vm_area_struct * vma, * prev;
96813- int error;
96814+ int error = 0;
96815
96816 VM_BUG_ON(start & ~PAGE_MASK);
96817 VM_BUG_ON(len != PAGE_ALIGN(len));
96818@@ -622,6 +623,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96819 return -EINVAL;
96820 if (end == start)
96821 return 0;
96822+ if (end > TASK_SIZE)
96823+ return -EINVAL;
96824+
96825 vma = find_vma(current->mm, start);
96826 if (!vma || vma->vm_start > start)
96827 return -ENOMEM;
96828@@ -633,6 +637,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96829 for (nstart = start ; ; ) {
96830 vm_flags_t newflags;
96831
96832+#ifdef CONFIG_PAX_SEGMEXEC
96833+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96834+ break;
96835+#endif
96836+
96837 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96838
96839 newflags = vma->vm_flags & ~VM_LOCKED;
96840@@ -746,6 +755,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96841 locked += current->mm->locked_vm;
96842
96843 /* check against resource limits */
96844+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96845 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96846 error = do_mlock(start, len, 1);
96847
96848@@ -783,6 +793,11 @@ static int do_mlockall(int flags)
96849 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96850 vm_flags_t newflags;
96851
96852+#ifdef CONFIG_PAX_SEGMEXEC
96853+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96854+ break;
96855+#endif
96856+
96857 newflags = vma->vm_flags & ~VM_LOCKED;
96858 if (flags & MCL_CURRENT)
96859 newflags |= VM_LOCKED;
96860@@ -814,8 +829,10 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96861 lock_limit >>= PAGE_SHIFT;
96862
96863 ret = -ENOMEM;
96864+
96865+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96866+
96867 down_write(&current->mm->mmap_sem);
96868-
96869 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96870 capable(CAP_IPC_LOCK))
96871 ret = do_mlockall(flags);
96872diff --git a/mm/mmap.c b/mm/mmap.c
96873index ebc25fa..9135e65 100644
96874--- a/mm/mmap.c
96875+++ b/mm/mmap.c
96876@@ -41,6 +41,7 @@
96877 #include <linux/notifier.h>
96878 #include <linux/memory.h>
96879 #include <linux/printk.h>
96880+#include <linux/random.h>
96881
96882 #include <asm/uaccess.h>
96883 #include <asm/cacheflush.h>
96884@@ -57,6 +58,16 @@
96885 #define arch_rebalance_pgtables(addr, len) (addr)
96886 #endif
96887
96888+static inline void verify_mm_writelocked(struct mm_struct *mm)
96889+{
96890+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96891+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96892+ up_read(&mm->mmap_sem);
96893+ BUG();
96894+ }
96895+#endif
96896+}
96897+
96898 static void unmap_region(struct mm_struct *mm,
96899 struct vm_area_struct *vma, struct vm_area_struct *prev,
96900 unsigned long start, unsigned long end);
96901@@ -76,16 +87,25 @@ static void unmap_region(struct mm_struct *mm,
96902 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96903 *
96904 */
96905-pgprot_t protection_map[16] = {
96906+pgprot_t protection_map[16] __read_only = {
96907 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96908 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96909 };
96910
96911-pgprot_t vm_get_page_prot(unsigned long vm_flags)
96912+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
96913 {
96914- return __pgprot(pgprot_val(protection_map[vm_flags &
96915+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96916 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96917 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96918+
96919+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96920+ if (!(__supported_pte_mask & _PAGE_NX) &&
96921+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96922+ (vm_flags & (VM_READ | VM_WRITE)))
96923+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96924+#endif
96925+
96926+ return prot;
96927 }
96928 EXPORT_SYMBOL(vm_get_page_prot);
96929
96930@@ -95,6 +115,7 @@ unsigned long sysctl_overcommit_kbytes __read_mostly;
96931 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96932 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
96933 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
96934+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96935 /*
96936 * Make sure vm_committed_as in one cacheline and not cacheline shared with
96937 * other variables. It can be updated by several CPUs frequently.
96938@@ -255,6 +276,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96939 struct vm_area_struct *next = vma->vm_next;
96940
96941 might_sleep();
96942+ BUG_ON(vma->vm_mirror);
96943 if (vma->vm_ops && vma->vm_ops->close)
96944 vma->vm_ops->close(vma);
96945 if (vma->vm_file)
96946@@ -299,6 +321,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96947 * not page aligned -Ram Gupta
96948 */
96949 rlim = rlimit(RLIMIT_DATA);
96950+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96951+ /* force a minimum 16MB brk heap on setuid/setgid binaries */
96952+ if (rlim < PAGE_SIZE && (get_dumpable(mm) != SUID_DUMP_USER) && gr_is_global_nonroot(current_uid()))
96953+ rlim = 4096 * PAGE_SIZE;
96954+#endif
96955+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
96956 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
96957 (mm->end_data - mm->start_data) > rlim)
96958 goto out;
96959@@ -752,8 +780,11 @@ again: remove_next = 1 + (end > next->vm_end);
96960 * shrinking vma had, to cover any anon pages imported.
96961 */
96962 if (exporter && exporter->anon_vma && !importer->anon_vma) {
96963- if (anon_vma_clone(importer, exporter))
96964- return -ENOMEM;
96965+ int error;
96966+
96967+ error = anon_vma_clone(importer, exporter);
96968+ if (error)
96969+ return error;
96970 importer->anon_vma = exporter->anon_vma;
96971 }
96972 }
96973@@ -949,6 +980,12 @@ static int
96974 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96975 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96976 {
96977+
96978+#ifdef CONFIG_PAX_SEGMEXEC
96979+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96980+ return 0;
96981+#endif
96982+
96983 if (is_mergeable_vma(vma, file, vm_flags) &&
96984 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96985 if (vma->vm_pgoff == vm_pgoff)
96986@@ -968,6 +1005,12 @@ static int
96987 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96988 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96989 {
96990+
96991+#ifdef CONFIG_PAX_SEGMEXEC
96992+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96993+ return 0;
96994+#endif
96995+
96996 if (is_mergeable_vma(vma, file, vm_flags) &&
96997 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
96998 pgoff_t vm_pglen;
96999@@ -1010,13 +1053,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
97000 struct vm_area_struct *vma_merge(struct mm_struct *mm,
97001 struct vm_area_struct *prev, unsigned long addr,
97002 unsigned long end, unsigned long vm_flags,
97003- struct anon_vma *anon_vma, struct file *file,
97004+ struct anon_vma *anon_vma, struct file *file,
97005 pgoff_t pgoff, struct mempolicy *policy)
97006 {
97007 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
97008 struct vm_area_struct *area, *next;
97009 int err;
97010
97011+#ifdef CONFIG_PAX_SEGMEXEC
97012+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
97013+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
97014+
97015+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
97016+#endif
97017+
97018 /*
97019 * We later require that vma->vm_flags == vm_flags,
97020 * so this tests vma->vm_flags & VM_SPECIAL, too.
97021@@ -1032,6 +1082,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97022 if (next && next->vm_end == end) /* cases 6, 7, 8 */
97023 next = next->vm_next;
97024
97025+#ifdef CONFIG_PAX_SEGMEXEC
97026+ if (prev)
97027+ prev_m = pax_find_mirror_vma(prev);
97028+ if (area)
97029+ area_m = pax_find_mirror_vma(area);
97030+ if (next)
97031+ next_m = pax_find_mirror_vma(next);
97032+#endif
97033+
97034 /*
97035 * Can it merge with the predecessor?
97036 */
97037@@ -1051,9 +1110,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97038 /* cases 1, 6 */
97039 err = vma_adjust(prev, prev->vm_start,
97040 next->vm_end, prev->vm_pgoff, NULL);
97041- } else /* cases 2, 5, 7 */
97042+
97043+#ifdef CONFIG_PAX_SEGMEXEC
97044+ if (!err && prev_m)
97045+ err = vma_adjust(prev_m, prev_m->vm_start,
97046+ next_m->vm_end, prev_m->vm_pgoff, NULL);
97047+#endif
97048+
97049+ } else { /* cases 2, 5, 7 */
97050 err = vma_adjust(prev, prev->vm_start,
97051 end, prev->vm_pgoff, NULL);
97052+
97053+#ifdef CONFIG_PAX_SEGMEXEC
97054+ if (!err && prev_m)
97055+ err = vma_adjust(prev_m, prev_m->vm_start,
97056+ end_m, prev_m->vm_pgoff, NULL);
97057+#endif
97058+
97059+ }
97060 if (err)
97061 return NULL;
97062 khugepaged_enter_vma_merge(prev, vm_flags);
97063@@ -1067,12 +1141,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
97064 mpol_equal(policy, vma_policy(next)) &&
97065 can_vma_merge_before(next, vm_flags,
97066 anon_vma, file, pgoff+pglen)) {
97067- if (prev && addr < prev->vm_end) /* case 4 */
97068+ if (prev && addr < prev->vm_end) { /* case 4 */
97069 err = vma_adjust(prev, prev->vm_start,
97070 addr, prev->vm_pgoff, NULL);
97071- else /* cases 3, 8 */
97072+
97073+#ifdef CONFIG_PAX_SEGMEXEC
97074+ if (!err && prev_m)
97075+ err = vma_adjust(prev_m, prev_m->vm_start,
97076+ addr_m, prev_m->vm_pgoff, NULL);
97077+#endif
97078+
97079+ } else { /* cases 3, 8 */
97080 err = vma_adjust(area, addr, next->vm_end,
97081 next->vm_pgoff - pglen, NULL);
97082+
97083+#ifdef CONFIG_PAX_SEGMEXEC
97084+ if (!err && area_m)
97085+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
97086+ next_m->vm_pgoff - pglen, NULL);
97087+#endif
97088+
97089+ }
97090 if (err)
97091 return NULL;
97092 khugepaged_enter_vma_merge(area, vm_flags);
97093@@ -1181,8 +1270,10 @@ none:
97094 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97095 struct file *file, long pages)
97096 {
97097- const unsigned long stack_flags
97098- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
97099+
97100+#ifdef CONFIG_PAX_RANDMMAP
97101+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97102+#endif
97103
97104 mm->total_vm += pages;
97105
97106@@ -1190,7 +1281,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
97107 mm->shared_vm += pages;
97108 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
97109 mm->exec_vm += pages;
97110- } else if (flags & stack_flags)
97111+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
97112 mm->stack_vm += pages;
97113 }
97114 #endif /* CONFIG_PROC_FS */
97115@@ -1220,6 +1311,7 @@ static inline int mlock_future_check(struct mm_struct *mm,
97116 locked += mm->locked_vm;
97117 lock_limit = rlimit(RLIMIT_MEMLOCK);
97118 lock_limit >>= PAGE_SHIFT;
97119+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97120 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
97121 return -EAGAIN;
97122 }
97123@@ -1246,7 +1338,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97124 * (the exception is when the underlying filesystem is noexec
97125 * mounted, in which case we dont add PROT_EXEC.)
97126 */
97127- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97128+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97129 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
97130 prot |= PROT_EXEC;
97131
97132@@ -1272,7 +1364,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97133 /* Obtain the address to map to. we verify (or select) it and ensure
97134 * that it represents a valid section of the address space.
97135 */
97136- addr = get_unmapped_area(file, addr, len, pgoff, flags);
97137+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
97138 if (addr & ~PAGE_MASK)
97139 return addr;
97140
97141@@ -1283,6 +1375,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97142 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
97143 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
97144
97145+#ifdef CONFIG_PAX_MPROTECT
97146+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97147+
97148+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
97149+ if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
97150+ mm->binfmt->handle_mmap)
97151+ mm->binfmt->handle_mmap(file);
97152+#endif
97153+
97154+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97155+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
97156+ gr_log_rwxmmap(file);
97157+
97158+#ifdef CONFIG_PAX_EMUPLT
97159+ vm_flags &= ~VM_EXEC;
97160+#else
97161+ return -EPERM;
97162+#endif
97163+
97164+ }
97165+
97166+ if (!(vm_flags & VM_EXEC))
97167+ vm_flags &= ~VM_MAYEXEC;
97168+#else
97169+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97170+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97171+#endif
97172+ else
97173+ vm_flags &= ~VM_MAYWRITE;
97174+ }
97175+#endif
97176+
97177+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97178+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
97179+ vm_flags &= ~VM_PAGEEXEC;
97180+#endif
97181+
97182 if (flags & MAP_LOCKED)
97183 if (!can_do_mlock())
97184 return -EPERM;
97185@@ -1370,6 +1499,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
97186 vm_flags |= VM_NORESERVE;
97187 }
97188
97189+ if (!gr_acl_handle_mmap(file, prot))
97190+ return -EACCES;
97191+
97192 addr = mmap_region(file, addr, len, vm_flags, pgoff);
97193 if (!IS_ERR_VALUE(addr) &&
97194 ((vm_flags & VM_LOCKED) ||
97195@@ -1463,7 +1595,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
97196 vm_flags_t vm_flags = vma->vm_flags;
97197
97198 /* If it was private or non-writable, the write bit is already clear */
97199- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
97200+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
97201 return 0;
97202
97203 /* The backer wishes to know when pages are first written to? */
97204@@ -1509,7 +1641,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97205 struct rb_node **rb_link, *rb_parent;
97206 unsigned long charged = 0;
97207
97208+#ifdef CONFIG_PAX_SEGMEXEC
97209+ struct vm_area_struct *vma_m = NULL;
97210+#endif
97211+
97212+ /*
97213+ * mm->mmap_sem is required to protect against another thread
97214+ * changing the mappings in case we sleep.
97215+ */
97216+ verify_mm_writelocked(mm);
97217+
97218 /* Check against address space limit. */
97219+
97220+#ifdef CONFIG_PAX_RANDMMAP
97221+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
97222+#endif
97223+
97224 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
97225 unsigned long nr_pages;
97226
97227@@ -1528,11 +1675,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
97228
97229 /* Clear old maps */
97230 error = -ENOMEM;
97231-munmap_back:
97232 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97233 if (do_munmap(mm, addr, len))
97234 return -ENOMEM;
97235- goto munmap_back;
97236+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97237 }
97238
97239 /*
97240@@ -1563,6 +1709,16 @@ munmap_back:
97241 goto unacct_error;
97242 }
97243
97244+#ifdef CONFIG_PAX_SEGMEXEC
97245+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
97246+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97247+ if (!vma_m) {
97248+ error = -ENOMEM;
97249+ goto free_vma;
97250+ }
97251+ }
97252+#endif
97253+
97254 vma->vm_mm = mm;
97255 vma->vm_start = addr;
97256 vma->vm_end = addr + len;
97257@@ -1593,6 +1749,13 @@ munmap_back:
97258 if (error)
97259 goto unmap_and_free_vma;
97260
97261+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
97262+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
97263+ vma->vm_flags |= VM_PAGEEXEC;
97264+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97265+ }
97266+#endif
97267+
97268 /* Can addr have changed??
97269 *
97270 * Answer: Yes, several device drivers can do it in their
97271@@ -1626,6 +1789,12 @@ munmap_back:
97272 }
97273
97274 vma_link(mm, vma, prev, rb_link, rb_parent);
97275+
97276+#ifdef CONFIG_PAX_SEGMEXEC
97277+ if (vma_m)
97278+ BUG_ON(pax_mirror_vma(vma_m, vma));
97279+#endif
97280+
97281 /* Once vma denies write, undo our temporary denial count */
97282 if (file) {
97283 if (vm_flags & VM_SHARED)
97284@@ -1638,6 +1807,7 @@ out:
97285 perf_event_mmap(vma);
97286
97287 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
97288+ track_exec_limit(mm, addr, addr + len, vm_flags);
97289 if (vm_flags & VM_LOCKED) {
97290 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
97291 vma == get_gate_vma(current->mm)))
97292@@ -1673,6 +1843,12 @@ allow_write_and_free_vma:
97293 if (vm_flags & VM_DENYWRITE)
97294 allow_write_access(file);
97295 free_vma:
97296+
97297+#ifdef CONFIG_PAX_SEGMEXEC
97298+ if (vma_m)
97299+ kmem_cache_free(vm_area_cachep, vma_m);
97300+#endif
97301+
97302 kmem_cache_free(vm_area_cachep, vma);
97303 unacct_error:
97304 if (charged)
97305@@ -1680,7 +1856,63 @@ unacct_error:
97306 return error;
97307 }
97308
97309-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97310+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
97311+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
97312+{
97313+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
97314+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
97315+
97316+ return 0;
97317+}
97318+#endif
97319+
97320+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
97321+{
97322+ if (!vma) {
97323+#ifdef CONFIG_STACK_GROWSUP
97324+ if (addr > sysctl_heap_stack_gap)
97325+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
97326+ else
97327+ vma = find_vma(current->mm, 0);
97328+ if (vma && (vma->vm_flags & VM_GROWSUP))
97329+ return false;
97330+#endif
97331+ return true;
97332+ }
97333+
97334+ if (addr + len > vma->vm_start)
97335+ return false;
97336+
97337+ if (vma->vm_flags & VM_GROWSDOWN)
97338+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
97339+#ifdef CONFIG_STACK_GROWSUP
97340+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
97341+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
97342+#endif
97343+ else if (offset)
97344+ return offset <= vma->vm_start - addr - len;
97345+
97346+ return true;
97347+}
97348+
97349+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
97350+{
97351+ if (vma->vm_start < len)
97352+ return -ENOMEM;
97353+
97354+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
97355+ if (offset <= vma->vm_start - len)
97356+ return vma->vm_start - len - offset;
97357+ else
97358+ return -ENOMEM;
97359+ }
97360+
97361+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
97362+ return vma->vm_start - len - sysctl_heap_stack_gap;
97363+ return -ENOMEM;
97364+}
97365+
97366+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
97367 {
97368 /*
97369 * We implement the search by looking for an rbtree node that
97370@@ -1728,11 +1960,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
97371 }
97372 }
97373
97374- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
97375+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
97376 check_current:
97377 /* Check if current node has a suitable gap */
97378 if (gap_start > high_limit)
97379 return -ENOMEM;
97380+
97381+ if (gap_end - gap_start > info->threadstack_offset)
97382+ gap_start += info->threadstack_offset;
97383+ else
97384+ gap_start = gap_end;
97385+
97386+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97387+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97388+ gap_start += sysctl_heap_stack_gap;
97389+ else
97390+ gap_start = gap_end;
97391+ }
97392+ if (vma->vm_flags & VM_GROWSDOWN) {
97393+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97394+ gap_end -= sysctl_heap_stack_gap;
97395+ else
97396+ gap_end = gap_start;
97397+ }
97398 if (gap_end >= low_limit && gap_end - gap_start >= length)
97399 goto found;
97400
97401@@ -1782,7 +2032,7 @@ found:
97402 return gap_start;
97403 }
97404
97405-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
97406+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
97407 {
97408 struct mm_struct *mm = current->mm;
97409 struct vm_area_struct *vma;
97410@@ -1836,6 +2086,24 @@ check_current:
97411 gap_end = vma->vm_start;
97412 if (gap_end < low_limit)
97413 return -ENOMEM;
97414+
97415+ if (gap_end - gap_start > info->threadstack_offset)
97416+ gap_end -= info->threadstack_offset;
97417+ else
97418+ gap_end = gap_start;
97419+
97420+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
97421+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97422+ gap_start += sysctl_heap_stack_gap;
97423+ else
97424+ gap_start = gap_end;
97425+ }
97426+ if (vma->vm_flags & VM_GROWSDOWN) {
97427+ if (gap_end - gap_start > sysctl_heap_stack_gap)
97428+ gap_end -= sysctl_heap_stack_gap;
97429+ else
97430+ gap_end = gap_start;
97431+ }
97432 if (gap_start <= high_limit && gap_end - gap_start >= length)
97433 goto found;
97434
97435@@ -1899,6 +2167,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97436 struct mm_struct *mm = current->mm;
97437 struct vm_area_struct *vma;
97438 struct vm_unmapped_area_info info;
97439+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97440
97441 if (len > TASK_SIZE - mmap_min_addr)
97442 return -ENOMEM;
97443@@ -1906,11 +2175,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97444 if (flags & MAP_FIXED)
97445 return addr;
97446
97447+#ifdef CONFIG_PAX_RANDMMAP
97448+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97449+#endif
97450+
97451 if (addr) {
97452 addr = PAGE_ALIGN(addr);
97453 vma = find_vma(mm, addr);
97454 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97455- (!vma || addr + len <= vma->vm_start))
97456+ check_heap_stack_gap(vma, addr, len, offset))
97457 return addr;
97458 }
97459
97460@@ -1919,6 +2192,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
97461 info.low_limit = mm->mmap_base;
97462 info.high_limit = TASK_SIZE;
97463 info.align_mask = 0;
97464+ info.threadstack_offset = offset;
97465 return vm_unmapped_area(&info);
97466 }
97467 #endif
97468@@ -1937,6 +2211,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97469 struct mm_struct *mm = current->mm;
97470 unsigned long addr = addr0;
97471 struct vm_unmapped_area_info info;
97472+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
97473
97474 /* requested length too big for entire address space */
97475 if (len > TASK_SIZE - mmap_min_addr)
97476@@ -1945,12 +2220,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97477 if (flags & MAP_FIXED)
97478 return addr;
97479
97480+#ifdef CONFIG_PAX_RANDMMAP
97481+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
97482+#endif
97483+
97484 /* requesting a specific address */
97485 if (addr) {
97486 addr = PAGE_ALIGN(addr);
97487 vma = find_vma(mm, addr);
97488 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
97489- (!vma || addr + len <= vma->vm_start))
97490+ check_heap_stack_gap(vma, addr, len, offset))
97491 return addr;
97492 }
97493
97494@@ -1959,6 +2238,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97495 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
97496 info.high_limit = mm->mmap_base;
97497 info.align_mask = 0;
97498+ info.threadstack_offset = offset;
97499 addr = vm_unmapped_area(&info);
97500
97501 /*
97502@@ -1971,6 +2251,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
97503 VM_BUG_ON(addr != -ENOMEM);
97504 info.flags = 0;
97505 info.low_limit = TASK_UNMAPPED_BASE;
97506+
97507+#ifdef CONFIG_PAX_RANDMMAP
97508+ if (mm->pax_flags & MF_PAX_RANDMMAP)
97509+ info.low_limit += mm->delta_mmap;
97510+#endif
97511+
97512 info.high_limit = TASK_SIZE;
97513 addr = vm_unmapped_area(&info);
97514 }
97515@@ -2071,6 +2357,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
97516 return vma;
97517 }
97518
97519+#ifdef CONFIG_PAX_SEGMEXEC
97520+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
97521+{
97522+ struct vm_area_struct *vma_m;
97523+
97524+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
97525+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
97526+ BUG_ON(vma->vm_mirror);
97527+ return NULL;
97528+ }
97529+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
97530+ vma_m = vma->vm_mirror;
97531+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
97532+ BUG_ON(vma->vm_file != vma_m->vm_file);
97533+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
97534+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
97535+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
97536+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
97537+ return vma_m;
97538+}
97539+#endif
97540+
97541 /*
97542 * Verify that the stack growth is acceptable and
97543 * update accounting. This is shared with both the
97544@@ -2087,6 +2395,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97545 return -ENOMEM;
97546
97547 /* Stack limit test */
97548+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
97549 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
97550 return -ENOMEM;
97551
97552@@ -2097,6 +2406,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97553 locked = mm->locked_vm + grow;
97554 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
97555 limit >>= PAGE_SHIFT;
97556+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
97557 if (locked > limit && !capable(CAP_IPC_LOCK))
97558 return -ENOMEM;
97559 }
97560@@ -2126,37 +2436,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
97561 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
97562 * vma is the last one with address > vma->vm_end. Have to extend vma.
97563 */
97564+#ifndef CONFIG_IA64
97565+static
97566+#endif
97567 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97568 {
97569 int error;
97570+ bool locknext;
97571
97572 if (!(vma->vm_flags & VM_GROWSUP))
97573 return -EFAULT;
97574
97575+ /* Also guard against wrapping around to address 0. */
97576+ if (address < PAGE_ALIGN(address+1))
97577+ address = PAGE_ALIGN(address+1);
97578+ else
97579+ return -ENOMEM;
97580+
97581 /*
97582 * We must make sure the anon_vma is allocated
97583 * so that the anon_vma locking is not a noop.
97584 */
97585 if (unlikely(anon_vma_prepare(vma)))
97586 return -ENOMEM;
97587+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
97588+ if (locknext && anon_vma_prepare(vma->vm_next))
97589+ return -ENOMEM;
97590 vma_lock_anon_vma(vma);
97591+ if (locknext)
97592+ vma_lock_anon_vma(vma->vm_next);
97593
97594 /*
97595 * vma->vm_start/vm_end cannot change under us because the caller
97596 * is required to hold the mmap_sem in read mode. We need the
97597- * anon_vma lock to serialize against concurrent expand_stacks.
97598- * Also guard against wrapping around to address 0.
97599+ * anon_vma locks to serialize against concurrent expand_stacks
97600+ * and expand_upwards.
97601 */
97602- if (address < PAGE_ALIGN(address+4))
97603- address = PAGE_ALIGN(address+4);
97604- else {
97605- vma_unlock_anon_vma(vma);
97606- return -ENOMEM;
97607- }
97608 error = 0;
97609
97610 /* Somebody else might have raced and expanded it already */
97611- if (address > vma->vm_end) {
97612+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
97613+ error = -ENOMEM;
97614+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
97615 unsigned long size, grow;
97616
97617 size = address - vma->vm_start;
97618@@ -2191,6 +2512,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
97619 }
97620 }
97621 }
97622+ if (locknext)
97623+ vma_unlock_anon_vma(vma->vm_next);
97624 vma_unlock_anon_vma(vma);
97625 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97626 validate_mm(vma->vm_mm);
97627@@ -2205,6 +2528,8 @@ int expand_downwards(struct vm_area_struct *vma,
97628 unsigned long address)
97629 {
97630 int error;
97631+ bool lockprev = false;
97632+ struct vm_area_struct *prev;
97633
97634 /*
97635 * We must make sure the anon_vma is allocated
97636@@ -2218,6 +2543,15 @@ int expand_downwards(struct vm_area_struct *vma,
97637 if (error)
97638 return error;
97639
97640+ prev = vma->vm_prev;
97641+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97642+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97643+#endif
97644+ if (lockprev && anon_vma_prepare(prev))
97645+ return -ENOMEM;
97646+ if (lockprev)
97647+ vma_lock_anon_vma(prev);
97648+
97649 vma_lock_anon_vma(vma);
97650
97651 /*
97652@@ -2227,9 +2561,17 @@ int expand_downwards(struct vm_area_struct *vma,
97653 */
97654
97655 /* Somebody else might have raced and expanded it already */
97656- if (address < vma->vm_start) {
97657+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97658+ error = -ENOMEM;
97659+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97660 unsigned long size, grow;
97661
97662+#ifdef CONFIG_PAX_SEGMEXEC
97663+ struct vm_area_struct *vma_m;
97664+
97665+ vma_m = pax_find_mirror_vma(vma);
97666+#endif
97667+
97668 size = vma->vm_end - address;
97669 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97670
97671@@ -2254,13 +2596,27 @@ int expand_downwards(struct vm_area_struct *vma,
97672 vma->vm_pgoff -= grow;
97673 anon_vma_interval_tree_post_update_vma(vma);
97674 vma_gap_update(vma);
97675+
97676+#ifdef CONFIG_PAX_SEGMEXEC
97677+ if (vma_m) {
97678+ anon_vma_interval_tree_pre_update_vma(vma_m);
97679+ vma_m->vm_start -= grow << PAGE_SHIFT;
97680+ vma_m->vm_pgoff -= grow;
97681+ anon_vma_interval_tree_post_update_vma(vma_m);
97682+ vma_gap_update(vma_m);
97683+ }
97684+#endif
97685+
97686 spin_unlock(&vma->vm_mm->page_table_lock);
97687
97688+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97689 perf_event_mmap(vma);
97690 }
97691 }
97692 }
97693 vma_unlock_anon_vma(vma);
97694+ if (lockprev)
97695+ vma_unlock_anon_vma(prev);
97696 khugepaged_enter_vma_merge(vma, vma->vm_flags);
97697 validate_mm(vma->vm_mm);
97698 return error;
97699@@ -2358,6 +2714,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97700 do {
97701 long nrpages = vma_pages(vma);
97702
97703+#ifdef CONFIG_PAX_SEGMEXEC
97704+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97705+ vma = remove_vma(vma);
97706+ continue;
97707+ }
97708+#endif
97709+
97710 if (vma->vm_flags & VM_ACCOUNT)
97711 nr_accounted += nrpages;
97712 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97713@@ -2402,6 +2765,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97714 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97715 vma->vm_prev = NULL;
97716 do {
97717+
97718+#ifdef CONFIG_PAX_SEGMEXEC
97719+ if (vma->vm_mirror) {
97720+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97721+ vma->vm_mirror->vm_mirror = NULL;
97722+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97723+ vma->vm_mirror = NULL;
97724+ }
97725+#endif
97726+
97727 vma_rb_erase(vma, &mm->mm_rb);
97728 mm->map_count--;
97729 tail_vma = vma;
97730@@ -2429,14 +2802,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97731 struct vm_area_struct *new;
97732 int err = -ENOMEM;
97733
97734+#ifdef CONFIG_PAX_SEGMEXEC
97735+ struct vm_area_struct *vma_m, *new_m = NULL;
97736+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97737+#endif
97738+
97739 if (is_vm_hugetlb_page(vma) && (addr &
97740 ~(huge_page_mask(hstate_vma(vma)))))
97741 return -EINVAL;
97742
97743+#ifdef CONFIG_PAX_SEGMEXEC
97744+ vma_m = pax_find_mirror_vma(vma);
97745+#endif
97746+
97747 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97748 if (!new)
97749 goto out_err;
97750
97751+#ifdef CONFIG_PAX_SEGMEXEC
97752+ if (vma_m) {
97753+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97754+ if (!new_m) {
97755+ kmem_cache_free(vm_area_cachep, new);
97756+ goto out_err;
97757+ }
97758+ }
97759+#endif
97760+
97761 /* most fields are the same, copy all, and then fixup */
97762 *new = *vma;
97763
97764@@ -2449,11 +2841,28 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97765 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97766 }
97767
97768+#ifdef CONFIG_PAX_SEGMEXEC
97769+ if (vma_m) {
97770+ *new_m = *vma_m;
97771+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
97772+ new_m->vm_mirror = new;
97773+ new->vm_mirror = new_m;
97774+
97775+ if (new_below)
97776+ new_m->vm_end = addr_m;
97777+ else {
97778+ new_m->vm_start = addr_m;
97779+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97780+ }
97781+ }
97782+#endif
97783+
97784 err = vma_dup_policy(vma, new);
97785 if (err)
97786 goto out_free_vma;
97787
97788- if (anon_vma_clone(new, vma))
97789+ err = anon_vma_clone(new, vma);
97790+ if (err)
97791 goto out_free_mpol;
97792
97793 if (new->vm_file)
97794@@ -2468,6 +2877,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97795 else
97796 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97797
97798+#ifdef CONFIG_PAX_SEGMEXEC
97799+ if (!err && vma_m) {
97800+ struct mempolicy *pol = vma_policy(new);
97801+
97802+ if (anon_vma_clone(new_m, vma_m))
97803+ goto out_free_mpol;
97804+
97805+ mpol_get(pol);
97806+ set_vma_policy(new_m, pol);
97807+
97808+ if (new_m->vm_file)
97809+ get_file(new_m->vm_file);
97810+
97811+ if (new_m->vm_ops && new_m->vm_ops->open)
97812+ new_m->vm_ops->open(new_m);
97813+
97814+ if (new_below)
97815+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97816+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97817+ else
97818+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97819+
97820+ if (err) {
97821+ if (new_m->vm_ops && new_m->vm_ops->close)
97822+ new_m->vm_ops->close(new_m);
97823+ if (new_m->vm_file)
97824+ fput(new_m->vm_file);
97825+ mpol_put(pol);
97826+ }
97827+ }
97828+#endif
97829+
97830 /* Success. */
97831 if (!err)
97832 return 0;
97833@@ -2477,10 +2918,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97834 new->vm_ops->close(new);
97835 if (new->vm_file)
97836 fput(new->vm_file);
97837- unlink_anon_vmas(new);
97838 out_free_mpol:
97839 mpol_put(vma_policy(new));
97840 out_free_vma:
97841+
97842+#ifdef CONFIG_PAX_SEGMEXEC
97843+ if (new_m) {
97844+ unlink_anon_vmas(new_m);
97845+ kmem_cache_free(vm_area_cachep, new_m);
97846+ }
97847+#endif
97848+
97849+ unlink_anon_vmas(new);
97850 kmem_cache_free(vm_area_cachep, new);
97851 out_err:
97852 return err;
97853@@ -2493,6 +2942,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97854 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97855 unsigned long addr, int new_below)
97856 {
97857+
97858+#ifdef CONFIG_PAX_SEGMEXEC
97859+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97860+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97861+ if (mm->map_count >= sysctl_max_map_count-1)
97862+ return -ENOMEM;
97863+ } else
97864+#endif
97865+
97866 if (mm->map_count >= sysctl_max_map_count)
97867 return -ENOMEM;
97868
97869@@ -2504,11 +2962,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
97870 * work. This now handles partial unmappings.
97871 * Jeremy Fitzhardinge <jeremy@goop.org>
97872 */
97873+#ifdef CONFIG_PAX_SEGMEXEC
97874 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97875 {
97876+ int ret = __do_munmap(mm, start, len);
97877+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97878+ return ret;
97879+
97880+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97881+}
97882+
97883+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97884+#else
97885+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97886+#endif
97887+{
97888 unsigned long end;
97889 struct vm_area_struct *vma, *prev, *last;
97890
97891+ /*
97892+ * mm->mmap_sem is required to protect against another thread
97893+ * changing the mappings in case we sleep.
97894+ */
97895+ verify_mm_writelocked(mm);
97896+
97897 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97898 return -EINVAL;
97899
97900@@ -2583,6 +3060,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97901 /* Fix up all other VM information */
97902 remove_vma_list(mm, vma);
97903
97904+ track_exec_limit(mm, start, end, 0UL);
97905+
97906 return 0;
97907 }
97908
97909@@ -2591,6 +3070,13 @@ int vm_munmap(unsigned long start, size_t len)
97910 int ret;
97911 struct mm_struct *mm = current->mm;
97912
97913+
97914+#ifdef CONFIG_PAX_SEGMEXEC
97915+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97916+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
97917+ return -EINVAL;
97918+#endif
97919+
97920 down_write(&mm->mmap_sem);
97921 ret = do_munmap(mm, start, len);
97922 up_write(&mm->mmap_sem);
97923@@ -2604,16 +3090,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97924 return vm_munmap(addr, len);
97925 }
97926
97927-static inline void verify_mm_writelocked(struct mm_struct *mm)
97928-{
97929-#ifdef CONFIG_DEBUG_VM
97930- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97931- WARN_ON(1);
97932- up_read(&mm->mmap_sem);
97933- }
97934-#endif
97935-}
97936-
97937 /*
97938 * this is really a simplified "do_mmap". it only handles
97939 * anonymous maps. eventually we may be able to do some
97940@@ -2627,6 +3103,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97941 struct rb_node ** rb_link, * rb_parent;
97942 pgoff_t pgoff = addr >> PAGE_SHIFT;
97943 int error;
97944+ unsigned long charged;
97945
97946 len = PAGE_ALIGN(len);
97947 if (!len)
97948@@ -2634,10 +3111,24 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97949
97950 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97951
97952+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97953+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97954+ flags &= ~VM_EXEC;
97955+
97956+#ifdef CONFIG_PAX_MPROTECT
97957+ if (mm->pax_flags & MF_PAX_MPROTECT)
97958+ flags &= ~VM_MAYEXEC;
97959+#endif
97960+
97961+ }
97962+#endif
97963+
97964 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97965 if (error & ~PAGE_MASK)
97966 return error;
97967
97968+ charged = len >> PAGE_SHIFT;
97969+
97970 error = mlock_future_check(mm, mm->def_flags, len);
97971 if (error)
97972 return error;
97973@@ -2651,21 +3142,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97974 /*
97975 * Clear old maps. this also does some error checking for us
97976 */
97977- munmap_back:
97978 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
97979 if (do_munmap(mm, addr, len))
97980 return -ENOMEM;
97981- goto munmap_back;
97982+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
97983 }
97984
97985 /* Check against address space limits *after* clearing old maps... */
97986- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97987+ if (!may_expand_vm(mm, charged))
97988 return -ENOMEM;
97989
97990 if (mm->map_count > sysctl_max_map_count)
97991 return -ENOMEM;
97992
97993- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
97994+ if (security_vm_enough_memory_mm(mm, charged))
97995 return -ENOMEM;
97996
97997 /* Can we just expand an old private anonymous mapping? */
97998@@ -2679,7 +3169,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
97999 */
98000 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98001 if (!vma) {
98002- vm_unacct_memory(len >> PAGE_SHIFT);
98003+ vm_unacct_memory(charged);
98004 return -ENOMEM;
98005 }
98006
98007@@ -2693,10 +3183,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
98008 vma_link(mm, vma, prev, rb_link, rb_parent);
98009 out:
98010 perf_event_mmap(vma);
98011- mm->total_vm += len >> PAGE_SHIFT;
98012+ mm->total_vm += charged;
98013 if (flags & VM_LOCKED)
98014- mm->locked_vm += (len >> PAGE_SHIFT);
98015+ mm->locked_vm += charged;
98016 vma->vm_flags |= VM_SOFTDIRTY;
98017+ track_exec_limit(mm, addr, addr + len, flags);
98018 return addr;
98019 }
98020
98021@@ -2758,6 +3249,7 @@ void exit_mmap(struct mm_struct *mm)
98022 while (vma) {
98023 if (vma->vm_flags & VM_ACCOUNT)
98024 nr_accounted += vma_pages(vma);
98025+ vma->vm_mirror = NULL;
98026 vma = remove_vma(vma);
98027 }
98028 vm_unacct_memory(nr_accounted);
98029@@ -2775,6 +3267,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98030 struct vm_area_struct *prev;
98031 struct rb_node **rb_link, *rb_parent;
98032
98033+#ifdef CONFIG_PAX_SEGMEXEC
98034+ struct vm_area_struct *vma_m = NULL;
98035+#endif
98036+
98037+ if (security_mmap_addr(vma->vm_start))
98038+ return -EPERM;
98039+
98040 /*
98041 * The vm_pgoff of a purely anonymous vma should be irrelevant
98042 * until its first write fault, when page's anon_vma and index
98043@@ -2798,7 +3297,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
98044 security_vm_enough_memory_mm(mm, vma_pages(vma)))
98045 return -ENOMEM;
98046
98047+#ifdef CONFIG_PAX_SEGMEXEC
98048+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
98049+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98050+ if (!vma_m)
98051+ return -ENOMEM;
98052+ }
98053+#endif
98054+
98055 vma_link(mm, vma, prev, rb_link, rb_parent);
98056+
98057+#ifdef CONFIG_PAX_SEGMEXEC
98058+ if (vma_m)
98059+ BUG_ON(pax_mirror_vma(vma_m, vma));
98060+#endif
98061+
98062 return 0;
98063 }
98064
98065@@ -2817,6 +3330,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98066 struct rb_node **rb_link, *rb_parent;
98067 bool faulted_in_anon_vma = true;
98068
98069+ BUG_ON(vma->vm_mirror);
98070+
98071 /*
98072 * If anonymous vma has not yet been faulted, update new pgoff
98073 * to match new location, to increase its chance of merging.
98074@@ -2881,6 +3396,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
98075 return NULL;
98076 }
98077
98078+#ifdef CONFIG_PAX_SEGMEXEC
98079+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
98080+{
98081+ struct vm_area_struct *prev_m;
98082+ struct rb_node **rb_link_m, *rb_parent_m;
98083+ struct mempolicy *pol_m;
98084+
98085+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
98086+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
98087+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
98088+ *vma_m = *vma;
98089+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
98090+ if (anon_vma_clone(vma_m, vma))
98091+ return -ENOMEM;
98092+ pol_m = vma_policy(vma_m);
98093+ mpol_get(pol_m);
98094+ set_vma_policy(vma_m, pol_m);
98095+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
98096+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
98097+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
98098+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
98099+ if (vma_m->vm_file)
98100+ get_file(vma_m->vm_file);
98101+ if (vma_m->vm_ops && vma_m->vm_ops->open)
98102+ vma_m->vm_ops->open(vma_m);
98103+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
98104+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
98105+ vma_m->vm_mirror = vma;
98106+ vma->vm_mirror = vma_m;
98107+ return 0;
98108+}
98109+#endif
98110+
98111 /*
98112 * Return true if the calling process may expand its vm space by the passed
98113 * number of pages
98114@@ -2892,6 +3440,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
98115
98116 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
98117
98118+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
98119 if (cur + npages > lim)
98120 return 0;
98121 return 1;
98122@@ -2974,6 +3523,22 @@ static struct vm_area_struct *__install_special_mapping(
98123 vma->vm_start = addr;
98124 vma->vm_end = addr + len;
98125
98126+#ifdef CONFIG_PAX_MPROTECT
98127+ if (mm->pax_flags & MF_PAX_MPROTECT) {
98128+#ifndef CONFIG_PAX_MPROTECT_COMPAT
98129+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
98130+ return ERR_PTR(-EPERM);
98131+ if (!(vm_flags & VM_EXEC))
98132+ vm_flags &= ~VM_MAYEXEC;
98133+#else
98134+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
98135+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
98136+#endif
98137+ else
98138+ vm_flags &= ~VM_MAYWRITE;
98139+ }
98140+#endif
98141+
98142 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
98143 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
98144
98145diff --git a/mm/mprotect.c b/mm/mprotect.c
98146index c43d557..0b7ccd2 100644
98147--- a/mm/mprotect.c
98148+++ b/mm/mprotect.c
98149@@ -24,10 +24,18 @@
98150 #include <linux/migrate.h>
98151 #include <linux/perf_event.h>
98152 #include <linux/ksm.h>
98153+#include <linux/sched/sysctl.h>
98154+
98155+#ifdef CONFIG_PAX_MPROTECT
98156+#include <linux/elf.h>
98157+#include <linux/binfmts.h>
98158+#endif
98159+
98160 #include <asm/uaccess.h>
98161 #include <asm/pgtable.h>
98162 #include <asm/cacheflush.h>
98163 #include <asm/tlbflush.h>
98164+#include <asm/mmu_context.h>
98165
98166 #ifndef pgprot_modify
98167 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
98168@@ -256,6 +264,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
98169 return pages;
98170 }
98171
98172+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98173+/* called while holding the mmap semaphor for writing except stack expansion */
98174+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
98175+{
98176+ unsigned long oldlimit, newlimit = 0UL;
98177+
98178+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
98179+ return;
98180+
98181+ spin_lock(&mm->page_table_lock);
98182+ oldlimit = mm->context.user_cs_limit;
98183+ if ((prot & VM_EXEC) && oldlimit < end)
98184+ /* USER_CS limit moved up */
98185+ newlimit = end;
98186+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
98187+ /* USER_CS limit moved down */
98188+ newlimit = start;
98189+
98190+ if (newlimit) {
98191+ mm->context.user_cs_limit = newlimit;
98192+
98193+#ifdef CONFIG_SMP
98194+ wmb();
98195+ cpus_clear(mm->context.cpu_user_cs_mask);
98196+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
98197+#endif
98198+
98199+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
98200+ }
98201+ spin_unlock(&mm->page_table_lock);
98202+ if (newlimit == end) {
98203+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
98204+
98205+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
98206+ if (is_vm_hugetlb_page(vma))
98207+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
98208+ else
98209+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
98210+ }
98211+}
98212+#endif
98213+
98214 int
98215 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98216 unsigned long start, unsigned long end, unsigned long newflags)
98217@@ -268,11 +318,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98218 int error;
98219 int dirty_accountable = 0;
98220
98221+#ifdef CONFIG_PAX_SEGMEXEC
98222+ struct vm_area_struct *vma_m = NULL;
98223+ unsigned long start_m, end_m;
98224+
98225+ start_m = start + SEGMEXEC_TASK_SIZE;
98226+ end_m = end + SEGMEXEC_TASK_SIZE;
98227+#endif
98228+
98229 if (newflags == oldflags) {
98230 *pprev = vma;
98231 return 0;
98232 }
98233
98234+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
98235+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
98236+
98237+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
98238+ return -ENOMEM;
98239+
98240+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
98241+ return -ENOMEM;
98242+ }
98243+
98244 /*
98245 * If we make a private mapping writable we increase our commit;
98246 * but (without finer accounting) cannot reduce our commit if we
98247@@ -289,6 +357,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
98248 }
98249 }
98250
98251+#ifdef CONFIG_PAX_SEGMEXEC
98252+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
98253+ if (start != vma->vm_start) {
98254+ error = split_vma(mm, vma, start, 1);
98255+ if (error)
98256+ goto fail;
98257+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
98258+ *pprev = (*pprev)->vm_next;
98259+ }
98260+
98261+ if (end != vma->vm_end) {
98262+ error = split_vma(mm, vma, end, 0);
98263+ if (error)
98264+ goto fail;
98265+ }
98266+
98267+ if (pax_find_mirror_vma(vma)) {
98268+ error = __do_munmap(mm, start_m, end_m - start_m);
98269+ if (error)
98270+ goto fail;
98271+ } else {
98272+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
98273+ if (!vma_m) {
98274+ error = -ENOMEM;
98275+ goto fail;
98276+ }
98277+ vma->vm_flags = newflags;
98278+ error = pax_mirror_vma(vma_m, vma);
98279+ if (error) {
98280+ vma->vm_flags = oldflags;
98281+ goto fail;
98282+ }
98283+ }
98284+ }
98285+#endif
98286+
98287 /*
98288 * First try to merge with previous and/or next vma.
98289 */
98290@@ -319,9 +423,21 @@ success:
98291 * vm_flags and vm_page_prot are protected by the mmap_sem
98292 * held in write mode.
98293 */
98294+
98295+#ifdef CONFIG_PAX_SEGMEXEC
98296+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
98297+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
98298+#endif
98299+
98300 vma->vm_flags = newflags;
98301+
98302+#ifdef CONFIG_PAX_MPROTECT
98303+ if (mm->binfmt && mm->binfmt->handle_mprotect)
98304+ mm->binfmt->handle_mprotect(vma, newflags);
98305+#endif
98306+
98307 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
98308- vm_get_page_prot(newflags));
98309+ vm_get_page_prot(vma->vm_flags));
98310
98311 if (vma_wants_writenotify(vma)) {
98312 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
98313@@ -360,6 +476,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98314 end = start + len;
98315 if (end <= start)
98316 return -ENOMEM;
98317+
98318+#ifdef CONFIG_PAX_SEGMEXEC
98319+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
98320+ if (end > SEGMEXEC_TASK_SIZE)
98321+ return -EINVAL;
98322+ } else
98323+#endif
98324+
98325+ if (end > TASK_SIZE)
98326+ return -EINVAL;
98327+
98328 if (!arch_validate_prot(prot))
98329 return -EINVAL;
98330
98331@@ -367,7 +494,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98332 /*
98333 * Does the application expect PROT_READ to imply PROT_EXEC:
98334 */
98335- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
98336+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
98337 prot |= PROT_EXEC;
98338
98339 vm_flags = calc_vm_prot_bits(prot);
98340@@ -399,6 +526,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98341 if (start > vma->vm_start)
98342 prev = vma;
98343
98344+#ifdef CONFIG_PAX_MPROTECT
98345+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
98346+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
98347+#endif
98348+
98349 for (nstart = start ; ; ) {
98350 unsigned long newflags;
98351
98352@@ -409,6 +541,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98353
98354 /* newflags >> 4 shift VM_MAY% in place of VM_% */
98355 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
98356+ if (prot & (PROT_WRITE | PROT_EXEC))
98357+ gr_log_rwxmprotect(vma);
98358+
98359+ error = -EACCES;
98360+ goto out;
98361+ }
98362+
98363+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
98364 error = -EACCES;
98365 goto out;
98366 }
98367@@ -423,6 +563,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
98368 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
98369 if (error)
98370 goto out;
98371+
98372+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
98373+
98374 nstart = tmp;
98375
98376 if (nstart < prev->vm_end)
98377diff --git a/mm/mremap.c b/mm/mremap.c
98378index 05f1180..c3cde48 100644
98379--- a/mm/mremap.c
98380+++ b/mm/mremap.c
98381@@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
98382 continue;
98383 pte = ptep_get_and_clear(mm, old_addr, old_pte);
98384 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
98385+
98386+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
98387+ if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
98388+ pte = pte_exprotect(pte);
98389+#endif
98390+
98391 pte = move_soft_dirty_pte(pte);
98392 set_pte_at(mm, new_addr, new_pte, pte);
98393 }
98394@@ -344,6 +350,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
98395 if (is_vm_hugetlb_page(vma))
98396 goto Einval;
98397
98398+#ifdef CONFIG_PAX_SEGMEXEC
98399+ if (pax_find_mirror_vma(vma))
98400+ goto Einval;
98401+#endif
98402+
98403 /* We can't remap across vm area boundaries */
98404 if (old_len > vma->vm_end - addr)
98405 goto Efault;
98406@@ -399,20 +410,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
98407 unsigned long ret = -EINVAL;
98408 unsigned long charged = 0;
98409 unsigned long map_flags;
98410+ unsigned long pax_task_size = TASK_SIZE;
98411
98412 if (new_addr & ~PAGE_MASK)
98413 goto out;
98414
98415- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
98416+#ifdef CONFIG_PAX_SEGMEXEC
98417+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98418+ pax_task_size = SEGMEXEC_TASK_SIZE;
98419+#endif
98420+
98421+ pax_task_size -= PAGE_SIZE;
98422+
98423+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
98424 goto out;
98425
98426 /* Check if the location we're moving into overlaps the
98427 * old location at all, and fail if it does.
98428 */
98429- if ((new_addr <= addr) && (new_addr+new_len) > addr)
98430- goto out;
98431-
98432- if ((addr <= new_addr) && (addr+old_len) > new_addr)
98433+ if (addr + old_len > new_addr && new_addr + new_len > addr)
98434 goto out;
98435
98436 ret = do_munmap(mm, new_addr, new_len);
98437@@ -481,6 +497,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98438 unsigned long ret = -EINVAL;
98439 unsigned long charged = 0;
98440 bool locked = false;
98441+ unsigned long pax_task_size = TASK_SIZE;
98442
98443 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
98444 return ret;
98445@@ -502,6 +519,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98446 if (!new_len)
98447 return ret;
98448
98449+#ifdef CONFIG_PAX_SEGMEXEC
98450+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
98451+ pax_task_size = SEGMEXEC_TASK_SIZE;
98452+#endif
98453+
98454+ pax_task_size -= PAGE_SIZE;
98455+
98456+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
98457+ old_len > pax_task_size || addr > pax_task_size-old_len)
98458+ return ret;
98459+
98460 down_write(&current->mm->mmap_sem);
98461
98462 if (flags & MREMAP_FIXED) {
98463@@ -552,6 +580,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98464 new_addr = addr;
98465 }
98466 ret = addr;
98467+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
98468 goto out;
98469 }
98470 }
98471@@ -575,7 +604,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
98472 goto out;
98473 }
98474
98475+ map_flags = vma->vm_flags;
98476 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
98477+ if (!(ret & ~PAGE_MASK)) {
98478+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
98479+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
98480+ }
98481 }
98482 out:
98483 if (ret & ~PAGE_MASK)
98484diff --git a/mm/nommu.c b/mm/nommu.c
98485index a881d96..e5932cd 100644
98486--- a/mm/nommu.c
98487+++ b/mm/nommu.c
98488@@ -70,7 +70,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
98489 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
98490 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
98491 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
98492-int heap_stack_gap = 0;
98493
98494 atomic_long_t mmap_pages_allocated;
98495
98496@@ -857,15 +856,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
98497 EXPORT_SYMBOL(find_vma);
98498
98499 /*
98500- * find a VMA
98501- * - we don't extend stack VMAs under NOMMU conditions
98502- */
98503-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
98504-{
98505- return find_vma(mm, addr);
98506-}
98507-
98508-/*
98509 * expand a stack to a given address
98510 * - not supported under NOMMU conditions
98511 */
98512@@ -1572,6 +1562,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
98513
98514 /* most fields are the same, copy all, and then fixup */
98515 *new = *vma;
98516+ INIT_LIST_HEAD(&new->anon_vma_chain);
98517 *region = *vma->vm_region;
98518 new->vm_region = region;
98519
98520@@ -2002,8 +1993,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
98521 }
98522 EXPORT_SYMBOL(generic_file_remap_pages);
98523
98524-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98525- unsigned long addr, void *buf, int len, int write)
98526+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98527+ unsigned long addr, void *buf, size_t len, int write)
98528 {
98529 struct vm_area_struct *vma;
98530
98531@@ -2044,8 +2035,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
98532 *
98533 * The caller must hold a reference on @mm.
98534 */
98535-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98536- void *buf, int len, int write)
98537+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
98538+ void *buf, size_t len, int write)
98539 {
98540 return __access_remote_vm(NULL, mm, addr, buf, len, write);
98541 }
98542@@ -2054,7 +2045,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
98543 * Access another process' address space.
98544 * - source/target buffer must be kernel space
98545 */
98546-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
98547+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
98548 {
98549 struct mm_struct *mm;
98550
98551diff --git a/mm/page-writeback.c b/mm/page-writeback.c
98552index ba5fd97..5a95869 100644
98553--- a/mm/page-writeback.c
98554+++ b/mm/page-writeback.c
98555@@ -664,7 +664,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
98556 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
98557 * - the bdi dirty thresh drops quickly due to change of JBOD workload
98558 */
98559-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
98560+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
98561 unsigned long thresh,
98562 unsigned long bg_thresh,
98563 unsigned long dirty,
98564diff --git a/mm/page_alloc.c b/mm/page_alloc.c
98565index c5fe124..2cf7f17 100644
98566--- a/mm/page_alloc.c
98567+++ b/mm/page_alloc.c
98568@@ -61,6 +61,7 @@
98569 #include <linux/page-debug-flags.h>
98570 #include <linux/hugetlb.h>
98571 #include <linux/sched/rt.h>
98572+#include <linux/random.h>
98573
98574 #include <asm/sections.h>
98575 #include <asm/tlbflush.h>
98576@@ -357,7 +358,7 @@ out:
98577 * This usage means that zero-order pages may not be compound.
98578 */
98579
98580-static void free_compound_page(struct page *page)
98581+void free_compound_page(struct page *page)
98582 {
98583 __free_pages_ok(page, compound_order(page));
98584 }
98585@@ -740,6 +741,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98586 int i;
98587 int bad = 0;
98588
98589+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98590+ unsigned long index = 1UL << order;
98591+#endif
98592+
98593 trace_mm_page_free(page, order);
98594 kmemcheck_free_shadow(page, order);
98595
98596@@ -756,6 +761,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
98597 debug_check_no_obj_freed(page_address(page),
98598 PAGE_SIZE << order);
98599 }
98600+
98601+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98602+ for (; index; --index)
98603+ sanitize_highpage(page + index - 1);
98604+#endif
98605+
98606 arch_free_page(page, order);
98607 kernel_map_pages(page, 1 << order, 0);
98608
98609@@ -779,6 +790,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
98610 local_irq_restore(flags);
98611 }
98612
98613+#ifdef CONFIG_PAX_LATENT_ENTROPY
98614+bool __meminitdata extra_latent_entropy;
98615+
98616+static int __init setup_pax_extra_latent_entropy(char *str)
98617+{
98618+ extra_latent_entropy = true;
98619+ return 0;
98620+}
98621+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
98622+
98623+volatile u64 latent_entropy __latent_entropy;
98624+EXPORT_SYMBOL(latent_entropy);
98625+#endif
98626+
98627 void __init __free_pages_bootmem(struct page *page, unsigned int order)
98628 {
98629 unsigned int nr_pages = 1 << order;
98630@@ -794,6 +819,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
98631 __ClearPageReserved(p);
98632 set_page_count(p, 0);
98633
98634+#ifdef CONFIG_PAX_LATENT_ENTROPY
98635+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
98636+ u64 hash = 0;
98637+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
98638+ const u64 *data = lowmem_page_address(page);
98639+
98640+ for (index = 0; index < end; index++)
98641+ hash ^= hash + data[index];
98642+ latent_entropy ^= hash;
98643+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
98644+ }
98645+#endif
98646+
98647 page_zone(page)->managed_pages += nr_pages;
98648 set_page_refcounted(page);
98649 __free_pages(page, order);
98650@@ -922,8 +960,10 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
98651 arch_alloc_page(page, order);
98652 kernel_map_pages(page, 1 << order, 1);
98653
98654+#ifndef CONFIG_PAX_MEMORY_SANITIZE
98655 if (gfp_flags & __GFP_ZERO)
98656 prep_zero_page(page, order, gfp_flags);
98657+#endif
98658
98659 if (order && (gfp_flags & __GFP_COMP))
98660 prep_compound_page(page, order);
98661@@ -1601,7 +1641,7 @@ again:
98662 }
98663
98664 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
98665- if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98666+ if (atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
98667 !zone_is_fair_depleted(zone))
98668 zone_set_flag(zone, ZONE_FAIR_DEPLETED);
98669
98670@@ -1922,7 +1962,7 @@ static void reset_alloc_batches(struct zone *preferred_zone)
98671 do {
98672 mod_zone_page_state(zone, NR_ALLOC_BATCH,
98673 high_wmark_pages(zone) - low_wmark_pages(zone) -
98674- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98675+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98676 zone_clear_flag(zone, ZONE_FAIR_DEPLETED);
98677 } while (zone++ != preferred_zone);
98678 }
98679@@ -5699,7 +5739,7 @@ static void __setup_per_zone_wmarks(void)
98680
98681 __mod_zone_page_state(zone, NR_ALLOC_BATCH,
98682 high_wmark_pages(zone) - low_wmark_pages(zone) -
98683- atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
98684+ atomic_long_read_unchecked(&zone->vm_stat[NR_ALLOC_BATCH]));
98685
98686 setup_zone_migrate_reserve(zone);
98687 spin_unlock_irqrestore(&zone->lock, flags);
98688diff --git a/mm/percpu.c b/mm/percpu.c
98689index 2139e30..1d45bce 100644
98690--- a/mm/percpu.c
98691+++ b/mm/percpu.c
98692@@ -123,7 +123,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
98693 static unsigned int pcpu_high_unit_cpu __read_mostly;
98694
98695 /* the address of the first chunk which starts with the kernel static area */
98696-void *pcpu_base_addr __read_mostly;
98697+void *pcpu_base_addr __read_only;
98698 EXPORT_SYMBOL_GPL(pcpu_base_addr);
98699
98700 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
98701diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
98702index 5077afc..846c9ef 100644
98703--- a/mm/process_vm_access.c
98704+++ b/mm/process_vm_access.c
98705@@ -13,6 +13,7 @@
98706 #include <linux/uio.h>
98707 #include <linux/sched.h>
98708 #include <linux/highmem.h>
98709+#include <linux/security.h>
98710 #include <linux/ptrace.h>
98711 #include <linux/slab.h>
98712 #include <linux/syscalls.h>
98713@@ -157,19 +158,19 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98714 ssize_t iov_len;
98715 size_t total_len = iov_iter_count(iter);
98716
98717+ return -ENOSYS; // PaX: until properly audited
98718+
98719 /*
98720 * Work out how many pages of struct pages we're going to need
98721 * when eventually calling get_user_pages
98722 */
98723 for (i = 0; i < riovcnt; i++) {
98724 iov_len = rvec[i].iov_len;
98725- if (iov_len > 0) {
98726- nr_pages_iov = ((unsigned long)rvec[i].iov_base
98727- + iov_len)
98728- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
98729- / PAGE_SIZE + 1;
98730- nr_pages = max(nr_pages, nr_pages_iov);
98731- }
98732+ if (iov_len <= 0)
98733+ continue;
98734+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
98735+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
98736+ nr_pages = max(nr_pages, nr_pages_iov);
98737 }
98738
98739 if (nr_pages == 0)
98740@@ -197,6 +198,11 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
98741 goto free_proc_pages;
98742 }
98743
98744+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
98745+ rc = -EPERM;
98746+ goto put_task_struct;
98747+ }
98748+
98749 mm = mm_access(task, PTRACE_MODE_ATTACH);
98750 if (!mm || IS_ERR(mm)) {
98751 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
98752diff --git a/mm/rmap.c b/mm/rmap.c
98753index e01318d..7a532bd 100644
98754--- a/mm/rmap.c
98755+++ b/mm/rmap.c
98756@@ -164,6 +164,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98757 struct anon_vma *anon_vma = vma->anon_vma;
98758 struct anon_vma_chain *avc;
98759
98760+#ifdef CONFIG_PAX_SEGMEXEC
98761+ struct anon_vma_chain *avc_m = NULL;
98762+#endif
98763+
98764 might_sleep();
98765 if (unlikely(!anon_vma)) {
98766 struct mm_struct *mm = vma->vm_mm;
98767@@ -173,6 +177,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98768 if (!avc)
98769 goto out_enomem;
98770
98771+#ifdef CONFIG_PAX_SEGMEXEC
98772+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
98773+ if (!avc_m)
98774+ goto out_enomem_free_avc;
98775+#endif
98776+
98777 anon_vma = find_mergeable_anon_vma(vma);
98778 allocated = NULL;
98779 if (!anon_vma) {
98780@@ -186,6 +196,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98781 /* page_table_lock to protect against threads */
98782 spin_lock(&mm->page_table_lock);
98783 if (likely(!vma->anon_vma)) {
98784+
98785+#ifdef CONFIG_PAX_SEGMEXEC
98786+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
98787+
98788+ if (vma_m) {
98789+ BUG_ON(vma_m->anon_vma);
98790+ vma_m->anon_vma = anon_vma;
98791+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
98792+ avc_m = NULL;
98793+ }
98794+#endif
98795+
98796 vma->anon_vma = anon_vma;
98797 anon_vma_chain_link(vma, avc, anon_vma);
98798 allocated = NULL;
98799@@ -196,12 +218,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
98800
98801 if (unlikely(allocated))
98802 put_anon_vma(allocated);
98803+
98804+#ifdef CONFIG_PAX_SEGMEXEC
98805+ if (unlikely(avc_m))
98806+ anon_vma_chain_free(avc_m);
98807+#endif
98808+
98809 if (unlikely(avc))
98810 anon_vma_chain_free(avc);
98811 }
98812 return 0;
98813
98814 out_enomem_free_avc:
98815+
98816+#ifdef CONFIG_PAX_SEGMEXEC
98817+ if (avc_m)
98818+ anon_vma_chain_free(avc_m);
98819+#endif
98820+
98821 anon_vma_chain_free(avc);
98822 out_enomem:
98823 return -ENOMEM;
98824@@ -237,7 +271,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
98825 * Attach the anon_vmas from src to dst.
98826 * Returns 0 on success, -ENOMEM on failure.
98827 */
98828-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98829+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
98830 {
98831 struct anon_vma_chain *avc, *pavc;
98832 struct anon_vma *root = NULL;
98833@@ -270,10 +304,11 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
98834 * the corresponding VMA in the parent process is attached to.
98835 * Returns 0 on success, non-zero on failure.
98836 */
98837-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
98838+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
98839 {
98840 struct anon_vma_chain *avc;
98841 struct anon_vma *anon_vma;
98842+ int error;
98843
98844 /* Don't bother if the parent process has no anon_vma here. */
98845 if (!pvma->anon_vma)
98846@@ -283,8 +318,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
98847 * First, attach the new VMA to the parent VMA's anon_vmas,
98848 * so rmap can find non-COWed pages in child processes.
98849 */
98850- if (anon_vma_clone(vma, pvma))
98851- return -ENOMEM;
98852+ error = anon_vma_clone(vma, pvma);
98853+ if (error)
98854+ return error;
98855
98856 /* Then add our own anon_vma. */
98857 anon_vma = anon_vma_alloc();
98858@@ -374,8 +410,10 @@ static void anon_vma_ctor(void *data)
98859 void __init anon_vma_init(void)
98860 {
98861 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
98862- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
98863- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
98864+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
98865+ anon_vma_ctor);
98866+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
98867+ SLAB_PANIC|SLAB_NO_SANITIZE);
98868 }
98869
98870 /*
98871diff --git a/mm/shmem.c b/mm/shmem.c
98872index 469f90d..34a09ee 100644
98873--- a/mm/shmem.c
98874+++ b/mm/shmem.c
98875@@ -33,7 +33,7 @@
98876 #include <linux/swap.h>
98877 #include <linux/aio.h>
98878
98879-static struct vfsmount *shm_mnt;
98880+struct vfsmount *shm_mnt;
98881
98882 #ifdef CONFIG_SHMEM
98883 /*
98884@@ -80,7 +80,7 @@ static struct vfsmount *shm_mnt;
98885 #define BOGO_DIRENT_SIZE 20
98886
98887 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
98888-#define SHORT_SYMLINK_LEN 128
98889+#define SHORT_SYMLINK_LEN 64
98890
98891 /*
98892 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
98893@@ -2524,6 +2524,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
98894 static int shmem_xattr_validate(const char *name)
98895 {
98896 struct { const char *prefix; size_t len; } arr[] = {
98897+
98898+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98899+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
98900+#endif
98901+
98902 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
98903 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
98904 };
98905@@ -2579,6 +2584,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
98906 if (err)
98907 return err;
98908
98909+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
98910+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
98911+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
98912+ return -EOPNOTSUPP;
98913+ if (size > 8)
98914+ return -EINVAL;
98915+ }
98916+#endif
98917+
98918 return simple_xattr_set(&info->xattrs, name, value, size, flags);
98919 }
98920
98921@@ -2962,8 +2976,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
98922 int err = -ENOMEM;
98923
98924 /* Round up to L1_CACHE_BYTES to resist false sharing */
98925- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
98926- L1_CACHE_BYTES), GFP_KERNEL);
98927+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
98928 if (!sbinfo)
98929 return -ENOMEM;
98930
98931diff --git a/mm/slab.c b/mm/slab.c
98932index 7c52b38..3ccc17e 100644
98933--- a/mm/slab.c
98934+++ b/mm/slab.c
98935@@ -316,10 +316,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98936 if ((x)->max_freeable < i) \
98937 (x)->max_freeable = i; \
98938 } while (0)
98939-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98940-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98941-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98942-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98943+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98944+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98945+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98946+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98947+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
98948+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
98949 #else
98950 #define STATS_INC_ACTIVE(x) do { } while (0)
98951 #define STATS_DEC_ACTIVE(x) do { } while (0)
98952@@ -336,6 +338,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
98953 #define STATS_INC_ALLOCMISS(x) do { } while (0)
98954 #define STATS_INC_FREEHIT(x) do { } while (0)
98955 #define STATS_INC_FREEMISS(x) do { } while (0)
98956+#define STATS_INC_SANITIZED(x) do { } while (0)
98957+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
98958 #endif
98959
98960 #if DEBUG
98961@@ -452,7 +456,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
98962 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98963 */
98964 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98965- const struct page *page, void *obj)
98966+ const struct page *page, const void *obj)
98967 {
98968 u32 offset = (obj - page->s_mem);
98969 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98970@@ -1462,12 +1466,12 @@ void __init kmem_cache_init(void)
98971 */
98972
98973 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
98974- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
98975+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
98976
98977 if (INDEX_AC != INDEX_NODE)
98978 kmalloc_caches[INDEX_NODE] =
98979 create_kmalloc_cache("kmalloc-node",
98980- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
98981+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
98982
98983 slab_early_init = 0;
98984
98985@@ -3384,6 +3388,20 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
98986 struct array_cache *ac = cpu_cache_get(cachep);
98987
98988 check_irq_off();
98989+
98990+#ifdef CONFIG_PAX_MEMORY_SANITIZE
98991+ if (cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))
98992+ STATS_INC_NOT_SANITIZED(cachep);
98993+ else {
98994+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
98995+
98996+ if (cachep->ctor)
98997+ cachep->ctor(objp);
98998+
98999+ STATS_INC_SANITIZED(cachep);
99000+ }
99001+#endif
99002+
99003 kmemleak_free_recursive(objp, cachep->flags);
99004 objp = cache_free_debugcheck(cachep, objp, caller);
99005
99006@@ -3607,6 +3625,7 @@ void kfree(const void *objp)
99007
99008 if (unlikely(ZERO_OR_NULL_PTR(objp)))
99009 return;
99010+ VM_BUG_ON(!virt_addr_valid(objp));
99011 local_irq_save(flags);
99012 kfree_debugcheck(objp);
99013 c = virt_to_cache(objp);
99014@@ -4056,14 +4075,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
99015 }
99016 /* cpu stats */
99017 {
99018- unsigned long allochit = atomic_read(&cachep->allochit);
99019- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
99020- unsigned long freehit = atomic_read(&cachep->freehit);
99021- unsigned long freemiss = atomic_read(&cachep->freemiss);
99022+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
99023+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
99024+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
99025+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
99026
99027 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
99028 allochit, allocmiss, freehit, freemiss);
99029 }
99030+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99031+ {
99032+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
99033+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
99034+
99035+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
99036+ }
99037+#endif
99038 #endif
99039 }
99040
99041@@ -4281,13 +4308,69 @@ static const struct file_operations proc_slabstats_operations = {
99042 static int __init slab_proc_init(void)
99043 {
99044 #ifdef CONFIG_DEBUG_SLAB_LEAK
99045- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
99046+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
99047 #endif
99048 return 0;
99049 }
99050 module_init(slab_proc_init);
99051 #endif
99052
99053+bool is_usercopy_object(const void *ptr)
99054+{
99055+ struct page *page;
99056+ struct kmem_cache *cachep;
99057+
99058+ if (ZERO_OR_NULL_PTR(ptr))
99059+ return false;
99060+
99061+ if (!slab_is_available())
99062+ return false;
99063+
99064+ if (!virt_addr_valid(ptr))
99065+ return false;
99066+
99067+ page = virt_to_head_page(ptr);
99068+
99069+ if (!PageSlab(page))
99070+ return false;
99071+
99072+ cachep = page->slab_cache;
99073+ return cachep->flags & SLAB_USERCOPY;
99074+}
99075+
99076+#ifdef CONFIG_PAX_USERCOPY
99077+const char *check_heap_object(const void *ptr, unsigned long n)
99078+{
99079+ struct page *page;
99080+ struct kmem_cache *cachep;
99081+ unsigned int objnr;
99082+ unsigned long offset;
99083+
99084+ if (ZERO_OR_NULL_PTR(ptr))
99085+ return "<null>";
99086+
99087+ if (!virt_addr_valid(ptr))
99088+ return NULL;
99089+
99090+ page = virt_to_head_page(ptr);
99091+
99092+ if (!PageSlab(page))
99093+ return NULL;
99094+
99095+ cachep = page->slab_cache;
99096+ if (!(cachep->flags & SLAB_USERCOPY))
99097+ return cachep->name;
99098+
99099+ objnr = obj_to_index(cachep, page, ptr);
99100+ BUG_ON(objnr >= cachep->num);
99101+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
99102+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
99103+ return NULL;
99104+
99105+ return cachep->name;
99106+}
99107+#endif
99108+
99109 /**
99110 * ksize - get the actual amount of memory allocated for a given object
99111 * @objp: Pointer to the object
99112diff --git a/mm/slab.h b/mm/slab.h
99113index 0e0fdd3..d0fd761 100644
99114--- a/mm/slab.h
99115+++ b/mm/slab.h
99116@@ -32,6 +32,20 @@ extern struct list_head slab_caches;
99117 /* The slab cache that manages slab cache information */
99118 extern struct kmem_cache *kmem_cache;
99119
99120+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99121+#ifdef CONFIG_X86_64
99122+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
99123+#else
99124+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
99125+#endif
99126+enum pax_sanitize_mode {
99127+ PAX_SANITIZE_SLAB_OFF = 0,
99128+ PAX_SANITIZE_SLAB_FAST,
99129+ PAX_SANITIZE_SLAB_FULL,
99130+};
99131+extern enum pax_sanitize_mode pax_sanitize_slab;
99132+#endif
99133+
99134 unsigned long calculate_alignment(unsigned long flags,
99135 unsigned long align, unsigned long size);
99136
99137@@ -67,7 +81,8 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99138
99139 /* Legal flag mask for kmem_cache_create(), for various configurations */
99140 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
99141- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
99142+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
99143+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
99144
99145 #if defined(CONFIG_DEBUG_SLAB)
99146 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
99147@@ -251,6 +266,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
99148 return s;
99149
99150 page = virt_to_head_page(x);
99151+
99152+ BUG_ON(!PageSlab(page));
99153+
99154 cachep = page->slab_cache;
99155 if (slab_equal_or_root(cachep, s))
99156 return cachep;
99157diff --git a/mm/slab_common.c b/mm/slab_common.c
99158index d319502..da7714e 100644
99159--- a/mm/slab_common.c
99160+++ b/mm/slab_common.c
99161@@ -25,11 +25,35 @@
99162
99163 #include "slab.h"
99164
99165-enum slab_state slab_state;
99166+enum slab_state slab_state __read_only;
99167 LIST_HEAD(slab_caches);
99168 DEFINE_MUTEX(slab_mutex);
99169 struct kmem_cache *kmem_cache;
99170
99171+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99172+enum pax_sanitize_mode pax_sanitize_slab __read_only = PAX_SANITIZE_SLAB_FAST;
99173+static int __init pax_sanitize_slab_setup(char *str)
99174+{
99175+ if (!str)
99176+ return 0;
99177+
99178+ if (!strcmp(str, "0") || !strcmp(str, "off")) {
99179+ pr_info("PaX slab sanitization: %s\n", "disabled");
99180+ pax_sanitize_slab = PAX_SANITIZE_SLAB_OFF;
99181+ } else if (!strcmp(str, "1") || !strcmp(str, "fast")) {
99182+ pr_info("PaX slab sanitization: %s\n", "fast");
99183+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FAST;
99184+ } else if (!strcmp(str, "full")) {
99185+ pr_info("PaX slab sanitization: %s\n", "full");
99186+ pax_sanitize_slab = PAX_SANITIZE_SLAB_FULL;
99187+ } else
99188+ pr_err("PaX slab sanitization: unsupported option '%s'\n", str);
99189+
99190+ return 0;
99191+}
99192+early_param("pax_sanitize_slab", pax_sanitize_slab_setup);
99193+#endif
99194+
99195 #ifdef CONFIG_DEBUG_VM
99196 static int kmem_cache_sanity_check(const char *name, size_t size)
99197 {
99198@@ -160,7 +184,7 @@ do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
99199 if (err)
99200 goto out_free_cache;
99201
99202- s->refcount = 1;
99203+ atomic_set(&s->refcount, 1);
99204 list_add(&s->list, &slab_caches);
99205 out:
99206 if (err)
99207@@ -222,6 +246,13 @@ kmem_cache_create(const char *name, size_t size, size_t align,
99208 */
99209 flags &= CACHE_CREATE_MASK;
99210
99211+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99212+ if (pax_sanitize_slab == PAX_SANITIZE_SLAB_OFF || (flags & SLAB_DESTROY_BY_RCU))
99213+ flags |= SLAB_NO_SANITIZE;
99214+ else if (pax_sanitize_slab == PAX_SANITIZE_SLAB_FULL)
99215+ flags &= ~SLAB_NO_SANITIZE;
99216+#endif
99217+
99218 s = __kmem_cache_alias(name, size, align, flags, ctor);
99219 if (s)
99220 goto out_unlock;
99221@@ -341,8 +372,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99222
99223 mutex_lock(&slab_mutex);
99224
99225- s->refcount--;
99226- if (s->refcount)
99227+ if (!atomic_dec_and_test(&s->refcount))
99228 goto out_unlock;
99229
99230 if (memcg_cleanup_cache_params(s) != 0)
99231@@ -362,7 +392,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
99232 rcu_barrier();
99233
99234 memcg_free_cache_params(s);
99235-#ifdef SLAB_SUPPORTS_SYSFS
99236+#if defined(SLAB_SUPPORTS_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99237 sysfs_slab_remove(s);
99238 #else
99239 slab_kmem_cache_release(s);
99240@@ -418,7 +448,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
99241 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
99242 name, size, err);
99243
99244- s->refcount = -1; /* Exempt from merging for now */
99245+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
99246 }
99247
99248 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99249@@ -431,7 +461,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
99250
99251 create_boot_cache(s, name, size, flags);
99252 list_add(&s->list, &slab_caches);
99253- s->refcount = 1;
99254+ atomic_set(&s->refcount, 1);
99255 return s;
99256 }
99257
99258@@ -443,6 +473,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
99259 EXPORT_SYMBOL(kmalloc_dma_caches);
99260 #endif
99261
99262+#ifdef CONFIG_PAX_USERCOPY_SLABS
99263+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
99264+EXPORT_SYMBOL(kmalloc_usercopy_caches);
99265+#endif
99266+
99267 /*
99268 * Conversion table for small slabs sizes / 8 to the index in the
99269 * kmalloc array. This is necessary for slabs < 192 since we have non power
99270@@ -507,6 +542,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
99271 return kmalloc_dma_caches[index];
99272
99273 #endif
99274+
99275+#ifdef CONFIG_PAX_USERCOPY_SLABS
99276+ if (unlikely((flags & GFP_USERCOPY)))
99277+ return kmalloc_usercopy_caches[index];
99278+
99279+#endif
99280+
99281 return kmalloc_caches[index];
99282 }
99283
99284@@ -563,7 +605,7 @@ void __init create_kmalloc_caches(unsigned long flags)
99285 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
99286 if (!kmalloc_caches[i]) {
99287 kmalloc_caches[i] = create_kmalloc_cache(NULL,
99288- 1 << i, flags);
99289+ 1 << i, SLAB_USERCOPY | flags);
99290 }
99291
99292 /*
99293@@ -572,10 +614,10 @@ void __init create_kmalloc_caches(unsigned long flags)
99294 * earlier power of two caches
99295 */
99296 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
99297- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
99298+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
99299
99300 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
99301- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
99302+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
99303 }
99304
99305 /* Kmalloc array is now usable */
99306@@ -608,6 +650,23 @@ void __init create_kmalloc_caches(unsigned long flags)
99307 }
99308 }
99309 #endif
99310+
99311+#ifdef CONFIG_PAX_USERCOPY_SLABS
99312+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
99313+ struct kmem_cache *s = kmalloc_caches[i];
99314+
99315+ if (s) {
99316+ int size = kmalloc_size(i);
99317+ char *n = kasprintf(GFP_NOWAIT,
99318+ "usercopy-kmalloc-%d", size);
99319+
99320+ BUG_ON(!n);
99321+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
99322+ size, SLAB_USERCOPY | flags);
99323+ }
99324+ }
99325+#endif
99326+
99327 }
99328 #endif /* !CONFIG_SLOB */
99329
99330@@ -666,6 +725,9 @@ void print_slabinfo_header(struct seq_file *m)
99331 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
99332 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
99333 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
99334+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99335+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
99336+#endif
99337 #endif
99338 seq_putc(m, '\n');
99339 }
99340diff --git a/mm/slob.c b/mm/slob.c
99341index 21980e0..975f1bf 100644
99342--- a/mm/slob.c
99343+++ b/mm/slob.c
99344@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
99345 /*
99346 * Return the size of a slob block.
99347 */
99348-static slobidx_t slob_units(slob_t *s)
99349+static slobidx_t slob_units(const slob_t *s)
99350 {
99351 if (s->units > 0)
99352 return s->units;
99353@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
99354 /*
99355 * Return the next free slob block pointer after this one.
99356 */
99357-static slob_t *slob_next(slob_t *s)
99358+static slob_t *slob_next(const slob_t *s)
99359 {
99360 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
99361 slobidx_t next;
99362@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
99363 /*
99364 * Returns true if s is the last free block in its page.
99365 */
99366-static int slob_last(slob_t *s)
99367+static int slob_last(const slob_t *s)
99368 {
99369 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
99370 }
99371
99372-static void *slob_new_pages(gfp_t gfp, int order, int node)
99373+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
99374 {
99375- void *page;
99376+ struct page *page;
99377
99378 #ifdef CONFIG_NUMA
99379 if (node != NUMA_NO_NODE)
99380@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
99381 if (!page)
99382 return NULL;
99383
99384- return page_address(page);
99385+ __SetPageSlab(page);
99386+ return page;
99387 }
99388
99389-static void slob_free_pages(void *b, int order)
99390+static void slob_free_pages(struct page *sp, int order)
99391 {
99392 if (current->reclaim_state)
99393 current->reclaim_state->reclaimed_slab += 1 << order;
99394- free_pages((unsigned long)b, order);
99395+ __ClearPageSlab(sp);
99396+ page_mapcount_reset(sp);
99397+ sp->private = 0;
99398+ __free_pages(sp, order);
99399 }
99400
99401 /*
99402@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99403
99404 /* Not enough space: must allocate a new page */
99405 if (!b) {
99406- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99407- if (!b)
99408+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
99409+ if (!sp)
99410 return NULL;
99411- sp = virt_to_page(b);
99412- __SetPageSlab(sp);
99413+ b = page_address(sp);
99414
99415 spin_lock_irqsave(&slob_lock, flags);
99416 sp->units = SLOB_UNITS(PAGE_SIZE);
99417 sp->freelist = b;
99418+ sp->private = 0;
99419 INIT_LIST_HEAD(&sp->lru);
99420 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
99421 set_slob_page_free(sp, slob_list);
99422@@ -337,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
99423 /*
99424 * slob_free: entry point into the slob allocator.
99425 */
99426-static void slob_free(void *block, int size)
99427+static void slob_free(struct kmem_cache *c, void *block, int size)
99428 {
99429 struct page *sp;
99430 slob_t *prev, *next, *b = (slob_t *)block;
99431@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
99432 if (slob_page_free(sp))
99433 clear_slob_page_free(sp);
99434 spin_unlock_irqrestore(&slob_lock, flags);
99435- __ClearPageSlab(sp);
99436- page_mapcount_reset(sp);
99437- slob_free_pages(b, 0);
99438+ slob_free_pages(sp, 0);
99439 return;
99440 }
99441
99442+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99443+ if (pax_sanitize_slab && !(c && (c->flags & SLAB_NO_SANITIZE)))
99444+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
99445+#endif
99446+
99447 if (!slob_page_free(sp)) {
99448 /* This slob page is about to become partially free. Easy! */
99449 sp->units = units;
99450@@ -424,11 +431,10 @@ out:
99451 */
99452
99453 static __always_inline void *
99454-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99455+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
99456 {
99457- unsigned int *m;
99458- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99459- void *ret;
99460+ slob_t *m;
99461+ void *ret = NULL;
99462
99463 gfp &= gfp_allowed_mask;
99464
99465@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99466
99467 if (!m)
99468 return NULL;
99469- *m = size;
99470+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
99471+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
99472+ m[0].units = size;
99473+ m[1].units = align;
99474 ret = (void *)m + align;
99475
99476 trace_kmalloc_node(caller, ret,
99477 size, size + align, gfp, node);
99478 } else {
99479 unsigned int order = get_order(size);
99480+ struct page *page;
99481
99482 if (likely(order))
99483 gfp |= __GFP_COMP;
99484- ret = slob_new_pages(gfp, order, node);
99485+ page = slob_new_pages(gfp, order, node);
99486+ if (page) {
99487+ ret = page_address(page);
99488+ page->private = size;
99489+ }
99490
99491 trace_kmalloc_node(caller, ret,
99492 size, PAGE_SIZE << order, gfp, node);
99493 }
99494
99495- kmemleak_alloc(ret, size, 1, gfp);
99496+ return ret;
99497+}
99498+
99499+static __always_inline void *
99500+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
99501+{
99502+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99503+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
99504+
99505+ if (!ZERO_OR_NULL_PTR(ret))
99506+ kmemleak_alloc(ret, size, 1, gfp);
99507 return ret;
99508 }
99509
99510@@ -493,34 +517,112 @@ void kfree(const void *block)
99511 return;
99512 kmemleak_free(block);
99513
99514+ VM_BUG_ON(!virt_addr_valid(block));
99515 sp = virt_to_page(block);
99516- if (PageSlab(sp)) {
99517+ VM_BUG_ON(!PageSlab(sp));
99518+ if (!sp->private) {
99519 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99520- unsigned int *m = (unsigned int *)(block - align);
99521- slob_free(m, *m + align);
99522- } else
99523+ slob_t *m = (slob_t *)(block - align);
99524+ slob_free(NULL, m, m[0].units + align);
99525+ } else {
99526+ __ClearPageSlab(sp);
99527+ page_mapcount_reset(sp);
99528+ sp->private = 0;
99529 __free_pages(sp, compound_order(sp));
99530+ }
99531 }
99532 EXPORT_SYMBOL(kfree);
99533
99534+bool is_usercopy_object(const void *ptr)
99535+{
99536+ if (!slab_is_available())
99537+ return false;
99538+
99539+ // PAX: TODO
99540+
99541+ return false;
99542+}
99543+
99544+#ifdef CONFIG_PAX_USERCOPY
99545+const char *check_heap_object(const void *ptr, unsigned long n)
99546+{
99547+ struct page *page;
99548+ const slob_t *free;
99549+ const void *base;
99550+ unsigned long flags;
99551+
99552+ if (ZERO_OR_NULL_PTR(ptr))
99553+ return "<null>";
99554+
99555+ if (!virt_addr_valid(ptr))
99556+ return NULL;
99557+
99558+ page = virt_to_head_page(ptr);
99559+ if (!PageSlab(page))
99560+ return NULL;
99561+
99562+ if (page->private) {
99563+ base = page;
99564+ if (base <= ptr && n <= page->private - (ptr - base))
99565+ return NULL;
99566+ return "<slob>";
99567+ }
99568+
99569+ /* some tricky double walking to find the chunk */
99570+ spin_lock_irqsave(&slob_lock, flags);
99571+ base = (void *)((unsigned long)ptr & PAGE_MASK);
99572+ free = page->freelist;
99573+
99574+ while (!slob_last(free) && (void *)free <= ptr) {
99575+ base = free + slob_units(free);
99576+ free = slob_next(free);
99577+ }
99578+
99579+ while (base < (void *)free) {
99580+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
99581+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
99582+ int offset;
99583+
99584+ if (ptr < base + align)
99585+ break;
99586+
99587+ offset = ptr - base - align;
99588+ if (offset >= m) {
99589+ base += size;
99590+ continue;
99591+ }
99592+
99593+ if (n > m - offset)
99594+ break;
99595+
99596+ spin_unlock_irqrestore(&slob_lock, flags);
99597+ return NULL;
99598+ }
99599+
99600+ spin_unlock_irqrestore(&slob_lock, flags);
99601+ return "<slob>";
99602+}
99603+#endif
99604+
99605 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
99606 size_t ksize(const void *block)
99607 {
99608 struct page *sp;
99609 int align;
99610- unsigned int *m;
99611+ slob_t *m;
99612
99613 BUG_ON(!block);
99614 if (unlikely(block == ZERO_SIZE_PTR))
99615 return 0;
99616
99617 sp = virt_to_page(block);
99618- if (unlikely(!PageSlab(sp)))
99619- return PAGE_SIZE << compound_order(sp);
99620+ VM_BUG_ON(!PageSlab(sp));
99621+ if (sp->private)
99622+ return sp->private;
99623
99624 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
99625- m = (unsigned int *)(block - align);
99626- return SLOB_UNITS(*m) * SLOB_UNIT;
99627+ m = (slob_t *)(block - align);
99628+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
99629 }
99630 EXPORT_SYMBOL(ksize);
99631
99632@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
99633
99634 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
99635 {
99636- void *b;
99637+ void *b = NULL;
99638
99639 flags &= gfp_allowed_mask;
99640
99641 lockdep_trace_alloc(flags);
99642
99643+#ifdef CONFIG_PAX_USERCOPY_SLABS
99644+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
99645+#else
99646 if (c->size < PAGE_SIZE) {
99647 b = slob_alloc(c->size, flags, c->align, node);
99648 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99649 SLOB_UNITS(c->size) * SLOB_UNIT,
99650 flags, node);
99651 } else {
99652- b = slob_new_pages(flags, get_order(c->size), node);
99653+ struct page *sp;
99654+
99655+ sp = slob_new_pages(flags, get_order(c->size), node);
99656+ if (sp) {
99657+ b = page_address(sp);
99658+ sp->private = c->size;
99659+ }
99660 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
99661 PAGE_SIZE << get_order(c->size),
99662 flags, node);
99663 }
99664+#endif
99665
99666 if (b && c->ctor)
99667 c->ctor(b);
99668@@ -582,12 +694,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
99669 EXPORT_SYMBOL(kmem_cache_alloc_node);
99670 #endif
99671
99672-static void __kmem_cache_free(void *b, int size)
99673+static void __kmem_cache_free(struct kmem_cache *c, void *b, int size)
99674 {
99675- if (size < PAGE_SIZE)
99676- slob_free(b, size);
99677+ struct page *sp;
99678+
99679+ sp = virt_to_page(b);
99680+ BUG_ON(!PageSlab(sp));
99681+ if (!sp->private)
99682+ slob_free(c, b, size);
99683 else
99684- slob_free_pages(b, get_order(size));
99685+ slob_free_pages(sp, get_order(size));
99686 }
99687
99688 static void kmem_rcu_free(struct rcu_head *head)
99689@@ -595,22 +711,36 @@ static void kmem_rcu_free(struct rcu_head *head)
99690 struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
99691 void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
99692
99693- __kmem_cache_free(b, slob_rcu->size);
99694+ __kmem_cache_free(NULL, b, slob_rcu->size);
99695 }
99696
99697 void kmem_cache_free(struct kmem_cache *c, void *b)
99698 {
99699+ int size = c->size;
99700+
99701+#ifdef CONFIG_PAX_USERCOPY_SLABS
99702+ if (size + c->align < PAGE_SIZE) {
99703+ size += c->align;
99704+ b -= c->align;
99705+ }
99706+#endif
99707+
99708 kmemleak_free_recursive(b, c->flags);
99709 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
99710 struct slob_rcu *slob_rcu;
99711- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
99712- slob_rcu->size = c->size;
99713+ slob_rcu = b + (size - sizeof(struct slob_rcu));
99714+ slob_rcu->size = size;
99715 call_rcu(&slob_rcu->head, kmem_rcu_free);
99716 } else {
99717- __kmem_cache_free(b, c->size);
99718+ __kmem_cache_free(c, b, size);
99719 }
99720
99721+#ifdef CONFIG_PAX_USERCOPY_SLABS
99722+ trace_kfree(_RET_IP_, b);
99723+#else
99724 trace_kmem_cache_free(_RET_IP_, b);
99725+#endif
99726+
99727 }
99728 EXPORT_SYMBOL(kmem_cache_free);
99729
99730diff --git a/mm/slub.c b/mm/slub.c
99731index 3e8afcc..d6e2c89 100644
99732--- a/mm/slub.c
99733+++ b/mm/slub.c
99734@@ -207,7 +207,7 @@ struct track {
99735
99736 enum track_item { TRACK_ALLOC, TRACK_FREE };
99737
99738-#ifdef CONFIG_SYSFS
99739+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99740 static int sysfs_slab_add(struct kmem_cache *);
99741 static int sysfs_slab_alias(struct kmem_cache *, const char *);
99742 static void memcg_propagate_slab_attrs(struct kmem_cache *s);
99743@@ -545,7 +545,7 @@ static void print_track(const char *s, struct track *t)
99744 if (!t->addr)
99745 return;
99746
99747- pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
99748+ pr_err("INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
99749 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
99750 #ifdef CONFIG_STACKTRACE
99751 {
99752@@ -2643,6 +2643,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
99753
99754 slab_free_hook(s, x);
99755
99756+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99757+ if (!(s->flags & SLAB_NO_SANITIZE)) {
99758+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
99759+ if (s->ctor)
99760+ s->ctor(x);
99761+ }
99762+#endif
99763+
99764 redo:
99765 /*
99766 * Determine the currently cpus per cpu slab.
99767@@ -2710,7 +2718,7 @@ static int slub_min_objects;
99768 * Merge control. If this is set then no merging of slab caches will occur.
99769 * (Could be removed. This was introduced to pacify the merge skeptics.)
99770 */
99771-static int slub_nomerge;
99772+static int slub_nomerge = 1;
99773
99774 /*
99775 * Calculate the order of allocation given an slab object size.
99776@@ -2986,6 +2994,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
99777 s->inuse = size;
99778
99779 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
99780+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99781+ (!(flags & SLAB_NO_SANITIZE)) ||
99782+#endif
99783 s->ctor)) {
99784 /*
99785 * Relocate free pointer after the object if it is not
99786@@ -3313,6 +3324,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
99787 EXPORT_SYMBOL(__kmalloc_node);
99788 #endif
99789
99790+bool is_usercopy_object(const void *ptr)
99791+{
99792+ struct page *page;
99793+ struct kmem_cache *s;
99794+
99795+ if (ZERO_OR_NULL_PTR(ptr))
99796+ return false;
99797+
99798+ if (!slab_is_available())
99799+ return false;
99800+
99801+ if (!virt_addr_valid(ptr))
99802+ return false;
99803+
99804+ page = virt_to_head_page(ptr);
99805+
99806+ if (!PageSlab(page))
99807+ return false;
99808+
99809+ s = page->slab_cache;
99810+ return s->flags & SLAB_USERCOPY;
99811+}
99812+
99813+#ifdef CONFIG_PAX_USERCOPY
99814+const char *check_heap_object(const void *ptr, unsigned long n)
99815+{
99816+ struct page *page;
99817+ struct kmem_cache *s;
99818+ unsigned long offset;
99819+
99820+ if (ZERO_OR_NULL_PTR(ptr))
99821+ return "<null>";
99822+
99823+ if (!virt_addr_valid(ptr))
99824+ return NULL;
99825+
99826+ page = virt_to_head_page(ptr);
99827+
99828+ if (!PageSlab(page))
99829+ return NULL;
99830+
99831+ s = page->slab_cache;
99832+ if (!(s->flags & SLAB_USERCOPY))
99833+ return s->name;
99834+
99835+ offset = (ptr - page_address(page)) % s->size;
99836+ if (offset <= s->object_size && n <= s->object_size - offset)
99837+ return NULL;
99838+
99839+ return s->name;
99840+}
99841+#endif
99842+
99843 size_t ksize(const void *object)
99844 {
99845 struct page *page;
99846@@ -3341,6 +3405,7 @@ void kfree(const void *x)
99847 if (unlikely(ZERO_OR_NULL_PTR(x)))
99848 return;
99849
99850+ VM_BUG_ON(!virt_addr_valid(x));
99851 page = virt_to_head_page(x);
99852 if (unlikely(!PageSlab(page))) {
99853 BUG_ON(!PageCompound(page));
99854@@ -3642,7 +3707,7 @@ static int slab_unmergeable(struct kmem_cache *s)
99855 /*
99856 * We may have set a slab to be unmergeable during bootstrap.
99857 */
99858- if (s->refcount < 0)
99859+ if (atomic_read(&s->refcount) < 0)
99860 return 1;
99861
99862 return 0;
99863@@ -3699,7 +3764,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99864 int i;
99865 struct kmem_cache *c;
99866
99867- s->refcount++;
99868+ atomic_inc(&s->refcount);
99869
99870 /*
99871 * Adjust the object sizes so that we clear
99872@@ -3718,7 +3783,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
99873 }
99874
99875 if (sysfs_slab_alias(s, name)) {
99876- s->refcount--;
99877+ atomic_dec(&s->refcount);
99878 s = NULL;
99879 }
99880 }
99881@@ -3835,7 +3900,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
99882 }
99883 #endif
99884
99885-#ifdef CONFIG_SYSFS
99886+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99887 static int count_inuse(struct page *page)
99888 {
99889 return page->inuse;
99890@@ -4116,7 +4181,11 @@ static int list_locations(struct kmem_cache *s, char *buf,
99891 len += sprintf(buf + len, "%7ld ", l->count);
99892
99893 if (l->addr)
99894+#ifdef CONFIG_GRKERNSEC_HIDESYM
99895+ len += sprintf(buf + len, "%pS", NULL);
99896+#else
99897 len += sprintf(buf + len, "%pS", (void *)l->addr);
99898+#endif
99899 else
99900 len += sprintf(buf + len, "<not-available>");
99901
99902@@ -4218,12 +4287,12 @@ static void __init resiliency_test(void)
99903 validate_slab_cache(kmalloc_caches[9]);
99904 }
99905 #else
99906-#ifdef CONFIG_SYSFS
99907+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99908 static void resiliency_test(void) {};
99909 #endif
99910 #endif
99911
99912-#ifdef CONFIG_SYSFS
99913+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99914 enum slab_stat_type {
99915 SL_ALL, /* All slabs */
99916 SL_PARTIAL, /* Only partially allocated slabs */
99917@@ -4460,13 +4529,17 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
99918 {
99919 if (!s->ctor)
99920 return 0;
99921+#ifdef CONFIG_GRKERNSEC_HIDESYM
99922+ return sprintf(buf, "%pS\n", NULL);
99923+#else
99924 return sprintf(buf, "%pS\n", s->ctor);
99925+#endif
99926 }
99927 SLAB_ATTR_RO(ctor);
99928
99929 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
99930 {
99931- return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
99932+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) < 0 ? 0 : atomic_read(&s->refcount) - 1);
99933 }
99934 SLAB_ATTR_RO(aliases);
99935
99936@@ -4554,6 +4627,22 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
99937 SLAB_ATTR_RO(cache_dma);
99938 #endif
99939
99940+#ifdef CONFIG_PAX_USERCOPY_SLABS
99941+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
99942+{
99943+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
99944+}
99945+SLAB_ATTR_RO(usercopy);
99946+#endif
99947+
99948+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99949+static ssize_t sanitize_show(struct kmem_cache *s, char *buf)
99950+{
99951+ return sprintf(buf, "%d\n", !(s->flags & SLAB_NO_SANITIZE));
99952+}
99953+SLAB_ATTR_RO(sanitize);
99954+#endif
99955+
99956 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
99957 {
99958 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
99959@@ -4888,6 +4977,12 @@ static struct attribute *slab_attrs[] = {
99960 #ifdef CONFIG_ZONE_DMA
99961 &cache_dma_attr.attr,
99962 #endif
99963+#ifdef CONFIG_PAX_USERCOPY_SLABS
99964+ &usercopy_attr.attr,
99965+#endif
99966+#ifdef CONFIG_PAX_MEMORY_SANITIZE
99967+ &sanitize_attr.attr,
99968+#endif
99969 #ifdef CONFIG_NUMA
99970 &remote_node_defrag_ratio_attr.attr,
99971 #endif
99972@@ -5132,6 +5227,7 @@ static char *create_unique_id(struct kmem_cache *s)
99973 return name;
99974 }
99975
99976+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99977 static int sysfs_slab_add(struct kmem_cache *s)
99978 {
99979 int err;
99980@@ -5205,6 +5301,7 @@ void sysfs_slab_remove(struct kmem_cache *s)
99981 kobject_del(&s->kobj);
99982 kobject_put(&s->kobj);
99983 }
99984+#endif
99985
99986 /*
99987 * Need to buffer aliases during bootup until sysfs becomes
99988@@ -5218,6 +5315,7 @@ struct saved_alias {
99989
99990 static struct saved_alias *alias_list;
99991
99992+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
99993 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99994 {
99995 struct saved_alias *al;
99996@@ -5240,6 +5338,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
99997 alias_list = al;
99998 return 0;
99999 }
100000+#endif
100001
100002 static int __init slab_sysfs_init(void)
100003 {
100004diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
100005index 4cba9c2..b4f9fcc 100644
100006--- a/mm/sparse-vmemmap.c
100007+++ b/mm/sparse-vmemmap.c
100008@@ -131,7 +131,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
100009 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100010 if (!p)
100011 return NULL;
100012- pud_populate(&init_mm, pud, p);
100013+ pud_populate_kernel(&init_mm, pud, p);
100014 }
100015 return pud;
100016 }
100017@@ -143,7 +143,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
100018 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
100019 if (!p)
100020 return NULL;
100021- pgd_populate(&init_mm, pgd, p);
100022+ pgd_populate_kernel(&init_mm, pgd, p);
100023 }
100024 return pgd;
100025 }
100026diff --git a/mm/sparse.c b/mm/sparse.c
100027index d1b48b6..6e8590e 100644
100028--- a/mm/sparse.c
100029+++ b/mm/sparse.c
100030@@ -750,7 +750,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
100031
100032 for (i = 0; i < PAGES_PER_SECTION; i++) {
100033 if (PageHWPoison(&memmap[i])) {
100034- atomic_long_sub(1, &num_poisoned_pages);
100035+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
100036 ClearPageHWPoison(&memmap[i]);
100037 }
100038 }
100039diff --git a/mm/swap.c b/mm/swap.c
100040index 6b2dc38..46b79ba 100644
100041--- a/mm/swap.c
100042+++ b/mm/swap.c
100043@@ -31,6 +31,7 @@
100044 #include <linux/memcontrol.h>
100045 #include <linux/gfp.h>
100046 #include <linux/uio.h>
100047+#include <linux/hugetlb.h>
100048
100049 #include "internal.h"
100050
100051@@ -77,6 +78,8 @@ static void __put_compound_page(struct page *page)
100052
100053 __page_cache_release(page);
100054 dtor = get_compound_page_dtor(page);
100055+ if (!PageHuge(page))
100056+ BUG_ON(dtor != free_compound_page);
100057 (*dtor)(page);
100058 }
100059
100060diff --git a/mm/swapfile.c b/mm/swapfile.c
100061index 8798b2e..348f9dd 100644
100062--- a/mm/swapfile.c
100063+++ b/mm/swapfile.c
100064@@ -84,7 +84,7 @@ static DEFINE_MUTEX(swapon_mutex);
100065
100066 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
100067 /* Activity counter to indicate that a swapon or swapoff has occurred */
100068-static atomic_t proc_poll_event = ATOMIC_INIT(0);
100069+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
100070
100071 static inline unsigned char swap_count(unsigned char ent)
100072 {
100073@@ -1944,7 +1944,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
100074 spin_unlock(&swap_lock);
100075
100076 err = 0;
100077- atomic_inc(&proc_poll_event);
100078+ atomic_inc_unchecked(&proc_poll_event);
100079 wake_up_interruptible(&proc_poll_wait);
100080
100081 out_dput:
100082@@ -1961,8 +1961,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
100083
100084 poll_wait(file, &proc_poll_wait, wait);
100085
100086- if (seq->poll_event != atomic_read(&proc_poll_event)) {
100087- seq->poll_event = atomic_read(&proc_poll_event);
100088+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
100089+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100090 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
100091 }
100092
100093@@ -2060,7 +2060,7 @@ static int swaps_open(struct inode *inode, struct file *file)
100094 return ret;
100095
100096 seq = file->private_data;
100097- seq->poll_event = atomic_read(&proc_poll_event);
100098+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
100099 return 0;
100100 }
100101
100102@@ -2520,7 +2520,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
100103 (frontswap_map) ? "FS" : "");
100104
100105 mutex_unlock(&swapon_mutex);
100106- atomic_inc(&proc_poll_event);
100107+ atomic_inc_unchecked(&proc_poll_event);
100108 wake_up_interruptible(&proc_poll_wait);
100109
100110 if (S_ISREG(inode->i_mode))
100111diff --git a/mm/util.c b/mm/util.c
100112index 093c973..b70a268 100644
100113--- a/mm/util.c
100114+++ b/mm/util.c
100115@@ -202,6 +202,12 @@ done:
100116 void arch_pick_mmap_layout(struct mm_struct *mm)
100117 {
100118 mm->mmap_base = TASK_UNMAPPED_BASE;
100119+
100120+#ifdef CONFIG_PAX_RANDMMAP
100121+ if (mm->pax_flags & MF_PAX_RANDMMAP)
100122+ mm->mmap_base += mm->delta_mmap;
100123+#endif
100124+
100125 mm->get_unmapped_area = arch_get_unmapped_area;
100126 }
100127 #endif
100128@@ -378,6 +384,9 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
100129 if (!mm->arg_end)
100130 goto out_mm; /* Shh! No looking before we're done */
100131
100132+ if (gr_acl_handle_procpidmem(task))
100133+ goto out_mm;
100134+
100135 len = mm->arg_end - mm->arg_start;
100136
100137 if (len > buflen)
100138diff --git a/mm/vmalloc.c b/mm/vmalloc.c
100139index 2b0aa54..b451f74 100644
100140--- a/mm/vmalloc.c
100141+++ b/mm/vmalloc.c
100142@@ -40,6 +40,21 @@ struct vfree_deferred {
100143 };
100144 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
100145
100146+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100147+struct stack_deferred_llist {
100148+ struct llist_head list;
100149+ void *stack;
100150+ void *lowmem_stack;
100151+};
100152+
100153+struct stack_deferred {
100154+ struct stack_deferred_llist list;
100155+ struct work_struct wq;
100156+};
100157+
100158+static DEFINE_PER_CPU(struct stack_deferred, stack_deferred);
100159+#endif
100160+
100161 static void __vunmap(const void *, int);
100162
100163 static void free_work(struct work_struct *w)
100164@@ -47,12 +62,30 @@ static void free_work(struct work_struct *w)
100165 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
100166 struct llist_node *llnode = llist_del_all(&p->list);
100167 while (llnode) {
100168- void *p = llnode;
100169+ void *x = llnode;
100170 llnode = llist_next(llnode);
100171- __vunmap(p, 1);
100172+ __vunmap(x, 1);
100173 }
100174 }
100175
100176+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100177+static void unmap_work(struct work_struct *w)
100178+{
100179+ struct stack_deferred *p = container_of(w, struct stack_deferred, wq);
100180+ struct llist_node *llnode = llist_del_all(&p->list.list);
100181+ while (llnode) {
100182+ struct stack_deferred_llist *x =
100183+ llist_entry((struct llist_head *)llnode,
100184+ struct stack_deferred_llist, list);
100185+ void *stack = ACCESS_ONCE(x->stack);
100186+ void *lowmem_stack = ACCESS_ONCE(x->lowmem_stack);
100187+ llnode = llist_next(llnode);
100188+ __vunmap(stack, 0);
100189+ free_kmem_pages((unsigned long)lowmem_stack, THREAD_SIZE_ORDER);
100190+ }
100191+}
100192+#endif
100193+
100194 /*** Page table manipulation functions ***/
100195
100196 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100197@@ -61,8 +94,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
100198
100199 pte = pte_offset_kernel(pmd, addr);
100200 do {
100201- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100202- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100203+
100204+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100205+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
100206+ BUG_ON(!pte_exec(*pte));
100207+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
100208+ continue;
100209+ }
100210+#endif
100211+
100212+ {
100213+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
100214+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
100215+ }
100216 } while (pte++, addr += PAGE_SIZE, addr != end);
100217 }
100218
100219@@ -122,16 +166,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
100220 pte = pte_alloc_kernel(pmd, addr);
100221 if (!pte)
100222 return -ENOMEM;
100223+
100224+ pax_open_kernel();
100225 do {
100226 struct page *page = pages[*nr];
100227
100228- if (WARN_ON(!pte_none(*pte)))
100229+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100230+ if (pgprot_val(prot) & _PAGE_NX)
100231+#endif
100232+
100233+ if (!pte_none(*pte)) {
100234+ pax_close_kernel();
100235+ WARN_ON(1);
100236 return -EBUSY;
100237- if (WARN_ON(!page))
100238+ }
100239+ if (!page) {
100240+ pax_close_kernel();
100241+ WARN_ON(1);
100242 return -ENOMEM;
100243+ }
100244 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
100245 (*nr)++;
100246 } while (pte++, addr += PAGE_SIZE, addr != end);
100247+ pax_close_kernel();
100248 return 0;
100249 }
100250
100251@@ -141,7 +198,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
100252 pmd_t *pmd;
100253 unsigned long next;
100254
100255- pmd = pmd_alloc(&init_mm, pud, addr);
100256+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
100257 if (!pmd)
100258 return -ENOMEM;
100259 do {
100260@@ -158,7 +215,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
100261 pud_t *pud;
100262 unsigned long next;
100263
100264- pud = pud_alloc(&init_mm, pgd, addr);
100265+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
100266 if (!pud)
100267 return -ENOMEM;
100268 do {
100269@@ -218,6 +275,12 @@ int is_vmalloc_or_module_addr(const void *x)
100270 if (addr >= MODULES_VADDR && addr < MODULES_END)
100271 return 1;
100272 #endif
100273+
100274+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
100275+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
100276+ return 1;
100277+#endif
100278+
100279 return is_vmalloc_addr(x);
100280 }
100281
100282@@ -238,8 +301,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
100283
100284 if (!pgd_none(*pgd)) {
100285 pud_t *pud = pud_offset(pgd, addr);
100286+#ifdef CONFIG_X86
100287+ if (!pud_large(*pud))
100288+#endif
100289 if (!pud_none(*pud)) {
100290 pmd_t *pmd = pmd_offset(pud, addr);
100291+#ifdef CONFIG_X86
100292+ if (!pmd_large(*pmd))
100293+#endif
100294 if (!pmd_none(*pmd)) {
100295 pte_t *ptep, pte;
100296
100297@@ -1183,13 +1252,23 @@ void __init vmalloc_init(void)
100298 for_each_possible_cpu(i) {
100299 struct vmap_block_queue *vbq;
100300 struct vfree_deferred *p;
100301+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100302+ struct stack_deferred *p2;
100303+#endif
100304
100305 vbq = &per_cpu(vmap_block_queue, i);
100306 spin_lock_init(&vbq->lock);
100307 INIT_LIST_HEAD(&vbq->free);
100308+
100309 p = &per_cpu(vfree_deferred, i);
100310 init_llist_head(&p->list);
100311 INIT_WORK(&p->wq, free_work);
100312+
100313+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100314+ p2 = &per_cpu(stack_deferred, i);
100315+ init_llist_head(&p2->list.list);
100316+ INIT_WORK(&p2->wq, unmap_work);
100317+#endif
100318 }
100319
100320 /* Import existing vmlist entries. */
100321@@ -1314,6 +1393,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
100322 struct vm_struct *area;
100323
100324 BUG_ON(in_interrupt());
100325+
100326+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100327+ if (flags & VM_KERNEXEC) {
100328+ if (start != VMALLOC_START || end != VMALLOC_END)
100329+ return NULL;
100330+ start = (unsigned long)MODULES_EXEC_VADDR;
100331+ end = (unsigned long)MODULES_EXEC_END;
100332+ }
100333+#endif
100334+
100335 if (flags & VM_IOREMAP)
100336 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
100337
100338@@ -1519,6 +1608,23 @@ void vunmap(const void *addr)
100339 }
100340 EXPORT_SYMBOL(vunmap);
100341
100342+#ifdef CONFIG_GRKERNSEC_KSTACKOVERFLOW
100343+void unmap_process_stacks(struct task_struct *task)
100344+{
100345+ if (unlikely(in_interrupt())) {
100346+ struct stack_deferred *p = &__get_cpu_var(stack_deferred);
100347+ struct stack_deferred_llist *list = task->stack;
100348+ list->stack = task->stack;
100349+ list->lowmem_stack = task->lowmem_stack;
100350+ if (llist_add((struct llist_node *)&list->list, &p->list.list))
100351+ schedule_work(&p->wq);
100352+ } else {
100353+ __vunmap(task->stack, 0);
100354+ free_kmem_pages((unsigned long)task->lowmem_stack, THREAD_SIZE_ORDER);
100355+ }
100356+}
100357+#endif
100358+
100359 /**
100360 * vmap - map an array of pages into virtually contiguous space
100361 * @pages: array of page pointers
100362@@ -1539,6 +1645,11 @@ void *vmap(struct page **pages, unsigned int count,
100363 if (count > totalram_pages)
100364 return NULL;
100365
100366+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100367+ if (!(pgprot_val(prot) & _PAGE_NX))
100368+ flags |= VM_KERNEXEC;
100369+#endif
100370+
100371 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
100372 __builtin_return_address(0));
100373 if (!area)
100374@@ -1641,6 +1752,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
100375 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
100376 goto fail;
100377
100378+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
100379+ if (!(pgprot_val(prot) & _PAGE_NX))
100380+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
100381+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
100382+ else
100383+#endif
100384+
100385 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
100386 start, end, node, gfp_mask, caller);
100387 if (!area)
100388@@ -1817,10 +1935,9 @@ EXPORT_SYMBOL(vzalloc_node);
100389 * For tight control over page level allocator and protection flags
100390 * use __vmalloc() instead.
100391 */
100392-
100393 void *vmalloc_exec(unsigned long size)
100394 {
100395- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
100396+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
100397 NUMA_NO_NODE, __builtin_return_address(0));
100398 }
100399
100400@@ -2127,6 +2244,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
100401 {
100402 struct vm_struct *area;
100403
100404+ BUG_ON(vma->vm_mirror);
100405+
100406 size = PAGE_ALIGN(size);
100407
100408 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
100409@@ -2609,7 +2728,11 @@ static int s_show(struct seq_file *m, void *p)
100410 v->addr, v->addr + v->size, v->size);
100411
100412 if (v->caller)
100413+#ifdef CONFIG_GRKERNSEC_HIDESYM
100414+ seq_printf(m, " %pK", v->caller);
100415+#else
100416 seq_printf(m, " %pS", v->caller);
100417+#endif
100418
100419 if (v->nr_pages)
100420 seq_printf(m, " pages=%d", v->nr_pages);
100421diff --git a/mm/vmpressure.c b/mm/vmpressure.c
100422index d4042e7..c5afd57 100644
100423--- a/mm/vmpressure.c
100424+++ b/mm/vmpressure.c
100425@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
100426 unsigned long scanned;
100427 unsigned long reclaimed;
100428
100429+ spin_lock(&vmpr->sr_lock);
100430 /*
100431 * Several contexts might be calling vmpressure(), so it is
100432 * possible that the work was rescheduled again before the old
100433@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
100434 * here. No need for any locks here since we don't care if
100435 * vmpr->reclaimed is in sync.
100436 */
100437- if (!vmpr->scanned)
100438+ scanned = vmpr->scanned;
100439+ if (!scanned) {
100440+ spin_unlock(&vmpr->sr_lock);
100441 return;
100442+ }
100443
100444- spin_lock(&vmpr->sr_lock);
100445- scanned = vmpr->scanned;
100446 reclaimed = vmpr->reclaimed;
100447 vmpr->scanned = 0;
100448 vmpr->reclaimed = 0;
100449diff --git a/mm/vmstat.c b/mm/vmstat.c
100450index e9ab104..de275bd 100644
100451--- a/mm/vmstat.c
100452+++ b/mm/vmstat.c
100453@@ -20,6 +20,7 @@
100454 #include <linux/writeback.h>
100455 #include <linux/compaction.h>
100456 #include <linux/mm_inline.h>
100457+#include <linux/grsecurity.h>
100458
100459 #include "internal.h"
100460
100461@@ -79,7 +80,7 @@ void vm_events_fold_cpu(int cpu)
100462 *
100463 * vm_stat contains the global counters
100464 */
100465-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100466+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
100467 EXPORT_SYMBOL(vm_stat);
100468
100469 #ifdef CONFIG_SMP
100470@@ -425,7 +426,7 @@ static inline void fold_diff(int *diff)
100471
100472 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100473 if (diff[i])
100474- atomic_long_add(diff[i], &vm_stat[i]);
100475+ atomic_long_add_unchecked(diff[i], &vm_stat[i]);
100476 }
100477
100478 /*
100479@@ -457,7 +458,7 @@ static void refresh_cpu_vm_stats(void)
100480 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
100481 if (v) {
100482
100483- atomic_long_add(v, &zone->vm_stat[i]);
100484+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100485 global_diff[i] += v;
100486 #ifdef CONFIG_NUMA
100487 /* 3 seconds idle till flush */
100488@@ -519,7 +520,7 @@ void cpu_vm_stats_fold(int cpu)
100489
100490 v = p->vm_stat_diff[i];
100491 p->vm_stat_diff[i] = 0;
100492- atomic_long_add(v, &zone->vm_stat[i]);
100493+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100494 global_diff[i] += v;
100495 }
100496 }
100497@@ -539,8 +540,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
100498 if (pset->vm_stat_diff[i]) {
100499 int v = pset->vm_stat_diff[i];
100500 pset->vm_stat_diff[i] = 0;
100501- atomic_long_add(v, &zone->vm_stat[i]);
100502- atomic_long_add(v, &vm_stat[i]);
100503+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
100504+ atomic_long_add_unchecked(v, &vm_stat[i]);
100505 }
100506 }
100507 #endif
100508@@ -1163,10 +1164,22 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
100509 stat_items_size += sizeof(struct vm_event_state);
100510 #endif
100511
100512- v = kmalloc(stat_items_size, GFP_KERNEL);
100513+ v = kzalloc(stat_items_size, GFP_KERNEL);
100514 m->private = v;
100515 if (!v)
100516 return ERR_PTR(-ENOMEM);
100517+
100518+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100519+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
100520+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)
100521+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
100522+ && !in_group_p(grsec_proc_gid)
100523+#endif
100524+ )
100525+ return (unsigned long *)m->private + *pos;
100526+#endif
100527+#endif
100528+
100529 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
100530 v[i] = global_page_state(i);
100531 v += NR_VM_ZONE_STAT_ITEMS;
100532@@ -1315,10 +1328,16 @@ static int __init setup_vmstat(void)
100533 cpu_notifier_register_done();
100534 #endif
100535 #ifdef CONFIG_PROC_FS
100536- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
100537- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
100538- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100539- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
100540+ {
100541+ mode_t gr_mode = S_IRUGO;
100542+#ifdef CONFIG_GRKERNSEC_PROC_ADD
100543+ gr_mode = S_IRUSR;
100544+#endif
100545+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
100546+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
100547+ proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
100548+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
100549+ }
100550 #endif
100551 return 0;
100552 }
100553diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
100554index 64c6bed..b79a5de 100644
100555--- a/net/8021q/vlan.c
100556+++ b/net/8021q/vlan.c
100557@@ -481,7 +481,7 @@ out:
100558 return NOTIFY_DONE;
100559 }
100560
100561-static struct notifier_block vlan_notifier_block __read_mostly = {
100562+static struct notifier_block vlan_notifier_block = {
100563 .notifier_call = vlan_device_event,
100564 };
100565
100566@@ -556,8 +556,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
100567 err = -EPERM;
100568 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
100569 break;
100570- if ((args.u.name_type >= 0) &&
100571- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
100572+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
100573 struct vlan_net *vn;
100574
100575 vn = net_generic(net, vlan_net_id);
100576diff --git a/net/9p/client.c b/net/9p/client.c
100577index e86a9bea..e91f70e 100644
100578--- a/net/9p/client.c
100579+++ b/net/9p/client.c
100580@@ -596,7 +596,7 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
100581 len - inline_len);
100582 } else {
100583 err = copy_from_user(ename + inline_len,
100584- uidata, len - inline_len);
100585+ (char __force_user *)uidata, len - inline_len);
100586 if (err) {
100587 err = -EFAULT;
100588 goto out_err;
100589@@ -1570,7 +1570,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
100590 kernel_buf = 1;
100591 indata = data;
100592 } else
100593- indata = (__force char *)udata;
100594+ indata = (__force_kernel char *)udata;
100595 /*
100596 * response header len is 11
100597 * PDU Header(7) + IO Size (4)
100598@@ -1645,7 +1645,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
100599 kernel_buf = 1;
100600 odata = data;
100601 } else
100602- odata = (char *)udata;
100603+ odata = (char __force_kernel *)udata;
100604 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
100605 P9_ZC_HDR_SZ, kernel_buf, "dqd",
100606 fid->fid, offset, rsize);
100607diff --git a/net/9p/mod.c b/net/9p/mod.c
100608index 6ab36ae..6f1841b 100644
100609--- a/net/9p/mod.c
100610+++ b/net/9p/mod.c
100611@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
100612 void v9fs_register_trans(struct p9_trans_module *m)
100613 {
100614 spin_lock(&v9fs_trans_lock);
100615- list_add_tail(&m->list, &v9fs_trans_list);
100616+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
100617 spin_unlock(&v9fs_trans_lock);
100618 }
100619 EXPORT_SYMBOL(v9fs_register_trans);
100620@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
100621 void v9fs_unregister_trans(struct p9_trans_module *m)
100622 {
100623 spin_lock(&v9fs_trans_lock);
100624- list_del_init(&m->list);
100625+ pax_list_del_init((struct list_head *)&m->list);
100626 spin_unlock(&v9fs_trans_lock);
100627 }
100628 EXPORT_SYMBOL(v9fs_unregister_trans);
100629diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
100630index 80d08f6..de63fd1 100644
100631--- a/net/9p/trans_fd.c
100632+++ b/net/9p/trans_fd.c
100633@@ -428,7 +428,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
100634 oldfs = get_fs();
100635 set_fs(get_ds());
100636 /* The cast to a user pointer is valid due to the set_fs() */
100637- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
100638+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
100639 set_fs(oldfs);
100640
100641 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
100642diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
100643index af46bc4..f9adfcd 100644
100644--- a/net/appletalk/atalk_proc.c
100645+++ b/net/appletalk/atalk_proc.c
100646@@ -256,7 +256,7 @@ int __init atalk_proc_init(void)
100647 struct proc_dir_entry *p;
100648 int rc = -ENOMEM;
100649
100650- atalk_proc_dir = proc_mkdir("atalk", init_net.proc_net);
100651+ atalk_proc_dir = proc_mkdir_restrict("atalk", init_net.proc_net);
100652 if (!atalk_proc_dir)
100653 goto out;
100654
100655diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
100656index 876fbe8..8bbea9f 100644
100657--- a/net/atm/atm_misc.c
100658+++ b/net/atm/atm_misc.c
100659@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
100660 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
100661 return 1;
100662 atm_return(vcc, truesize);
100663- atomic_inc(&vcc->stats->rx_drop);
100664+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100665 return 0;
100666 }
100667 EXPORT_SYMBOL(atm_charge);
100668@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
100669 }
100670 }
100671 atm_return(vcc, guess);
100672- atomic_inc(&vcc->stats->rx_drop);
100673+ atomic_inc_unchecked(&vcc->stats->rx_drop);
100674 return NULL;
100675 }
100676 EXPORT_SYMBOL(atm_alloc_charge);
100677@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
100678
100679 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100680 {
100681-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100682+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100683 __SONET_ITEMS
100684 #undef __HANDLE_ITEM
100685 }
100686@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
100687
100688 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
100689 {
100690-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100691+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
100692 __SONET_ITEMS
100693 #undef __HANDLE_ITEM
100694 }
100695diff --git a/net/atm/lec.c b/net/atm/lec.c
100696index 4b98f89..5a2f6cb 100644
100697--- a/net/atm/lec.c
100698+++ b/net/atm/lec.c
100699@@ -111,9 +111,9 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
100700 }
100701
100702 static struct lane2_ops lane2_ops = {
100703- lane2_resolve, /* resolve, spec 3.1.3 */
100704- lane2_associate_req, /* associate_req, spec 3.1.4 */
100705- NULL /* associate indicator, spec 3.1.5 */
100706+ .resolve = lane2_resolve,
100707+ .associate_req = lane2_associate_req,
100708+ .associate_indicator = NULL
100709 };
100710
100711 static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
100712diff --git a/net/atm/lec.h b/net/atm/lec.h
100713index 4149db1..f2ab682 100644
100714--- a/net/atm/lec.h
100715+++ b/net/atm/lec.h
100716@@ -48,7 +48,7 @@ struct lane2_ops {
100717 const u8 *tlvs, u32 sizeoftlvs);
100718 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
100719 const u8 *tlvs, u32 sizeoftlvs);
100720-};
100721+} __no_const;
100722
100723 /*
100724 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
100725diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
100726index d1b2d9a..d549f7f 100644
100727--- a/net/atm/mpoa_caches.c
100728+++ b/net/atm/mpoa_caches.c
100729@@ -535,30 +535,30 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
100730
100731
100732 static struct in_cache_ops ingress_ops = {
100733- in_cache_add_entry, /* add_entry */
100734- in_cache_get, /* get */
100735- in_cache_get_with_mask, /* get_with_mask */
100736- in_cache_get_by_vcc, /* get_by_vcc */
100737- in_cache_put, /* put */
100738- in_cache_remove_entry, /* remove_entry */
100739- cache_hit, /* cache_hit */
100740- clear_count_and_expired, /* clear_count */
100741- check_resolving_entries, /* check_resolving */
100742- refresh_entries, /* refresh */
100743- in_destroy_cache /* destroy_cache */
100744+ .add_entry = in_cache_add_entry,
100745+ .get = in_cache_get,
100746+ .get_with_mask = in_cache_get_with_mask,
100747+ .get_by_vcc = in_cache_get_by_vcc,
100748+ .put = in_cache_put,
100749+ .remove_entry = in_cache_remove_entry,
100750+ .cache_hit = cache_hit,
100751+ .clear_count = clear_count_and_expired,
100752+ .check_resolving = check_resolving_entries,
100753+ .refresh = refresh_entries,
100754+ .destroy_cache = in_destroy_cache
100755 };
100756
100757 static struct eg_cache_ops egress_ops = {
100758- eg_cache_add_entry, /* add_entry */
100759- eg_cache_get_by_cache_id, /* get_by_cache_id */
100760- eg_cache_get_by_tag, /* get_by_tag */
100761- eg_cache_get_by_vcc, /* get_by_vcc */
100762- eg_cache_get_by_src_ip, /* get_by_src_ip */
100763- eg_cache_put, /* put */
100764- eg_cache_remove_entry, /* remove_entry */
100765- update_eg_cache_entry, /* update */
100766- clear_expired, /* clear_expired */
100767- eg_destroy_cache /* destroy_cache */
100768+ .add_entry = eg_cache_add_entry,
100769+ .get_by_cache_id = eg_cache_get_by_cache_id,
100770+ .get_by_tag = eg_cache_get_by_tag,
100771+ .get_by_vcc = eg_cache_get_by_vcc,
100772+ .get_by_src_ip = eg_cache_get_by_src_ip,
100773+ .put = eg_cache_put,
100774+ .remove_entry = eg_cache_remove_entry,
100775+ .update = update_eg_cache_entry,
100776+ .clear_expired = clear_expired,
100777+ .destroy_cache = eg_destroy_cache
100778 };
100779
100780
100781diff --git a/net/atm/proc.c b/net/atm/proc.c
100782index bbb6461..cf04016 100644
100783--- a/net/atm/proc.c
100784+++ b/net/atm/proc.c
100785@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
100786 const struct k_atm_aal_stats *stats)
100787 {
100788 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
100789- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
100790- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
100791- atomic_read(&stats->rx_drop));
100792+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
100793+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
100794+ atomic_read_unchecked(&stats->rx_drop));
100795 }
100796
100797 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
100798diff --git a/net/atm/resources.c b/net/atm/resources.c
100799index 0447d5d..3cf4728 100644
100800--- a/net/atm/resources.c
100801+++ b/net/atm/resources.c
100802@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
100803 static void copy_aal_stats(struct k_atm_aal_stats *from,
100804 struct atm_aal_stats *to)
100805 {
100806-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
100807+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
100808 __AAL_STAT_ITEMS
100809 #undef __HANDLE_ITEM
100810 }
100811@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
100812 static void subtract_aal_stats(struct k_atm_aal_stats *from,
100813 struct atm_aal_stats *to)
100814 {
100815-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
100816+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
100817 __AAL_STAT_ITEMS
100818 #undef __HANDLE_ITEM
100819 }
100820diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
100821index 919a5ce..cc6b444 100644
100822--- a/net/ax25/sysctl_net_ax25.c
100823+++ b/net/ax25/sysctl_net_ax25.c
100824@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
100825 {
100826 char path[sizeof("net/ax25/") + IFNAMSIZ];
100827 int k;
100828- struct ctl_table *table;
100829+ ctl_table_no_const *table;
100830
100831 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
100832 if (!table)
100833diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
100834index 1e80539..676c37a 100644
100835--- a/net/batman-adv/bat_iv_ogm.c
100836+++ b/net/batman-adv/bat_iv_ogm.c
100837@@ -313,7 +313,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
100838
100839 /* randomize initial seqno to avoid collision */
100840 get_random_bytes(&random_seqno, sizeof(random_seqno));
100841- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100842+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
100843
100844 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
100845 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
100846@@ -918,9 +918,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
100847 batadv_ogm_packet->tvlv_len = htons(tvlv_len);
100848
100849 /* change sequence number to network order */
100850- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
100851+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
100852 batadv_ogm_packet->seqno = htonl(seqno);
100853- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
100854+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
100855
100856 batadv_iv_ogm_slide_own_bcast_window(hard_iface);
100857
100858@@ -1597,7 +1597,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
100859 return;
100860
100861 /* could be changed by schedule_own_packet() */
100862- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
100863+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
100864
100865 if (ogm_packet->flags & BATADV_DIRECTLINK)
100866 has_directlink_flag = true;
100867diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
100868index fc1835c..eead856 100644
100869--- a/net/batman-adv/fragmentation.c
100870+++ b/net/batman-adv/fragmentation.c
100871@@ -450,7 +450,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
100872 frag_header.packet_type = BATADV_UNICAST_FRAG;
100873 frag_header.version = BATADV_COMPAT_VERSION;
100874 frag_header.ttl = BATADV_TTL;
100875- frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
100876+ frag_header.seqno = htons(atomic_inc_return_unchecked(&bat_priv->frag_seqno));
100877 frag_header.reserved = 0;
100878 frag_header.no = 0;
100879 frag_header.total_size = htons(skb->len);
100880diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
100881index 5467955..30cc771 100644
100882--- a/net/batman-adv/soft-interface.c
100883+++ b/net/batman-adv/soft-interface.c
100884@@ -296,7 +296,7 @@ send:
100885 primary_if->net_dev->dev_addr);
100886
100887 /* set broadcast sequence number */
100888- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
100889+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
100890 bcast_packet->seqno = htonl(seqno);
100891
100892 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
100893@@ -761,7 +761,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100894 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
100895
100896 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
100897- atomic_set(&bat_priv->bcast_seqno, 1);
100898+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
100899 atomic_set(&bat_priv->tt.vn, 0);
100900 atomic_set(&bat_priv->tt.local_changes, 0);
100901 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
100902@@ -775,7 +775,7 @@ static int batadv_softif_init_late(struct net_device *dev)
100903
100904 /* randomize initial seqno to avoid collision */
100905 get_random_bytes(&random_seqno, sizeof(random_seqno));
100906- atomic_set(&bat_priv->frag_seqno, random_seqno);
100907+ atomic_set_unchecked(&bat_priv->frag_seqno, random_seqno);
100908
100909 bat_priv->primary_if = NULL;
100910 bat_priv->num_ifaces = 0;
100911diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
100912index 8854c05..ee5d5497 100644
100913--- a/net/batman-adv/types.h
100914+++ b/net/batman-adv/types.h
100915@@ -67,7 +67,7 @@ enum batadv_dhcp_recipient {
100916 struct batadv_hard_iface_bat_iv {
100917 unsigned char *ogm_buff;
100918 int ogm_buff_len;
100919- atomic_t ogm_seqno;
100920+ atomic_unchecked_t ogm_seqno;
100921 };
100922
100923 /**
100924@@ -768,7 +768,7 @@ struct batadv_priv {
100925 atomic_t bonding;
100926 atomic_t fragmentation;
100927 atomic_t packet_size_max;
100928- atomic_t frag_seqno;
100929+ atomic_unchecked_t frag_seqno;
100930 #ifdef CONFIG_BATMAN_ADV_BLA
100931 atomic_t bridge_loop_avoidance;
100932 #endif
100933@@ -787,7 +787,7 @@ struct batadv_priv {
100934 #endif
100935 uint32_t isolation_mark;
100936 uint32_t isolation_mark_mask;
100937- atomic_t bcast_seqno;
100938+ atomic_unchecked_t bcast_seqno;
100939 atomic_t bcast_queue_left;
100940 atomic_t batman_queue_left;
100941 char num_ifaces;
100942diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
100943index 115f149..f0ba286 100644
100944--- a/net/bluetooth/hci_sock.c
100945+++ b/net/bluetooth/hci_sock.c
100946@@ -1067,7 +1067,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
100947 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
100948 }
100949
100950- len = min_t(unsigned int, len, sizeof(uf));
100951+ len = min((size_t)len, sizeof(uf));
100952 if (copy_from_user(&uf, optval, len)) {
100953 err = -EFAULT;
100954 break;
100955diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
100956index 14ca8ae..262d49a 100644
100957--- a/net/bluetooth/l2cap_core.c
100958+++ b/net/bluetooth/l2cap_core.c
100959@@ -3565,8 +3565,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
100960 break;
100961
100962 case L2CAP_CONF_RFC:
100963- if (olen == sizeof(rfc))
100964- memcpy(&rfc, (void *)val, olen);
100965+ if (olen != sizeof(rfc))
100966+ break;
100967+
100968+ memcpy(&rfc, (void *)val, olen);
100969
100970 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
100971 rfc.mode != chan->mode)
100972diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
100973index 1884f72..b3b71f9 100644
100974--- a/net/bluetooth/l2cap_sock.c
100975+++ b/net/bluetooth/l2cap_sock.c
100976@@ -629,7 +629,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100977 struct sock *sk = sock->sk;
100978 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
100979 struct l2cap_options opts;
100980- int len, err = 0;
100981+ int err = 0;
100982+ size_t len = optlen;
100983 u32 opt;
100984
100985 BT_DBG("sk %p", sk);
100986@@ -656,7 +657,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
100987 opts.max_tx = chan->max_tx;
100988 opts.txwin_size = chan->tx_win;
100989
100990- len = min_t(unsigned int, sizeof(opts), optlen);
100991+ len = min(sizeof(opts), len);
100992 if (copy_from_user((char *) &opts, optval, len)) {
100993 err = -EFAULT;
100994 break;
100995@@ -743,7 +744,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
100996 struct bt_security sec;
100997 struct bt_power pwr;
100998 struct l2cap_conn *conn;
100999- int len, err = 0;
101000+ int err = 0;
101001+ size_t len = optlen;
101002 u32 opt;
101003
101004 BT_DBG("sk %p", sk);
101005@@ -767,7 +769,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101006
101007 sec.level = BT_SECURITY_LOW;
101008
101009- len = min_t(unsigned int, sizeof(sec), optlen);
101010+ len = min(sizeof(sec), len);
101011 if (copy_from_user((char *) &sec, optval, len)) {
101012 err = -EFAULT;
101013 break;
101014@@ -862,7 +864,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
101015
101016 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
101017
101018- len = min_t(unsigned int, sizeof(pwr), optlen);
101019+ len = min(sizeof(pwr), len);
101020 if (copy_from_user((char *) &pwr, optval, len)) {
101021 err = -EFAULT;
101022 break;
101023diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
101024index 8bbbb5e..6fc0950 100644
101025--- a/net/bluetooth/rfcomm/sock.c
101026+++ b/net/bluetooth/rfcomm/sock.c
101027@@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101028 struct sock *sk = sock->sk;
101029 struct bt_security sec;
101030 int err = 0;
101031- size_t len;
101032+ size_t len = optlen;
101033 u32 opt;
101034
101035 BT_DBG("sk %p", sk);
101036@@ -709,7 +709,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
101037
101038 sec.level = BT_SECURITY_LOW;
101039
101040- len = min_t(unsigned int, sizeof(sec), optlen);
101041+ len = min(sizeof(sec), len);
101042 if (copy_from_user((char *) &sec, optval, len)) {
101043 err = -EFAULT;
101044 break;
101045diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
101046index 8e385a0..a5bdd8e 100644
101047--- a/net/bluetooth/rfcomm/tty.c
101048+++ b/net/bluetooth/rfcomm/tty.c
101049@@ -752,7 +752,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
101050 BT_DBG("tty %p id %d", tty, tty->index);
101051
101052 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
101053- dev->channel, dev->port.count);
101054+ dev->channel, atomic_read(&dev->port.count));
101055
101056 err = tty_port_open(&dev->port, tty, filp);
101057 if (err)
101058@@ -775,7 +775,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
101059 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
101060
101061 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
101062- dev->port.count);
101063+ atomic_read(&dev->port.count));
101064
101065 tty_port_close(&dev->port, tty, filp);
101066 }
101067diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
101068index 6d69631..b8fdc85 100644
101069--- a/net/bridge/netfilter/ebtables.c
101070+++ b/net/bridge/netfilter/ebtables.c
101071@@ -1518,7 +1518,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101072 tmp.valid_hooks = t->table->valid_hooks;
101073 }
101074 mutex_unlock(&ebt_mutex);
101075- if (copy_to_user(user, &tmp, *len) != 0) {
101076+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101077 BUGPRINT("c2u Didn't work\n");
101078 ret = -EFAULT;
101079 break;
101080@@ -2324,7 +2324,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101081 goto out;
101082 tmp.valid_hooks = t->valid_hooks;
101083
101084- if (copy_to_user(user, &tmp, *len) != 0) {
101085+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101086 ret = -EFAULT;
101087 break;
101088 }
101089@@ -2335,7 +2335,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
101090 tmp.entries_size = t->table->entries_size;
101091 tmp.valid_hooks = t->table->valid_hooks;
101092
101093- if (copy_to_user(user, &tmp, *len) != 0) {
101094+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
101095 ret = -EFAULT;
101096 break;
101097 }
101098diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
101099index f5afda1..dcf770a 100644
101100--- a/net/caif/cfctrl.c
101101+++ b/net/caif/cfctrl.c
101102@@ -10,6 +10,7 @@
101103 #include <linux/spinlock.h>
101104 #include <linux/slab.h>
101105 #include <linux/pkt_sched.h>
101106+#include <linux/sched.h>
101107 #include <net/caif/caif_layer.h>
101108 #include <net/caif/cfpkt.h>
101109 #include <net/caif/cfctrl.h>
101110@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
101111 memset(&dev_info, 0, sizeof(dev_info));
101112 dev_info.id = 0xff;
101113 cfsrvl_init(&this->serv, 0, &dev_info, false);
101114- atomic_set(&this->req_seq_no, 1);
101115- atomic_set(&this->rsp_seq_no, 1);
101116+ atomic_set_unchecked(&this->req_seq_no, 1);
101117+ atomic_set_unchecked(&this->rsp_seq_no, 1);
101118 this->serv.layer.receive = cfctrl_recv;
101119 sprintf(this->serv.layer.name, "ctrl");
101120 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
101121@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
101122 struct cfctrl_request_info *req)
101123 {
101124 spin_lock_bh(&ctrl->info_list_lock);
101125- atomic_inc(&ctrl->req_seq_no);
101126- req->sequence_no = atomic_read(&ctrl->req_seq_no);
101127+ atomic_inc_unchecked(&ctrl->req_seq_no);
101128+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
101129 list_add_tail(&req->list, &ctrl->list);
101130 spin_unlock_bh(&ctrl->info_list_lock);
101131 }
101132@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
101133 if (p != first)
101134 pr_warn("Requests are not received in order\n");
101135
101136- atomic_set(&ctrl->rsp_seq_no,
101137+ atomic_set_unchecked(&ctrl->rsp_seq_no,
101138 p->sequence_no);
101139 list_del(&p->list);
101140 goto out;
101141diff --git a/net/can/af_can.c b/net/can/af_can.c
101142index ce82337..5d17b4d 100644
101143--- a/net/can/af_can.c
101144+++ b/net/can/af_can.c
101145@@ -884,7 +884,7 @@ static const struct net_proto_family can_family_ops = {
101146 };
101147
101148 /* notifier block for netdevice event */
101149-static struct notifier_block can_netdev_notifier __read_mostly = {
101150+static struct notifier_block can_netdev_notifier = {
101151 .notifier_call = can_notifier,
101152 };
101153
101154diff --git a/net/can/bcm.c b/net/can/bcm.c
101155index dcb75c0..24b1b43 100644
101156--- a/net/can/bcm.c
101157+++ b/net/can/bcm.c
101158@@ -1624,7 +1624,7 @@ static int __init bcm_module_init(void)
101159 }
101160
101161 /* create /proc/net/can-bcm directory */
101162- proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
101163+ proc_dir = proc_mkdir_restrict("can-bcm", init_net.proc_net);
101164 return 0;
101165 }
101166
101167diff --git a/net/can/gw.c b/net/can/gw.c
101168index 050a211..bb9fe33 100644
101169--- a/net/can/gw.c
101170+++ b/net/can/gw.c
101171@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
101172 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
101173
101174 static HLIST_HEAD(cgw_list);
101175-static struct notifier_block notifier;
101176
101177 static struct kmem_cache *cgw_cache __read_mostly;
101178
101179@@ -947,6 +946,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
101180 return err;
101181 }
101182
101183+static struct notifier_block notifier = {
101184+ .notifier_call = cgw_notifier
101185+};
101186+
101187 static __init int cgw_module_init(void)
101188 {
101189 /* sanitize given module parameter */
101190@@ -962,7 +965,6 @@ static __init int cgw_module_init(void)
101191 return -ENOMEM;
101192
101193 /* set notifier */
101194- notifier.notifier_call = cgw_notifier;
101195 register_netdevice_notifier(&notifier);
101196
101197 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
101198diff --git a/net/can/proc.c b/net/can/proc.c
101199index 1a19b98..df2b4ec 100644
101200--- a/net/can/proc.c
101201+++ b/net/can/proc.c
101202@@ -514,7 +514,7 @@ static void can_remove_proc_readentry(const char *name)
101203 void can_init_proc(void)
101204 {
101205 /* create /proc/net/can directory */
101206- can_dir = proc_mkdir("can", init_net.proc_net);
101207+ can_dir = proc_mkdir_restrict("can", init_net.proc_net);
101208
101209 if (!can_dir) {
101210 printk(KERN_INFO "can: failed to create /proc/net/can . "
101211diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
101212index 9f02369..e6160e9 100644
101213--- a/net/ceph/messenger.c
101214+++ b/net/ceph/messenger.c
101215@@ -188,7 +188,7 @@ static void con_fault(struct ceph_connection *con);
101216 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
101217
101218 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
101219-static atomic_t addr_str_seq = ATOMIC_INIT(0);
101220+static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
101221
101222 static struct page *zero_page; /* used in certain error cases */
101223
101224@@ -199,7 +199,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
101225 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
101226 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
101227
101228- i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101229+ i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
101230 s = addr_str[i];
101231
101232 switch (ss->ss_family) {
101233diff --git a/net/compat.c b/net/compat.c
101234index bc8aeef..f9c070c 100644
101235--- a/net/compat.c
101236+++ b/net/compat.c
101237@@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
101238 return -EFAULT;
101239 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
101240 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
101241- kmsg->msg_name = compat_ptr(tmp1);
101242- kmsg->msg_iov = compat_ptr(tmp2);
101243- kmsg->msg_control = compat_ptr(tmp3);
101244+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
101245+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
101246+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
101247 return 0;
101248 }
101249
101250@@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
101251
101252 if (kern_msg->msg_name && kern_msg->msg_namelen) {
101253 if (mode == VERIFY_READ) {
101254- int err = move_addr_to_kernel(kern_msg->msg_name,
101255+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
101256 kern_msg->msg_namelen,
101257 kern_address);
101258 if (err < 0)
101259@@ -100,7 +100,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
101260 }
101261
101262 tot_len = iov_from_user_compat_to_kern(kern_iov,
101263- (struct compat_iovec __user *)kern_msg->msg_iov,
101264+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
101265 kern_msg->msg_iovlen);
101266 if (tot_len >= 0)
101267 kern_msg->msg_iov = kern_iov;
101268@@ -120,20 +120,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
101269
101270 #define CMSG_COMPAT_FIRSTHDR(msg) \
101271 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
101272- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
101273+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
101274 (struct compat_cmsghdr __user *)NULL)
101275
101276 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
101277 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
101278 (ucmlen) <= (unsigned long) \
101279 ((mhdr)->msg_controllen - \
101280- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
101281+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
101282
101283 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
101284 struct compat_cmsghdr __user *cmsg, int cmsg_len)
101285 {
101286 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
101287- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
101288+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
101289 msg->msg_controllen)
101290 return NULL;
101291 return (struct compat_cmsghdr __user *)ptr;
101292@@ -223,7 +223,7 @@ Efault:
101293
101294 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
101295 {
101296- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101297+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101298 struct compat_cmsghdr cmhdr;
101299 struct compat_timeval ctv;
101300 struct compat_timespec cts[3];
101301@@ -279,7 +279,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
101302
101303 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
101304 {
101305- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
101306+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
101307 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
101308 int fdnum = scm->fp->count;
101309 struct file **fp = scm->fp->fp;
101310@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
101311 return -EFAULT;
101312 old_fs = get_fs();
101313 set_fs(KERNEL_DS);
101314- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
101315+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
101316 set_fs(old_fs);
101317
101318 return err;
101319@@ -428,7 +428,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
101320 len = sizeof(ktime);
101321 old_fs = get_fs();
101322 set_fs(KERNEL_DS);
101323- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
101324+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
101325 set_fs(old_fs);
101326
101327 if (!err) {
101328@@ -571,7 +571,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101329 case MCAST_JOIN_GROUP:
101330 case MCAST_LEAVE_GROUP:
101331 {
101332- struct compat_group_req __user *gr32 = (void *)optval;
101333+ struct compat_group_req __user *gr32 = (void __user *)optval;
101334 struct group_req __user *kgr =
101335 compat_alloc_user_space(sizeof(struct group_req));
101336 u32 interface;
101337@@ -592,7 +592,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101338 case MCAST_BLOCK_SOURCE:
101339 case MCAST_UNBLOCK_SOURCE:
101340 {
101341- struct compat_group_source_req __user *gsr32 = (void *)optval;
101342+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
101343 struct group_source_req __user *kgsr = compat_alloc_user_space(
101344 sizeof(struct group_source_req));
101345 u32 interface;
101346@@ -613,7 +613,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
101347 }
101348 case MCAST_MSFILTER:
101349 {
101350- struct compat_group_filter __user *gf32 = (void *)optval;
101351+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101352 struct group_filter __user *kgf;
101353 u32 interface, fmode, numsrc;
101354
101355@@ -651,7 +651,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
101356 char __user *optval, int __user *optlen,
101357 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
101358 {
101359- struct compat_group_filter __user *gf32 = (void *)optval;
101360+ struct compat_group_filter __user *gf32 = (void __user *)optval;
101361 struct group_filter __user *kgf;
101362 int __user *koptlen;
101363 u32 interface, fmode, numsrc;
101364@@ -804,7 +804,7 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
101365
101366 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
101367 return -EINVAL;
101368- if (copy_from_user(a, args, nas[call]))
101369+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
101370 return -EFAULT;
101371 a0 = a[0];
101372 a1 = a[1];
101373diff --git a/net/core/datagram.c b/net/core/datagram.c
101374index fdbc9a8..cd6972c 100644
101375--- a/net/core/datagram.c
101376+++ b/net/core/datagram.c
101377@@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
101378 }
101379
101380 kfree_skb(skb);
101381- atomic_inc(&sk->sk_drops);
101382+ atomic_inc_unchecked(&sk->sk_drops);
101383 sk_mem_reclaim_partial(sk);
101384
101385 return err;
101386diff --git a/net/core/dev.c b/net/core/dev.c
101387index cf8a95f..2837211 100644
101388--- a/net/core/dev.c
101389+++ b/net/core/dev.c
101390@@ -1683,14 +1683,14 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
101391 {
101392 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
101393 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
101394- atomic_long_inc(&dev->rx_dropped);
101395+ atomic_long_inc_unchecked(&dev->rx_dropped);
101396 kfree_skb(skb);
101397 return NET_RX_DROP;
101398 }
101399 }
101400
101401 if (unlikely(!is_skb_forwardable(dev, skb))) {
101402- atomic_long_inc(&dev->rx_dropped);
101403+ atomic_long_inc_unchecked(&dev->rx_dropped);
101404 kfree_skb(skb);
101405 return NET_RX_DROP;
101406 }
101407@@ -2487,7 +2487,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
101408
101409 struct dev_gso_cb {
101410 void (*destructor)(struct sk_buff *skb);
101411-};
101412+} __no_const;
101413
101414 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
101415
101416@@ -2952,7 +2952,7 @@ recursion_alert:
101417 rc = -ENETDOWN;
101418 rcu_read_unlock_bh();
101419
101420- atomic_long_inc(&dev->tx_dropped);
101421+ atomic_long_inc_unchecked(&dev->tx_dropped);
101422 kfree_skb(skb);
101423 return rc;
101424 out:
101425@@ -3296,7 +3296,7 @@ enqueue:
101426
101427 local_irq_restore(flags);
101428
101429- atomic_long_inc(&skb->dev->rx_dropped);
101430+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101431 kfree_skb(skb);
101432 return NET_RX_DROP;
101433 }
101434@@ -3373,7 +3373,7 @@ int netif_rx_ni(struct sk_buff *skb)
101435 }
101436 EXPORT_SYMBOL(netif_rx_ni);
101437
101438-static void net_tx_action(struct softirq_action *h)
101439+static __latent_entropy void net_tx_action(void)
101440 {
101441 struct softnet_data *sd = &__get_cpu_var(softnet_data);
101442
101443@@ -3706,7 +3706,7 @@ ncls:
101444 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
101445 } else {
101446 drop:
101447- atomic_long_inc(&skb->dev->rx_dropped);
101448+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
101449 kfree_skb(skb);
101450 /* Jamal, now you will not able to escape explaining
101451 * me how you were going to use this. :-)
101452@@ -4426,7 +4426,7 @@ void netif_napi_del(struct napi_struct *napi)
101453 }
101454 EXPORT_SYMBOL(netif_napi_del);
101455
101456-static void net_rx_action(struct softirq_action *h)
101457+static __latent_entropy void net_rx_action(void)
101458 {
101459 struct softnet_data *sd = &__get_cpu_var(softnet_data);
101460 unsigned long time_limit = jiffies + 2;
101461@@ -6480,8 +6480,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
101462 } else {
101463 netdev_stats_to_stats64(storage, &dev->stats);
101464 }
101465- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
101466- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
101467+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
101468+ storage->tx_dropped += atomic_long_read_unchecked(&dev->tx_dropped);
101469 return storage;
101470 }
101471 EXPORT_SYMBOL(dev_get_stats);
101472diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
101473index cf999e0..c59a9754 100644
101474--- a/net/core/dev_ioctl.c
101475+++ b/net/core/dev_ioctl.c
101476@@ -366,9 +366,13 @@ void dev_load(struct net *net, const char *name)
101477 if (no_module && capable(CAP_NET_ADMIN))
101478 no_module = request_module("netdev-%s", name);
101479 if (no_module && capable(CAP_SYS_MODULE)) {
101480+#ifdef CONFIG_GRKERNSEC_MODHARDEN
101481+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
101482+#else
101483 if (!request_module("%s", name))
101484 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
101485 name);
101486+#endif
101487 }
101488 }
101489 EXPORT_SYMBOL(dev_load);
101490diff --git a/net/core/filter.c b/net/core/filter.c
101491index d814b8a..b5ab778 100644
101492--- a/net/core/filter.c
101493+++ b/net/core/filter.c
101494@@ -559,7 +559,11 @@ do_pass:
101495
101496 /* Unkown instruction. */
101497 default:
101498- goto err;
101499+ WARN(1, KERN_ALERT "Unknown sock filter code:%u jt:%u tf:%u k:%u\n",
101500+ fp->code, fp->jt, fp->jf, fp->k);
101501+ kfree(addrs);
101502+ BUG();
101503+ return -EINVAL;
101504 }
101505
101506 insn++;
101507@@ -606,7 +610,7 @@ static int check_load_and_stores(const struct sock_filter *filter, int flen)
101508 u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
101509 int pc, ret = 0;
101510
101511- BUILD_BUG_ON(BPF_MEMWORDS > 16);
101512+ BUILD_BUG_ON(BPF_MEMWORDS != 16);
101513
101514 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
101515 if (!masks)
101516@@ -933,7 +937,7 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
101517
101518 /* Expand fp for appending the new filter representation. */
101519 old_fp = fp;
101520- fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL);
101521+ fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
101522 if (!fp) {
101523 /* The old_fp is still around in case we couldn't
101524 * allocate new memory, so uncharge on that one.
101525@@ -1013,11 +1017,11 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
101526 if (fprog->filter == NULL)
101527 return -EINVAL;
101528
101529- fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL);
101530+ fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
101531 if (!fp)
101532 return -ENOMEM;
101533
101534- memcpy(fp->insns, fprog->filter, fsize);
101535+ memcpy(fp->insns, (void __force_kernel *)fprog->filter, fsize);
101536
101537 fp->len = fprog->len;
101538 /* Since unattached filters are not copied back to user
101539@@ -1069,12 +1073,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
101540 if (fprog->filter == NULL)
101541 return -EINVAL;
101542
101543- prog = kmalloc(bpf_fsize, GFP_KERNEL);
101544+ prog = bpf_prog_alloc(bpf_fsize, 0);
101545 if (!prog)
101546 return -ENOMEM;
101547
101548 if (copy_from_user(prog->insns, fprog->filter, fsize)) {
101549- kfree(prog);
101550+ __bpf_prog_free(prog);
101551 return -EFAULT;
101552 }
101553
101554@@ -1082,7 +1086,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
101555
101556 err = bpf_prog_store_orig_filter(prog, fprog);
101557 if (err) {
101558- kfree(prog);
101559+ __bpf_prog_free(prog);
101560 return -ENOMEM;
101561 }
101562
101563diff --git a/net/core/flow.c b/net/core/flow.c
101564index a0348fd..6951c76 100644
101565--- a/net/core/flow.c
101566+++ b/net/core/flow.c
101567@@ -65,7 +65,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
101568 static int flow_entry_valid(struct flow_cache_entry *fle,
101569 struct netns_xfrm *xfrm)
101570 {
101571- if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
101572+ if (atomic_read_unchecked(&xfrm->flow_cache_genid) != fle->genid)
101573 return 0;
101574 if (fle->object && !fle->object->ops->check(fle->object))
101575 return 0;
101576@@ -242,7 +242,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
101577 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
101578 fcp->hash_count++;
101579 }
101580- } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
101581+ } else if (likely(fle->genid == atomic_read_unchecked(&net->xfrm.flow_cache_genid))) {
101582 flo = fle->object;
101583 if (!flo)
101584 goto ret_object;
101585@@ -263,7 +263,7 @@ nocache:
101586 }
101587 flo = resolver(net, key, family, dir, flo, ctx);
101588 if (fle) {
101589- fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
101590+ fle->genid = atomic_read_unchecked(&net->xfrm.flow_cache_genid);
101591 if (!IS_ERR(flo))
101592 fle->object = flo;
101593 else
101594diff --git a/net/core/iovec.c b/net/core/iovec.c
101595index e1ec45a..e5c6f16 100644
101596--- a/net/core/iovec.c
101597+++ b/net/core/iovec.c
101598@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
101599 if (m->msg_name && m->msg_namelen) {
101600 if (mode == VERIFY_READ) {
101601 void __user *namep;
101602- namep = (void __user __force *) m->msg_name;
101603+ namep = (void __force_user *) m->msg_name;
101604 err = move_addr_to_kernel(namep, m->msg_namelen,
101605 address);
101606 if (err < 0)
101607@@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
101608 }
101609
101610 size = m->msg_iovlen * sizeof(struct iovec);
101611- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
101612+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
101613 return -EFAULT;
101614
101615 m->msg_iov = iov;
101616diff --git a/net/core/neighbour.c b/net/core/neighbour.c
101617index ef31fef..8be66d9 100644
101618--- a/net/core/neighbour.c
101619+++ b/net/core/neighbour.c
101620@@ -2825,7 +2825,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
101621 void __user *buffer, size_t *lenp, loff_t *ppos)
101622 {
101623 int size, ret;
101624- struct ctl_table tmp = *ctl;
101625+ ctl_table_no_const tmp = *ctl;
101626
101627 tmp.extra1 = &zero;
101628 tmp.extra2 = &unres_qlen_max;
101629@@ -2887,7 +2887,7 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
101630 void __user *buffer,
101631 size_t *lenp, loff_t *ppos)
101632 {
101633- struct ctl_table tmp = *ctl;
101634+ ctl_table_no_const tmp = *ctl;
101635 int ret;
101636
101637 tmp.extra1 = &zero;
101638diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
101639index 2bf8329..2eb1423 100644
101640--- a/net/core/net-procfs.c
101641+++ b/net/core/net-procfs.c
101642@@ -79,7 +79,13 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
101643 struct rtnl_link_stats64 temp;
101644 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
101645
101646- seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101647+ if (gr_proc_is_restricted())
101648+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101649+ "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101650+ dev->name, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL,
101651+ 0ULL, 0ULL, 0ULL, 0ULL, 0ULL, 0ULL);
101652+ else
101653+ seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
101654 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
101655 dev->name, stats->rx_bytes, stats->rx_packets,
101656 stats->rx_errors,
101657@@ -166,7 +172,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
101658 return 0;
101659 }
101660
101661-static const struct seq_operations dev_seq_ops = {
101662+const struct seq_operations dev_seq_ops = {
101663 .start = dev_seq_start,
101664 .next = dev_seq_next,
101665 .stop = dev_seq_stop,
101666@@ -196,7 +202,7 @@ static const struct seq_operations softnet_seq_ops = {
101667
101668 static int softnet_seq_open(struct inode *inode, struct file *file)
101669 {
101670- return seq_open(file, &softnet_seq_ops);
101671+ return seq_open_restrict(file, &softnet_seq_ops);
101672 }
101673
101674 static const struct file_operations softnet_seq_fops = {
101675@@ -283,8 +289,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
101676 else
101677 seq_printf(seq, "%04x", ntohs(pt->type));
101678
101679+#ifdef CONFIG_GRKERNSEC_HIDESYM
101680+ seq_printf(seq, " %-8s %pf\n",
101681+ pt->dev ? pt->dev->name : "", NULL);
101682+#else
101683 seq_printf(seq, " %-8s %pf\n",
101684 pt->dev ? pt->dev->name : "", pt->func);
101685+#endif
101686 }
101687
101688 return 0;
101689diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
101690index 9dd0669..c52fb1b 100644
101691--- a/net/core/net-sysfs.c
101692+++ b/net/core/net-sysfs.c
101693@@ -278,7 +278,7 @@ static ssize_t carrier_changes_show(struct device *dev,
101694 {
101695 struct net_device *netdev = to_net_dev(dev);
101696 return sprintf(buf, fmt_dec,
101697- atomic_read(&netdev->carrier_changes));
101698+ atomic_read_unchecked(&netdev->carrier_changes));
101699 }
101700 static DEVICE_ATTR_RO(carrier_changes);
101701
101702diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
101703index 7c6b51a..e9dd57f 100644
101704--- a/net/core/net_namespace.c
101705+++ b/net/core/net_namespace.c
101706@@ -445,7 +445,7 @@ static int __register_pernet_operations(struct list_head *list,
101707 int error;
101708 LIST_HEAD(net_exit_list);
101709
101710- list_add_tail(&ops->list, list);
101711+ pax_list_add_tail((struct list_head *)&ops->list, list);
101712 if (ops->init || (ops->id && ops->size)) {
101713 for_each_net(net) {
101714 error = ops_init(ops, net);
101715@@ -458,7 +458,7 @@ static int __register_pernet_operations(struct list_head *list,
101716
101717 out_undo:
101718 /* If I have an error cleanup all namespaces I initialized */
101719- list_del(&ops->list);
101720+ pax_list_del((struct list_head *)&ops->list);
101721 ops_exit_list(ops, &net_exit_list);
101722 ops_free_list(ops, &net_exit_list);
101723 return error;
101724@@ -469,7 +469,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
101725 struct net *net;
101726 LIST_HEAD(net_exit_list);
101727
101728- list_del(&ops->list);
101729+ pax_list_del((struct list_head *)&ops->list);
101730 for_each_net(net)
101731 list_add_tail(&net->exit_list, &net_exit_list);
101732 ops_exit_list(ops, &net_exit_list);
101733@@ -603,7 +603,7 @@ int register_pernet_device(struct pernet_operations *ops)
101734 mutex_lock(&net_mutex);
101735 error = register_pernet_operations(&pernet_list, ops);
101736 if (!error && (first_device == &pernet_list))
101737- first_device = &ops->list;
101738+ first_device = (struct list_head *)&ops->list;
101739 mutex_unlock(&net_mutex);
101740 return error;
101741 }
101742diff --git a/net/core/netpoll.c b/net/core/netpoll.c
101743index 907fb5e..8260f040b 100644
101744--- a/net/core/netpoll.c
101745+++ b/net/core/netpoll.c
101746@@ -382,7 +382,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101747 struct udphdr *udph;
101748 struct iphdr *iph;
101749 struct ethhdr *eth;
101750- static atomic_t ip_ident;
101751+ static atomic_unchecked_t ip_ident;
101752 struct ipv6hdr *ip6h;
101753
101754 udp_len = len + sizeof(*udph);
101755@@ -453,7 +453,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
101756 put_unaligned(0x45, (unsigned char *)iph);
101757 iph->tos = 0;
101758 put_unaligned(htons(ip_len), &(iph->tot_len));
101759- iph->id = htons(atomic_inc_return(&ip_ident));
101760+ iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
101761 iph->frag_off = 0;
101762 iph->ttl = 64;
101763 iph->protocol = IPPROTO_UDP;
101764diff --git a/net/core/pktgen.c b/net/core/pktgen.c
101765index 8b849dd..cd88bfc 100644
101766--- a/net/core/pktgen.c
101767+++ b/net/core/pktgen.c
101768@@ -3723,7 +3723,7 @@ static int __net_init pg_net_init(struct net *net)
101769 pn->net = net;
101770 INIT_LIST_HEAD(&pn->pktgen_threads);
101771 pn->pktgen_exiting = false;
101772- pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net);
101773+ pn->proc_dir = proc_mkdir_restrict(PG_PROC_DIR, pn->net->proc_net);
101774 if (!pn->proc_dir) {
101775 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR);
101776 return -ENODEV;
101777diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
101778index f0493e3..c3ffd7f 100644
101779--- a/net/core/rtnetlink.c
101780+++ b/net/core/rtnetlink.c
101781@@ -58,7 +58,7 @@ struct rtnl_link {
101782 rtnl_doit_func doit;
101783 rtnl_dumpit_func dumpit;
101784 rtnl_calcit_func calcit;
101785-};
101786+} __no_const;
101787
101788 static DEFINE_MUTEX(rtnl_mutex);
101789
101790@@ -304,10 +304,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
101791 * to use the ops for creating device. So do not
101792 * fill up dellink as well. That disables rtnl_dellink.
101793 */
101794- if (ops->setup && !ops->dellink)
101795- ops->dellink = unregister_netdevice_queue;
101796+ if (ops->setup && !ops->dellink) {
101797+ pax_open_kernel();
101798+ *(void **)&ops->dellink = unregister_netdevice_queue;
101799+ pax_close_kernel();
101800+ }
101801
101802- list_add_tail(&ops->list, &link_ops);
101803+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
101804 return 0;
101805 }
101806 EXPORT_SYMBOL_GPL(__rtnl_link_register);
101807@@ -354,7 +357,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
101808 for_each_net(net) {
101809 __rtnl_kill_links(net, ops);
101810 }
101811- list_del(&ops->list);
101812+ pax_list_del((struct list_head *)&ops->list);
101813 }
101814 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
101815
101816@@ -1014,7 +1017,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
101817 (dev->ifalias &&
101818 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
101819 nla_put_u32(skb, IFLA_CARRIER_CHANGES,
101820- atomic_read(&dev->carrier_changes)))
101821+ atomic_read_unchecked(&dev->carrier_changes)))
101822 goto nla_put_failure;
101823
101824 if (1) {
101825@@ -2780,6 +2783,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
101826 if (br_spec) {
101827 nla_for_each_nested(attr, br_spec, rem) {
101828 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
101829+ if (nla_len(attr) < sizeof(flags))
101830+ return -EINVAL;
101831+
101832 have_flags = true;
101833 flags = nla_get_u16(attr);
101834 break;
101835@@ -2850,6 +2856,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
101836 if (br_spec) {
101837 nla_for_each_nested(attr, br_spec, rem) {
101838 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
101839+ if (nla_len(attr) < sizeof(flags))
101840+ return -EINVAL;
101841+
101842 have_flags = true;
101843 flags = nla_get_u16(attr);
101844 break;
101845diff --git a/net/core/scm.c b/net/core/scm.c
101846index b442e7e..6f5b5a2 100644
101847--- a/net/core/scm.c
101848+++ b/net/core/scm.c
101849@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
101850 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101851 {
101852 struct cmsghdr __user *cm
101853- = (__force struct cmsghdr __user *)msg->msg_control;
101854+ = (struct cmsghdr __force_user *)msg->msg_control;
101855 struct cmsghdr cmhdr;
101856 int cmlen = CMSG_LEN(len);
101857 int err;
101858@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
101859 err = -EFAULT;
101860 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
101861 goto out;
101862- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
101863+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
101864 goto out;
101865 cmlen = CMSG_SPACE(len);
101866 if (msg->msg_controllen < cmlen)
101867@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
101868 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101869 {
101870 struct cmsghdr __user *cm
101871- = (__force struct cmsghdr __user*)msg->msg_control;
101872+ = (struct cmsghdr __force_user *)msg->msg_control;
101873
101874 int fdmax = 0;
101875 int fdnum = scm->fp->count;
101876@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
101877 if (fdnum < fdmax)
101878 fdmax = fdnum;
101879
101880- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
101881+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
101882 i++, cmfptr++)
101883 {
101884 struct socket *sock;
101885diff --git a/net/core/skbuff.c b/net/core/skbuff.c
101886index 8d28969..4d36260 100644
101887--- a/net/core/skbuff.c
101888+++ b/net/core/skbuff.c
101889@@ -360,18 +360,29 @@ refill:
101890 goto end;
101891 }
101892 nc->frag.size = PAGE_SIZE << order;
101893-recycle:
101894- atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
101895+ /* Even if we own the page, we do not use atomic_set().
101896+ * This would break get_page_unless_zero() users.
101897+ */
101898+ atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
101899+ &nc->frag.page->_count);
101900 nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
101901 nc->frag.offset = 0;
101902 }
101903
101904 if (nc->frag.offset + fragsz > nc->frag.size) {
101905- /* avoid unnecessary locked operations if possible */
101906- if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
101907- atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
101908- goto recycle;
101909- goto refill;
101910+ if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
101911+ if (!atomic_sub_and_test(nc->pagecnt_bias,
101912+ &nc->frag.page->_count))
101913+ goto refill;
101914+ /* OK, page count is 0, we can safely set it */
101915+ atomic_set(&nc->frag.page->_count,
101916+ NETDEV_PAGECNT_MAX_BIAS);
101917+ } else {
101918+ atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
101919+ &nc->frag.page->_count);
101920+ }
101921+ nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
101922+ nc->frag.offset = 0;
101923 }
101924
101925 data = page_address(nc->frag.page) + nc->frag.offset;
101926@@ -2011,7 +2022,7 @@ EXPORT_SYMBOL(__skb_checksum);
101927 __wsum skb_checksum(const struct sk_buff *skb, int offset,
101928 int len, __wsum csum)
101929 {
101930- const struct skb_checksum_ops ops = {
101931+ static const struct skb_checksum_ops ops = {
101932 .update = csum_partial_ext,
101933 .combine = csum_block_add_ext,
101934 };
101935@@ -3237,13 +3248,15 @@ void __init skb_init(void)
101936 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
101937 sizeof(struct sk_buff),
101938 0,
101939- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101940+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101941+ SLAB_NO_SANITIZE,
101942 NULL);
101943 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
101944 (2*sizeof(struct sk_buff)) +
101945 sizeof(atomic_t),
101946 0,
101947- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
101948+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
101949+ SLAB_NO_SANITIZE,
101950 NULL);
101951 }
101952
101953diff --git a/net/core/sock.c b/net/core/sock.c
101954index 9c3f823..bd8c884 100644
101955--- a/net/core/sock.c
101956+++ b/net/core/sock.c
101957@@ -442,7 +442,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101958 struct sk_buff_head *list = &sk->sk_receive_queue;
101959
101960 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
101961- atomic_inc(&sk->sk_drops);
101962+ atomic_inc_unchecked(&sk->sk_drops);
101963 trace_sock_rcvqueue_full(sk, skb);
101964 return -ENOMEM;
101965 }
101966@@ -452,7 +452,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101967 return err;
101968
101969 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
101970- atomic_inc(&sk->sk_drops);
101971+ atomic_inc_unchecked(&sk->sk_drops);
101972 return -ENOBUFS;
101973 }
101974
101975@@ -472,7 +472,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
101976 skb_dst_force(skb);
101977
101978 spin_lock_irqsave(&list->lock, flags);
101979- skb->dropcount = atomic_read(&sk->sk_drops);
101980+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
101981 __skb_queue_tail(list, skb);
101982 spin_unlock_irqrestore(&list->lock, flags);
101983
101984@@ -492,7 +492,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101985 skb->dev = NULL;
101986
101987 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
101988- atomic_inc(&sk->sk_drops);
101989+ atomic_inc_unchecked(&sk->sk_drops);
101990 goto discard_and_relse;
101991 }
101992 if (nested)
101993@@ -510,7 +510,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
101994 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
101995 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
101996 bh_unlock_sock(sk);
101997- atomic_inc(&sk->sk_drops);
101998+ atomic_inc_unchecked(&sk->sk_drops);
101999 goto discard_and_relse;
102000 }
102001
102002@@ -999,12 +999,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102003 struct timeval tm;
102004 } v;
102005
102006- int lv = sizeof(int);
102007- int len;
102008+ unsigned int lv = sizeof(int);
102009+ unsigned int len;
102010
102011 if (get_user(len, optlen))
102012 return -EFAULT;
102013- if (len < 0)
102014+ if (len > INT_MAX)
102015 return -EINVAL;
102016
102017 memset(&v, 0, sizeof(v));
102018@@ -1142,11 +1142,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102019
102020 case SO_PEERNAME:
102021 {
102022- char address[128];
102023+ char address[_K_SS_MAXSIZE];
102024
102025 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
102026 return -ENOTCONN;
102027- if (lv < len)
102028+ if (lv < len || sizeof address < len)
102029 return -EINVAL;
102030 if (copy_to_user(optval, address, len))
102031 return -EFAULT;
102032@@ -1227,7 +1227,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
102033
102034 if (len > lv)
102035 len = lv;
102036- if (copy_to_user(optval, &v, len))
102037+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
102038 return -EFAULT;
102039 lenout:
102040 if (put_user(len, optlen))
102041@@ -1723,6 +1723,8 @@ EXPORT_SYMBOL(sock_kmalloc);
102042 */
102043 void sock_kfree_s(struct sock *sk, void *mem, int size)
102044 {
102045+ if (WARN_ON_ONCE(!mem))
102046+ return;
102047 kfree(mem);
102048 atomic_sub(size, &sk->sk_omem_alloc);
102049 }
102050@@ -2369,7 +2371,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
102051 */
102052 smp_wmb();
102053 atomic_set(&sk->sk_refcnt, 1);
102054- atomic_set(&sk->sk_drops, 0);
102055+ atomic_set_unchecked(&sk->sk_drops, 0);
102056 }
102057 EXPORT_SYMBOL(sock_init_data);
102058
102059@@ -2497,6 +2499,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
102060 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102061 int level, int type)
102062 {
102063+ struct sock_extended_err ee;
102064 struct sock_exterr_skb *serr;
102065 struct sk_buff *skb, *skb2;
102066 int copied, err;
102067@@ -2518,7 +2521,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
102068 sock_recv_timestamp(msg, sk, skb);
102069
102070 serr = SKB_EXT_ERR(skb);
102071- put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
102072+ ee = serr->ee;
102073+ put_cmsg(msg, level, type, sizeof ee, &ee);
102074
102075 msg->msg_flags |= MSG_ERRQUEUE;
102076 err = copied;
102077diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
102078index ad704c7..ca48aff 100644
102079--- a/net/core/sock_diag.c
102080+++ b/net/core/sock_diag.c
102081@@ -9,26 +9,33 @@
102082 #include <linux/inet_diag.h>
102083 #include <linux/sock_diag.h>
102084
102085-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
102086+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
102087 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
102088 static DEFINE_MUTEX(sock_diag_table_mutex);
102089
102090 int sock_diag_check_cookie(void *sk, __u32 *cookie)
102091 {
102092+#ifndef CONFIG_GRKERNSEC_HIDESYM
102093 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
102094 cookie[1] != INET_DIAG_NOCOOKIE) &&
102095 ((u32)(unsigned long)sk != cookie[0] ||
102096 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
102097 return -ESTALE;
102098 else
102099+#endif
102100 return 0;
102101 }
102102 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
102103
102104 void sock_diag_save_cookie(void *sk, __u32 *cookie)
102105 {
102106+#ifdef CONFIG_GRKERNSEC_HIDESYM
102107+ cookie[0] = 0;
102108+ cookie[1] = 0;
102109+#else
102110 cookie[0] = (u32)(unsigned long)sk;
102111 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
102112+#endif
102113 }
102114 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
102115
102116@@ -110,8 +117,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
102117 mutex_lock(&sock_diag_table_mutex);
102118 if (sock_diag_handlers[hndl->family])
102119 err = -EBUSY;
102120- else
102121+ else {
102122+ pax_open_kernel();
102123 sock_diag_handlers[hndl->family] = hndl;
102124+ pax_close_kernel();
102125+ }
102126 mutex_unlock(&sock_diag_table_mutex);
102127
102128 return err;
102129@@ -127,7 +137,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
102130
102131 mutex_lock(&sock_diag_table_mutex);
102132 BUG_ON(sock_diag_handlers[family] != hnld);
102133+ pax_open_kernel();
102134 sock_diag_handlers[family] = NULL;
102135+ pax_close_kernel();
102136 mutex_unlock(&sock_diag_table_mutex);
102137 }
102138 EXPORT_SYMBOL_GPL(sock_diag_unregister);
102139diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
102140index cf9cd13..50683950 100644
102141--- a/net/core/sysctl_net_core.c
102142+++ b/net/core/sysctl_net_core.c
102143@@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
102144 {
102145 unsigned int orig_size, size;
102146 int ret, i;
102147- struct ctl_table tmp = {
102148+ ctl_table_no_const tmp = {
102149 .data = &size,
102150 .maxlen = sizeof(size),
102151 .mode = table->mode
102152@@ -200,7 +200,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
102153 void __user *buffer, size_t *lenp, loff_t *ppos)
102154 {
102155 char id[IFNAMSIZ];
102156- struct ctl_table tbl = {
102157+ ctl_table_no_const tbl = {
102158 .data = id,
102159 .maxlen = IFNAMSIZ,
102160 };
102161@@ -263,7 +263,7 @@ static struct ctl_table net_core_table[] = {
102162 .mode = 0644,
102163 .proc_handler = proc_dointvec
102164 },
102165-#ifdef CONFIG_BPF_JIT
102166+#if defined(CONFIG_BPF_JIT) && !defined(CONFIG_GRKERNSEC_BPF_HARDEN)
102167 {
102168 .procname = "bpf_jit_enable",
102169 .data = &bpf_jit_enable,
102170@@ -379,13 +379,12 @@ static struct ctl_table netns_core_table[] = {
102171
102172 static __net_init int sysctl_core_net_init(struct net *net)
102173 {
102174- struct ctl_table *tbl;
102175+ ctl_table_no_const *tbl = NULL;
102176
102177 net->core.sysctl_somaxconn = SOMAXCONN;
102178
102179- tbl = netns_core_table;
102180 if (!net_eq(net, &init_net)) {
102181- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
102182+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
102183 if (tbl == NULL)
102184 goto err_dup;
102185
102186@@ -395,17 +394,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
102187 if (net->user_ns != &init_user_ns) {
102188 tbl[0].procname = NULL;
102189 }
102190- }
102191-
102192- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102193+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
102194+ } else
102195+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
102196 if (net->core.sysctl_hdr == NULL)
102197 goto err_reg;
102198
102199 return 0;
102200
102201 err_reg:
102202- if (tbl != netns_core_table)
102203- kfree(tbl);
102204+ kfree(tbl);
102205 err_dup:
102206 return -ENOMEM;
102207 }
102208@@ -420,7 +418,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
102209 kfree(tbl);
102210 }
102211
102212-static __net_initdata struct pernet_operations sysctl_core_ops = {
102213+static __net_initconst struct pernet_operations sysctl_core_ops = {
102214 .init = sysctl_core_net_init,
102215 .exit = sysctl_core_net_exit,
102216 };
102217diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
102218index ae011b4..d2d18bf 100644
102219--- a/net/decnet/af_decnet.c
102220+++ b/net/decnet/af_decnet.c
102221@@ -465,6 +465,7 @@ static struct proto dn_proto = {
102222 .sysctl_rmem = sysctl_decnet_rmem,
102223 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
102224 .obj_size = sizeof(struct dn_sock),
102225+ .slab_flags = SLAB_USERCOPY,
102226 };
102227
102228 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
102229diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
102230index 3b726f3..1af6368 100644
102231--- a/net/decnet/dn_dev.c
102232+++ b/net/decnet/dn_dev.c
102233@@ -200,7 +200,7 @@ static struct dn_dev_sysctl_table {
102234 .extra1 = &min_t3,
102235 .extra2 = &max_t3
102236 },
102237- {0}
102238+ { }
102239 },
102240 };
102241
102242diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
102243index 5325b54..a0d4d69 100644
102244--- a/net/decnet/sysctl_net_decnet.c
102245+++ b/net/decnet/sysctl_net_decnet.c
102246@@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
102247
102248 if (len > *lenp) len = *lenp;
102249
102250- if (copy_to_user(buffer, addr, len))
102251+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
102252 return -EFAULT;
102253
102254 *lenp = len;
102255@@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
102256
102257 if (len > *lenp) len = *lenp;
102258
102259- if (copy_to_user(buffer, devname, len))
102260+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
102261 return -EFAULT;
102262
102263 *lenp = len;
102264diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
102265index 32755cb..236d827 100644
102266--- a/net/ieee802154/reassembly.c
102267+++ b/net/ieee802154/reassembly.c
102268@@ -433,14 +433,13 @@ static struct ctl_table lowpan_frags_ctl_table[] = {
102269
102270 static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102271 {
102272- struct ctl_table *table;
102273+ ctl_table_no_const *table = NULL;
102274 struct ctl_table_header *hdr;
102275 struct netns_ieee802154_lowpan *ieee802154_lowpan =
102276 net_ieee802154_lowpan(net);
102277
102278- table = lowpan_frags_ns_ctl_table;
102279 if (!net_eq(net, &init_net)) {
102280- table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
102281+ table = kmemdup(lowpan_frags_ns_ctl_table, sizeof(lowpan_frags_ns_ctl_table),
102282 GFP_KERNEL);
102283 if (table == NULL)
102284 goto err_alloc;
102285@@ -455,9 +454,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102286 /* Don't export sysctls to unprivileged users */
102287 if (net->user_ns != &init_user_ns)
102288 table[0].procname = NULL;
102289- }
102290-
102291- hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102292+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
102293+ } else
102294+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", lowpan_frags_ns_ctl_table);
102295 if (hdr == NULL)
102296 goto err_reg;
102297
102298@@ -465,8 +464,7 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
102299 return 0;
102300
102301 err_reg:
102302- if (!net_eq(net, &init_net))
102303- kfree(table);
102304+ kfree(table);
102305 err_alloc:
102306 return -ENOMEM;
102307 }
102308diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
102309index 214882e..ec032f6 100644
102310--- a/net/ipv4/devinet.c
102311+++ b/net/ipv4/devinet.c
102312@@ -69,7 +69,8 @@
102313
102314 static struct ipv4_devconf ipv4_devconf = {
102315 .data = {
102316- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102317+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102318+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102319 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102320 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102321 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102322@@ -80,7 +81,8 @@ static struct ipv4_devconf ipv4_devconf = {
102323
102324 static struct ipv4_devconf ipv4_devconf_dflt = {
102325 .data = {
102326- [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
102327+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 0,
102328+ [IPV4_DEVCONF_RP_FILTER - 1] = 1,
102329 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
102330 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
102331 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
102332@@ -1548,7 +1550,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
102333 idx = 0;
102334 head = &net->dev_index_head[h];
102335 rcu_read_lock();
102336- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102337+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102338 net->dev_base_seq;
102339 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102340 if (idx < s_idx)
102341@@ -1866,7 +1868,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
102342 idx = 0;
102343 head = &net->dev_index_head[h];
102344 rcu_read_lock();
102345- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
102346+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
102347 net->dev_base_seq;
102348 hlist_for_each_entry_rcu(dev, head, index_hlist) {
102349 if (idx < s_idx)
102350@@ -2101,7 +2103,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
102351 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
102352 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
102353
102354-static struct devinet_sysctl_table {
102355+static const struct devinet_sysctl_table {
102356 struct ctl_table_header *sysctl_header;
102357 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
102358 } devinet_sysctl = {
102359@@ -2233,7 +2235,7 @@ static __net_init int devinet_init_net(struct net *net)
102360 int err;
102361 struct ipv4_devconf *all, *dflt;
102362 #ifdef CONFIG_SYSCTL
102363- struct ctl_table *tbl = ctl_forward_entry;
102364+ ctl_table_no_const *tbl = NULL;
102365 struct ctl_table_header *forw_hdr;
102366 #endif
102367
102368@@ -2251,7 +2253,7 @@ static __net_init int devinet_init_net(struct net *net)
102369 goto err_alloc_dflt;
102370
102371 #ifdef CONFIG_SYSCTL
102372- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
102373+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
102374 if (tbl == NULL)
102375 goto err_alloc_ctl;
102376
102377@@ -2271,7 +2273,10 @@ static __net_init int devinet_init_net(struct net *net)
102378 goto err_reg_dflt;
102379
102380 err = -ENOMEM;
102381- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102382+ if (!net_eq(net, &init_net))
102383+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
102384+ else
102385+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
102386 if (forw_hdr == NULL)
102387 goto err_reg_ctl;
102388 net->ipv4.forw_hdr = forw_hdr;
102389@@ -2287,8 +2292,7 @@ err_reg_ctl:
102390 err_reg_dflt:
102391 __devinet_sysctl_unregister(all);
102392 err_reg_all:
102393- if (tbl != ctl_forward_entry)
102394- kfree(tbl);
102395+ kfree(tbl);
102396 err_alloc_ctl:
102397 #endif
102398 if (dflt != &ipv4_devconf_dflt)
102399diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
102400index 255aa99..45c78f8 100644
102401--- a/net/ipv4/fib_frontend.c
102402+++ b/net/ipv4/fib_frontend.c
102403@@ -1015,12 +1015,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
102404 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102405 fib_sync_up(dev);
102406 #endif
102407- atomic_inc(&net->ipv4.dev_addr_genid);
102408+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102409 rt_cache_flush(dev_net(dev));
102410 break;
102411 case NETDEV_DOWN:
102412 fib_del_ifaddr(ifa, NULL);
102413- atomic_inc(&net->ipv4.dev_addr_genid);
102414+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102415 if (ifa->ifa_dev->ifa_list == NULL) {
102416 /* Last address was deleted from this interface.
102417 * Disable IP.
102418@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
102419 #ifdef CONFIG_IP_ROUTE_MULTIPATH
102420 fib_sync_up(dev);
102421 #endif
102422- atomic_inc(&net->ipv4.dev_addr_genid);
102423+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
102424 rt_cache_flush(net);
102425 break;
102426 case NETDEV_DOWN:
102427diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
102428index 4a74ea8..32335a7 100644
102429--- a/net/ipv4/fib_semantics.c
102430+++ b/net/ipv4/fib_semantics.c
102431@@ -768,7 +768,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
102432 nh->nh_saddr = inet_select_addr(nh->nh_dev,
102433 nh->nh_gw,
102434 nh->nh_parent->fib_scope);
102435- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
102436+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
102437
102438 return nh->nh_saddr;
102439 }
102440diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
102441index dd73bea..a2eec02 100644
102442--- a/net/ipv4/gre_offload.c
102443+++ b/net/ipv4/gre_offload.c
102444@@ -59,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
102445 if (csum)
102446 skb->encap_hdr_csum = 1;
102447
102448- if (unlikely(!pskb_may_pull(skb, ghl)))
102449- goto out;
102450-
102451 /* setup inner skb. */
102452 skb->protocol = greh->protocol;
102453 skb->encapsulation = 0;
102454
102455+ if (unlikely(!pskb_may_pull(skb, ghl)))
102456+ goto out;
102457+
102458 __skb_pull(skb, ghl);
102459 skb_reset_mac_header(skb);
102460 skb_set_network_header(skb, skb_inner_network_offset(skb));
102461diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
102462index 43116e8..ba0916a8 100644
102463--- a/net/ipv4/inet_hashtables.c
102464+++ b/net/ipv4/inet_hashtables.c
102465@@ -18,6 +18,7 @@
102466 #include <linux/sched.h>
102467 #include <linux/slab.h>
102468 #include <linux/wait.h>
102469+#include <linux/security.h>
102470
102471 #include <net/inet_connection_sock.h>
102472 #include <net/inet_hashtables.h>
102473@@ -49,6 +50,8 @@ static unsigned int inet_sk_ehashfn(const struct sock *sk)
102474 return inet_ehashfn(net, laddr, lport, faddr, fport);
102475 }
102476
102477+extern void gr_update_task_in_ip_table(const struct inet_sock *inet);
102478+
102479 /*
102480 * Allocate and initialize a new local port bind bucket.
102481 * The bindhash mutex for snum's hash chain must be held here.
102482@@ -554,6 +557,8 @@ ok:
102483 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
102484 spin_unlock(&head->lock);
102485
102486+ gr_update_task_in_ip_table(inet_sk(sk));
102487+
102488 if (tw) {
102489 inet_twsk_deschedule(tw, death_row);
102490 while (twrefcnt) {
102491diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
102492index bd5f592..e80e605 100644
102493--- a/net/ipv4/inetpeer.c
102494+++ b/net/ipv4/inetpeer.c
102495@@ -482,7 +482,7 @@ relookup:
102496 if (p) {
102497 p->daddr = *daddr;
102498 atomic_set(&p->refcnt, 1);
102499- atomic_set(&p->rid, 0);
102500+ atomic_set_unchecked(&p->rid, 0);
102501 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
102502 p->rate_tokens = 0;
102503 /* 60*HZ is arbitrary, but chosen enough high so that the first
102504diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
102505index 15f0e2b..8cf8177 100644
102506--- a/net/ipv4/ip_fragment.c
102507+++ b/net/ipv4/ip_fragment.c
102508@@ -268,7 +268,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
102509 return 0;
102510
102511 start = qp->rid;
102512- end = atomic_inc_return(&peer->rid);
102513+ end = atomic_inc_return_unchecked(&peer->rid);
102514 qp->rid = end;
102515
102516 rc = qp->q.fragments && (end - start) > max;
102517@@ -746,12 +746,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
102518
102519 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102520 {
102521- struct ctl_table *table;
102522+ ctl_table_no_const *table = NULL;
102523 struct ctl_table_header *hdr;
102524
102525- table = ip4_frags_ns_ctl_table;
102526 if (!net_eq(net, &init_net)) {
102527- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102528+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
102529 if (table == NULL)
102530 goto err_alloc;
102531
102532@@ -765,9 +764,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102533 /* Don't export sysctls to unprivileged users */
102534 if (net->user_ns != &init_user_ns)
102535 table[0].procname = NULL;
102536- }
102537+ hdr = register_net_sysctl(net, "net/ipv4", table);
102538+ } else
102539+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
102540
102541- hdr = register_net_sysctl(net, "net/ipv4", table);
102542 if (hdr == NULL)
102543 goto err_reg;
102544
102545@@ -775,8 +775,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
102546 return 0;
102547
102548 err_reg:
102549- if (!net_eq(net, &init_net))
102550- kfree(table);
102551+ kfree(table);
102552 err_alloc:
102553 return -ENOMEM;
102554 }
102555diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
102556index 9b84254..c776611 100644
102557--- a/net/ipv4/ip_gre.c
102558+++ b/net/ipv4/ip_gre.c
102559@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
102560 module_param(log_ecn_error, bool, 0644);
102561 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102562
102563-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
102564+static struct rtnl_link_ops ipgre_link_ops;
102565 static int ipgre_tunnel_init(struct net_device *dev);
102566
102567 static int ipgre_net_id __read_mostly;
102568@@ -733,7 +733,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
102569 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
102570 };
102571
102572-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102573+static struct rtnl_link_ops ipgre_link_ops = {
102574 .kind = "gre",
102575 .maxtype = IFLA_GRE_MAX,
102576 .policy = ipgre_policy,
102577@@ -747,7 +747,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
102578 .fill_info = ipgre_fill_info,
102579 };
102580
102581-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
102582+static struct rtnl_link_ops ipgre_tap_ops = {
102583 .kind = "gretap",
102584 .maxtype = IFLA_GRE_MAX,
102585 .policy = ipgre_policy,
102586diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
102587index 3d4da2c..40f9c29 100644
102588--- a/net/ipv4/ip_input.c
102589+++ b/net/ipv4/ip_input.c
102590@@ -147,6 +147,10 @@
102591 #include <linux/mroute.h>
102592 #include <linux/netlink.h>
102593
102594+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102595+extern int grsec_enable_blackhole;
102596+#endif
102597+
102598 /*
102599 * Process Router Attention IP option (RFC 2113)
102600 */
102601@@ -223,6 +227,9 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
102602 if (!raw) {
102603 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
102604 IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
102605+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
102606+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
102607+#endif
102608 icmp_send(skb, ICMP_DEST_UNREACH,
102609 ICMP_PROT_UNREACH, 0);
102610 }
102611diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
102612index c43a1e2..73cbbe1 100644
102613--- a/net/ipv4/ip_output.c
102614+++ b/net/ipv4/ip_output.c
102615@@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
102616 */
102617 features = netif_skb_features(skb);
102618 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
102619- if (IS_ERR(segs)) {
102620+ if (IS_ERR_OR_NULL(segs)) {
102621 kfree_skb(skb);
102622 return -ENOMEM;
102623 }
102624diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
102625index 2407e5d..edc2f1a 100644
102626--- a/net/ipv4/ip_sockglue.c
102627+++ b/net/ipv4/ip_sockglue.c
102628@@ -1188,7 +1188,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102629 len = min_t(unsigned int, len, opt->optlen);
102630 if (put_user(len, optlen))
102631 return -EFAULT;
102632- if (copy_to_user(optval, opt->__data, len))
102633+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
102634+ copy_to_user(optval, opt->__data, len))
102635 return -EFAULT;
102636 return 0;
102637 }
102638@@ -1319,7 +1320,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
102639 if (sk->sk_type != SOCK_STREAM)
102640 return -ENOPROTOOPT;
102641
102642- msg.msg_control = (__force void *) optval;
102643+ msg.msg_control = (__force_kernel void *) optval;
102644 msg.msg_controllen = len;
102645 msg.msg_flags = flags;
102646
102647diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
102648index e453cb7..3c8d952 100644
102649--- a/net/ipv4/ip_vti.c
102650+++ b/net/ipv4/ip_vti.c
102651@@ -45,7 +45,7 @@
102652 #include <net/net_namespace.h>
102653 #include <net/netns/generic.h>
102654
102655-static struct rtnl_link_ops vti_link_ops __read_mostly;
102656+static struct rtnl_link_ops vti_link_ops;
102657
102658 static int vti_net_id __read_mostly;
102659 static int vti_tunnel_init(struct net_device *dev);
102660@@ -519,7 +519,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
102661 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
102662 };
102663
102664-static struct rtnl_link_ops vti_link_ops __read_mostly = {
102665+static struct rtnl_link_ops vti_link_ops = {
102666 .kind = "vti",
102667 .maxtype = IFLA_VTI_MAX,
102668 .policy = vti_policy,
102669diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
102670index 5bbef4f..5bc4fb6 100644
102671--- a/net/ipv4/ipconfig.c
102672+++ b/net/ipv4/ipconfig.c
102673@@ -332,7 +332,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
102674
102675 mm_segment_t oldfs = get_fs();
102676 set_fs(get_ds());
102677- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102678+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102679 set_fs(oldfs);
102680 return res;
102681 }
102682@@ -343,7 +343,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
102683
102684 mm_segment_t oldfs = get_fs();
102685 set_fs(get_ds());
102686- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
102687+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
102688 set_fs(oldfs);
102689 return res;
102690 }
102691@@ -354,7 +354,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
102692
102693 mm_segment_t oldfs = get_fs();
102694 set_fs(get_ds());
102695- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
102696+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
102697 set_fs(oldfs);
102698 return res;
102699 }
102700diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
102701index 62eaa00..29b2dc2 100644
102702--- a/net/ipv4/ipip.c
102703+++ b/net/ipv4/ipip.c
102704@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
102705 static int ipip_net_id __read_mostly;
102706
102707 static int ipip_tunnel_init(struct net_device *dev);
102708-static struct rtnl_link_ops ipip_link_ops __read_mostly;
102709+static struct rtnl_link_ops ipip_link_ops;
102710
102711 static int ipip_err(struct sk_buff *skb, u32 info)
102712 {
102713@@ -409,7 +409,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
102714 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
102715 };
102716
102717-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
102718+static struct rtnl_link_ops ipip_link_ops = {
102719 .kind = "ipip",
102720 .maxtype = IFLA_IPTUN_MAX,
102721 .policy = ipip_policy,
102722diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
102723index f95b6f9..2ee2097 100644
102724--- a/net/ipv4/netfilter/arp_tables.c
102725+++ b/net/ipv4/netfilter/arp_tables.c
102726@@ -885,14 +885,14 @@ static int compat_table_info(const struct xt_table_info *info,
102727 #endif
102728
102729 static int get_info(struct net *net, void __user *user,
102730- const int *len, int compat)
102731+ int len, int compat)
102732 {
102733 char name[XT_TABLE_MAXNAMELEN];
102734 struct xt_table *t;
102735 int ret;
102736
102737- if (*len != sizeof(struct arpt_getinfo)) {
102738- duprintf("length %u != %Zu\n", *len,
102739+ if (len != sizeof(struct arpt_getinfo)) {
102740+ duprintf("length %u != %Zu\n", len,
102741 sizeof(struct arpt_getinfo));
102742 return -EINVAL;
102743 }
102744@@ -929,7 +929,7 @@ static int get_info(struct net *net, void __user *user,
102745 info.size = private->size;
102746 strcpy(info.name, name);
102747
102748- if (copy_to_user(user, &info, *len) != 0)
102749+ if (copy_to_user(user, &info, len) != 0)
102750 ret = -EFAULT;
102751 else
102752 ret = 0;
102753@@ -1690,7 +1690,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
102754
102755 switch (cmd) {
102756 case ARPT_SO_GET_INFO:
102757- ret = get_info(sock_net(sk), user, len, 1);
102758+ ret = get_info(sock_net(sk), user, *len, 1);
102759 break;
102760 case ARPT_SO_GET_ENTRIES:
102761 ret = compat_get_entries(sock_net(sk), user, len);
102762@@ -1735,7 +1735,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
102763
102764 switch (cmd) {
102765 case ARPT_SO_GET_INFO:
102766- ret = get_info(sock_net(sk), user, len, 0);
102767+ ret = get_info(sock_net(sk), user, *len, 0);
102768 break;
102769
102770 case ARPT_SO_GET_ENTRIES:
102771diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
102772index 99e810f..3711b81 100644
102773--- a/net/ipv4/netfilter/ip_tables.c
102774+++ b/net/ipv4/netfilter/ip_tables.c
102775@@ -1073,14 +1073,14 @@ static int compat_table_info(const struct xt_table_info *info,
102776 #endif
102777
102778 static int get_info(struct net *net, void __user *user,
102779- const int *len, int compat)
102780+ int len, int compat)
102781 {
102782 char name[XT_TABLE_MAXNAMELEN];
102783 struct xt_table *t;
102784 int ret;
102785
102786- if (*len != sizeof(struct ipt_getinfo)) {
102787- duprintf("length %u != %zu\n", *len,
102788+ if (len != sizeof(struct ipt_getinfo)) {
102789+ duprintf("length %u != %zu\n", len,
102790 sizeof(struct ipt_getinfo));
102791 return -EINVAL;
102792 }
102793@@ -1117,7 +1117,7 @@ static int get_info(struct net *net, void __user *user,
102794 info.size = private->size;
102795 strcpy(info.name, name);
102796
102797- if (copy_to_user(user, &info, *len) != 0)
102798+ if (copy_to_user(user, &info, len) != 0)
102799 ret = -EFAULT;
102800 else
102801 ret = 0;
102802@@ -1973,7 +1973,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102803
102804 switch (cmd) {
102805 case IPT_SO_GET_INFO:
102806- ret = get_info(sock_net(sk), user, len, 1);
102807+ ret = get_info(sock_net(sk), user, *len, 1);
102808 break;
102809 case IPT_SO_GET_ENTRIES:
102810 ret = compat_get_entries(sock_net(sk), user, len);
102811@@ -2020,7 +2020,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
102812
102813 switch (cmd) {
102814 case IPT_SO_GET_INFO:
102815- ret = get_info(sock_net(sk), user, len, 0);
102816+ ret = get_info(sock_net(sk), user, *len, 0);
102817 break;
102818
102819 case IPT_SO_GET_ENTRIES:
102820diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102821index 2510c02..cfb34fa 100644
102822--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
102823+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
102824@@ -720,7 +720,7 @@ static int clusterip_net_init(struct net *net)
102825 spin_lock_init(&cn->lock);
102826
102827 #ifdef CONFIG_PROC_FS
102828- cn->procdir = proc_mkdir("ipt_CLUSTERIP", net->proc_net);
102829+ cn->procdir = proc_mkdir_restrict("ipt_CLUSTERIP", net->proc_net);
102830 if (!cn->procdir) {
102831 pr_err("Unable to proc dir entry\n");
102832 return -ENOMEM;
102833diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
102834index 3524762..2e88bfd 100644
102835--- a/net/ipv4/ping.c
102836+++ b/net/ipv4/ping.c
102837@@ -59,7 +59,7 @@ struct ping_table {
102838 };
102839
102840 static struct ping_table ping_table;
102841-struct pingv6_ops pingv6_ops;
102842+struct pingv6_ops *pingv6_ops;
102843 EXPORT_SYMBOL_GPL(pingv6_ops);
102844
102845 static u16 ping_port_rover;
102846@@ -350,7 +350,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
102847 return -ENODEV;
102848 }
102849 }
102850- has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
102851+ has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
102852 scoped);
102853 rcu_read_unlock();
102854
102855@@ -558,7 +558,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102856 }
102857 #if IS_ENABLED(CONFIG_IPV6)
102858 } else if (skb->protocol == htons(ETH_P_IPV6)) {
102859- harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
102860+ harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
102861 #endif
102862 }
102863
102864@@ -576,7 +576,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
102865 info, (u8 *)icmph);
102866 #if IS_ENABLED(CONFIG_IPV6)
102867 } else if (family == AF_INET6) {
102868- pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
102869+ pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
102870 info, (u8 *)icmph);
102871 #endif
102872 }
102873@@ -860,7 +860,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102874 return ip_recv_error(sk, msg, len, addr_len);
102875 #if IS_ENABLED(CONFIG_IPV6)
102876 } else if (family == AF_INET6) {
102877- return pingv6_ops.ipv6_recv_error(sk, msg, len,
102878+ return pingv6_ops->ipv6_recv_error(sk, msg, len,
102879 addr_len);
102880 #endif
102881 }
102882@@ -918,10 +918,10 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
102883 }
102884
102885 if (inet6_sk(sk)->rxopt.all)
102886- pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
102887+ pingv6_ops->ip6_datagram_recv_common_ctl(sk, msg, skb);
102888 if (skb->protocol == htons(ETH_P_IPV6) &&
102889 inet6_sk(sk)->rxopt.all)
102890- pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
102891+ pingv6_ops->ip6_datagram_recv_specific_ctl(sk, msg, skb);
102892 else if (skb->protocol == htons(ETH_P_IP) && isk->cmsg_flags)
102893 ip_cmsg_recv(msg, skb);
102894 #endif
102895@@ -1113,7 +1113,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
102896 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
102897 0, sock_i_ino(sp),
102898 atomic_read(&sp->sk_refcnt), sp,
102899- atomic_read(&sp->sk_drops));
102900+ atomic_read_unchecked(&sp->sk_drops));
102901 }
102902
102903 static int ping_v4_seq_show(struct seq_file *seq, void *v)
102904diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
102905index 739db31..74f0210 100644
102906--- a/net/ipv4/raw.c
102907+++ b/net/ipv4/raw.c
102908@@ -314,7 +314,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
102909 int raw_rcv(struct sock *sk, struct sk_buff *skb)
102910 {
102911 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
102912- atomic_inc(&sk->sk_drops);
102913+ atomic_inc_unchecked(&sk->sk_drops);
102914 kfree_skb(skb);
102915 return NET_RX_DROP;
102916 }
102917@@ -755,16 +755,20 @@ static int raw_init(struct sock *sk)
102918
102919 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
102920 {
102921+ struct icmp_filter filter;
102922+
102923 if (optlen > sizeof(struct icmp_filter))
102924 optlen = sizeof(struct icmp_filter);
102925- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
102926+ if (copy_from_user(&filter, optval, optlen))
102927 return -EFAULT;
102928+ raw_sk(sk)->filter = filter;
102929 return 0;
102930 }
102931
102932 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
102933 {
102934 int len, ret = -EFAULT;
102935+ struct icmp_filter filter;
102936
102937 if (get_user(len, optlen))
102938 goto out;
102939@@ -774,8 +778,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
102940 if (len > sizeof(struct icmp_filter))
102941 len = sizeof(struct icmp_filter);
102942 ret = -EFAULT;
102943- if (put_user(len, optlen) ||
102944- copy_to_user(optval, &raw_sk(sk)->filter, len))
102945+ filter = raw_sk(sk)->filter;
102946+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
102947 goto out;
102948 ret = 0;
102949 out: return ret;
102950@@ -1004,7 +1008,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
102951 0, 0L, 0,
102952 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
102953 0, sock_i_ino(sp),
102954- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
102955+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
102956 }
102957
102958 static int raw_seq_show(struct seq_file *seq, void *v)
102959diff --git a/net/ipv4/route.c b/net/ipv4/route.c
102960index 29836f8..bd1e2ba 100644
102961--- a/net/ipv4/route.c
102962+++ b/net/ipv4/route.c
102963@@ -228,7 +228,7 @@ static const struct seq_operations rt_cache_seq_ops = {
102964
102965 static int rt_cache_seq_open(struct inode *inode, struct file *file)
102966 {
102967- return seq_open(file, &rt_cache_seq_ops);
102968+ return seq_open_restrict(file, &rt_cache_seq_ops);
102969 }
102970
102971 static const struct file_operations rt_cache_seq_fops = {
102972@@ -319,7 +319,7 @@ static const struct seq_operations rt_cpu_seq_ops = {
102973
102974 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
102975 {
102976- return seq_open(file, &rt_cpu_seq_ops);
102977+ return seq_open_restrict(file, &rt_cpu_seq_ops);
102978 }
102979
102980 static const struct file_operations rt_cpu_seq_fops = {
102981@@ -357,7 +357,7 @@ static int rt_acct_proc_show(struct seq_file *m, void *v)
102982
102983 static int rt_acct_proc_open(struct inode *inode, struct file *file)
102984 {
102985- return single_open(file, rt_acct_proc_show, NULL);
102986+ return single_open_restrict(file, rt_acct_proc_show, NULL);
102987 }
102988
102989 static const struct file_operations rt_acct_proc_fops = {
102990@@ -459,11 +459,11 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
102991
102992 #define IP_IDENTS_SZ 2048u
102993 struct ip_ident_bucket {
102994- atomic_t id;
102995+ atomic_unchecked_t id;
102996 u32 stamp32;
102997 };
102998
102999-static struct ip_ident_bucket *ip_idents __read_mostly;
103000+static struct ip_ident_bucket ip_idents[IP_IDENTS_SZ] __read_mostly;
103001
103002 /* In order to protect privacy, we add a perturbation to identifiers
103003 * if one generator is seldom used. This makes hard for an attacker
103004@@ -479,7 +479,7 @@ u32 ip_idents_reserve(u32 hash, int segs)
103005 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
103006 delta = prandom_u32_max(now - old);
103007
103008- return atomic_add_return(segs + delta, &bucket->id) - segs;
103009+ return atomic_add_return_unchecked(segs + delta, &bucket->id) - segs;
103010 }
103011 EXPORT_SYMBOL(ip_idents_reserve);
103012
103013@@ -2624,34 +2624,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
103014 .maxlen = sizeof(int),
103015 .mode = 0200,
103016 .proc_handler = ipv4_sysctl_rtcache_flush,
103017+ .extra1 = &init_net,
103018 },
103019 { },
103020 };
103021
103022 static __net_init int sysctl_route_net_init(struct net *net)
103023 {
103024- struct ctl_table *tbl;
103025+ ctl_table_no_const *tbl = NULL;
103026
103027- tbl = ipv4_route_flush_table;
103028 if (!net_eq(net, &init_net)) {
103029- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103030+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
103031 if (tbl == NULL)
103032 goto err_dup;
103033
103034 /* Don't export sysctls to unprivileged users */
103035 if (net->user_ns != &init_user_ns)
103036 tbl[0].procname = NULL;
103037- }
103038- tbl[0].extra1 = net;
103039+ tbl[0].extra1 = net;
103040+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103041+ } else
103042+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
103043
103044- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
103045 if (net->ipv4.route_hdr == NULL)
103046 goto err_reg;
103047 return 0;
103048
103049 err_reg:
103050- if (tbl != ipv4_route_flush_table)
103051- kfree(tbl);
103052+ kfree(tbl);
103053 err_dup:
103054 return -ENOMEM;
103055 }
103056@@ -2674,8 +2674,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
103057
103058 static __net_init int rt_genid_init(struct net *net)
103059 {
103060- atomic_set(&net->ipv4.rt_genid, 0);
103061- atomic_set(&net->fnhe_genid, 0);
103062+ atomic_set_unchecked(&net->ipv4.rt_genid, 0);
103063+ atomic_set_unchecked(&net->fnhe_genid, 0);
103064 get_random_bytes(&net->ipv4.dev_addr_genid,
103065 sizeof(net->ipv4.dev_addr_genid));
103066 return 0;
103067@@ -2718,11 +2718,7 @@ int __init ip_rt_init(void)
103068 {
103069 int rc = 0;
103070
103071- ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
103072- if (!ip_idents)
103073- panic("IP: failed to allocate ip_idents\n");
103074-
103075- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
103076+ prandom_bytes(ip_idents, sizeof(ip_idents));
103077
103078 #ifdef CONFIG_IP_ROUTE_CLASSID
103079 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
103080diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
103081index 79a007c..5023029 100644
103082--- a/net/ipv4/sysctl_net_ipv4.c
103083+++ b/net/ipv4/sysctl_net_ipv4.c
103084@@ -60,7 +60,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
103085 container_of(table->data, struct net, ipv4.ip_local_ports.range);
103086 int ret;
103087 int range[2];
103088- struct ctl_table tmp = {
103089+ ctl_table_no_const tmp = {
103090 .data = &range,
103091 .maxlen = sizeof(range),
103092 .mode = table->mode,
103093@@ -118,7 +118,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
103094 int ret;
103095 gid_t urange[2];
103096 kgid_t low, high;
103097- struct ctl_table tmp = {
103098+ ctl_table_no_const tmp = {
103099 .data = &urange,
103100 .maxlen = sizeof(urange),
103101 .mode = table->mode,
103102@@ -149,7 +149,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
103103 void __user *buffer, size_t *lenp, loff_t *ppos)
103104 {
103105 char val[TCP_CA_NAME_MAX];
103106- struct ctl_table tbl = {
103107+ ctl_table_no_const tbl = {
103108 .data = val,
103109 .maxlen = TCP_CA_NAME_MAX,
103110 };
103111@@ -168,7 +168,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
103112 void __user *buffer, size_t *lenp,
103113 loff_t *ppos)
103114 {
103115- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
103116+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
103117 int ret;
103118
103119 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103120@@ -185,7 +185,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
103121 void __user *buffer, size_t *lenp,
103122 loff_t *ppos)
103123 {
103124- struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
103125+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
103126 int ret;
103127
103128 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
103129@@ -204,7 +204,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
103130 void __user *buffer, size_t *lenp,
103131 loff_t *ppos)
103132 {
103133- struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103134+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
103135 struct tcp_fastopen_context *ctxt;
103136 int ret;
103137 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
103138@@ -857,13 +857,12 @@ static struct ctl_table ipv4_net_table[] = {
103139
103140 static __net_init int ipv4_sysctl_init_net(struct net *net)
103141 {
103142- struct ctl_table *table;
103143+ ctl_table_no_const *table = NULL;
103144
103145- table = ipv4_net_table;
103146 if (!net_eq(net, &init_net)) {
103147 int i;
103148
103149- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
103150+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
103151 if (table == NULL)
103152 goto err_alloc;
103153
103154@@ -872,7 +871,10 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
103155 table[i].data += (void *)net - (void *)&init_net;
103156 }
103157
103158- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103159+ if (!net_eq(net, &init_net))
103160+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
103161+ else
103162+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
103163 if (net->ipv4.ipv4_hdr == NULL)
103164 goto err_reg;
103165
103166diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
103167index a906e02..f3b6a0f 100644
103168--- a/net/ipv4/tcp_input.c
103169+++ b/net/ipv4/tcp_input.c
103170@@ -755,7 +755,7 @@ static void tcp_update_pacing_rate(struct sock *sk)
103171 * without any lock. We want to make sure compiler wont store
103172 * intermediate values in this location.
103173 */
103174- ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
103175+ ACCESS_ONCE_RW(sk->sk_pacing_rate) = min_t(u64, rate,
103176 sk->sk_max_pacing_rate);
103177 }
103178
103179@@ -4488,7 +4488,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
103180 * simplifies code)
103181 */
103182 static void
103183-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103184+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
103185 struct sk_buff *head, struct sk_buff *tail,
103186 u32 start, u32 end)
103187 {
103188@@ -5546,6 +5546,7 @@ discard:
103189 tcp_paws_reject(&tp->rx_opt, 0))
103190 goto discard_and_undo;
103191
103192+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
103193 if (th->syn) {
103194 /* We see SYN without ACK. It is attempt of
103195 * simultaneous connect with crossed SYNs.
103196@@ -5596,6 +5597,7 @@ discard:
103197 goto discard;
103198 #endif
103199 }
103200+#endif
103201 /* "fifth, if neither of the SYN or RST bits is set then
103202 * drop the segment and return."
103203 */
103204@@ -5642,7 +5644,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
103205 goto discard;
103206
103207 if (th->syn) {
103208- if (th->fin)
103209+ if (th->fin || th->urg || th->psh)
103210 goto discard;
103211 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
103212 return 1;
103213diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
103214index 3f49eae..bde687a 100644
103215--- a/net/ipv4/tcp_ipv4.c
103216+++ b/net/ipv4/tcp_ipv4.c
103217@@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
103218 EXPORT_SYMBOL(sysctl_tcp_low_latency);
103219
103220
103221+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103222+extern int grsec_enable_blackhole;
103223+#endif
103224+
103225 #ifdef CONFIG_TCP_MD5SIG
103226 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
103227 __be32 daddr, __be32 saddr, const struct tcphdr *th);
103228@@ -1487,6 +1491,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
103229 return 0;
103230
103231 reset:
103232+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103233+ if (!grsec_enable_blackhole)
103234+#endif
103235 tcp_v4_send_reset(rsk, skb);
103236 discard:
103237 kfree_skb(skb);
103238@@ -1633,12 +1640,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
103239 TCP_SKB_CB(skb)->sacked = 0;
103240
103241 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
103242- if (!sk)
103243+ if (!sk) {
103244+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103245+ ret = 1;
103246+#endif
103247 goto no_tcp_socket;
103248-
103249+ }
103250 process:
103251- if (sk->sk_state == TCP_TIME_WAIT)
103252+ if (sk->sk_state == TCP_TIME_WAIT) {
103253+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103254+ ret = 2;
103255+#endif
103256 goto do_time_wait;
103257+ }
103258
103259 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
103260 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
103261@@ -1704,6 +1718,10 @@ csum_error:
103262 bad_packet:
103263 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
103264 } else {
103265+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103266+ if (!grsec_enable_blackhole || (ret == 1 &&
103267+ (skb->dev->flags & IFF_LOOPBACK)))
103268+#endif
103269 tcp_v4_send_reset(NULL, skb);
103270 }
103271
103272diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
103273index 1649988..6251843 100644
103274--- a/net/ipv4/tcp_minisocks.c
103275+++ b/net/ipv4/tcp_minisocks.c
103276@@ -27,6 +27,10 @@
103277 #include <net/inet_common.h>
103278 #include <net/xfrm.h>
103279
103280+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103281+extern int grsec_enable_blackhole;
103282+#endif
103283+
103284 int sysctl_tcp_syncookies __read_mostly = 1;
103285 EXPORT_SYMBOL(sysctl_tcp_syncookies);
103286
103287@@ -740,7 +744,10 @@ embryonic_reset:
103288 * avoid becoming vulnerable to outside attack aiming at
103289 * resetting legit local connections.
103290 */
103291- req->rsk_ops->send_reset(sk, skb);
103292+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103293+ if (!grsec_enable_blackhole)
103294+#endif
103295+ req->rsk_ops->send_reset(sk, skb);
103296 } else if (fastopen) { /* received a valid RST pkt */
103297 reqsk_fastopen_remove(sk, req, true);
103298 tcp_reset(sk);
103299diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
103300index 3b66610..bfbe23a 100644
103301--- a/net/ipv4/tcp_probe.c
103302+++ b/net/ipv4/tcp_probe.c
103303@@ -238,7 +238,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
103304 if (cnt + width >= len)
103305 break;
103306
103307- if (copy_to_user(buf + cnt, tbuf, width))
103308+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
103309 return -EFAULT;
103310 cnt += width;
103311 }
103312diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
103313index df90cd1..9ab2c9b 100644
103314--- a/net/ipv4/tcp_timer.c
103315+++ b/net/ipv4/tcp_timer.c
103316@@ -22,6 +22,10 @@
103317 #include <linux/gfp.h>
103318 #include <net/tcp.h>
103319
103320+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103321+extern int grsec_lastack_retries;
103322+#endif
103323+
103324 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
103325 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
103326 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
103327@@ -192,6 +196,13 @@ static int tcp_write_timeout(struct sock *sk)
103328 }
103329 }
103330
103331+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103332+ if ((sk->sk_state == TCP_LAST_ACK) &&
103333+ (grsec_lastack_retries > 0) &&
103334+ (grsec_lastack_retries < retry_until))
103335+ retry_until = grsec_lastack_retries;
103336+#endif
103337+
103338 if (retransmits_timed_out(sk, retry_until,
103339 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
103340 /* Has it gone just too far? */
103341diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
103342index f57c0e4..cf24bd0 100644
103343--- a/net/ipv4/udp.c
103344+++ b/net/ipv4/udp.c
103345@@ -87,6 +87,7 @@
103346 #include <linux/types.h>
103347 #include <linux/fcntl.h>
103348 #include <linux/module.h>
103349+#include <linux/security.h>
103350 #include <linux/socket.h>
103351 #include <linux/sockios.h>
103352 #include <linux/igmp.h>
103353@@ -113,6 +114,10 @@
103354 #include <net/busy_poll.h>
103355 #include "udp_impl.h"
103356
103357+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103358+extern int grsec_enable_blackhole;
103359+#endif
103360+
103361 struct udp_table udp_table __read_mostly;
103362 EXPORT_SYMBOL(udp_table);
103363
103364@@ -594,6 +599,9 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
103365 return true;
103366 }
103367
103368+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
103369+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
103370+
103371 /*
103372 * This routine is called by the ICMP module when it gets some
103373 * sort of error condition. If err < 0 then the socket should
103374@@ -931,9 +939,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
103375 dport = usin->sin_port;
103376 if (dport == 0)
103377 return -EINVAL;
103378+
103379+ err = gr_search_udp_sendmsg(sk, usin);
103380+ if (err)
103381+ return err;
103382 } else {
103383 if (sk->sk_state != TCP_ESTABLISHED)
103384 return -EDESTADDRREQ;
103385+
103386+ err = gr_search_udp_sendmsg(sk, NULL);
103387+ if (err)
103388+ return err;
103389+
103390 daddr = inet->inet_daddr;
103391 dport = inet->inet_dport;
103392 /* Open fast path for connected socket.
103393@@ -1181,7 +1198,7 @@ static unsigned int first_packet_length(struct sock *sk)
103394 IS_UDPLITE(sk));
103395 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103396 IS_UDPLITE(sk));
103397- atomic_inc(&sk->sk_drops);
103398+ atomic_inc_unchecked(&sk->sk_drops);
103399 __skb_unlink(skb, rcvq);
103400 __skb_queue_tail(&list_kill, skb);
103401 }
103402@@ -1261,6 +1278,10 @@ try_again:
103403 if (!skb)
103404 goto out;
103405
103406+ err = gr_search_udp_recvmsg(sk, skb);
103407+ if (err)
103408+ goto out_free;
103409+
103410 ulen = skb->len - sizeof(struct udphdr);
103411 copied = len;
103412 if (copied > ulen)
103413@@ -1294,7 +1315,7 @@ try_again:
103414 if (unlikely(err)) {
103415 trace_kfree_skb(skb, udp_recvmsg);
103416 if (!peeked) {
103417- atomic_inc(&sk->sk_drops);
103418+ atomic_inc_unchecked(&sk->sk_drops);
103419 UDP_INC_STATS_USER(sock_net(sk),
103420 UDP_MIB_INERRORS, is_udplite);
103421 }
103422@@ -1591,7 +1612,7 @@ csum_error:
103423 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
103424 drop:
103425 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
103426- atomic_inc(&sk->sk_drops);
103427+ atomic_inc_unchecked(&sk->sk_drops);
103428 kfree_skb(skb);
103429 return -1;
103430 }
103431@@ -1610,7 +1631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
103432 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
103433
103434 if (!skb1) {
103435- atomic_inc(&sk->sk_drops);
103436+ atomic_inc_unchecked(&sk->sk_drops);
103437 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
103438 IS_UDPLITE(sk));
103439 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
103440@@ -1807,6 +1828,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
103441 goto csum_error;
103442
103443 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
103444+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
103445+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
103446+#endif
103447 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
103448
103449 /*
103450@@ -2393,7 +2417,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
103451 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
103452 0, sock_i_ino(sp),
103453 atomic_read(&sp->sk_refcnt), sp,
103454- atomic_read(&sp->sk_drops));
103455+ atomic_read_unchecked(&sp->sk_drops));
103456 }
103457
103458 int udp4_seq_show(struct seq_file *seq, void *v)
103459diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
103460index 6156f68..d6ab46d 100644
103461--- a/net/ipv4/xfrm4_policy.c
103462+++ b/net/ipv4/xfrm4_policy.c
103463@@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
103464 fl4->flowi4_tos = iph->tos;
103465 }
103466
103467-static inline int xfrm4_garbage_collect(struct dst_ops *ops)
103468+static int xfrm4_garbage_collect(struct dst_ops *ops)
103469 {
103470 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
103471
103472- xfrm4_policy_afinfo.garbage_collect(net);
103473+ xfrm_garbage_collect_deferred(net);
103474 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
103475 }
103476
103477@@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
103478
103479 static int __net_init xfrm4_net_init(struct net *net)
103480 {
103481- struct ctl_table *table;
103482+ ctl_table_no_const *table = NULL;
103483 struct ctl_table_header *hdr;
103484
103485- table = xfrm4_policy_table;
103486 if (!net_eq(net, &init_net)) {
103487- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103488+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
103489 if (!table)
103490 goto err_alloc;
103491
103492 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
103493- }
103494-
103495- hdr = register_net_sysctl(net, "net/ipv4", table);
103496+ hdr = register_net_sysctl(net, "net/ipv4", table);
103497+ } else
103498+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
103499 if (!hdr)
103500 goto err_reg;
103501
103502@@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
103503 return 0;
103504
103505 err_reg:
103506- if (!net_eq(net, &init_net))
103507- kfree(table);
103508+ kfree(table);
103509 err_alloc:
103510 return -ENOMEM;
103511 }
103512diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
103513index 3e118df..288a0d1 100644
103514--- a/net/ipv6/addrconf.c
103515+++ b/net/ipv6/addrconf.c
103516@@ -171,7 +171,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
103517 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103518 .mtu6 = IPV6_MIN_MTU,
103519 .accept_ra = 1,
103520- .accept_redirects = 1,
103521+ .accept_redirects = 0,
103522 .autoconf = 1,
103523 .force_mld_version = 0,
103524 .mldv1_unsolicited_report_interval = 10 * HZ,
103525@@ -208,7 +208,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
103526 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
103527 .mtu6 = IPV6_MIN_MTU,
103528 .accept_ra = 1,
103529- .accept_redirects = 1,
103530+ .accept_redirects = 0,
103531 .autoconf = 1,
103532 .force_mld_version = 0,
103533 .mldv1_unsolicited_report_interval = 10 * HZ,
103534@@ -604,7 +604,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
103535 idx = 0;
103536 head = &net->dev_index_head[h];
103537 rcu_read_lock();
103538- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
103539+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
103540 net->dev_base_seq;
103541 hlist_for_each_entry_rcu(dev, head, index_hlist) {
103542 if (idx < s_idx)
103543@@ -2396,7 +2396,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
103544 p.iph.ihl = 5;
103545 p.iph.protocol = IPPROTO_IPV6;
103546 p.iph.ttl = 64;
103547- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
103548+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
103549
103550 if (ops->ndo_do_ioctl) {
103551 mm_segment_t oldfs = get_fs();
103552@@ -3531,16 +3531,23 @@ static const struct file_operations if6_fops = {
103553 .release = seq_release_net,
103554 };
103555
103556+extern void register_ipv6_seq_ops_addr(struct seq_operations *addr);
103557+extern void unregister_ipv6_seq_ops_addr(void);
103558+
103559 static int __net_init if6_proc_net_init(struct net *net)
103560 {
103561- if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
103562+ register_ipv6_seq_ops_addr(&if6_seq_ops);
103563+ if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops)) {
103564+ unregister_ipv6_seq_ops_addr();
103565 return -ENOMEM;
103566+ }
103567 return 0;
103568 }
103569
103570 static void __net_exit if6_proc_net_exit(struct net *net)
103571 {
103572 remove_proc_entry("if_inet6", net->proc_net);
103573+ unregister_ipv6_seq_ops_addr();
103574 }
103575
103576 static struct pernet_operations if6_proc_net_ops = {
103577@@ -4156,7 +4163,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
103578 s_ip_idx = ip_idx = cb->args[2];
103579
103580 rcu_read_lock();
103581- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103582+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
103583 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
103584 idx = 0;
103585 head = &net->dev_index_head[h];
103586@@ -4784,7 +4791,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103587 rt_genid_bump_ipv6(net);
103588 break;
103589 }
103590- atomic_inc(&net->ipv6.dev_addr_genid);
103591+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
103592 }
103593
103594 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
103595@@ -4804,7 +4811,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
103596 int *valp = ctl->data;
103597 int val = *valp;
103598 loff_t pos = *ppos;
103599- struct ctl_table lctl;
103600+ ctl_table_no_const lctl;
103601 int ret;
103602
103603 /*
103604@@ -4889,7 +4896,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
103605 int *valp = ctl->data;
103606 int val = *valp;
103607 loff_t pos = *ppos;
103608- struct ctl_table lctl;
103609+ ctl_table_no_const lctl;
103610 int ret;
103611
103612 /*
103613diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
103614index 2daa3a1..341066c 100644
103615--- a/net/ipv6/af_inet6.c
103616+++ b/net/ipv6/af_inet6.c
103617@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
103618 net->ipv6.sysctl.icmpv6_time = 1*HZ;
103619 net->ipv6.sysctl.flowlabel_consistency = 1;
103620 net->ipv6.sysctl.auto_flowlabels = 0;
103621- atomic_set(&net->ipv6.rt_genid, 0);
103622+ atomic_set_unchecked(&net->ipv6.rt_genid, 0);
103623
103624 err = ipv6_init_mibs(net);
103625 if (err)
103626diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
103627index 2753319..b7e625c 100644
103628--- a/net/ipv6/datagram.c
103629+++ b/net/ipv6/datagram.c
103630@@ -939,5 +939,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
103631 0,
103632 sock_i_ino(sp),
103633 atomic_read(&sp->sk_refcnt), sp,
103634- atomic_read(&sp->sk_drops));
103635+ atomic_read_unchecked(&sp->sk_drops));
103636 }
103637diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
103638index 06ba3e5..5c08d38 100644
103639--- a/net/ipv6/icmp.c
103640+++ b/net/ipv6/icmp.c
103641@@ -993,7 +993,7 @@ static struct ctl_table ipv6_icmp_table_template[] = {
103642
103643 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
103644 {
103645- struct ctl_table *table;
103646+ ctl_table_no_const *table;
103647
103648 table = kmemdup(ipv6_icmp_table_template,
103649 sizeof(ipv6_icmp_table_template),
103650diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
103651index cacb493..3cae894 100644
103652--- a/net/ipv6/ip6_gre.c
103653+++ b/net/ipv6/ip6_gre.c
103654@@ -71,8 +71,8 @@ struct ip6gre_net {
103655 struct net_device *fb_tunnel_dev;
103656 };
103657
103658-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
103659-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
103660+static struct rtnl_link_ops ip6gre_link_ops;
103661+static struct rtnl_link_ops ip6gre_tap_ops;
103662 static int ip6gre_tunnel_init(struct net_device *dev);
103663 static void ip6gre_tunnel_setup(struct net_device *dev);
103664 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
103665@@ -1285,7 +1285,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
103666 }
103667
103668
103669-static struct inet6_protocol ip6gre_protocol __read_mostly = {
103670+static struct inet6_protocol ip6gre_protocol = {
103671 .handler = ip6gre_rcv,
103672 .err_handler = ip6gre_err,
103673 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
103674@@ -1646,7 +1646,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
103675 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
103676 };
103677
103678-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103679+static struct rtnl_link_ops ip6gre_link_ops = {
103680 .kind = "ip6gre",
103681 .maxtype = IFLA_GRE_MAX,
103682 .policy = ip6gre_policy,
103683@@ -1660,7 +1660,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
103684 .fill_info = ip6gre_fill_info,
103685 };
103686
103687-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
103688+static struct rtnl_link_ops ip6gre_tap_ops = {
103689 .kind = "ip6gretap",
103690 .maxtype = IFLA_GRE_MAX,
103691 .policy = ip6gre_policy,
103692diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
103693index 65eda2a..620a102 100644
103694--- a/net/ipv6/ip6_offload.c
103695+++ b/net/ipv6/ip6_offload.c
103696@@ -46,6 +46,7 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
103697 if (unlikely(!pskb_may_pull(skb, len)))
103698 break;
103699
103700+ opth = (void *)skb->data;
103701 proto = opth->nexthdr;
103702 __skb_pull(skb, len);
103703 }
103704diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
103705index d2eeb3b..c186e9a 100644
103706--- a/net/ipv6/ip6_tunnel.c
103707+++ b/net/ipv6/ip6_tunnel.c
103708@@ -86,7 +86,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103709
103710 static int ip6_tnl_dev_init(struct net_device *dev);
103711 static void ip6_tnl_dev_setup(struct net_device *dev);
103712-static struct rtnl_link_ops ip6_link_ops __read_mostly;
103713+static struct rtnl_link_ops ip6_link_ops;
103714
103715 static int ip6_tnl_net_id __read_mostly;
103716 struct ip6_tnl_net {
103717@@ -1706,7 +1706,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
103718 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
103719 };
103720
103721-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
103722+static struct rtnl_link_ops ip6_link_ops = {
103723 .kind = "ip6tnl",
103724 .maxtype = IFLA_IPTUN_MAX,
103725 .policy = ip6_tnl_policy,
103726diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
103727index 99c9487..63f4d92 100644
103728--- a/net/ipv6/ip6_vti.c
103729+++ b/net/ipv6/ip6_vti.c
103730@@ -62,7 +62,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
103731
103732 static int vti6_dev_init(struct net_device *dev);
103733 static void vti6_dev_setup(struct net_device *dev);
103734-static struct rtnl_link_ops vti6_link_ops __read_mostly;
103735+static struct rtnl_link_ops vti6_link_ops;
103736
103737 static int vti6_net_id __read_mostly;
103738 struct vti6_net {
103739@@ -972,7 +972,7 @@ static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
103740 [IFLA_VTI_OKEY] = { .type = NLA_U32 },
103741 };
103742
103743-static struct rtnl_link_ops vti6_link_ops __read_mostly = {
103744+static struct rtnl_link_ops vti6_link_ops = {
103745 .kind = "vti6",
103746 .maxtype = IFLA_VTI_MAX,
103747 .policy = vti6_policy,
103748diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
103749index 0c28998..d0a2ecd 100644
103750--- a/net/ipv6/ipv6_sockglue.c
103751+++ b/net/ipv6/ipv6_sockglue.c
103752@@ -995,7 +995,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
103753 if (sk->sk_type != SOCK_STREAM)
103754 return -ENOPROTOOPT;
103755
103756- msg.msg_control = optval;
103757+ msg.msg_control = (void __force_kernel *)optval;
103758 msg.msg_controllen = len;
103759 msg.msg_flags = flags;
103760
103761diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
103762index e080fbb..412b3cf 100644
103763--- a/net/ipv6/netfilter/ip6_tables.c
103764+++ b/net/ipv6/netfilter/ip6_tables.c
103765@@ -1083,14 +1083,14 @@ static int compat_table_info(const struct xt_table_info *info,
103766 #endif
103767
103768 static int get_info(struct net *net, void __user *user,
103769- const int *len, int compat)
103770+ int len, int compat)
103771 {
103772 char name[XT_TABLE_MAXNAMELEN];
103773 struct xt_table *t;
103774 int ret;
103775
103776- if (*len != sizeof(struct ip6t_getinfo)) {
103777- duprintf("length %u != %zu\n", *len,
103778+ if (len != sizeof(struct ip6t_getinfo)) {
103779+ duprintf("length %u != %zu\n", len,
103780 sizeof(struct ip6t_getinfo));
103781 return -EINVAL;
103782 }
103783@@ -1127,7 +1127,7 @@ static int get_info(struct net *net, void __user *user,
103784 info.size = private->size;
103785 strcpy(info.name, name);
103786
103787- if (copy_to_user(user, &info, *len) != 0)
103788+ if (copy_to_user(user, &info, len) != 0)
103789 ret = -EFAULT;
103790 else
103791 ret = 0;
103792@@ -1983,7 +1983,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103793
103794 switch (cmd) {
103795 case IP6T_SO_GET_INFO:
103796- ret = get_info(sock_net(sk), user, len, 1);
103797+ ret = get_info(sock_net(sk), user, *len, 1);
103798 break;
103799 case IP6T_SO_GET_ENTRIES:
103800 ret = compat_get_entries(sock_net(sk), user, len);
103801@@ -2030,7 +2030,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
103802
103803 switch (cmd) {
103804 case IP6T_SO_GET_INFO:
103805- ret = get_info(sock_net(sk), user, len, 0);
103806+ ret = get_info(sock_net(sk), user, *len, 0);
103807 break;
103808
103809 case IP6T_SO_GET_ENTRIES:
103810diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
103811index 6f187c8..34b367f 100644
103812--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
103813+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
103814@@ -96,12 +96,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
103815
103816 static int nf_ct_frag6_sysctl_register(struct net *net)
103817 {
103818- struct ctl_table *table;
103819+ ctl_table_no_const *table = NULL;
103820 struct ctl_table_header *hdr;
103821
103822- table = nf_ct_frag6_sysctl_table;
103823 if (!net_eq(net, &init_net)) {
103824- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
103825+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
103826 GFP_KERNEL);
103827 if (table == NULL)
103828 goto err_alloc;
103829@@ -112,9 +111,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103830 table[2].data = &net->nf_frag.frags.high_thresh;
103831 table[2].extra1 = &net->nf_frag.frags.low_thresh;
103832 table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
103833- }
103834-
103835- hdr = register_net_sysctl(net, "net/netfilter", table);
103836+ hdr = register_net_sysctl(net, "net/netfilter", table);
103837+ } else
103838+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
103839 if (hdr == NULL)
103840 goto err_reg;
103841
103842@@ -122,8 +121,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
103843 return 0;
103844
103845 err_reg:
103846- if (!net_eq(net, &init_net))
103847- kfree(table);
103848+ kfree(table);
103849 err_alloc:
103850 return -ENOMEM;
103851 }
103852diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
103853index 5b7a1ed..d9da205 100644
103854--- a/net/ipv6/ping.c
103855+++ b/net/ipv6/ping.c
103856@@ -240,6 +240,24 @@ static struct pernet_operations ping_v6_net_ops = {
103857 };
103858 #endif
103859
103860+static struct pingv6_ops real_pingv6_ops = {
103861+ .ipv6_recv_error = ipv6_recv_error,
103862+ .ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl,
103863+ .ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl,
103864+ .icmpv6_err_convert = icmpv6_err_convert,
103865+ .ipv6_icmp_error = ipv6_icmp_error,
103866+ .ipv6_chk_addr = ipv6_chk_addr,
103867+};
103868+
103869+static struct pingv6_ops dummy_pingv6_ops = {
103870+ .ipv6_recv_error = dummy_ipv6_recv_error,
103871+ .ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl,
103872+ .ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl,
103873+ .icmpv6_err_convert = dummy_icmpv6_err_convert,
103874+ .ipv6_icmp_error = dummy_ipv6_icmp_error,
103875+ .ipv6_chk_addr = dummy_ipv6_chk_addr,
103876+};
103877+
103878 int __init pingv6_init(void)
103879 {
103880 #ifdef CONFIG_PROC_FS
103881@@ -247,13 +265,7 @@ int __init pingv6_init(void)
103882 if (ret)
103883 return ret;
103884 #endif
103885- pingv6_ops.ipv6_recv_error = ipv6_recv_error;
103886- pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl;
103887- pingv6_ops.ip6_datagram_recv_specific_ctl =
103888- ip6_datagram_recv_specific_ctl;
103889- pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
103890- pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
103891- pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
103892+ pingv6_ops = &real_pingv6_ops;
103893 return inet6_register_protosw(&pingv6_protosw);
103894 }
103895
103896@@ -262,14 +274,9 @@ int __init pingv6_init(void)
103897 */
103898 void pingv6_exit(void)
103899 {
103900- pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
103901- pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl;
103902- pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl;
103903- pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
103904- pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
103905- pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
103906 #ifdef CONFIG_PROC_FS
103907 unregister_pernet_subsys(&ping_v6_net_ops);
103908 #endif
103909+ pingv6_ops = &dummy_pingv6_ops;
103910 inet6_unregister_protosw(&pingv6_protosw);
103911 }
103912diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
103913index 2d6f860..b0165f5 100644
103914--- a/net/ipv6/proc.c
103915+++ b/net/ipv6/proc.c
103916@@ -309,7 +309,7 @@ static int __net_init ipv6_proc_init_net(struct net *net)
103917 if (!proc_create("snmp6", S_IRUGO, net->proc_net, &snmp6_seq_fops))
103918 goto proc_snmp6_fail;
103919
103920- net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net);
103921+ net->mib.proc_net_devsnmp6 = proc_mkdir_restrict("dev_snmp6", net->proc_net);
103922 if (!net->mib.proc_net_devsnmp6)
103923 goto proc_dev_snmp6_fail;
103924 return 0;
103925diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
103926index 39d4422..b0979547 100644
103927--- a/net/ipv6/raw.c
103928+++ b/net/ipv6/raw.c
103929@@ -388,7 +388,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
103930 {
103931 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
103932 skb_checksum_complete(skb)) {
103933- atomic_inc(&sk->sk_drops);
103934+ atomic_inc_unchecked(&sk->sk_drops);
103935 kfree_skb(skb);
103936 return NET_RX_DROP;
103937 }
103938@@ -416,7 +416,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103939 struct raw6_sock *rp = raw6_sk(sk);
103940
103941 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
103942- atomic_inc(&sk->sk_drops);
103943+ atomic_inc_unchecked(&sk->sk_drops);
103944 kfree_skb(skb);
103945 return NET_RX_DROP;
103946 }
103947@@ -440,7 +440,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
103948
103949 if (inet->hdrincl) {
103950 if (skb_checksum_complete(skb)) {
103951- atomic_inc(&sk->sk_drops);
103952+ atomic_inc_unchecked(&sk->sk_drops);
103953 kfree_skb(skb);
103954 return NET_RX_DROP;
103955 }
103956@@ -608,7 +608,7 @@ out:
103957 return err;
103958 }
103959
103960-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
103961+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
103962 struct flowi6 *fl6, struct dst_entry **dstp,
103963 unsigned int flags)
103964 {
103965@@ -914,12 +914,15 @@ do_confirm:
103966 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
103967 char __user *optval, int optlen)
103968 {
103969+ struct icmp6_filter filter;
103970+
103971 switch (optname) {
103972 case ICMPV6_FILTER:
103973 if (optlen > sizeof(struct icmp6_filter))
103974 optlen = sizeof(struct icmp6_filter);
103975- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
103976+ if (copy_from_user(&filter, optval, optlen))
103977 return -EFAULT;
103978+ raw6_sk(sk)->filter = filter;
103979 return 0;
103980 default:
103981 return -ENOPROTOOPT;
103982@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103983 char __user *optval, int __user *optlen)
103984 {
103985 int len;
103986+ struct icmp6_filter filter;
103987
103988 switch (optname) {
103989 case ICMPV6_FILTER:
103990@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
103991 len = sizeof(struct icmp6_filter);
103992 if (put_user(len, optlen))
103993 return -EFAULT;
103994- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
103995+ filter = raw6_sk(sk)->filter;
103996+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
103997 return -EFAULT;
103998 return 0;
103999 default:
104000diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
104001index c6557d9..173e728 100644
104002--- a/net/ipv6/reassembly.c
104003+++ b/net/ipv6/reassembly.c
104004@@ -627,12 +627,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
104005
104006 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104007 {
104008- struct ctl_table *table;
104009+ ctl_table_no_const *table = NULL;
104010 struct ctl_table_header *hdr;
104011
104012- table = ip6_frags_ns_ctl_table;
104013 if (!net_eq(net, &init_net)) {
104014- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104015+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
104016 if (table == NULL)
104017 goto err_alloc;
104018
104019@@ -646,9 +645,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104020 /* Don't export sysctls to unprivileged users */
104021 if (net->user_ns != &init_user_ns)
104022 table[0].procname = NULL;
104023- }
104024+ hdr = register_net_sysctl(net, "net/ipv6", table);
104025+ } else
104026+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
104027
104028- hdr = register_net_sysctl(net, "net/ipv6", table);
104029 if (hdr == NULL)
104030 goto err_reg;
104031
104032@@ -656,8 +656,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
104033 return 0;
104034
104035 err_reg:
104036- if (!net_eq(net, &init_net))
104037- kfree(table);
104038+ kfree(table);
104039 err_alloc:
104040 return -ENOMEM;
104041 }
104042diff --git a/net/ipv6/route.c b/net/ipv6/route.c
104043index bafde82..af2c91f 100644
104044--- a/net/ipv6/route.c
104045+++ b/net/ipv6/route.c
104046@@ -2967,7 +2967,7 @@ struct ctl_table ipv6_route_table_template[] = {
104047
104048 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
104049 {
104050- struct ctl_table *table;
104051+ ctl_table_no_const *table;
104052
104053 table = kmemdup(ipv6_route_table_template,
104054 sizeof(ipv6_route_table_template),
104055diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
104056index ca1c7c4..37fba59 100644
104057--- a/net/ipv6/sit.c
104058+++ b/net/ipv6/sit.c
104059@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
104060 static void ipip6_dev_free(struct net_device *dev);
104061 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
104062 __be32 *v4dst);
104063-static struct rtnl_link_ops sit_link_ops __read_mostly;
104064+static struct rtnl_link_ops sit_link_ops;
104065
104066 static int sit_net_id __read_mostly;
104067 struct sit_net {
104068@@ -484,11 +484,11 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
104069 */
104070 static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
104071 {
104072- const struct iphdr *iph = (const struct iphdr *) skb->data;
104073+ int ihl = ((const struct iphdr *)skb->data)->ihl*4;
104074 struct rt6_info *rt;
104075 struct sk_buff *skb2;
104076
104077- if (!pskb_may_pull(skb, iph->ihl * 4 + sizeof(struct ipv6hdr) + 8))
104078+ if (!pskb_may_pull(skb, ihl + sizeof(struct ipv6hdr) + 8))
104079 return 1;
104080
104081 skb2 = skb_clone(skb, GFP_ATOMIC);
104082@@ -497,7 +497,7 @@ static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb)
104083 return 1;
104084
104085 skb_dst_drop(skb2);
104086- skb_pull(skb2, iph->ihl * 4);
104087+ skb_pull(skb2, ihl);
104088 skb_reset_network_header(skb2);
104089
104090 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);
104091@@ -1659,7 +1659,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
104092 unregister_netdevice_queue(dev, head);
104093 }
104094
104095-static struct rtnl_link_ops sit_link_ops __read_mostly = {
104096+static struct rtnl_link_ops sit_link_ops = {
104097 .kind = "sit",
104098 .maxtype = IFLA_IPTUN_MAX,
104099 .policy = ipip6_policy,
104100diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
104101index 0c56c93..ece50df 100644
104102--- a/net/ipv6/sysctl_net_ipv6.c
104103+++ b/net/ipv6/sysctl_net_ipv6.c
104104@@ -68,7 +68,7 @@ static struct ctl_table ipv6_rotable[] = {
104105
104106 static int __net_init ipv6_sysctl_net_init(struct net *net)
104107 {
104108- struct ctl_table *ipv6_table;
104109+ ctl_table_no_const *ipv6_table;
104110 struct ctl_table *ipv6_route_table;
104111 struct ctl_table *ipv6_icmp_table;
104112 int err;
104113diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
104114index 264c0f2..b6512c6 100644
104115--- a/net/ipv6/tcp_ipv6.c
104116+++ b/net/ipv6/tcp_ipv6.c
104117@@ -102,6 +102,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104118 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
104119 }
104120
104121+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104122+extern int grsec_enable_blackhole;
104123+#endif
104124+
104125 static void tcp_v6_hash(struct sock *sk)
104126 {
104127 if (sk->sk_state != TCP_CLOSE) {
104128@@ -1333,6 +1337,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
104129 return 0;
104130
104131 reset:
104132+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104133+ if (!grsec_enable_blackhole)
104134+#endif
104135 tcp_v6_send_reset(sk, skb);
104136 discard:
104137 if (opt_skb)
104138@@ -1417,12 +1424,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
104139 TCP_SKB_CB(skb)->sacked = 0;
104140
104141 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
104142- if (!sk)
104143+ if (!sk) {
104144+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104145+ ret = 1;
104146+#endif
104147 goto no_tcp_socket;
104148+ }
104149
104150 process:
104151- if (sk->sk_state == TCP_TIME_WAIT)
104152+ if (sk->sk_state == TCP_TIME_WAIT) {
104153+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104154+ ret = 2;
104155+#endif
104156 goto do_time_wait;
104157+ }
104158
104159 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
104160 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
104161@@ -1479,6 +1494,10 @@ csum_error:
104162 bad_packet:
104163 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
104164 } else {
104165+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104166+ if (!grsec_enable_blackhole || (ret == 1 &&
104167+ (skb->dev->flags & IFF_LOOPBACK)))
104168+#endif
104169 tcp_v6_send_reset(NULL, skb);
104170 }
104171
104172diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
104173index 4836af8..0e52bbd 100644
104174--- a/net/ipv6/udp.c
104175+++ b/net/ipv6/udp.c
104176@@ -76,6 +76,10 @@ static unsigned int udp6_ehashfn(struct net *net,
104177 udp_ipv6_hash_secret + net_hash_mix(net));
104178 }
104179
104180+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104181+extern int grsec_enable_blackhole;
104182+#endif
104183+
104184 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
104185 {
104186 const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
104187@@ -434,7 +438,7 @@ try_again:
104188 if (unlikely(err)) {
104189 trace_kfree_skb(skb, udpv6_recvmsg);
104190 if (!peeked) {
104191- atomic_inc(&sk->sk_drops);
104192+ atomic_inc_unchecked(&sk->sk_drops);
104193 if (is_udp4)
104194 UDP_INC_STATS_USER(sock_net(sk),
104195 UDP_MIB_INERRORS,
104196@@ -701,7 +705,7 @@ csum_error:
104197 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
104198 drop:
104199 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
104200- atomic_inc(&sk->sk_drops);
104201+ atomic_inc_unchecked(&sk->sk_drops);
104202 kfree_skb(skb);
104203 return -1;
104204 }
104205@@ -740,7 +744,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
104206 if (likely(skb1 == NULL))
104207 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
104208 if (!skb1) {
104209- atomic_inc(&sk->sk_drops);
104210+ atomic_inc_unchecked(&sk->sk_drops);
104211 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
104212 IS_UDPLITE(sk));
104213 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
104214@@ -915,6 +919,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
104215 goto csum_error;
104216
104217 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
104218+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
104219+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
104220+#endif
104221 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
104222
104223 kfree_skb(skb);
104224diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
104225index 2a0bbda..fcd5396 100644
104226--- a/net/ipv6/xfrm6_policy.c
104227+++ b/net/ipv6/xfrm6_policy.c
104228@@ -130,8 +130,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104229 {
104230 struct flowi6 *fl6 = &fl->u.ip6;
104231 int onlyproto = 0;
104232- u16 offset = skb_network_header_len(skb);
104233 const struct ipv6hdr *hdr = ipv6_hdr(skb);
104234+ u16 offset = sizeof(*hdr);
104235 struct ipv6_opt_hdr *exthdr;
104236 const unsigned char *nh = skb_network_header(skb);
104237 u8 nexthdr = nh[IP6CB(skb)->nhoff];
104238@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104239 case IPPROTO_DCCP:
104240 if (!onlyproto && (nh + offset + 4 < skb->data ||
104241 pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
104242- __be16 *ports = (__be16 *)exthdr;
104243+ __be16 *ports;
104244
104245+ nh = skb_network_header(skb);
104246+ ports = (__be16 *)(nh + offset);
104247 fl6->fl6_sport = ports[!!reverse];
104248 fl6->fl6_dport = ports[!reverse];
104249 }
104250@@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104251
104252 case IPPROTO_ICMPV6:
104253 if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
104254- u8 *icmp = (u8 *)exthdr;
104255+ u8 *icmp;
104256
104257+ nh = skb_network_header(skb);
104258+ icmp = (u8 *)(nh + offset);
104259 fl6->fl6_icmp_type = icmp[0];
104260 fl6->fl6_icmp_code = icmp[1];
104261 }
104262@@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104263 case IPPROTO_MH:
104264 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
104265 struct ip6_mh *mh;
104266- mh = (struct ip6_mh *)exthdr;
104267
104268+ nh = skb_network_header(skb);
104269+ mh = (struct ip6_mh *)(nh + offset);
104270 fl6->fl6_mh_type = mh->ip6mh_type;
104271 }
104272 fl6->flowi6_proto = nexthdr;
104273@@ -212,11 +217,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
104274 }
104275 }
104276
104277-static inline int xfrm6_garbage_collect(struct dst_ops *ops)
104278+static int xfrm6_garbage_collect(struct dst_ops *ops)
104279 {
104280 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
104281
104282- xfrm6_policy_afinfo.garbage_collect(net);
104283+ xfrm_garbage_collect_deferred(net);
104284 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
104285 }
104286
104287@@ -329,19 +334,19 @@ static struct ctl_table xfrm6_policy_table[] = {
104288
104289 static int __net_init xfrm6_net_init(struct net *net)
104290 {
104291- struct ctl_table *table;
104292+ ctl_table_no_const *table = NULL;
104293 struct ctl_table_header *hdr;
104294
104295- table = xfrm6_policy_table;
104296 if (!net_eq(net, &init_net)) {
104297- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104298+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
104299 if (!table)
104300 goto err_alloc;
104301
104302 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
104303- }
104304+ hdr = register_net_sysctl(net, "net/ipv6", table);
104305+ } else
104306+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
104307
104308- hdr = register_net_sysctl(net, "net/ipv6", table);
104309 if (!hdr)
104310 goto err_reg;
104311
104312@@ -349,8 +354,7 @@ static int __net_init xfrm6_net_init(struct net *net)
104313 return 0;
104314
104315 err_reg:
104316- if (!net_eq(net, &init_net))
104317- kfree(table);
104318+ kfree(table);
104319 err_alloc:
104320 return -ENOMEM;
104321 }
104322diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
104323index e15c16a..7cf07aa 100644
104324--- a/net/ipx/ipx_proc.c
104325+++ b/net/ipx/ipx_proc.c
104326@@ -289,7 +289,7 @@ int __init ipx_proc_init(void)
104327 struct proc_dir_entry *p;
104328 int rc = -ENOMEM;
104329
104330- ipx_proc_dir = proc_mkdir("ipx", init_net.proc_net);
104331+ ipx_proc_dir = proc_mkdir_restrict("ipx", init_net.proc_net);
104332
104333 if (!ipx_proc_dir)
104334 goto out;
104335diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
104336index 61ceb4c..e788eb8 100644
104337--- a/net/irda/ircomm/ircomm_tty.c
104338+++ b/net/irda/ircomm/ircomm_tty.c
104339@@ -317,10 +317,10 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104340 add_wait_queue(&port->open_wait, &wait);
104341
104342 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
104343- __FILE__, __LINE__, tty->driver->name, port->count);
104344+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104345
104346 spin_lock_irqsave(&port->lock, flags);
104347- port->count--;
104348+ atomic_dec(&port->count);
104349 port->blocked_open++;
104350 spin_unlock_irqrestore(&port->lock, flags);
104351
104352@@ -355,7 +355,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104353 }
104354
104355 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
104356- __FILE__, __LINE__, tty->driver->name, port->count);
104357+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104358
104359 schedule();
104360 }
104361@@ -365,12 +365,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
104362
104363 spin_lock_irqsave(&port->lock, flags);
104364 if (!tty_hung_up_p(filp))
104365- port->count++;
104366+ atomic_inc(&port->count);
104367 port->blocked_open--;
104368 spin_unlock_irqrestore(&port->lock, flags);
104369
104370 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
104371- __FILE__, __LINE__, tty->driver->name, port->count);
104372+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
104373
104374 if (!retval)
104375 port->flags |= ASYNC_NORMAL_ACTIVE;
104376@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
104377
104378 /* ++ is not atomic, so this should be protected - Jean II */
104379 spin_lock_irqsave(&self->port.lock, flags);
104380- self->port.count++;
104381+ atomic_inc(&self->port.count);
104382 spin_unlock_irqrestore(&self->port.lock, flags);
104383 tty_port_tty_set(&self->port, tty);
104384
104385 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
104386- self->line, self->port.count);
104387+ self->line, atomic_read(&self->port.count));
104388
104389 /* Not really used by us, but lets do it anyway */
104390 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
104391@@ -985,7 +985,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
104392 tty_kref_put(port->tty);
104393 }
104394 port->tty = NULL;
104395- port->count = 0;
104396+ atomic_set(&port->count, 0);
104397 spin_unlock_irqrestore(&port->lock, flags);
104398
104399 wake_up_interruptible(&port->open_wait);
104400@@ -1342,7 +1342,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
104401 seq_putc(m, '\n');
104402
104403 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
104404- seq_printf(m, "Open count: %d\n", self->port.count);
104405+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
104406 seq_printf(m, "Max data size: %d\n", self->max_data_size);
104407 seq_printf(m, "Max header size: %d\n", self->max_header_size);
104408
104409diff --git a/net/irda/irproc.c b/net/irda/irproc.c
104410index b9ac598..f88cc56 100644
104411--- a/net/irda/irproc.c
104412+++ b/net/irda/irproc.c
104413@@ -66,7 +66,7 @@ void __init irda_proc_register(void)
104414 {
104415 int i;
104416
104417- proc_irda = proc_mkdir("irda", init_net.proc_net);
104418+ proc_irda = proc_mkdir_restrict("irda", init_net.proc_net);
104419 if (proc_irda == NULL)
104420 return;
104421
104422diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
104423index a089b6b..3ca3b60 100644
104424--- a/net/iucv/af_iucv.c
104425+++ b/net/iucv/af_iucv.c
104426@@ -686,10 +686,10 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
104427 {
104428 char name[12];
104429
104430- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
104431+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104432 while (__iucv_get_sock_by_name(name)) {
104433 sprintf(name, "%08x",
104434- atomic_inc_return(&iucv_sk_list.autobind_name));
104435+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
104436 }
104437 memcpy(iucv->src_name, name, 8);
104438 }
104439diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
104440index da78793..bdd78cf 100644
104441--- a/net/iucv/iucv.c
104442+++ b/net/iucv/iucv.c
104443@@ -702,7 +702,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
104444 return NOTIFY_OK;
104445 }
104446
104447-static struct notifier_block __refdata iucv_cpu_notifier = {
104448+static struct notifier_block iucv_cpu_notifier = {
104449 .notifier_call = iucv_cpu_notify,
104450 };
104451
104452diff --git a/net/key/af_key.c b/net/key/af_key.c
104453index 1847ec4..26ef732 100644
104454--- a/net/key/af_key.c
104455+++ b/net/key/af_key.c
104456@@ -3049,10 +3049,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
104457 static u32 get_acqseq(void)
104458 {
104459 u32 res;
104460- static atomic_t acqseq;
104461+ static atomic_unchecked_t acqseq;
104462
104463 do {
104464- res = atomic_inc_return(&acqseq);
104465+ res = atomic_inc_return_unchecked(&acqseq);
104466 } while (!res);
104467 return res;
104468 }
104469diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
104470index edb78e6..8dc654a 100644
104471--- a/net/l2tp/l2tp_eth.c
104472+++ b/net/l2tp/l2tp_eth.c
104473@@ -42,12 +42,12 @@ struct l2tp_eth {
104474 struct sock *tunnel_sock;
104475 struct l2tp_session *session;
104476 struct list_head list;
104477- atomic_long_t tx_bytes;
104478- atomic_long_t tx_packets;
104479- atomic_long_t tx_dropped;
104480- atomic_long_t rx_bytes;
104481- atomic_long_t rx_packets;
104482- atomic_long_t rx_errors;
104483+ atomic_long_unchecked_t tx_bytes;
104484+ atomic_long_unchecked_t tx_packets;
104485+ atomic_long_unchecked_t tx_dropped;
104486+ atomic_long_unchecked_t rx_bytes;
104487+ atomic_long_unchecked_t rx_packets;
104488+ atomic_long_unchecked_t rx_errors;
104489 };
104490
104491 /* via l2tp_session_priv() */
104492@@ -98,10 +98,10 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
104493 int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
104494
104495 if (likely(ret == NET_XMIT_SUCCESS)) {
104496- atomic_long_add(len, &priv->tx_bytes);
104497- atomic_long_inc(&priv->tx_packets);
104498+ atomic_long_add_unchecked(len, &priv->tx_bytes);
104499+ atomic_long_inc_unchecked(&priv->tx_packets);
104500 } else {
104501- atomic_long_inc(&priv->tx_dropped);
104502+ atomic_long_inc_unchecked(&priv->tx_dropped);
104503 }
104504 return NETDEV_TX_OK;
104505 }
104506@@ -111,12 +111,12 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
104507 {
104508 struct l2tp_eth *priv = netdev_priv(dev);
104509
104510- stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
104511- stats->tx_packets = atomic_long_read(&priv->tx_packets);
104512- stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
104513- stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
104514- stats->rx_packets = atomic_long_read(&priv->rx_packets);
104515- stats->rx_errors = atomic_long_read(&priv->rx_errors);
104516+ stats->tx_bytes = atomic_long_read_unchecked(&priv->tx_bytes);
104517+ stats->tx_packets = atomic_long_read_unchecked(&priv->tx_packets);
104518+ stats->tx_dropped = atomic_long_read_unchecked(&priv->tx_dropped);
104519+ stats->rx_bytes = atomic_long_read_unchecked(&priv->rx_bytes);
104520+ stats->rx_packets = atomic_long_read_unchecked(&priv->rx_packets);
104521+ stats->rx_errors = atomic_long_read_unchecked(&priv->rx_errors);
104522 return stats;
104523 }
104524
104525@@ -166,15 +166,15 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
104526 nf_reset(skb);
104527
104528 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
104529- atomic_long_inc(&priv->rx_packets);
104530- atomic_long_add(data_len, &priv->rx_bytes);
104531+ atomic_long_inc_unchecked(&priv->rx_packets);
104532+ atomic_long_add_unchecked(data_len, &priv->rx_bytes);
104533 } else {
104534- atomic_long_inc(&priv->rx_errors);
104535+ atomic_long_inc_unchecked(&priv->rx_errors);
104536 }
104537 return;
104538
104539 error:
104540- atomic_long_inc(&priv->rx_errors);
104541+ atomic_long_inc_unchecked(&priv->rx_errors);
104542 kfree_skb(skb);
104543 }
104544
104545diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
104546index 1a3c7e0..80f8b0c 100644
104547--- a/net/llc/llc_proc.c
104548+++ b/net/llc/llc_proc.c
104549@@ -247,7 +247,7 @@ int __init llc_proc_init(void)
104550 int rc = -ENOMEM;
104551 struct proc_dir_entry *p;
104552
104553- llc_proc_dir = proc_mkdir("llc", init_net.proc_net);
104554+ llc_proc_dir = proc_mkdir_restrict("llc", init_net.proc_net);
104555 if (!llc_proc_dir)
104556 goto out;
104557
104558diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
104559index 927b4ea..88a30e2 100644
104560--- a/net/mac80211/cfg.c
104561+++ b/net/mac80211/cfg.c
104562@@ -540,7 +540,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
104563 ret = ieee80211_vif_use_channel(sdata, chandef,
104564 IEEE80211_CHANCTX_EXCLUSIVE);
104565 }
104566- } else if (local->open_count == local->monitors) {
104567+ } else if (local_read(&local->open_count) == local->monitors) {
104568 local->_oper_chandef = *chandef;
104569 ieee80211_hw_config(local, 0);
104570 }
104571@@ -3286,7 +3286,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
104572 else
104573 local->probe_req_reg--;
104574
104575- if (!local->open_count)
104576+ if (!local_read(&local->open_count))
104577 break;
104578
104579 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
104580@@ -3420,8 +3420,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
104581 if (chanctx_conf) {
104582 *chandef = chanctx_conf->def;
104583 ret = 0;
104584- } else if (local->open_count > 0 &&
104585- local->open_count == local->monitors &&
104586+ } else if (local_read(&local->open_count) > 0 &&
104587+ local_read(&local->open_count) == local->monitors &&
104588 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
104589 if (local->use_chanctx)
104590 *chandef = local->monitor_chandef;
104591diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
104592index 5d102b5..6199fca 100644
104593--- a/net/mac80211/ieee80211_i.h
104594+++ b/net/mac80211/ieee80211_i.h
104595@@ -28,6 +28,7 @@
104596 #include <net/ieee80211_radiotap.h>
104597 #include <net/cfg80211.h>
104598 #include <net/mac80211.h>
104599+#include <asm/local.h>
104600 #include "key.h"
104601 #include "sta_info.h"
104602 #include "debug.h"
104603@@ -1055,7 +1056,7 @@ struct ieee80211_local {
104604 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
104605 spinlock_t queue_stop_reason_lock;
104606
104607- int open_count;
104608+ local_t open_count;
104609 int monitors, cooked_mntrs;
104610 /* number of interfaces with corresponding FIF_ flags */
104611 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
104612diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
104613index 3538e5e..0aa7879 100644
104614--- a/net/mac80211/iface.c
104615+++ b/net/mac80211/iface.c
104616@@ -531,7 +531,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104617 break;
104618 }
104619
104620- if (local->open_count == 0) {
104621+ if (local_read(&local->open_count) == 0) {
104622 res = drv_start(local);
104623 if (res)
104624 goto err_del_bss;
104625@@ -578,7 +578,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104626 res = drv_add_interface(local, sdata);
104627 if (res)
104628 goto err_stop;
104629- } else if (local->monitors == 0 && local->open_count == 0) {
104630+ } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
104631 res = ieee80211_add_virtual_monitor(local);
104632 if (res)
104633 goto err_stop;
104634@@ -687,7 +687,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104635 atomic_inc(&local->iff_promiscs);
104636
104637 if (coming_up)
104638- local->open_count++;
104639+ local_inc(&local->open_count);
104640
104641 if (hw_reconf_flags)
104642 ieee80211_hw_config(local, hw_reconf_flags);
104643@@ -725,7 +725,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
104644 err_del_interface:
104645 drv_remove_interface(local, sdata);
104646 err_stop:
104647- if (!local->open_count)
104648+ if (!local_read(&local->open_count))
104649 drv_stop(local);
104650 err_del_bss:
104651 sdata->bss = NULL;
104652@@ -891,7 +891,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104653 }
104654
104655 if (going_down)
104656- local->open_count--;
104657+ local_dec(&local->open_count);
104658
104659 switch (sdata->vif.type) {
104660 case NL80211_IFTYPE_AP_VLAN:
104661@@ -952,7 +952,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104662 }
104663 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
104664
104665- if (local->open_count == 0)
104666+ if (local_read(&local->open_count) == 0)
104667 ieee80211_clear_tx_pending(local);
104668
104669 /*
104670@@ -995,7 +995,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104671 if (cancel_scan)
104672 flush_delayed_work(&local->scan_work);
104673
104674- if (local->open_count == 0) {
104675+ if (local_read(&local->open_count) == 0) {
104676 ieee80211_stop_device(local);
104677
104678 /* no reconfiguring after stop! */
104679@@ -1006,7 +1006,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
104680 ieee80211_configure_filter(local);
104681 ieee80211_hw_config(local, hw_reconf_flags);
104682
104683- if (local->monitors == local->open_count)
104684+ if (local->monitors == local_read(&local->open_count))
104685 ieee80211_add_virtual_monitor(local);
104686 }
104687
104688diff --git a/net/mac80211/main.c b/net/mac80211/main.c
104689index e0ab432..36b7b94 100644
104690--- a/net/mac80211/main.c
104691+++ b/net/mac80211/main.c
104692@@ -174,7 +174,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
104693 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
104694 IEEE80211_CONF_CHANGE_POWER);
104695
104696- if (changed && local->open_count) {
104697+ if (changed && local_read(&local->open_count)) {
104698 ret = drv_config(local, changed);
104699 /*
104700 * Goal:
104701diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
104702index 4c5192e..04cc0d8 100644
104703--- a/net/mac80211/pm.c
104704+++ b/net/mac80211/pm.c
104705@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104706 struct ieee80211_sub_if_data *sdata;
104707 struct sta_info *sta;
104708
104709- if (!local->open_count)
104710+ if (!local_read(&local->open_count))
104711 goto suspend;
104712
104713 ieee80211_scan_cancel(local);
104714@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104715 cancel_work_sync(&local->dynamic_ps_enable_work);
104716 del_timer_sync(&local->dynamic_ps_timer);
104717
104718- local->wowlan = wowlan && local->open_count;
104719+ local->wowlan = wowlan && local_read(&local->open_count);
104720 if (local->wowlan) {
104721 int err = drv_suspend(local, wowlan);
104722 if (err < 0) {
104723@@ -125,7 +125,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
104724 WARN_ON(!list_empty(&local->chanctx_list));
104725
104726 /* stop hardware - this must stop RX */
104727- if (local->open_count)
104728+ if (local_read(&local->open_count))
104729 ieee80211_stop_device(local);
104730
104731 suspend:
104732diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
104733index 6081329..ab23834 100644
104734--- a/net/mac80211/rate.c
104735+++ b/net/mac80211/rate.c
104736@@ -720,7 +720,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
104737
104738 ASSERT_RTNL();
104739
104740- if (local->open_count)
104741+ if (local_read(&local->open_count))
104742 return -EBUSY;
104743
104744 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
104745diff --git a/net/mac80211/util.c b/net/mac80211/util.c
104746index 725af7a..a21a20a 100644
104747--- a/net/mac80211/util.c
104748+++ b/net/mac80211/util.c
104749@@ -1643,7 +1643,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104750 }
104751 #endif
104752 /* everything else happens only if HW was up & running */
104753- if (!local->open_count)
104754+ if (!local_read(&local->open_count))
104755 goto wake_up;
104756
104757 /*
104758@@ -1869,7 +1869,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
104759 local->in_reconfig = false;
104760 barrier();
104761
104762- if (local->monitors == local->open_count && local->monitors > 0)
104763+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
104764 ieee80211_add_virtual_monitor(local);
104765
104766 /*
104767diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
104768index 6d77cce..36e2fc3 100644
104769--- a/net/netfilter/Kconfig
104770+++ b/net/netfilter/Kconfig
104771@@ -1096,6 +1096,16 @@ config NETFILTER_XT_MATCH_ESP
104772
104773 To compile it as a module, choose M here. If unsure, say N.
104774
104775+config NETFILTER_XT_MATCH_GRADM
104776+ tristate '"gradm" match support'
104777+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
104778+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
104779+ ---help---
104780+ The gradm match allows to match on grsecurity RBAC being enabled.
104781+ It is useful when iptables rules are applied early on bootup to
104782+ prevent connections to the machine (except from a trusted host)
104783+ while the RBAC system is disabled.
104784+
104785 config NETFILTER_XT_MATCH_HASHLIMIT
104786 tristate '"hashlimit" match support'
104787 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
104788diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
104789index fad5fdb..ba3672a 100644
104790--- a/net/netfilter/Makefile
104791+++ b/net/netfilter/Makefile
104792@@ -136,6 +136,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
104793 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
104794 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
104795 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
104796+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
104797 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
104798 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
104799 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
104800diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
104801index 6582dce..a911da7 100644
104802--- a/net/netfilter/ipset/ip_set_core.c
104803+++ b/net/netfilter/ipset/ip_set_core.c
104804@@ -1921,7 +1921,7 @@ done:
104805 return ret;
104806 }
104807
104808-static struct nf_sockopt_ops so_set __read_mostly = {
104809+static struct nf_sockopt_ops so_set = {
104810 .pf = PF_INET,
104811 .get_optmin = SO_IP_SET,
104812 .get_optmax = SO_IP_SET + 1,
104813diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
104814index 610e19c..08d0c3f 100644
104815--- a/net/netfilter/ipvs/ip_vs_conn.c
104816+++ b/net/netfilter/ipvs/ip_vs_conn.c
104817@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
104818 /* Increase the refcnt counter of the dest */
104819 ip_vs_dest_hold(dest);
104820
104821- conn_flags = atomic_read(&dest->conn_flags);
104822+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
104823 if (cp->protocol != IPPROTO_UDP)
104824 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
104825 flags = cp->flags;
104826@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
104827
104828 cp->control = NULL;
104829 atomic_set(&cp->n_control, 0);
104830- atomic_set(&cp->in_pkts, 0);
104831+ atomic_set_unchecked(&cp->in_pkts, 0);
104832
104833 cp->packet_xmit = NULL;
104834 cp->app = NULL;
104835@@ -1187,7 +1187,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
104836
104837 /* Don't drop the entry if its number of incoming packets is not
104838 located in [0, 8] */
104839- i = atomic_read(&cp->in_pkts);
104840+ i = atomic_read_unchecked(&cp->in_pkts);
104841 if (i > 8 || i < 0) return 0;
104842
104843 if (!todrop_rate[i]) return 0;
104844diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
104845index 5c34e8d..0d8eb7f 100644
104846--- a/net/netfilter/ipvs/ip_vs_core.c
104847+++ b/net/netfilter/ipvs/ip_vs_core.c
104848@@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
104849 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
104850 /* do not touch skb anymore */
104851
104852- atomic_inc(&cp->in_pkts);
104853+ atomic_inc_unchecked(&cp->in_pkts);
104854 ip_vs_conn_put(cp);
104855 return ret;
104856 }
104857@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
104858 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
104859 pkts = sysctl_sync_threshold(ipvs);
104860 else
104861- pkts = atomic_add_return(1, &cp->in_pkts);
104862+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104863
104864 if (ipvs->sync_state & IP_VS_STATE_MASTER)
104865 ip_vs_sync_conn(net, cp, pkts);
104866diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
104867index fd3f444..ab28fa24 100644
104868--- a/net/netfilter/ipvs/ip_vs_ctl.c
104869+++ b/net/netfilter/ipvs/ip_vs_ctl.c
104870@@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
104871 */
104872 ip_vs_rs_hash(ipvs, dest);
104873 }
104874- atomic_set(&dest->conn_flags, conn_flags);
104875+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
104876
104877 /* bind the service */
104878 old_svc = rcu_dereference_protected(dest->svc, 1);
104879@@ -1654,7 +1654,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
104880 * align with netns init in ip_vs_control_net_init()
104881 */
104882
104883-static struct ctl_table vs_vars[] = {
104884+static ctl_table_no_const vs_vars[] __read_only = {
104885 {
104886 .procname = "amemthresh",
104887 .maxlen = sizeof(int),
104888@@ -1989,7 +1989,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104889 " %-7s %-6d %-10d %-10d\n",
104890 &dest->addr.in6,
104891 ntohs(dest->port),
104892- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104893+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104894 atomic_read(&dest->weight),
104895 atomic_read(&dest->activeconns),
104896 atomic_read(&dest->inactconns));
104897@@ -2000,7 +2000,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
104898 "%-7s %-6d %-10d %-10d\n",
104899 ntohl(dest->addr.ip),
104900 ntohs(dest->port),
104901- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
104902+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
104903 atomic_read(&dest->weight),
104904 atomic_read(&dest->activeconns),
104905 atomic_read(&dest->inactconns));
104906@@ -2471,7 +2471,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
104907
104908 entry.addr = dest->addr.ip;
104909 entry.port = dest->port;
104910- entry.conn_flags = atomic_read(&dest->conn_flags);
104911+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
104912 entry.weight = atomic_read(&dest->weight);
104913 entry.u_threshold = dest->u_threshold;
104914 entry.l_threshold = dest->l_threshold;
104915@@ -3010,7 +3010,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
104916 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
104917 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
104918 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
104919- (atomic_read(&dest->conn_flags) &
104920+ (atomic_read_unchecked(&dest->conn_flags) &
104921 IP_VS_CONN_F_FWD_MASK)) ||
104922 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
104923 atomic_read(&dest->weight)) ||
104924@@ -3600,7 +3600,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
104925 {
104926 int idx;
104927 struct netns_ipvs *ipvs = net_ipvs(net);
104928- struct ctl_table *tbl;
104929+ ctl_table_no_const *tbl;
104930
104931 atomic_set(&ipvs->dropentry, 0);
104932 spin_lock_init(&ipvs->dropentry_lock);
104933diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
104934index 547ff33..c8c8117 100644
104935--- a/net/netfilter/ipvs/ip_vs_lblc.c
104936+++ b/net/netfilter/ipvs/ip_vs_lblc.c
104937@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
104938 * IPVS LBLC sysctl table
104939 */
104940 #ifdef CONFIG_SYSCTL
104941-static struct ctl_table vs_vars_table[] = {
104942+static ctl_table_no_const vs_vars_table[] __read_only = {
104943 {
104944 .procname = "lblc_expiration",
104945 .data = NULL,
104946diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
104947index 3f21a2f..a112e85 100644
104948--- a/net/netfilter/ipvs/ip_vs_lblcr.c
104949+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
104950@@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
104951 * IPVS LBLCR sysctl table
104952 */
104953
104954-static struct ctl_table vs_vars_table[] = {
104955+static ctl_table_no_const vs_vars_table[] __read_only = {
104956 {
104957 .procname = "lblcr_expiration",
104958 .data = NULL,
104959diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
104960index eadffb2..c2feeae 100644
104961--- a/net/netfilter/ipvs/ip_vs_sync.c
104962+++ b/net/netfilter/ipvs/ip_vs_sync.c
104963@@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
104964 cp = cp->control;
104965 if (cp) {
104966 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104967- pkts = atomic_add_return(1, &cp->in_pkts);
104968+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104969 else
104970 pkts = sysctl_sync_threshold(ipvs);
104971 ip_vs_sync_conn(net, cp->control, pkts);
104972@@ -771,7 +771,7 @@ control:
104973 if (!cp)
104974 return;
104975 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
104976- pkts = atomic_add_return(1, &cp->in_pkts);
104977+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
104978 else
104979 pkts = sysctl_sync_threshold(ipvs);
104980 goto sloop;
104981@@ -894,7 +894,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
104982
104983 if (opt)
104984 memcpy(&cp->in_seq, opt, sizeof(*opt));
104985- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104986+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
104987 cp->state = state;
104988 cp->old_state = cp->state;
104989 /*
104990diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
104991index 56896a4..dfe3806 100644
104992--- a/net/netfilter/ipvs/ip_vs_xmit.c
104993+++ b/net/netfilter/ipvs/ip_vs_xmit.c
104994@@ -1114,7 +1114,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
104995 else
104996 rc = NF_ACCEPT;
104997 /* do not touch skb anymore */
104998- atomic_inc(&cp->in_pkts);
104999+ atomic_inc_unchecked(&cp->in_pkts);
105000 goto out;
105001 }
105002
105003@@ -1206,7 +1206,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
105004 else
105005 rc = NF_ACCEPT;
105006 /* do not touch skb anymore */
105007- atomic_inc(&cp->in_pkts);
105008+ atomic_inc_unchecked(&cp->in_pkts);
105009 goto out;
105010 }
105011
105012diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
105013index a4b5e2a..13b1de3 100644
105014--- a/net/netfilter/nf_conntrack_acct.c
105015+++ b/net/netfilter/nf_conntrack_acct.c
105016@@ -62,7 +62,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
105017 #ifdef CONFIG_SYSCTL
105018 static int nf_conntrack_acct_init_sysctl(struct net *net)
105019 {
105020- struct ctl_table *table;
105021+ ctl_table_no_const *table;
105022
105023 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
105024 GFP_KERNEL);
105025diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
105026index de88c4a..ec84234 100644
105027--- a/net/netfilter/nf_conntrack_core.c
105028+++ b/net/netfilter/nf_conntrack_core.c
105029@@ -1739,6 +1739,10 @@ void nf_conntrack_init_end(void)
105030 #define DYING_NULLS_VAL ((1<<30)+1)
105031 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
105032
105033+#ifdef CONFIG_GRKERNSEC_HIDESYM
105034+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
105035+#endif
105036+
105037 int nf_conntrack_init_net(struct net *net)
105038 {
105039 int ret = -ENOMEM;
105040@@ -1764,7 +1768,11 @@ int nf_conntrack_init_net(struct net *net)
105041 if (!net->ct.stat)
105042 goto err_pcpu_lists;
105043
105044+#ifdef CONFIG_GRKERNSEC_HIDESYM
105045+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08x", atomic_inc_return_unchecked(&conntrack_cache_id));
105046+#else
105047 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
105048+#endif
105049 if (!net->ct.slabname)
105050 goto err_slabname;
105051
105052diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
105053index 4e78c57..ec8fb74 100644
105054--- a/net/netfilter/nf_conntrack_ecache.c
105055+++ b/net/netfilter/nf_conntrack_ecache.c
105056@@ -264,7 +264,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
105057 #ifdef CONFIG_SYSCTL
105058 static int nf_conntrack_event_init_sysctl(struct net *net)
105059 {
105060- struct ctl_table *table;
105061+ ctl_table_no_const *table;
105062
105063 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
105064 GFP_KERNEL);
105065diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
105066index 5b3eae7..dd4b8fe 100644
105067--- a/net/netfilter/nf_conntrack_helper.c
105068+++ b/net/netfilter/nf_conntrack_helper.c
105069@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
105070
105071 static int nf_conntrack_helper_init_sysctl(struct net *net)
105072 {
105073- struct ctl_table *table;
105074+ ctl_table_no_const *table;
105075
105076 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
105077 GFP_KERNEL);
105078diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
105079index b65d586..beec902 100644
105080--- a/net/netfilter/nf_conntrack_proto.c
105081+++ b/net/netfilter/nf_conntrack_proto.c
105082@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
105083
105084 static void
105085 nf_ct_unregister_sysctl(struct ctl_table_header **header,
105086- struct ctl_table **table,
105087+ ctl_table_no_const **table,
105088 unsigned int users)
105089 {
105090 if (users > 0)
105091diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
105092index f641751..d3c5b51 100644
105093--- a/net/netfilter/nf_conntrack_standalone.c
105094+++ b/net/netfilter/nf_conntrack_standalone.c
105095@@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
105096
105097 static int nf_conntrack_standalone_init_sysctl(struct net *net)
105098 {
105099- struct ctl_table *table;
105100+ ctl_table_no_const *table;
105101
105102 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
105103 GFP_KERNEL);
105104diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
105105index 7a394df..bd91a8a 100644
105106--- a/net/netfilter/nf_conntrack_timestamp.c
105107+++ b/net/netfilter/nf_conntrack_timestamp.c
105108@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
105109 #ifdef CONFIG_SYSCTL
105110 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
105111 {
105112- struct ctl_table *table;
105113+ ctl_table_no_const *table;
105114
105115 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
105116 GFP_KERNEL);
105117diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
105118index daad602..384be13 100644
105119--- a/net/netfilter/nf_log.c
105120+++ b/net/netfilter/nf_log.c
105121@@ -353,7 +353,7 @@ static const struct file_operations nflog_file_ops = {
105122
105123 #ifdef CONFIG_SYSCTL
105124 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
105125-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
105126+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
105127
105128 static int nf_log_proc_dostring(struct ctl_table *table, int write,
105129 void __user *buffer, size_t *lenp, loff_t *ppos)
105130@@ -384,14 +384,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
105131 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
105132 mutex_unlock(&nf_log_mutex);
105133 } else {
105134+ ctl_table_no_const nf_log_table = *table;
105135+
105136 mutex_lock(&nf_log_mutex);
105137 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
105138 lockdep_is_held(&nf_log_mutex));
105139 if (!logger)
105140- table->data = "NONE";
105141+ nf_log_table.data = "NONE";
105142 else
105143- table->data = logger->name;
105144- r = proc_dostring(table, write, buffer, lenp, ppos);
105145+ nf_log_table.data = logger->name;
105146+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
105147 mutex_unlock(&nf_log_mutex);
105148 }
105149
105150diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
105151index c68c1e5..8b5d670 100644
105152--- a/net/netfilter/nf_sockopt.c
105153+++ b/net/netfilter/nf_sockopt.c
105154@@ -43,7 +43,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
105155 }
105156 }
105157
105158- list_add(&reg->list, &nf_sockopts);
105159+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
105160 out:
105161 mutex_unlock(&nf_sockopt_mutex);
105162 return ret;
105163@@ -53,7 +53,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
105164 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
105165 {
105166 mutex_lock(&nf_sockopt_mutex);
105167- list_del(&reg->list);
105168+ pax_list_del((struct list_head *)&reg->list);
105169 mutex_unlock(&nf_sockopt_mutex);
105170 }
105171 EXPORT_SYMBOL(nf_unregister_sockopt);
105172diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
105173index 3250735..1fac969 100644
105174--- a/net/netfilter/nfnetlink_log.c
105175+++ b/net/netfilter/nfnetlink_log.c
105176@@ -80,7 +80,7 @@ static int nfnl_log_net_id __read_mostly;
105177 struct nfnl_log_net {
105178 spinlock_t instances_lock;
105179 struct hlist_head instance_table[INSTANCE_BUCKETS];
105180- atomic_t global_seq;
105181+ atomic_unchecked_t global_seq;
105182 };
105183
105184 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
105185@@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
105186 /* global sequence number */
105187 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
105188 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
105189- htonl(atomic_inc_return(&log->global_seq))))
105190+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
105191 goto nla_put_failure;
105192
105193 if (data_len) {
105194diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
105195index 108120f..5b169db 100644
105196--- a/net/netfilter/nfnetlink_queue_core.c
105197+++ b/net/netfilter/nfnetlink_queue_core.c
105198@@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
105199 * returned by nf_queue. For instance, callers rely on -ECANCELED to
105200 * mean 'ignore this hook'.
105201 */
105202- if (IS_ERR(segs))
105203+ if (IS_ERR_OR_NULL(segs))
105204 goto out_err;
105205 queued = 0;
105206 err = 0;
105207diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
105208index 5b5ab9e..fc1015c 100644
105209--- a/net/netfilter/nft_compat.c
105210+++ b/net/netfilter/nft_compat.c
105211@@ -225,7 +225,7 @@ target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
105212 /* We want to reuse existing compat_to_user */
105213 old_fs = get_fs();
105214 set_fs(KERNEL_DS);
105215- t->compat_to_user(out, in);
105216+ t->compat_to_user((void __force_user *)out, in);
105217 set_fs(old_fs);
105218 ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
105219 kfree(out);
105220@@ -421,7 +421,7 @@ match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
105221 /* We want to reuse existing compat_to_user */
105222 old_fs = get_fs();
105223 set_fs(KERNEL_DS);
105224- m->compat_to_user(out, in);
105225+ m->compat_to_user((void __force_user *)out, in);
105226 set_fs(old_fs);
105227 ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
105228 kfree(out);
105229diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
105230new file mode 100644
105231index 0000000..c566332
105232--- /dev/null
105233+++ b/net/netfilter/xt_gradm.c
105234@@ -0,0 +1,51 @@
105235+/*
105236+ * gradm match for netfilter
105237