test/grsecurity-3.1-4.2.6-201511092040.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index 9de9813..1462492 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -3,9 +3,11 @@
6 *.bc
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -15,6 +17,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -51,14 +54,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -72,9 +78,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -83,6 +91,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -95,32 +104,40 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 crc32table.h*
90 cscope.*
91 defkeymap.c
92+devicetable-offsets.h
93 devlist.h*
94 dnotify_test
95 docproc
96 dslm
97+dtc-lexer.lex.c
98 elf2ecoff
99 elfconfig.h*
100 evergreen_reg_safe.h
101+exception_policy.conf
102 fixdep
103 flask.h
104 fore200e_mkfirm
105@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
106 gconf
107 gconf.glade.h
108 gen-devlist
109+gen-kdb_cmds.c
110 gen_crc32table
111 gen_init_cpio
112 generated
113 genheaders
114 genksyms
115 *_gray256.c
116+hash
117+hid-example
118 hpet_example
119 hugepage-mmap
120 hugepage-shm
121@@ -148,14 +168,14 @@ int32.c
122 int4.c
123 int8.c
124 kallsyms
125-kconfig
126+kern_constants.h
127 keywords.c
128 ksym.c*
129 ksym.h*
130 kxgettext
131 lex.c
132 lex.*.c
133-linux
134+lib1funcs.S
135 logo_*.c
136 logo_*_clut224.c
137 logo_*_mono.c
138@@ -165,14 +185,15 @@ mach-types.h
139 machtypes.h
140 map
141 map_hugetlb
142-media
143 mconf
144+mdp
145 miboot*
146 mk_elfconfig
147 mkboot
148 mkbugboot
149 mkcpustr
150 mkdep
151+mkpiggy
152 mkprep
153 mkregtable
154 mktables
155@@ -188,6 +209,8 @@ oui.c*
156 page-types
157 parse.c
158 parse.h
159+parse-events*
160+pasyms.h
161 patches*
162 pca200e.bin
163 pca200e_ecd.bin2
164@@ -197,6 +220,7 @@ perf-archive
165 piggyback
166 piggy.gzip
167 piggy.S
168+pmu-*
169 pnmtologo
170 ppc_defs.h*
171 pss_boot.h
172@@ -206,7 +230,12 @@ r200_reg_safe.h
173 r300_reg_safe.h
174 r420_reg_safe.h
175 r600_reg_safe.h
176+randomize_layout_hash.h
177+randomize_layout_seed.h
178+realmode.lds
179+realmode.relocs
180 recordmcount
181+regdb.c
182 relocs
183 rlim_names.h
184 rn50_reg_safe.h
185@@ -216,8 +245,12 @@ series
186 setup
187 setup.bin
188 setup.elf
189+signing_key*
190+size_overflow_hash.h
191 sImage
192+slabinfo
193 sm_tbl*
194+sortextable
195 split-include
196 syscalltab.h
197 tables.c
198@@ -227,6 +260,7 @@ tftpboot.img
199 timeconst.h
200 times.h*
201 trix_boot.h
202+user_constants.h
203 utsrelease.h*
204 vdso-syms.lds
205 vdso.lds
206@@ -238,13 +272,17 @@ vdso32.lds
207 vdso32.so.dbg
208 vdso64.lds
209 vdso64.so.dbg
210+vdsox32.lds
211+vdsox32-syms.lds
212 version.h*
213 vmImage
214 vmlinux
215 vmlinux-*
216 vmlinux.aout
217 vmlinux.bin.all
218+vmlinux.bin.bz2
219 vmlinux.lds
220+vmlinux.relocs
221 vmlinuz
222 voffset.h
223 vsyscall.lds
224@@ -252,9 +290,12 @@ vsyscall_32.lds
225 wanxlfw.inc
226 uImage
227 unifdef
228+utsrelease.h
229 wakeup.bin
230 wakeup.elf
231 wakeup.lds
232+x509*
233 zImage*
234 zconf.hash.c
235+zconf.lex.c
236 zoffset.h
237diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
238index 13f888a..250729b 100644
239--- a/Documentation/kbuild/makefiles.txt
240+++ b/Documentation/kbuild/makefiles.txt
241@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
242 === 4 Host Program support
243 --- 4.1 Simple Host Program
244 --- 4.2 Composite Host Programs
245- --- 4.3 Using C++ for host programs
246- --- 4.4 Controlling compiler options for host programs
247- --- 4.5 When host programs are actually built
248- --- 4.6 Using hostprogs-$(CONFIG_FOO)
249+ --- 4.3 Defining shared libraries
250+ --- 4.4 Using C++ for host programs
251+ --- 4.5 Controlling compiler options for host programs
252+ --- 4.6 When host programs are actually built
253+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
254
255 === 5 Kbuild clean infrastructure
256
257@@ -643,7 +644,29 @@ Both possibilities are described in the following.
258 Finally, the two .o files are linked to the executable, lxdialog.
259 Note: The syntax <executable>-y is not permitted for host-programs.
260
261---- 4.3 Using C++ for host programs
262+--- 4.3 Defining shared libraries
263+
264+ Objects with extension .so are considered shared libraries, and
265+ will be compiled as position independent objects.
266+ Kbuild provides support for shared libraries, but their use
267+ should be restricted.
268+ In the following example the libkconfig.so shared library is used
269+ to link the executable conf.
270+
271+ Example:
272+ #scripts/kconfig/Makefile
273+ hostprogs-y := conf
274+ conf-objs := conf.o libkconfig.so
275+ libkconfig-objs := expr.o type.o
276+
277+ Shared libraries always require a corresponding -objs line, and
278+ in the example above the shared library libkconfig is composed of
279+ the two objects expr.o and type.o.
280+ expr.o and type.o will be built as position independent code and
281+ linked as a shared library libkconfig.so. C++ is not supported for
282+ shared libraries.
283+
284+--- 4.4 Using C++ for host programs
285
286 kbuild offers support for host programs written in C++. This was
287 introduced solely to support kconfig, and is not recommended
288@@ -666,7 +689,7 @@ Both possibilities are described in the following.
289 qconf-cxxobjs := qconf.o
290 qconf-objs := check.o
291
292---- 4.4 Controlling compiler options for host programs
293+--- 4.5 Controlling compiler options for host programs
294
295 When compiling host programs, it is possible to set specific flags.
296 The programs will always be compiled utilising $(HOSTCC) passed
297@@ -694,7 +717,7 @@ Both possibilities are described in the following.
298 When linking qconf, it will be passed the extra option
299 "-L$(QTDIR)/lib".
300
301---- 4.5 When host programs are actually built
302+--- 4.6 When host programs are actually built
303
304 Kbuild will only build host-programs when they are referenced
305 as a prerequisite.
306@@ -725,7 +748,7 @@ Both possibilities are described in the following.
307 This will tell kbuild to build lxdialog even if not referenced in
308 any rule.
309
310---- 4.6 Using hostprogs-$(CONFIG_FOO)
311+--- 4.7 Using hostprogs-$(CONFIG_FOO)
312
313 A typical pattern in a Kbuild file looks like this:
314
315diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
316index 1d6f045..2714987 100644
317--- a/Documentation/kernel-parameters.txt
318+++ b/Documentation/kernel-parameters.txt
319@@ -1244,6 +1244,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
320 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
321 Default: 1024
322
323+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
324+ ignore grsecurity's /proc restrictions
325+
326+ grsec_sysfs_restrict= Format: 0 | 1
327+ Default: 1
328+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
329+
330 hashdist= [KNL,NUMA] Large hashes allocated during boot
331 are distributed across NUMA nodes. Defaults on
332 for 64-bit NUMA, off otherwise.
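
Illustration (not part of the patch): both new grsecurity parameters above are ordinary boot parameters, so a kernel command line combining them could look like the following, where the GID value is hypothetical and must match the group selected for GRKERNSEC_PROC_USERGROUP:

    grsec_proc_gid=1001 grsec_sysfs_restrict=0
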
333@@ -2364,6 +2371,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
334 noexec=on: enable non-executable mappings (default)
335 noexec=off: disable non-executable mappings
336
337+ nopcid [X86-64]
338+ Disable PCID (Process-Context IDentifier) even if it
339+ is supported by the processor.
340+
341 nosmap [X86]
342 Disable SMAP (Supervisor Mode Access Prevention)
343 even if it is supported by processor.
344@@ -2662,6 +2673,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
345 the specified number of seconds. This is to be used if
346 your oopses keep scrolling off the screen.
347
348+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
349+ virtualization environments that don't cope well with the
350+ expand down segment used by UDEREF on X86-32 or the frequent
351+ page table updates on X86-64.
352+
353+ pax_sanitize_slab=
354+ Format: { 0 | 1 | off | fast | full }
355+ Options '0' and '1' are only provided for backward
356+ compatibility, 'off' or 'fast' should be used instead.
357+ 0|off : disable slab object sanitization
358+ 1|fast: enable slab object sanitization excluding
359+ whitelisted slabs (default)
360+ full : sanitize all slabs, even the whitelisted ones
361+
362+ pax_softmode= 0/1 to disable/enable PaX softmode at boot time.
363+
364+ pax_extra_latent_entropy
365+ Enable a very simple form of latent entropy extraction
366+ from the first 4GB of memory as the bootmem allocator
367+ passes the memory pages to the buddy allocator.
368+
369+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
370+ when the processor supports PCID.
371+
372 pcbit= [HW,ISDN]
373
374 pcd. [PARIDE]
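
Illustration (not part of the patch): the PaX options documented above are likewise plain boot parameters. As an example only, a command line that turns off UDEREF (for a hypervisor that copes badly with it), requests full slab sanitization and enables the extra latent-entropy pass might contain:

    pax_nouderef pax_sanitize_slab=full pax_extra_latent_entropy
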
375diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
376index 6fccb69..60c7c7a 100644
377--- a/Documentation/sysctl/kernel.txt
378+++ b/Documentation/sysctl/kernel.txt
379@@ -41,6 +41,7 @@ show up in /proc/sys/kernel:
380 - kptr_restrict
381 - kstack_depth_to_print [ X86 only ]
382 - l2cr [ PPC only ]
383+- modify_ldt [ X86 only ]
384 - modprobe ==> Documentation/debugging-modules.txt
385 - modules_disabled
386 - msg_next_id [ sysv ipc ]
387@@ -391,6 +392,20 @@ This flag controls the L2 cache of G3 processor boards. If
388
389 ==============================================================
390
391+modify_ldt: (X86 only)
392+
393+Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
394+(Local Descriptor Table) may be needed to run 16-bit or segmented code
395+such as Dosemu or Wine. This is done via a system call which is not needed
396+to run portable applications, and which can sometimes be abused to exploit
397+some weaknesses of the architecture, opening new vulnerabilities.
398+
399+This sysctl allows one to increase the system's security by disabling the
400+system call, or to restore compatibility with specific applications when it
401+was already disabled.
402+
403+==============================================================
404+
405 modules_disabled:
406
407 A toggle value indicating if modules are allowed to be loaded
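
Illustration (not part of the patch): a minimal x86 userspace probe of the modify_ldt toggle documented above. It only relies on the standard syscall(2) interface; the exact errno returned when the sysctl is 0 depends on the kernel, so any failure is simply treated as "disabled".

    /* gcc -o ldt-probe ldt-probe.c   (x86/x86-64 only) */
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/ldt.h>

    int main(void)
    {
        char ldt[LDT_ENTRY_SIZE * 16];
        long ret;

        /* func 0 = read the LDT; fails once kernel.modify_ldt is set to 0 */
        ret = syscall(SYS_modify_ldt, 0, ldt, sizeof(ldt));
        if (ret < 0)
            printf("modify_ldt blocked or unavailable: %s\n", strerror(errno));
        else
            printf("modify_ldt read %ld bytes of LDT\n", ret);
        return 0;
    }
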
408diff --git a/Makefile b/Makefile
409index 9ef3739..20b7716 100644
410--- a/Makefile
411+++ b/Makefile
412@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
413 HOSTCC = gcc
414 HOSTCXX = g++
415 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
416-HOSTCXXFLAGS = -O2
417+HOSTCFLAGS += -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
418+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
419+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
420
421 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
422 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
423@@ -434,8 +436,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
424 # Rules shared between *config targets and build targets
425
426 # Basic helpers built in scripts/
427-PHONY += scripts_basic
428-scripts_basic:
429+PHONY += scripts_basic gcc-plugins
430+scripts_basic: gcc-plugins
431 $(Q)$(MAKE) $(build)=scripts/basic
432 $(Q)rm -f .tmp_quiet_recordmcount
433
434@@ -615,6 +617,74 @@ endif
435 # Tell gcc to never replace conditional load with a non-conditional one
436 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
437
438+ifndef DISABLE_PAX_PLUGINS
439+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
440+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
441+else
442+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
443+endif
444+ifneq ($(PLUGINCC),)
445+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
446+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
447+endif
448+ifdef CONFIG_PAX_MEMORY_STACKLEAK
449+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
450+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
451+endif
452+ifdef CONFIG_KALLOCSTAT_PLUGIN
453+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
454+endif
455+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
456+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
457+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
458+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
459+endif
460+ifdef CONFIG_GRKERNSEC_RANDSTRUCT
461+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
462+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
463+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
464+endif
465+endif
466+ifdef CONFIG_CHECKER_PLUGIN
467+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
468+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
469+endif
470+endif
471+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
472+ifdef CONFIG_PAX_SIZE_OVERFLOW
473+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
474+endif
475+ifdef CONFIG_PAX_LATENT_ENTROPY
476+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
477+endif
478+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
479+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
480+endif
481+INITIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/initify_plugin.so -DINITIFY_PLUGIN
482+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
483+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
484+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
485+GCC_PLUGINS_CFLAGS += $(INITIFY_PLUGIN_CFLAGS)
486+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
487+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
488+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
489+ifeq ($(KBUILD_EXTMOD),)
490+gcc-plugins:
491+ $(Q)$(MAKE) $(build)=tools/gcc
492+else
493+gcc-plugins: ;
494+endif
495+else
496+gcc-plugins:
497+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
498+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
499+else
500+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
501+endif
502+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
503+endif
504+endif
505+
506 ifdef CONFIG_READABLE_ASM
507 # Disable optimizations that make assembler listings hard to read.
508 # reorder blocks reorders the control in the function
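
Illustration (not part of the patch): the block above first probes the host compiler with scripts/gcc-plugin.sh; only when PLUGINCC is non-empty are the per-feature -fplugin=... options collected into GCC_PLUGINS_CFLAGS/GCC_PLUGINS_AFLAGS, which the build and module rules later append to KBUILD_CFLAGS/KBUILD_AFLAGS. On a host without the gcc plugin headers the build can still proceed, with reduced hardening, by invoking:

    make DISABLE_PAX_PLUGINS=y
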
509@@ -714,7 +784,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
510 else
511 KBUILD_CFLAGS += -g
512 endif
513-KBUILD_AFLAGS += -Wa,-gdwarf-2
514+KBUILD_AFLAGS += -Wa,--gdwarf-2
515 endif
516 ifdef CONFIG_DEBUG_INFO_DWARF4
517 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
518@@ -886,7 +956,7 @@ export mod_sign_cmd
519
520
521 ifeq ($(KBUILD_EXTMOD),)
522-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
523+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
524
525 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
526 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
527@@ -936,6 +1006,8 @@ endif
528
529 # The actual objects are generated when descending,
530 # make sure no implicit rule kicks in
531+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
532+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
533 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
534
535 # Handle descending into subdirectories listed in $(vmlinux-dirs)
536@@ -945,7 +1017,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
537 # Error messages still appears in the original language
538
539 PHONY += $(vmlinux-dirs)
540-$(vmlinux-dirs): prepare scripts
541+$(vmlinux-dirs): gcc-plugins prepare scripts
542 $(Q)$(MAKE) $(build)=$@
543
544 define filechk_kernel.release
545@@ -988,10 +1060,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
546
547 archprepare: archheaders archscripts prepare1 scripts_basic
548
549+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
550+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
551 prepare0: archprepare FORCE
552 $(Q)$(MAKE) $(build)=.
553
554 # All the preparing..
555+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
556 prepare: prepare0
557
558 # Generate some files
559@@ -1099,6 +1174,8 @@ all: modules
560 # using awk while concatenating to the final file.
561
562 PHONY += modules
563+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
564+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
565 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
566 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
567 @$(kecho) ' Building modules, stage 2.';
568@@ -1114,7 +1191,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
569
570 # Target to prepare building external modules
571 PHONY += modules_prepare
572-modules_prepare: prepare scripts
573+modules_prepare: gcc-plugins prepare scripts
574
575 # Target to install modules
576 PHONY += modules_install
577@@ -1180,7 +1257,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
578 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
579 signing_key.priv signing_key.x509 x509.genkey \
580 extra_certificates signing_key.x509.keyid \
581- signing_key.x509.signer vmlinux-gdb.py
582+ signing_key.x509.signer vmlinux-gdb.py \
583+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
584+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
585+ tools/gcc/randomize_layout_seed.h
586
587 # clean - Delete most, but leave enough to build external modules
588 #
589@@ -1219,7 +1299,7 @@ distclean: mrproper
590 @find $(srctree) $(RCS_FIND_IGNORE) \
591 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
592 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
593- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
594+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
595 -type f -print | xargs rm -f
596
597
598@@ -1385,6 +1465,8 @@ PHONY += $(module-dirs) modules
599 $(module-dirs): crmodverdir $(objtree)/Module.symvers
600 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
601
602+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
603+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
604 modules: $(module-dirs)
605 @$(kecho) ' Building modules, stage 2.';
606 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
607@@ -1525,17 +1607,21 @@ else
608 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
609 endif
610
611-%.s: %.c prepare scripts FORCE
612+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
613+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
614+%.s: %.c gcc-plugins prepare scripts FORCE
615 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
616 %.i: %.c prepare scripts FORCE
617 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
618-%.o: %.c prepare scripts FORCE
619+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
620+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
621+%.o: %.c gcc-plugins prepare scripts FORCE
622 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
623 %.lst: %.c prepare scripts FORCE
624 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
625-%.s: %.S prepare scripts FORCE
626+%.s: %.S gcc-plugins prepare scripts FORCE
627 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
628-%.o: %.S prepare scripts FORCE
629+%.o: %.S gcc-plugins prepare scripts FORCE
630 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
631 %.symtypes: %.c prepare scripts FORCE
632 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
633@@ -1547,11 +1633,15 @@ endif
634 $(build)=$(build-dir)
635 # Make sure the latest headers are built for Documentation
636 Documentation/: headers_install
637-%/: prepare scripts FORCE
638+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
639+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
640+%/: gcc-plugins prepare scripts FORCE
641 $(cmd_crmodverdir)
642 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
643 $(build)=$(build-dir)
644-%.ko: prepare scripts FORCE
645+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
646+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
647+%.ko: gcc-plugins prepare scripts FORCE
648 $(cmd_crmodverdir)
649 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
650 $(build)=$(build-dir) $(@:.ko=.o)
651diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
652index 8f8eafb..3405f46 100644
653--- a/arch/alpha/include/asm/atomic.h
654+++ b/arch/alpha/include/asm/atomic.h
655@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
656 #define atomic_dec(v) atomic_sub(1,(v))
657 #define atomic64_dec(v) atomic64_sub(1,(v))
658
659+#define atomic64_read_unchecked(v) atomic64_read(v)
660+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
661+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
662+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
663+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
664+#define atomic64_inc_unchecked(v) atomic64_inc(v)
665+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
666+#define atomic64_dec_unchecked(v) atomic64_dec(v)
667+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
668+
669 #endif /* _ALPHA_ATOMIC_H */
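
Illustration (not part of the patch): the *_unchecked aliases exist because PaX's REFCOUNT feature instruments the regular atomics to trap on overflow, so counters that are allowed to wrap (statistics, sequence numbers and the like) must be exempted by using the unchecked API. Alpha carries no REFCOUNT instrumentation, which is why the hunk above maps the unchecked names straight onto the normal operations. A plain userspace sketch of the distinction:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        /* an "unchecked" counter: wrapping is expected and harmless,
         * e.g. a packet or statistics counter */
        unsigned int stats = UINT_MAX;

        stats += 1;     /* wraps to 0 by design */
        printf("stats counter wrapped to %u\n", stats);

        /* a reference count, by contrast, must never wrap; the checked
         * (instrumented) atomics would trap instead of completing it */
        printf("incrementing a refcount that already holds %d is a bug\n",
               INT_MAX);
        return 0;
    }
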
670diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
671index ad368a9..fbe0f25 100644
672--- a/arch/alpha/include/asm/cache.h
673+++ b/arch/alpha/include/asm/cache.h
674@@ -4,19 +4,19 @@
675 #ifndef __ARCH_ALPHA_CACHE_H
676 #define __ARCH_ALPHA_CACHE_H
677
678+#include <linux/const.h>
679
680 /* Bytes per L1 (data) cache line. */
681 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
682-# define L1_CACHE_BYTES 64
683 # define L1_CACHE_SHIFT 6
684 #else
685 /* Both EV4 and EV5 are write-through, read-allocate,
686 direct-mapped, physical.
687 */
688-# define L1_CACHE_BYTES 32
689 # define L1_CACHE_SHIFT 5
690 #endif
691
692+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
693 #define SMP_CACHE_BYTES L1_CACHE_BYTES
694
695 #endif
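
Illustration (not part of the patch): a userspace re-creation of the _AC()/__AC() helpers from include/uapi/linux/const.h, showing why the hunk above redefines L1_CACHE_BYTES as (_AC(1,UL) << L1_CACHE_SHIFT): in C the constant becomes an unsigned long (1UL << shift), while under __ASSEMBLY__ the UL suffix is dropped so the very same header stays usable from .S files.

    #include <stdio.h>

    #ifdef __ASSEMBLY__
    #define _AC(X, Y)  X
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT 6
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        /* expands to (1UL) << 6, i.e. 64 as an unsigned long */
        printf("L1_CACHE_BYTES = %lu\n", L1_CACHE_BYTES);
        return 0;
    }
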
696diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
697index 968d999..d36b2df 100644
698--- a/arch/alpha/include/asm/elf.h
699+++ b/arch/alpha/include/asm/elf.h
700@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
701
702 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
703
704+#ifdef CONFIG_PAX_ASLR
705+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
706+
707+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
708+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
709+#endif
710+
711 /* $0 is set by ld.so to a pointer to a function which might be
712 registered using atexit. This provides a mean for the dynamic
713 linker to call DT_FINI functions for shared libraries that have
714diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
715index aab14a0..b4fa3e7 100644
716--- a/arch/alpha/include/asm/pgalloc.h
717+++ b/arch/alpha/include/asm/pgalloc.h
718@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
719 pgd_set(pgd, pmd);
720 }
721
722+static inline void
723+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
724+{
725+ pgd_populate(mm, pgd, pmd);
726+}
727+
728 extern pgd_t *pgd_alloc(struct mm_struct *mm);
729
730 static inline void
731diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
732index a9a1195..e9b8417 100644
733--- a/arch/alpha/include/asm/pgtable.h
734+++ b/arch/alpha/include/asm/pgtable.h
735@@ -101,6 +101,17 @@ struct vm_area_struct;
736 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
737 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
738 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
739+
740+#ifdef CONFIG_PAX_PAGEEXEC
741+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
742+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
743+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
744+#else
745+# define PAGE_SHARED_NOEXEC PAGE_SHARED
746+# define PAGE_COPY_NOEXEC PAGE_COPY
747+# define PAGE_READONLY_NOEXEC PAGE_READONLY
748+#endif
749+
750 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
751
752 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
753diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
754index 2fd00b7..cfd5069 100644
755--- a/arch/alpha/kernel/module.c
756+++ b/arch/alpha/kernel/module.c
757@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
758
759 /* The small sections were sorted to the end of the segment.
760 The following should definitely cover them. */
761- gp = (u64)me->module_core + me->core_size - 0x8000;
762+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
763 got = sechdrs[me->arch.gotsecindex].sh_addr;
764
765 for (i = 0; i < n; i++) {
766diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
767index 36dc91a..6769cb0 100644
768--- a/arch/alpha/kernel/osf_sys.c
769+++ b/arch/alpha/kernel/osf_sys.c
770@@ -1295,10 +1295,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
771 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
772
773 static unsigned long
774-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
775- unsigned long limit)
776+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
777+ unsigned long limit, unsigned long flags)
778 {
779 struct vm_unmapped_area_info info;
780+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
781
782 info.flags = 0;
783 info.length = len;
784@@ -1306,6 +1307,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
785 info.high_limit = limit;
786 info.align_mask = 0;
787 info.align_offset = 0;
788+ info.threadstack_offset = offset;
789 return vm_unmapped_area(&info);
790 }
791
792@@ -1338,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
793 merely specific addresses, but regions of memory -- perhaps
794 this feature should be incorporated into all ports? */
795
796+#ifdef CONFIG_PAX_RANDMMAP
797+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
798+#endif
799+
800 if (addr) {
801- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
802+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
803 if (addr != (unsigned long) -ENOMEM)
804 return addr;
805 }
806
807 /* Next, try allocating at TASK_UNMAPPED_BASE. */
808- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
809- len, limit);
810+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
811+
812 if (addr != (unsigned long) -ENOMEM)
813 return addr;
814
815 /* Finally, try allocating in low memory. */
816- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
817+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
818
819 return addr;
820 }
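
Illustration (not part of the patch): with MF_PAX_RANDMMAP set, the rewritten arch_get_unmapped_area() above ignores the caller-supplied address hint (so a fixed hint cannot defeat randomization), starts its search from the already randomized mm->mmap_base, and mixes in a per-thread offset from gr_rand_threadstack_offset(). The effect can be observed from userspace by running a trivial program such as the one below a few times and comparing the unhinted mapping addresses across runs:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* address varies between runs when mmap randomization is active */
        printf("anonymous mapping placed at %p\n", p);
        if (p != MAP_FAILED)
            munmap(p, 4096);
        return 0;
    }
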
821diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
822index 4a905bd..0a4da53 100644
823--- a/arch/alpha/mm/fault.c
824+++ b/arch/alpha/mm/fault.c
825@@ -52,6 +52,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
826 __reload_thread(pcb);
827 }
828
829+#ifdef CONFIG_PAX_PAGEEXEC
830+/*
831+ * PaX: decide what to do with offenders (regs->pc = fault address)
832+ *
833+ * returns 1 when task should be killed
834+ * 2 when patched PLT trampoline was detected
835+ * 3 when unpatched PLT trampoline was detected
836+ */
837+static int pax_handle_fetch_fault(struct pt_regs *regs)
838+{
839+
840+#ifdef CONFIG_PAX_EMUPLT
841+ int err;
842+
843+ do { /* PaX: patched PLT emulation #1 */
844+ unsigned int ldah, ldq, jmp;
845+
846+ err = get_user(ldah, (unsigned int *)regs->pc);
847+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
848+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
849+
850+ if (err)
851+ break;
852+
853+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
854+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
855+ jmp == 0x6BFB0000U)
856+ {
857+ unsigned long r27, addr;
858+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
859+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
860+
861+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
862+ err = get_user(r27, (unsigned long *)addr);
863+ if (err)
864+ break;
865+
866+ regs->r27 = r27;
867+ regs->pc = r27;
868+ return 2;
869+ }
870+ } while (0);
871+
872+ do { /* PaX: patched PLT emulation #2 */
873+ unsigned int ldah, lda, br;
874+
875+ err = get_user(ldah, (unsigned int *)regs->pc);
876+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
877+ err |= get_user(br, (unsigned int *)(regs->pc+8));
878+
879+ if (err)
880+ break;
881+
882+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
883+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
884+ (br & 0xFFE00000U) == 0xC3E00000U)
885+ {
886+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
887+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
888+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
889+
890+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
891+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
892+ return 2;
893+ }
894+ } while (0);
895+
896+ do { /* PaX: unpatched PLT emulation */
897+ unsigned int br;
898+
899+ err = get_user(br, (unsigned int *)regs->pc);
900+
901+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
902+ unsigned int br2, ldq, nop, jmp;
903+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
904+
905+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
906+ err = get_user(br2, (unsigned int *)addr);
907+ err |= get_user(ldq, (unsigned int *)(addr+4));
908+ err |= get_user(nop, (unsigned int *)(addr+8));
909+ err |= get_user(jmp, (unsigned int *)(addr+12));
910+ err |= get_user(resolver, (unsigned long *)(addr+16));
911+
912+ if (err)
913+ break;
914+
915+ if (br2 == 0xC3600000U &&
916+ ldq == 0xA77B000CU &&
917+ nop == 0x47FF041FU &&
918+ jmp == 0x6B7B0000U)
919+ {
920+ regs->r28 = regs->pc+4;
921+ regs->r27 = addr+16;
922+ regs->pc = resolver;
923+ return 3;
924+ }
925+ }
926+ } while (0);
927+#endif
928+
929+ return 1;
930+}
931+
932+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
933+{
934+ unsigned long i;
935+
936+ printk(KERN_ERR "PAX: bytes at PC: ");
937+ for (i = 0; i < 5; i++) {
938+ unsigned int c;
939+ if (get_user(c, (unsigned int *)pc+i))
940+ printk(KERN_CONT "???????? ");
941+ else
942+ printk(KERN_CONT "%08x ", c);
943+ }
944+ printk("\n");
945+}
946+#endif
947
948 /*
949 * This routine handles page faults. It determines the address,
950@@ -132,8 +250,29 @@ retry:
951 good_area:
952 si_code = SEGV_ACCERR;
953 if (cause < 0) {
954- if (!(vma->vm_flags & VM_EXEC))
955+ if (!(vma->vm_flags & VM_EXEC)) {
956+
957+#ifdef CONFIG_PAX_PAGEEXEC
958+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
959+ goto bad_area;
960+
961+ up_read(&mm->mmap_sem);
962+ switch (pax_handle_fetch_fault(regs)) {
963+
964+#ifdef CONFIG_PAX_EMUPLT
965+ case 2:
966+ case 3:
967+ return;
968+#endif
969+
970+ }
971+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
972+ do_group_exit(SIGKILL);
973+#else
974 goto bad_area;
975+#endif
976+
977+ }
978 } else if (!cause) {
979 /* Allow reads even for write-only mappings */
980 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
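
Illustration (not part of the patch): the magic constants compared in pax_handle_fetch_fault() above are masked Alpha instruction encodings; 0x277Bxxxx is "ldah $27, disp($27)" and 0xA77Bxxxx is "ldq $27, disp($27)", which is how the handler recognizes a PLT trampoline at the faulting PC. A small decoder shows how the 0xFFFF0000 masks isolate the opcode and register fields (assuming the standard Alpha memory-format layout: opcode in bits 31-26, Ra in 25-21, Rb in 20-16):

    #include <stdio.h>

    static void decode(unsigned int insn)
    {
        unsigned int opcode = insn >> 26;          /* bits 31-26 */
        unsigned int ra     = (insn >> 21) & 0x1f; /* bits 25-21 */
        unsigned int rb     = (insn >> 16) & 0x1f; /* bits 20-16 */
        unsigned int disp   = insn & 0xffff;       /* bits 15-0  */

        printf("insn 0x%08x: opcode 0x%02x  ra $%u  rb $%u  disp 0x%04x\n",
               insn, opcode, ra, rb, disp);
    }

    int main(void)
    {
        decode(0x277B0010U);   /* (insn & 0xFFFF0000U) == 0x277B0000U: ldah $27, 0x10($27) */
        decode(0xA77B0020U);   /* (insn & 0xFFFF0000U) == 0xA77B0000U: ldq  $27, 0x20($27) */
        return 0;
    }
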
981diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
982index bd4670d..920c97a 100644
983--- a/arch/arc/Kconfig
984+++ b/arch/arc/Kconfig
985@@ -485,6 +485,7 @@ config ARC_DBG_TLB_MISS_COUNT
986 bool "Profile TLB Misses"
987 default n
988 select DEBUG_FS
989+ depends on !GRKERNSEC_KMEM
990 help
991 Counts number of I and D TLB Misses and exports them via Debugfs
992 The counters can be cleared via Debugfs as well
993diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
994index ede2526..9e12300 100644
995--- a/arch/arm/Kconfig
996+++ b/arch/arm/Kconfig
997@@ -1770,7 +1770,7 @@ config ALIGNMENT_TRAP
998
999 config UACCESS_WITH_MEMCPY
1000 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
1001- depends on MMU
1002+ depends on MMU && !PAX_MEMORY_UDEREF
1003 default y if CPU_FEROCEON
1004 help
1005 Implement faster copy_to_user and clear_user methods for CPU
1006@@ -2006,6 +2006,7 @@ config KEXEC
1007 bool "Kexec system call (EXPERIMENTAL)"
1008 depends on (!SMP || PM_SLEEP_SMP)
1009 depends on !CPU_V7M
1010+ depends on !GRKERNSEC_KMEM
1011 help
1012 kexec is a system call that implements the ability to shutdown your
1013 current kernel, and to start another kernel. It is like a reboot
1014diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
1015index a2e16f9..b26e911 100644
1016--- a/arch/arm/Kconfig.debug
1017+++ b/arch/arm/Kconfig.debug
1018@@ -7,6 +7,7 @@ config ARM_PTDUMP
1019 depends on DEBUG_KERNEL
1020 depends on MMU
1021 select DEBUG_FS
1022+ depends on !GRKERNSEC_KMEM
1023 ---help---
1024 Say Y here if you want to show the kernel pagetable layout in a
1025 debugfs file. This information is only useful for kernel developers
1026diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
1027index e22c119..abe7041 100644
1028--- a/arch/arm/include/asm/atomic.h
1029+++ b/arch/arm/include/asm/atomic.h
1030@@ -18,17 +18,41 @@
1031 #include <asm/barrier.h>
1032 #include <asm/cmpxchg.h>
1033
1034+#ifdef CONFIG_GENERIC_ATOMIC64
1035+#include <asm-generic/atomic64.h>
1036+#endif
1037+
1038 #define ATOMIC_INIT(i) { (i) }
1039
1040 #ifdef __KERNEL__
1041
1042+#ifdef CONFIG_THUMB2_KERNEL
1043+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
1044+#else
1045+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
1046+#endif
1047+
1048+#define _ASM_EXTABLE(from, to) \
1049+" .pushsection __ex_table,\"a\"\n"\
1050+" .align 3\n" \
1051+" .long " #from ", " #to"\n" \
1052+" .popsection"
1053+
1054 /*
1055 * On ARM, ordinary assignment (str instruction) doesn't clear the local
1056 * strex/ldrex monitor on some implementations. The reason we can use it for
1057 * atomic_set() is the clrex or dummy strex done on every exception return.
1058 */
1059 #define atomic_read(v) ACCESS_ONCE((v)->counter)
1060+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
1061+{
1062+ return ACCESS_ONCE(v->counter);
1063+}
1064 #define atomic_set(v,i) (((v)->counter) = (i))
1065+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
1066+{
1067+ v->counter = i;
1068+}
1069
1070 #if __LINUX_ARM_ARCH__ >= 6
1071
1072@@ -38,26 +62,50 @@
1073 * to ensure that the update happens.
1074 */
1075
1076-#define ATOMIC_OP(op, c_op, asm_op) \
1077-static inline void atomic_##op(int i, atomic_t *v) \
1078+#ifdef CONFIG_PAX_REFCOUNT
1079+#define __OVERFLOW_POST \
1080+ " bvc 3f\n" \
1081+ "2: " REFCOUNT_TRAP_INSN "\n"\
1082+ "3:\n"
1083+#define __OVERFLOW_POST_RETURN \
1084+ " bvc 3f\n" \
1085+" mov %0, %1\n" \
1086+ "2: " REFCOUNT_TRAP_INSN "\n"\
1087+ "3:\n"
1088+#define __OVERFLOW_EXTABLE \
1089+ "4:\n" \
1090+ _ASM_EXTABLE(2b, 4b)
1091+#else
1092+#define __OVERFLOW_POST
1093+#define __OVERFLOW_POST_RETURN
1094+#define __OVERFLOW_EXTABLE
1095+#endif
1096+
1097+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1098+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1099 { \
1100 unsigned long tmp; \
1101 int result; \
1102 \
1103 prefetchw(&v->counter); \
1104- __asm__ __volatile__("@ atomic_" #op "\n" \
1105+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1106 "1: ldrex %0, [%3]\n" \
1107 " " #asm_op " %0, %0, %4\n" \
1108+ post_op \
1109 " strex %1, %0, [%3]\n" \
1110 " teq %1, #0\n" \
1111-" bne 1b" \
1112+" bne 1b\n" \
1113+ extable \
1114 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1115 : "r" (&v->counter), "Ir" (i) \
1116 : "cc"); \
1117 } \
1118
1119-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1120-static inline int atomic_##op##_return(int i, atomic_t *v) \
1121+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
1122+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1123+
1124+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1125+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1126 { \
1127 unsigned long tmp; \
1128 int result; \
1129@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1130 smp_mb(); \
1131 prefetchw(&v->counter); \
1132 \
1133- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1134+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1135 "1: ldrex %0, [%3]\n" \
1136 " " #asm_op " %0, %0, %4\n" \
1137+ post_op \
1138 " strex %1, %0, [%3]\n" \
1139 " teq %1, #0\n" \
1140-" bne 1b" \
1141+" bne 1b\n" \
1142+ extable \
1143 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1144 : "r" (&v->counter), "Ir" (i) \
1145 : "cc"); \
1146@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1147 return result; \
1148 }
1149
1150+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
1151+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1152+
1153 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1154 {
1155 int oldval;
1156@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1157 __asm__ __volatile__ ("@ atomic_add_unless\n"
1158 "1: ldrex %0, [%4]\n"
1159 " teq %0, %5\n"
1160-" beq 2f\n"
1161-" add %1, %0, %6\n"
1162+" beq 4f\n"
1163+" adds %1, %0, %6\n"
1164+
1165+#ifdef CONFIG_PAX_REFCOUNT
1166+" bvc 3f\n"
1167+"2: " REFCOUNT_TRAP_INSN "\n"
1168+"3:\n"
1169+#endif
1170+
1171 " strex %2, %1, [%4]\n"
1172 " teq %2, #0\n"
1173 " bne 1b\n"
1174-"2:"
1175+"4:"
1176+
1177+#ifdef CONFIG_PAX_REFCOUNT
1178+ _ASM_EXTABLE(2b, 4b)
1179+#endif
1180+
1181 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1182 : "r" (&v->counter), "r" (u), "r" (a)
1183 : "cc");
1184@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1185 return oldval;
1186 }
1187
1188+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1189+{
1190+ unsigned long oldval, res;
1191+
1192+ smp_mb();
1193+
1194+ do {
1195+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1196+ "ldrex %1, [%3]\n"
1197+ "mov %0, #0\n"
1198+ "teq %1, %4\n"
1199+ "strexeq %0, %5, [%3]\n"
1200+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1201+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1202+ : "cc");
1203+ } while (res);
1204+
1205+ smp_mb();
1206+
1207+ return oldval;
1208+}
1209+
1210 #else /* ARM_ARCH_6 */
1211
1212 #ifdef CONFIG_SMP
1213 #error SMP not supported on pre-ARMv6 CPUs
1214 #endif
1215
1216-#define ATOMIC_OP(op, c_op, asm_op) \
1217-static inline void atomic_##op(int i, atomic_t *v) \
1218+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1219+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1220 { \
1221 unsigned long flags; \
1222 \
1223@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1224 raw_local_irq_restore(flags); \
1225 } \
1226
1227-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1228-static inline int atomic_##op##_return(int i, atomic_t *v) \
1229+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1230+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1231+
1232+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1233+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1234 { \
1235 unsigned long flags; \
1236 int val; \
1237@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1238 return val; \
1239 }
1240
1241+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1242+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1243+
1244 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1245 {
1246 int ret;
1247@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1248 return ret;
1249 }
1250
1251+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1252+{
1253+ return atomic_cmpxchg((atomic_t *)v, old, new);
1254+}
1255+
1256 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1257 {
1258 int c, old;
1259@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
1260
1261 #undef ATOMIC_OPS
1262 #undef ATOMIC_OP_RETURN
1263+#undef __ATOMIC_OP_RETURN
1264 #undef ATOMIC_OP
1265+#undef __ATOMIC_OP
1266
1267 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1268+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1269+{
1270+ return xchg(&v->counter, new);
1271+}
1272
1273 #define atomic_inc(v) atomic_add(1, v)
1274+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1275+{
1276+ atomic_add_unchecked(1, v);
1277+}
1278 #define atomic_dec(v) atomic_sub(1, v)
1279+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1280+{
1281+ atomic_sub_unchecked(1, v);
1282+}
1283
1284 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1285+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1286+{
1287+ return atomic_add_return_unchecked(1, v) == 0;
1288+}
1289 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1290 #define atomic_inc_return(v) (atomic_add_return(1, v))
1291+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1292+{
1293+ return atomic_add_return_unchecked(1, v);
1294+}
1295 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1296 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1297
1298@@ -216,6 +336,14 @@ typedef struct {
1299 long long counter;
1300 } atomic64_t;
1301
1302+#ifdef CONFIG_PAX_REFCOUNT
1303+typedef struct {
1304+ long long counter;
1305+} atomic64_unchecked_t;
1306+#else
1307+typedef atomic64_t atomic64_unchecked_t;
1308+#endif
1309+
1310 #define ATOMIC64_INIT(i) { (i) }
1311
1312 #ifdef CONFIG_ARM_LPAE
1313@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1314 return result;
1315 }
1316
1317+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1318+{
1319+ long long result;
1320+
1321+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1322+" ldrd %0, %H0, [%1]"
1323+ : "=&r" (result)
1324+ : "r" (&v->counter), "Qo" (v->counter)
1325+ );
1326+
1327+ return result;
1328+}
1329+
1330 static inline void atomic64_set(atomic64_t *v, long long i)
1331 {
1332 __asm__ __volatile__("@ atomic64_set\n"
1333@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1334 : "r" (&v->counter), "r" (i)
1335 );
1336 }
1337+
1338+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1339+{
1340+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1341+" strd %2, %H2, [%1]"
1342+ : "=Qo" (v->counter)
1343+ : "r" (&v->counter), "r" (i)
1344+ );
1345+}
1346 #else
1347 static inline long long atomic64_read(const atomic64_t *v)
1348 {
1349@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1350 return result;
1351 }
1352
1353+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1354+{
1355+ long long result;
1356+
1357+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1358+" ldrexd %0, %H0, [%1]"
1359+ : "=&r" (result)
1360+ : "r" (&v->counter), "Qo" (v->counter)
1361+ );
1362+
1363+ return result;
1364+}
1365+
1366 static inline void atomic64_set(atomic64_t *v, long long i)
1367 {
1368 long long tmp;
1369@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1370 : "r" (&v->counter), "r" (i)
1371 : "cc");
1372 }
1373+
1374+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1375+{
1376+ long long tmp;
1377+
1378+ prefetchw(&v->counter);
1379+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1380+"1: ldrexd %0, %H0, [%2]\n"
1381+" strexd %0, %3, %H3, [%2]\n"
1382+" teq %0, #0\n"
1383+" bne 1b"
1384+ : "=&r" (tmp), "=Qo" (v->counter)
1385+ : "r" (&v->counter), "r" (i)
1386+ : "cc");
1387+}
1388 #endif
1389
1390-#define ATOMIC64_OP(op, op1, op2) \
1391-static inline void atomic64_##op(long long i, atomic64_t *v) \
1392+#undef __OVERFLOW_POST_RETURN
1393+#define __OVERFLOW_POST_RETURN \
1394+ " bvc 3f\n" \
1395+" mov %0, %1\n" \
1396+" mov %H0, %H1\n" \
1397+ "2: " REFCOUNT_TRAP_INSN "\n"\
1398+ "3:\n"
1399+
1400+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1401+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1402 { \
1403 long long result; \
1404 unsigned long tmp; \
1405 \
1406 prefetchw(&v->counter); \
1407- __asm__ __volatile__("@ atomic64_" #op "\n" \
1408+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1409 "1: ldrexd %0, %H0, [%3]\n" \
1410 " " #op1 " %Q0, %Q0, %Q4\n" \
1411 " " #op2 " %R0, %R0, %R4\n" \
1412+ post_op \
1413 " strexd %1, %0, %H0, [%3]\n" \
1414 " teq %1, #0\n" \
1415-" bne 1b" \
1416+" bne 1b\n" \
1417+ extable \
1418 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1419 : "r" (&v->counter), "r" (i) \
1420 : "cc"); \
1421 } \
1422
1423-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1424-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1425+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
1426+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1427+
1428+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1429+static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
1430 { \
1431 long long result; \
1432 unsigned long tmp; \
1433@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1434 smp_mb(); \
1435 prefetchw(&v->counter); \
1436 \
1437- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1438+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1439 "1: ldrexd %0, %H0, [%3]\n" \
1440 " " #op1 " %Q0, %Q0, %Q4\n" \
1441 " " #op2 " %R0, %R0, %R4\n" \
1442+ post_op \
1443 " strexd %1, %0, %H0, [%3]\n" \
1444 " teq %1, #0\n" \
1445-" bne 1b" \
1446+" bne 1b\n" \
1447+ extable \
1448 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1449 : "r" (&v->counter), "r" (i) \
1450 : "cc"); \
1451@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
1452 return result; \
1453 }
1454
1455+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
1456+ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1457+
1458 #define ATOMIC64_OPS(op, op1, op2) \
1459 ATOMIC64_OP(op, op1, op2) \
1460 ATOMIC64_OP_RETURN(op, op1, op2)
1461@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
1462
1463 #undef ATOMIC64_OPS
1464 #undef ATOMIC64_OP_RETURN
1465+#undef __ATOMIC64_OP_RETURN
1466 #undef ATOMIC64_OP
1467+#undef __ATOMIC64_OP
1468+#undef __OVERFLOW_EXTABLE
1469+#undef __OVERFLOW_POST_RETURN
1470+#undef __OVERFLOW_POST
1471
1472 static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1473 long long new)
1474@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
1475 return oldval;
1476 }
1477
1478+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
1479+ long long new)
1480+{
1481+ long long oldval;
1482+ unsigned long res;
1483+
1484+ smp_mb();
1485+
1486+ do {
1487+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1488+ "ldrexd %1, %H1, [%3]\n"
1489+ "mov %0, #0\n"
1490+ "teq %1, %4\n"
1491+ "teqeq %H1, %H4\n"
1492+ "strexdeq %0, %5, %H5, [%3]"
1493+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1494+ : "r" (&ptr->counter), "r" (old), "r" (new)
1495+ : "cc");
1496+ } while (res);
1497+
1498+ smp_mb();
1499+
1500+ return oldval;
1501+}
1502+
1503 static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1504 {
1505 long long result;
1506@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
1507 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1508 {
1509 long long result;
1510- unsigned long tmp;
1511+ u64 tmp;
1512
1513 smp_mb();
1514 prefetchw(&v->counter);
1515
1516 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1517-"1: ldrexd %0, %H0, [%3]\n"
1518-" subs %Q0, %Q0, #1\n"
1519-" sbc %R0, %R0, #0\n"
1520+"1: ldrexd %1, %H1, [%3]\n"
1521+" subs %Q0, %Q1, #1\n"
1522+" sbcs %R0, %R1, #0\n"
1523+
1524+#ifdef CONFIG_PAX_REFCOUNT
1525+" bvc 3f\n"
1526+" mov %Q0, %Q1\n"
1527+" mov %R0, %R1\n"
1528+"2: " REFCOUNT_TRAP_INSN "\n"
1529+"3:\n"
1530+#endif
1531+
1532 " teq %R0, #0\n"
1533-" bmi 2f\n"
1534+" bmi 4f\n"
1535 " strexd %1, %0, %H0, [%3]\n"
1536 " teq %1, #0\n"
1537 " bne 1b\n"
1538-"2:"
1539+"4:\n"
1540+
1541+#ifdef CONFIG_PAX_REFCOUNT
1542+ _ASM_EXTABLE(2b, 4b)
1543+#endif
1544+
1545 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1546 : "r" (&v->counter)
1547 : "cc");
1548@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1549 " teq %0, %5\n"
1550 " teqeq %H0, %H5\n"
1551 " moveq %1, #0\n"
1552-" beq 2f\n"
1553+" beq 4f\n"
1554 " adds %Q0, %Q0, %Q6\n"
1555-" adc %R0, %R0, %R6\n"
1556+" adcs %R0, %R0, %R6\n"
1557+
1558+#ifdef CONFIG_PAX_REFCOUNT
1559+" bvc 3f\n"
1560+"2: " REFCOUNT_TRAP_INSN "\n"
1561+"3:\n"
1562+#endif
1563+
1564 " strexd %2, %0, %H0, [%4]\n"
1565 " teq %2, #0\n"
1566 " bne 1b\n"
1567-"2:"
1568+"4:\n"
1569+
1570+#ifdef CONFIG_PAX_REFCOUNT
1571+ _ASM_EXTABLE(2b, 4b)
1572+#endif
1573+
1574 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1575 : "r" (&v->counter), "r" (u), "r" (a)
1576 : "cc");
1577@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1578
1579 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1580 #define atomic64_inc(v) atomic64_add(1LL, (v))
1581+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1582 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1583+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1584 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1585 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1586 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1587+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1588 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1589 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1590 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
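
Illustration (not part of the patch): under PAX_REFCOUNT the checked ARM atomics above switch to the flag-setting form of the arithmetic ("adds"/"adcs"), branch past a trap with "bvc" when no signed overflow occurred, and otherwise execute REFCOUNT_TRAP_INSN; the _ASM_EXTABLE(2b, 4b) entry lets the kernel's trap handler report the overflow and resume at the label after the store, leaving the counter unmodified. A rough userspace analogue of that detect-and-refuse behaviour (requires a compiler providing __builtin_add_overflow, e.g. gcc 5+ or clang):

    #include <stdio.h>
    #include <limits.h>

    /* returns 0 and updates *counter, or -1 if the increment would overflow
     * (the kernel instead traps and reports the offending task) */
    static int checked_inc(int *counter)
    {
        int result;

        if (__builtin_add_overflow(*counter, 1, &result))
            return -1;
        *counter = result;
        return 0;
    }

    int main(void)
    {
        int refcount = INT_MAX;

        if (checked_inc(&refcount) < 0)
            printf("overflow caught, refcount stays at %d\n", refcount);
        return 0;
    }
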
1591diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
1592index 6c2327e..85beac4 100644
1593--- a/arch/arm/include/asm/barrier.h
1594+++ b/arch/arm/include/asm/barrier.h
1595@@ -67,7 +67,7 @@
1596 do { \
1597 compiletime_assert_atomic_type(*p); \
1598 smp_mb(); \
1599- ACCESS_ONCE(*p) = (v); \
1600+ ACCESS_ONCE_RW(*p) = (v); \
1601 } while (0)
1602
1603 #define smp_load_acquire(p) \
1604diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1605index 75fe66b..ba3dee4 100644
1606--- a/arch/arm/include/asm/cache.h
1607+++ b/arch/arm/include/asm/cache.h
1608@@ -4,8 +4,10 @@
1609 #ifndef __ASMARM_CACHE_H
1610 #define __ASMARM_CACHE_H
1611
1612+#include <linux/const.h>
1613+
1614 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1615-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1616+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1617
1618 /*
1619 * Memory returned by kmalloc() may be used for DMA, so we must make
1620@@ -24,5 +26,6 @@
1621 #endif
1622
1623 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1624+#define __read_only __attribute__ ((__section__(".data..read_only")))
1625
1626 #endif
1627diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1628index 4812cda..9da8116 100644
1629--- a/arch/arm/include/asm/cacheflush.h
1630+++ b/arch/arm/include/asm/cacheflush.h
1631@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1632 void (*dma_unmap_area)(const void *, size_t, int);
1633
1634 void (*dma_flush_range)(const void *, const void *);
1635-};
1636+} __no_const;
1637
1638 /*
1639 * Select the calling method
1640diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1641index 5233151..87a71fa 100644
1642--- a/arch/arm/include/asm/checksum.h
1643+++ b/arch/arm/include/asm/checksum.h
1644@@ -37,7 +37,19 @@ __wsum
1645 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1646
1647 __wsum
1648-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1649+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1650+
1651+static inline __wsum
1652+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1653+{
1654+ __wsum ret;
1655+ pax_open_userland();
1656+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1657+ pax_close_userland();
1658+ return ret;
1659+}
1660+
1661+
1662
1663 /*
1664 * Fold a partial checksum without adding pseudo headers
1665diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1666index 1692a05..1835802 100644
1667--- a/arch/arm/include/asm/cmpxchg.h
1668+++ b/arch/arm/include/asm/cmpxchg.h
1669@@ -107,6 +107,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1670 (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
1671 sizeof(*(ptr))); \
1672 })
1673+#define xchg_unchecked(ptr, x) ({ \
1674+ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
1675+ sizeof(*(ptr))); \
1676+})
1677
1678 #include <asm-generic/cmpxchg-local.h>
1679
1680diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
1681index 0f84249..8e83c55 100644
1682--- a/arch/arm/include/asm/cpuidle.h
1683+++ b/arch/arm/include/asm/cpuidle.h
1684@@ -32,7 +32,7 @@ struct device_node;
1685 struct cpuidle_ops {
1686 int (*suspend)(int cpu, unsigned long arg);
1687 int (*init)(struct device_node *, int cpu);
1688-};
1689+} __no_const;
1690
1691 struct of_cpuidle_method {
1692 const char *method;
1693diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1694index 6ddbe44..b5e38b1a 100644
1695--- a/arch/arm/include/asm/domain.h
1696+++ b/arch/arm/include/asm/domain.h
1697@@ -48,18 +48,37 @@
1698 * Domain types
1699 */
1700 #define DOMAIN_NOACCESS 0
1701-#define DOMAIN_CLIENT 1
1702 #ifdef CONFIG_CPU_USE_DOMAINS
1703+#define DOMAIN_USERCLIENT 1
1704+#define DOMAIN_KERNELCLIENT 1
1705 #define DOMAIN_MANAGER 3
1706+#define DOMAIN_VECTORS DOMAIN_USER
1707 #else
1708+
1709+#ifdef CONFIG_PAX_KERNEXEC
1710 #define DOMAIN_MANAGER 1
1711+#define DOMAIN_KERNEXEC 3
1712+#else
1713+#define DOMAIN_MANAGER 1
1714+#endif
1715+
1716+#ifdef CONFIG_PAX_MEMORY_UDEREF
1717+#define DOMAIN_USERCLIENT 0
1718+#define DOMAIN_UDEREF 1
1719+#define DOMAIN_VECTORS DOMAIN_KERNEL
1720+#else
1721+#define DOMAIN_USERCLIENT 1
1722+#define DOMAIN_VECTORS DOMAIN_USER
1723+#endif
1724+#define DOMAIN_KERNELCLIENT 1
1725+
1726 #endif
1727
1728 #define domain_val(dom,type) ((type) << (2*(dom)))
1729
1730 #ifndef __ASSEMBLY__
1731
1732-#ifdef CONFIG_CPU_USE_DOMAINS
1733+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1734 static inline void set_domain(unsigned val)
1735 {
1736 asm volatile(
1737@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1738 isb();
1739 }
1740
1741-#define modify_domain(dom,type) \
1742- do { \
1743- struct thread_info *thread = current_thread_info(); \
1744- unsigned int domain = thread->cpu_domain; \
1745- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1746- thread->cpu_domain = domain | domain_val(dom, type); \
1747- set_domain(thread->cpu_domain); \
1748- } while (0)
1749-
1750+extern void modify_domain(unsigned int dom, unsigned int type);
1751 #else
1752 static inline void set_domain(unsigned val) { }
1753 static inline void modify_domain(unsigned dom, unsigned type) { }
1754diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1755index d2315ff..f60b47b 100644
1756--- a/arch/arm/include/asm/elf.h
1757+++ b/arch/arm/include/asm/elf.h
1758@@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1759 the loader. We need to make sure that it is out of the way of the program
1760 that it will "exec", and that there is sufficient room for the brk. */
1761
1762-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1763+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1764+
1765+#ifdef CONFIG_PAX_ASLR
1766+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1767+
1768+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1769+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1770+#endif
1771
1772 /* When the program starts, a1 contains a pointer to a function to be
1773 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1774diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1775index de53547..52b9a28 100644
1776--- a/arch/arm/include/asm/fncpy.h
1777+++ b/arch/arm/include/asm/fncpy.h
1778@@ -81,7 +81,9 @@
1779 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1780 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1781 \
1782+ pax_open_kernel(); \
1783 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1784+ pax_close_kernel(); \
1785 flush_icache_range((unsigned long)(dest_buf), \
1786 (unsigned long)(dest_buf) + (size)); \
1787 \
1788diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1789index 5eed828..365e018 100644
1790--- a/arch/arm/include/asm/futex.h
1791+++ b/arch/arm/include/asm/futex.h
1792@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1793 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1794 return -EFAULT;
1795
1796+ pax_open_userland();
1797+
1798 smp_mb();
1799 /* Prefetching cannot fault */
1800 prefetchw(uaddr);
1801@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1802 : "cc", "memory");
1803 smp_mb();
1804
1805+ pax_close_userland();
1806+
1807 *uval = val;
1808 return ret;
1809 }
1810@@ -94,6 +98,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1811 return -EFAULT;
1812
1813 preempt_disable();
1814+ pax_open_userland();
1815+
1816 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1817 "1: " TUSER(ldr) " %1, [%4]\n"
1818 " teq %1, %2\n"
1819@@ -104,6 +110,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1820 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1821 : "cc", "memory");
1822
1823+ pax_close_userland();
1824+
1825 *uval = val;
1826 preempt_enable();
1827
1828@@ -131,6 +139,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1829 preempt_disable();
1830 #endif
1831 pagefault_disable();
1832+ pax_open_userland();
1833
1834 switch (op) {
1835 case FUTEX_OP_SET:
1836@@ -152,6 +161,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1837 ret = -ENOSYS;
1838 }
1839
1840+ pax_close_userland();
1841 pagefault_enable();
1842 #ifndef CONFIG_SMP
1843 preempt_enable();
1844diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1845index 83eb2f7..ed77159 100644
1846--- a/arch/arm/include/asm/kmap_types.h
1847+++ b/arch/arm/include/asm/kmap_types.h
1848@@ -4,6 +4,6 @@
1849 /*
1850 * This is the "bare minimum". AIO seems to require this.
1851 */
1852-#define KM_TYPE_NR 16
1853+#define KM_TYPE_NR 17
1854
1855 #endif
1856diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1857index 9e614a1..3302cca 100644
1858--- a/arch/arm/include/asm/mach/dma.h
1859+++ b/arch/arm/include/asm/mach/dma.h
1860@@ -22,7 +22,7 @@ struct dma_ops {
1861 int (*residue)(unsigned int, dma_t *); /* optional */
1862 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1863 const char *type;
1864-};
1865+} __do_const;
1866
1867 struct dma_struct {
1868 void *addr; /* single DMA address */
1869diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1870index f98c7f3..e5c626d 100644
1871--- a/arch/arm/include/asm/mach/map.h
1872+++ b/arch/arm/include/asm/mach/map.h
1873@@ -23,17 +23,19 @@ struct map_desc {
1874
1875 /* types 0-3 are defined in asm/io.h */
1876 enum {
1877- MT_UNCACHED = 4,
1878- MT_CACHECLEAN,
1879- MT_MINICLEAN,
1880+ MT_UNCACHED_RW = 4,
1881+ MT_CACHECLEAN_RO,
1882+ MT_MINICLEAN_RO,
1883 MT_LOW_VECTORS,
1884 MT_HIGH_VECTORS,
1885- MT_MEMORY_RWX,
1886+ __MT_MEMORY_RWX,
1887 MT_MEMORY_RW,
1888- MT_ROM,
1889- MT_MEMORY_RWX_NONCACHED,
1890+ MT_MEMORY_RX,
1891+ MT_ROM_RX,
1892+ MT_MEMORY_RW_NONCACHED,
1893+ MT_MEMORY_RX_NONCACHED,
1894 MT_MEMORY_RW_DTCM,
1895- MT_MEMORY_RWX_ITCM,
1896+ MT_MEMORY_RX_ITCM,
1897 MT_MEMORY_RW_SO,
1898 MT_MEMORY_DMA_READY,
1899 };
1900diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1901index 563b92f..689d58e 100644
1902--- a/arch/arm/include/asm/outercache.h
1903+++ b/arch/arm/include/asm/outercache.h
1904@@ -39,7 +39,7 @@ struct outer_cache_fns {
1905 /* This is an ARM L2C thing */
1906 void (*write_sec)(unsigned long, unsigned);
1907 void (*configure)(const struct l2x0_regs *);
1908-};
1909+} __no_const;
1910
1911 extern struct outer_cache_fns outer_cache;
1912
1913diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1914index 4355f0e..cd9168e 100644
1915--- a/arch/arm/include/asm/page.h
1916+++ b/arch/arm/include/asm/page.h
1917@@ -23,6 +23,7 @@
1918
1919 #else
1920
1921+#include <linux/compiler.h>
1922 #include <asm/glue.h>
1923
1924 /*
1925@@ -114,7 +115,7 @@ struct cpu_user_fns {
1926 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1927 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1928 unsigned long vaddr, struct vm_area_struct *vma);
1929-};
1930+} __no_const;
1931
1932 #ifdef MULTI_USER
1933 extern struct cpu_user_fns cpu_user;
1934diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1935index 19cfab5..3f5c7e9 100644
1936--- a/arch/arm/include/asm/pgalloc.h
1937+++ b/arch/arm/include/asm/pgalloc.h
1938@@ -17,6 +17,7 @@
1939 #include <asm/processor.h>
1940 #include <asm/cacheflush.h>
1941 #include <asm/tlbflush.h>
1942+#include <asm/system_info.h>
1943
1944 #define check_pgt_cache() do { } while (0)
1945
1946@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1947 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1948 }
1949
1950+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1951+{
1952+ pud_populate(mm, pud, pmd);
1953+}
1954+
1955 #else /* !CONFIG_ARM_LPAE */
1956
1957 /*
1958@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1959 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1960 #define pmd_free(mm, pmd) do { } while (0)
1961 #define pud_populate(mm,pmd,pte) BUG()
1962+#define pud_populate_kernel(mm,pmd,pte) BUG()
1963
1964 #endif /* CONFIG_ARM_LPAE */
1965
1966@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1967 __free_page(pte);
1968 }
1969
1970+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1971+{
1972+#ifdef CONFIG_ARM_LPAE
1973+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1974+#else
1975+ if (addr & SECTION_SIZE)
1976+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1977+ else
1978+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1979+#endif
1980+ flush_pmd_entry(pmdp);
1981+}
1982+
1983 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1984 pmdval_t prot)
1985 {
1986diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1987index 5e68278..1869bae 100644
1988--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1989+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1990@@ -27,7 +27,7 @@
1991 /*
1992 * - section
1993 */
1994-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1995+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1996 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1997 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1998 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1999@@ -39,6 +39,7 @@
2000 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
2001 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
2002 #define PMD_SECT_AF (_AT(pmdval_t, 0))
2003+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
2004
2005 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
2006 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
2007@@ -68,6 +69,7 @@
2008 * - extended small page/tiny page
2009 */
2010 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
2011+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
2012 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
2013 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
2014 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
2015diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
2016index aeddd28..207745c 100644
2017--- a/arch/arm/include/asm/pgtable-2level.h
2018+++ b/arch/arm/include/asm/pgtable-2level.h
2019@@ -127,6 +127,9 @@
2020 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
2021 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
2022
2023+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
2024+#define L_PTE_PXN (_AT(pteval_t, 0))
2025+
2026 /*
2027 * These are the memory types, defined to be compatible with
2028 * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
2029diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
2030index a745a2a..481350a 100644
2031--- a/arch/arm/include/asm/pgtable-3level.h
2032+++ b/arch/arm/include/asm/pgtable-3level.h
2033@@ -80,6 +80,7 @@
2034 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
2035 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
2036 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
2037+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
2038 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
2039 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
2040 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
2041@@ -91,10 +92,12 @@
2042 #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
2043 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
2044 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
2045+#define PMD_SECT_RDONLY PMD_SECT_AP2
2046
2047 /*
2048 * To be used in assembly code with the upper page attributes.
2049 */
2050+#define L_PTE_PXN_HIGH (1 << (53 - 32))
2051 #define L_PTE_XN_HIGH (1 << (54 - 32))
2052 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
2053
2054diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
2055index f403541..b10df68 100644
2056--- a/arch/arm/include/asm/pgtable.h
2057+++ b/arch/arm/include/asm/pgtable.h
2058@@ -33,6 +33,9 @@
2059 #include <asm/pgtable-2level.h>
2060 #endif
2061
2062+#define ktla_ktva(addr) (addr)
2063+#define ktva_ktla(addr) (addr)
2064+
2065 /*
2066 * Just any arbitrary offset to the start of the vmalloc VM area: the
2067 * current 8MB value just means that there will be a 8MB "hole" after the
2068@@ -48,6 +51,9 @@
2069 #define LIBRARY_TEXT_START 0x0c000000
2070
2071 #ifndef __ASSEMBLY__
2072+extern pteval_t __supported_pte_mask;
2073+extern pmdval_t __supported_pmd_mask;
2074+
2075 extern void __pte_error(const char *file, int line, pte_t);
2076 extern void __pmd_error(const char *file, int line, pmd_t);
2077 extern void __pgd_error(const char *file, int line, pgd_t);
2078@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2079 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2080 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2081
2082+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2083+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2084+
2085+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2086+#include <asm/domain.h>
2087+#include <linux/thread_info.h>
2088+#include <linux/preempt.h>
2089+
2090+static inline int test_domain(int domain, int domaintype)
2091+{
2092+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2093+}
2094+#endif
2095+
2096+#ifdef CONFIG_PAX_KERNEXEC
2097+static inline unsigned long pax_open_kernel(void) {
2098+#ifdef CONFIG_ARM_LPAE
2099+ /* TODO */
2100+#else
2101+ preempt_disable();
2102+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2103+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2104+#endif
2105+ return 0;
2106+}
2107+
2108+static inline unsigned long pax_close_kernel(void) {
2109+#ifdef CONFIG_ARM_LPAE
2110+ /* TODO */
2111+#else
2112+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2113+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2114+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2115+ preempt_enable_no_resched();
2116+#endif
2117+ return 0;
2118+}
2119+#else
2120+static inline unsigned long pax_open_kernel(void) { return 0; }
2121+static inline unsigned long pax_close_kernel(void) { return 0; }
2122+#endif
2123+
2124 /*
2125 * This is the lowest virtual address we can permit any user space
2126 * mapping to be mapped at. This is particularly important for
2127@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2128 /*
2129 * The pgprot_* and protection_map entries will be fixed up in runtime
2130 * to include the cachable and bufferable bits based on memory policy,
2131- * as well as any architecture dependent bits like global/ASID and SMP
2132- * shared mapping bits.
2133+ * as well as any architecture dependent bits like global/ASID, PXN,
2134+ * and SMP shared mapping bits.
2135 */
2136 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2137
2138@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2139 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2140 {
2141 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2142- L_PTE_NONE | L_PTE_VALID;
2143+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2144 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2145 return pte;
2146 }
2147diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2148index c25ef3e..735f14b 100644
2149--- a/arch/arm/include/asm/psci.h
2150+++ b/arch/arm/include/asm/psci.h
2151@@ -32,7 +32,7 @@ struct psci_operations {
2152 int (*affinity_info)(unsigned long target_affinity,
2153 unsigned long lowest_affinity_level);
2154 int (*migrate_info_type)(void);
2155-};
2156+} __no_const;
2157
2158 extern struct psci_operations psci_ops;
2159 extern struct smp_operations psci_smp_ops;
2160diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2161index 2f3ac1b..67182ae0 100644
2162--- a/arch/arm/include/asm/smp.h
2163+++ b/arch/arm/include/asm/smp.h
2164@@ -108,7 +108,7 @@ struct smp_operations {
2165 int (*cpu_disable)(unsigned int cpu);
2166 #endif
2167 #endif
2168-};
2169+} __no_const;
2170
2171 struct of_cpu_method {
2172 const char *method;
2173diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2174index bd32ede..bd90a0b 100644
2175--- a/arch/arm/include/asm/thread_info.h
2176+++ b/arch/arm/include/asm/thread_info.h
2177@@ -74,9 +74,9 @@ struct thread_info {
2178 .flags = 0, \
2179 .preempt_count = INIT_PREEMPT_COUNT, \
2180 .addr_limit = KERNEL_DS, \
2181- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2182- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2183- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2184+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2185+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2186+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2187 }
2188
2189 #define init_thread_info (init_thread_union.thread_info)
2190@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2191 #define TIF_SYSCALL_AUDIT 9
2192 #define TIF_SYSCALL_TRACEPOINT 10
2193 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2194-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2195+/* within 8 bits of TIF_SYSCALL_TRACE
2196+ * to meet flexible second operand requirements
2197+ */
2198+#define TIF_GRSEC_SETXID 12
2199+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2200 #define TIF_USING_IWMMXT 17
2201 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2202 #define TIF_RESTORE_SIGMASK 20
2203@@ -166,10 +170,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2204 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2205 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2206 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2207+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2208
2209 /* Checks for any syscall work in entry-common.S */
2210 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2211- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2212+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2213
2214 /*
2215 * Change these and you break ASM code in entry-common.S
2216diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2217index 5f833f7..76e6644 100644
2218--- a/arch/arm/include/asm/tls.h
2219+++ b/arch/arm/include/asm/tls.h
2220@@ -3,6 +3,7 @@
2221
2222 #include <linux/compiler.h>
2223 #include <asm/thread_info.h>
2224+#include <asm/pgtable.h>
2225
2226 #ifdef __ASSEMBLY__
2227 #include <asm/asm-offsets.h>
2228@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2229 * at 0xffff0fe0 must be used instead. (see
2230 * entry-armv.S for details)
2231 */
2232+ pax_open_kernel();
2233 *((unsigned int *)0xffff0ff0) = val;
2234+ pax_close_kernel();
2235 #endif
2236 }
2237
2238diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2239index 74b17d0..7e6da4b 100644
2240--- a/arch/arm/include/asm/uaccess.h
2241+++ b/arch/arm/include/asm/uaccess.h
2242@@ -18,6 +18,7 @@
2243 #include <asm/domain.h>
2244 #include <asm/unified.h>
2245 #include <asm/compiler.h>
2246+#include <asm/pgtable.h>
2247
2248 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2249 #include <asm-generic/uaccess-unaligned.h>
2250@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2251 static inline void set_fs(mm_segment_t fs)
2252 {
2253 current_thread_info()->addr_limit = fs;
2254- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2255+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2256 }
2257
2258 #define segment_eq(a, b) ((a) == (b))
2259
2260+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2261+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2262+
2263+static inline void pax_open_userland(void)
2264+{
2265+
2266+#ifdef CONFIG_PAX_MEMORY_UDEREF
2267+ if (segment_eq(get_fs(), USER_DS)) {
2268+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2269+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2270+ }
2271+#endif
2272+
2273+}
2274+
2275+static inline void pax_close_userland(void)
2276+{
2277+
2278+#ifdef CONFIG_PAX_MEMORY_UDEREF
2279+ if (segment_eq(get_fs(), USER_DS)) {
2280+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2281+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2282+ }
2283+#endif
2284+
2285+}
2286+
2287 #define __addr_ok(addr) ({ \
2288 unsigned long flag; \
2289 __asm__("cmp %2, %0; movlo %0, #0" \
2290@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
2291
2292 #define get_user(x, p) \
2293 ({ \
2294+ int __e; \
2295 might_fault(); \
2296- __get_user_check(x, p); \
2297+ pax_open_userland(); \
2298+ __e = __get_user_check((x), (p)); \
2299+ pax_close_userland(); \
2300+ __e; \
2301 })
2302
2303 extern int __put_user_1(void *, unsigned int);
2304@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
2305
2306 #define put_user(x, p) \
2307 ({ \
2308+ int __e; \
2309 might_fault(); \
2310- __put_user_check(x, p); \
2311+ pax_open_userland(); \
2312+ __e = __put_user_check((x), (p)); \
2313+ pax_close_userland(); \
2314+ __e; \
2315 })
2316
2317 #else /* CONFIG_MMU */
2318@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
2319
2320 #endif /* CONFIG_MMU */
2321
2322+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2323 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
2324
2325 #define user_addr_max() \
2326@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
2327 #define __get_user(x, ptr) \
2328 ({ \
2329 long __gu_err = 0; \
2330+ pax_open_userland(); \
2331 __get_user_err((x), (ptr), __gu_err); \
2332+ pax_close_userland(); \
2333 __gu_err; \
2334 })
2335
2336 #define __get_user_error(x, ptr, err) \
2337 ({ \
2338+ pax_open_userland(); \
2339 __get_user_err((x), (ptr), err); \
2340+ pax_close_userland(); \
2341 (void) 0; \
2342 })
2343
2344@@ -368,13 +409,17 @@ do { \
2345 #define __put_user(x, ptr) \
2346 ({ \
2347 long __pu_err = 0; \
2348+ pax_open_userland(); \
2349 __put_user_err((x), (ptr), __pu_err); \
2350+ pax_close_userland(); \
2351 __pu_err; \
2352 })
2353
2354 #define __put_user_error(x, ptr, err) \
2355 ({ \
2356+ pax_open_userland(); \
2357 __put_user_err((x), (ptr), err); \
2358+ pax_close_userland(); \
2359 (void) 0; \
2360 })
2361
2362@@ -474,11 +519,44 @@ do { \
2363
2364
2365 #ifdef CONFIG_MMU
2366-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2367-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2368-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2369-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2370-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2371+extern unsigned long __must_check __size_overflow(3) ___copy_from_user(void *to, const void __user *from, unsigned long n);
2372+extern unsigned long __must_check __size_overflow(3) ___copy_to_user(void __user *to, const void *from, unsigned long n);
2373+
2374+static inline unsigned long __must_check __size_overflow(3) __copy_from_user(void *to, const void __user *from, unsigned long n)
2375+{
2376+ unsigned long ret;
2377+
2378+ check_object_size(to, n, false);
2379+ pax_open_userland();
2380+ ret = ___copy_from_user(to, from, n);
2381+ pax_close_userland();
2382+ return ret;
2383+}
2384+
2385+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2386+{
2387+ unsigned long ret;
2388+
2389+ check_object_size(from, n, true);
2390+ pax_open_userland();
2391+ ret = ___copy_to_user(to, from, n);
2392+ pax_close_userland();
2393+ return ret;
2394+}
2395+
2396+extern unsigned long __must_check __size_overflow(3) __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2397+extern unsigned long __must_check __size_overflow(2) ___clear_user(void __user *addr, unsigned long n);
2398+extern unsigned long __must_check __size_overflow(2) __clear_user_std(void __user *addr, unsigned long n);
2399+
2400+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2401+{
2402+ unsigned long ret;
2403+ pax_open_userland();
2404+ ret = ___clear_user(addr, n);
2405+ pax_close_userland();
2406+ return ret;
2407+}
2408+
2409 #else
2410 #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
2411 #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
2412@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2413
2414 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2415 {
2416+ if ((long)n < 0)
2417+ return n;
2418+
2419 if (access_ok(VERIFY_READ, from, n))
2420 n = __copy_from_user(to, from, n);
2421 else /* security hole - plug it */
2422@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2423
2424 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2425 {
2426+ if ((long)n < 0)
2427+ return n;
2428+
2429 if (access_ok(VERIFY_WRITE, to, n))
2430 n = __copy_to_user(to, from, n);
2431 return n;
2432diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2433index 5af0ed1..cea83883 100644
2434--- a/arch/arm/include/uapi/asm/ptrace.h
2435+++ b/arch/arm/include/uapi/asm/ptrace.h
2436@@ -92,7 +92,7 @@
2437 * ARMv7 groups of PSR bits
2438 */
2439 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2440-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2441+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2442 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2443 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2444
2445diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2446index 5e5a51a..b21eeef 100644
2447--- a/arch/arm/kernel/armksyms.c
2448+++ b/arch/arm/kernel/armksyms.c
2449@@ -58,7 +58,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2450
2451 /* networking */
2452 EXPORT_SYMBOL(csum_partial);
2453-EXPORT_SYMBOL(csum_partial_copy_from_user);
2454+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2455 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2456 EXPORT_SYMBOL(__csum_ipv6_magic);
2457
2458@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
2459 #ifdef CONFIG_MMU
2460 EXPORT_SYMBOL(copy_page);
2461
2462-EXPORT_SYMBOL(__copy_from_user);
2463-EXPORT_SYMBOL(__copy_to_user);
2464-EXPORT_SYMBOL(__clear_user);
2465+EXPORT_SYMBOL(___copy_from_user);
2466+EXPORT_SYMBOL(___copy_to_user);
2467+EXPORT_SYMBOL(___clear_user);
2468
2469 EXPORT_SYMBOL(__get_user_1);
2470 EXPORT_SYMBOL(__get_user_2);
2471diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
2472index 318da33..373689f 100644
2473--- a/arch/arm/kernel/cpuidle.c
2474+++ b/arch/arm/kernel/cpuidle.c
2475@@ -19,7 +19,7 @@ extern struct of_cpuidle_method __cpuidle_method_of_table[];
2476 static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
2477 __used __section(__cpuidle_method_of_table_end);
2478
2479-static struct cpuidle_ops cpuidle_ops[NR_CPUS];
2480+static struct cpuidle_ops cpuidle_ops[NR_CPUS] __read_only;
2481
2482 /**
2483 * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
2484diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2485index cb4fb1e..dc7fcaf 100644
2486--- a/arch/arm/kernel/entry-armv.S
2487+++ b/arch/arm/kernel/entry-armv.S
2488@@ -50,6 +50,87 @@
2489 9997:
2490 .endm
2491
2492+ .macro pax_enter_kernel
2493+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2494+ @ make aligned space for saved DACR
2495+ sub sp, sp, #8
2496+ @ save regs
2497+ stmdb sp!, {r1, r2}
2498+ @ read DACR from cpu_domain into r1
2499+ mov r2, sp
2500+ @ assume 8K pages, since we have to split the immediate in two
2501+ bic r2, r2, #(0x1fc0)
2502+ bic r2, r2, #(0x3f)
2503+ ldr r1, [r2, #TI_CPU_DOMAIN]
2504+ @ store old DACR on stack
2505+ str r1, [sp, #8]
2506+#ifdef CONFIG_PAX_KERNEXEC
2507+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2508+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2509+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2510+#endif
2511+#ifdef CONFIG_PAX_MEMORY_UDEREF
2512+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2513+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2514+#endif
2515+ @ write r1 to current_thread_info()->cpu_domain
2516+ str r1, [r2, #TI_CPU_DOMAIN]
2517+ @ write r1 to DACR
2518+ mcr p15, 0, r1, c3, c0, 0
2519+ @ instruction sync
2520+ instr_sync
2521+ @ restore regs
2522+ ldmia sp!, {r1, r2}
2523+#endif
2524+ .endm
2525+
2526+ .macro pax_open_userland
2527+#ifdef CONFIG_PAX_MEMORY_UDEREF
2528+ @ save regs
2529+ stmdb sp!, {r0, r1}
2530+ @ read DACR from cpu_domain into r1
2531+ mov r0, sp
2532+ @ assume 8K pages, since we have to split the immediate in two
2533+ bic r0, r0, #(0x1fc0)
2534+ bic r0, r0, #(0x3f)
2535+ ldr r1, [r0, #TI_CPU_DOMAIN]
2536+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2537+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2538+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2539+ @ write r1 to current_thread_info()->cpu_domain
2540+ str r1, [r0, #TI_CPU_DOMAIN]
2541+ @ write r1 to DACR
2542+ mcr p15, 0, r1, c3, c0, 0
2543+ @ instruction sync
2544+ instr_sync
2545+ @ restore regs
2546+ ldmia sp!, {r0, r1}
2547+#endif
2548+ .endm
2549+
2550+ .macro pax_close_userland
2551+#ifdef CONFIG_PAX_MEMORY_UDEREF
2552+ @ save regs
2553+ stmdb sp!, {r0, r1}
2554+ @ read DACR from cpu_domain into r1
2555+ mov r0, sp
2556+ @ assume 8K pages, since we have to split the immediate in two
2557+ bic r0, r0, #(0x1fc0)
2558+ bic r0, r0, #(0x3f)
2559+ ldr r1, [r0, #TI_CPU_DOMAIN]
2560+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2561+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2562+ @ write r1 to current_thread_info()->cpu_domain
2563+ str r1, [r0, #TI_CPU_DOMAIN]
2564+ @ write r1 to DACR
2565+ mcr p15, 0, r1, c3, c0, 0
2566+ @ instruction sync
2567+ instr_sync
2568+ @ restore regs
2569+ ldmia sp!, {r0, r1}
2570+#endif
2571+ .endm
2572+
2573 .macro pabt_helper
2574 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2575 #ifdef MULTI_PABORT
2576@@ -92,11 +173,15 @@
2577 * Invalid mode handlers
2578 */
2579 .macro inv_entry, reason
2580+
2581+ pax_enter_kernel
2582+
2583 sub sp, sp, #S_FRAME_SIZE
2584 ARM( stmib sp, {r1 - lr} )
2585 THUMB( stmia sp, {r0 - r12} )
2586 THUMB( str sp, [sp, #S_SP] )
2587 THUMB( str lr, [sp, #S_LR] )
2588+
2589 mov r1, #\reason
2590 .endm
2591
2592@@ -152,7 +237,11 @@ ENDPROC(__und_invalid)
2593 .macro svc_entry, stack_hole=0, trace=1
2594 UNWIND(.fnstart )
2595 UNWIND(.save {r0 - pc} )
2596+
2597+ pax_enter_kernel
2598+
2599 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2600+
2601 #ifdef CONFIG_THUMB2_KERNEL
2602 SPFIX( str r0, [sp] ) @ temporarily saved
2603 SPFIX( mov r0, sp )
2604@@ -167,7 +256,12 @@ ENDPROC(__und_invalid)
2605 ldmia r0, {r3 - r5}
2606 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2607 mov r6, #-1 @ "" "" "" ""
2608+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2609+ @ offset sp by 8 as done in pax_enter_kernel
2610+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2611+#else
2612 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2613+#endif
2614 SPFIX( addeq r2, r2, #4 )
2615 str r3, [sp, #-4]! @ save the "real" r0 copied
2616 @ from the exception stack
2617@@ -371,6 +465,9 @@ ENDPROC(__fiq_abt)
2618 .macro usr_entry, trace=1
2619 UNWIND(.fnstart )
2620 UNWIND(.cantunwind ) @ don't unwind the user space
2621+
2622+ pax_enter_kernel_user
2623+
2624 sub sp, sp, #S_FRAME_SIZE
2625 ARM( stmib sp, {r1 - r12} )
2626 THUMB( stmia sp, {r0 - r12} )
2627@@ -481,7 +578,9 @@ __und_usr:
2628 tst r3, #PSR_T_BIT @ Thumb mode?
2629 bne __und_usr_thumb
2630 sub r4, r2, #4 @ ARM instr at LR - 4
2631+ pax_open_userland
2632 1: ldrt r0, [r4]
2633+ pax_close_userland
2634 ARM_BE8(rev r0, r0) @ little endian instruction
2635
2636 @ r0 = 32-bit ARM instruction which caused the exception
2637@@ -515,11 +614,15 @@ __und_usr_thumb:
2638 */
2639 .arch armv6t2
2640 #endif
2641+ pax_open_userland
2642 2: ldrht r5, [r4]
2643+ pax_close_userland
2644 ARM_BE8(rev16 r5, r5) @ little endian instruction
2645 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2646 blo __und_usr_fault_16 @ 16bit undefined instruction
2647+ pax_open_userland
2648 3: ldrht r0, [r2]
2649+ pax_close_userland
2650 ARM_BE8(rev16 r0, r0) @ little endian instruction
2651 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2652 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2653@@ -549,7 +652,8 @@ ENDPROC(__und_usr)
2654 */
2655 .pushsection .text.fixup, "ax"
2656 .align 2
2657-4: str r4, [sp, #S_PC] @ retry current instruction
2658+4: pax_close_userland
2659+ str r4, [sp, #S_PC] @ retry current instruction
2660 ret r9
2661 .popsection
2662 .pushsection __ex_table,"a"
2663@@ -769,7 +873,7 @@ ENTRY(__switch_to)
2664 THUMB( str lr, [ip], #4 )
2665 ldr r4, [r2, #TI_TP_VALUE]
2666 ldr r5, [r2, #TI_TP_VALUE + 4]
2667-#ifdef CONFIG_CPU_USE_DOMAINS
2668+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2669 ldr r6, [r2, #TI_CPU_DOMAIN]
2670 #endif
2671 switch_tls r1, r4, r5, r3, r7
2672@@ -778,7 +882,7 @@ ENTRY(__switch_to)
2673 ldr r8, =__stack_chk_guard
2674 ldr r7, [r7, #TSK_STACK_CANARY]
2675 #endif
2676-#ifdef CONFIG_CPU_USE_DOMAINS
2677+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2678 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2679 #endif
2680 mov r5, r0
2681diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2682index b48dd4f..9f9a72f 100644
2683--- a/arch/arm/kernel/entry-common.S
2684+++ b/arch/arm/kernel/entry-common.S
2685@@ -11,18 +11,46 @@
2686 #include <asm/assembler.h>
2687 #include <asm/unistd.h>
2688 #include <asm/ftrace.h>
2689+#include <asm/domain.h>
2690 #include <asm/unwind.h>
2691
2692+#include "entry-header.S"
2693+
2694 #ifdef CONFIG_NEED_RET_TO_USER
2695 #include <mach/entry-macro.S>
2696 #else
2697 .macro arch_ret_to_user, tmp1, tmp2
2698+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2699+ @ save regs
2700+ stmdb sp!, {r1, r2}
2701+ @ read DACR from cpu_domain into r1
2702+ mov r2, sp
2703+ @ assume 8K pages, since we have to split the immediate in two
2704+ bic r2, r2, #(0x1fc0)
2705+ bic r2, r2, #(0x3f)
2706+ ldr r1, [r2, #TI_CPU_DOMAIN]
2707+#ifdef CONFIG_PAX_KERNEXEC
2708+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2709+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2710+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2711+#endif
2712+#ifdef CONFIG_PAX_MEMORY_UDEREF
2713+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2714+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2715+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2716+#endif
2717+ @ write r1 to current_thread_info()->cpu_domain
2718+ str r1, [r2, #TI_CPU_DOMAIN]
2719+ @ write r1 to DACR
2720+ mcr p15, 0, r1, c3, c0, 0
2721+ @ instruction sync
2722+ instr_sync
2723+ @ restore regs
2724+ ldmia sp!, {r1, r2}
2725+#endif
2726 .endm
2727 #endif
2728
2729-#include "entry-header.S"
2730-
2731-
2732 .align 5
2733 /*
2734 * This is the fast syscall return path. We do as little as
2735@@ -174,6 +202,12 @@ ENTRY(vector_swi)
2736 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2737 #endif
2738
2739+ /*
2740+ * do this here to avoid a performance hit of wrapping the code above
2741+ * that directly dereferences userland to parse the SWI instruction
2742+ */
2743+ pax_enter_kernel_user
2744+
2745 adr tbl, sys_call_table @ load syscall table pointer
2746
2747 #if defined(CONFIG_OABI_COMPAT)
2748diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2749index 1a0045a..9b4f34d 100644
2750--- a/arch/arm/kernel/entry-header.S
2751+++ b/arch/arm/kernel/entry-header.S
2752@@ -196,6 +196,60 @@
2753 msr cpsr_c, \rtemp @ switch back to the SVC mode
2754 .endm
2755
2756+ .macro pax_enter_kernel_user
2757+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2758+ @ save regs
2759+ stmdb sp!, {r0, r1}
2760+ @ read DACR from cpu_domain into r1
2761+ mov r0, sp
2762+ @ assume 8K pages, since we have to split the immediate in two
2763+ bic r0, r0, #(0x1fc0)
2764+ bic r0, r0, #(0x3f)
2765+ ldr r1, [r0, #TI_CPU_DOMAIN]
2766+#ifdef CONFIG_PAX_MEMORY_UDEREF
2767+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2768+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2769+#endif
2770+#ifdef CONFIG_PAX_KERNEXEC
2771+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2772+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2773+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2774+#endif
2775+ @ write r1 to current_thread_info()->cpu_domain
2776+ str r1, [r0, #TI_CPU_DOMAIN]
2777+ @ write r1 to DACR
2778+ mcr p15, 0, r1, c3, c0, 0
2779+ @ instruction sync
2780+ instr_sync
2781+ @ restore regs
2782+ ldmia sp!, {r0, r1}
2783+#endif
2784+ .endm
2785+
2786+ .macro pax_exit_kernel
2787+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2788+ @ save regs
2789+ stmdb sp!, {r0, r1}
2790+ @ read old DACR from stack into r1
2791+ ldr r1, [sp, #(8 + S_SP)]
2792+ sub r1, r1, #8
2793+ ldr r1, [r1]
2794+
2795+ @ write r1 to current_thread_info()->cpu_domain
2796+ mov r0, sp
2797+ @ assume 8K pages, since we have to split the immediate in two
2798+ bic r0, r0, #(0x1fc0)
2799+ bic r0, r0, #(0x3f)
2800+ str r1, [r0, #TI_CPU_DOMAIN]
2801+ @ write r1 to DACR
2802+ mcr p15, 0, r1, c3, c0, 0
2803+ @ instruction sync
2804+ instr_sync
2805+ @ restore regs
2806+ ldmia sp!, {r0, r1}
2807+#endif
2808+ .endm
2809+
2810 #ifndef CONFIG_THUMB2_KERNEL
2811 .macro svc_exit, rpsr, irq = 0
2812 .if \irq != 0
2813@@ -215,6 +269,9 @@
2814 blne trace_hardirqs_off
2815 #endif
2816 .endif
2817+
2818+ pax_exit_kernel
2819+
2820 msr spsr_cxsf, \rpsr
2821 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
2822 @ We must avoid clrex due to Cortex-A15 erratum #830321
2823@@ -291,6 +348,9 @@
2824 blne trace_hardirqs_off
2825 #endif
2826 .endif
2827+
2828+ pax_exit_kernel
2829+
2830 ldr lr, [sp, #S_SP] @ top of the stack
2831 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2832
2833diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2834index 059c3da..8e45cfc 100644
2835--- a/arch/arm/kernel/fiq.c
2836+++ b/arch/arm/kernel/fiq.c
2837@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
2838 void *base = vectors_page;
2839 unsigned offset = FIQ_OFFSET;
2840
2841+ pax_open_kernel();
2842 memcpy(base + offset, start, length);
2843+ pax_close_kernel();
2844+
2845 if (!cache_is_vipt_nonaliasing())
2846 flush_icache_range((unsigned long)base + offset, offset +
2847 length);
2848diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2849index 29e2991..7bc5757 100644
2850--- a/arch/arm/kernel/head.S
2851+++ b/arch/arm/kernel/head.S
2852@@ -467,7 +467,7 @@ __enable_mmu:
2853 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2854 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2855 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2856- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2857+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2858 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2859 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2860 #endif
2861diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
2862index 097e2e2..3927085 100644
2863--- a/arch/arm/kernel/module-plts.c
2864+++ b/arch/arm/kernel/module-plts.c
2865@@ -30,17 +30,12 @@ struct plt_entries {
2866 u32 lit[PLT_ENT_COUNT];
2867 };
2868
2869-static bool in_init(const struct module *mod, u32 addr)
2870-{
2871- return addr - (u32)mod->module_init < mod->init_size;
2872-}
2873-
2874 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
2875 {
2876 struct plt_entries *plt, *plt_end;
2877 int c, *count;
2878
2879- if (in_init(mod, loc)) {
2880+ if (within_module_init(loc, mod)) {
2881 plt = (void *)mod->arch.init_plt->sh_addr;
2882 plt_end = (void *)plt + mod->arch.init_plt->sh_size;
2883 count = &mod->arch.init_plt_count;
2884diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2885index efdddcb..35e58f6 100644
2886--- a/arch/arm/kernel/module.c
2887+++ b/arch/arm/kernel/module.c
2888@@ -38,17 +38,47 @@
2889 #endif
2890
2891 #ifdef CONFIG_MMU
2892-void *module_alloc(unsigned long size)
2893+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2894 {
2895- void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2896- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2897+ void *p;
2898+
2899+ if (!size || (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR))
2900+ return NULL;
2901+
2902+ p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2903+ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2904 __builtin_return_address(0));
2905 if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
2906 return p;
2907 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2908- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
2909+ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
2910 __builtin_return_address(0));
2911 }
2912+
2913+void *module_alloc(unsigned long size)
2914+{
2915+
2916+#ifdef CONFIG_PAX_KERNEXEC
2917+ return __module_alloc(size, PAGE_KERNEL);
2918+#else
2919+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2920+#endif
2921+
2922+}
2923+
2924+#ifdef CONFIG_PAX_KERNEXEC
2925+void module_memfree_exec(void *module_region)
2926+{
2927+ module_memfree(module_region);
2928+}
2929+EXPORT_SYMBOL(module_memfree_exec);
2930+
2931+void *module_alloc_exec(unsigned long size)
2932+{
2933+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2934+}
2935+EXPORT_SYMBOL(module_alloc_exec);
2936+#endif
2937 #endif
2938
2939 int
2940diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2941index 69bda1a..755113a 100644
2942--- a/arch/arm/kernel/patch.c
2943+++ b/arch/arm/kernel/patch.c
2944@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2945 else
2946 __acquire(&patch_lock);
2947
2948+ pax_open_kernel();
2949 if (thumb2 && __opcode_is_thumb16(insn)) {
2950 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
2951 size = sizeof(u16);
2952@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
2953 *(u32 *)waddr = insn;
2954 size = sizeof(u32);
2955 }
2956+ pax_close_kernel();
2957
2958 if (waddr != addr) {
2959 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
2960diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2961index f192a2a..1a40523 100644
2962--- a/arch/arm/kernel/process.c
2963+++ b/arch/arm/kernel/process.c
2964@@ -105,8 +105,8 @@ void __show_regs(struct pt_regs *regs)
2965
2966 show_regs_print_info(KERN_DEFAULT);
2967
2968- print_symbol("PC is at %s\n", instruction_pointer(regs));
2969- print_symbol("LR is at %s\n", regs->ARM_lr);
2970+ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2971+ printk("LR is at %pA\n", (void *)regs->ARM_lr);
2972 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2973 "sp : %08lx ip : %08lx fp : %08lx\n",
2974 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2975@@ -283,12 +283,6 @@ unsigned long get_wchan(struct task_struct *p)
2976 return 0;
2977 }
2978
2979-unsigned long arch_randomize_brk(struct mm_struct *mm)
2980-{
2981- unsigned long range_end = mm->brk + 0x02000000;
2982- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2983-}
2984-
2985 #ifdef CONFIG_MMU
2986 #ifdef CONFIG_KUSER_HELPERS
2987 /*
2988@@ -304,7 +298,7 @@ static struct vm_area_struct gate_vma = {
2989
2990 static int __init gate_vma_init(void)
2991 {
2992- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2993+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2994 return 0;
2995 }
2996 arch_initcall(gate_vma_init);
2997@@ -333,91 +327,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
2998 return is_gate_vma(vma) ? "[vectors]" : NULL;
2999 }
3000
3001-/* If possible, provide a placement hint at a random offset from the
3002- * stack for the sigpage and vdso pages.
3003- */
3004-static unsigned long sigpage_addr(const struct mm_struct *mm,
3005- unsigned int npages)
3006-{
3007- unsigned long offset;
3008- unsigned long first;
3009- unsigned long last;
3010- unsigned long addr;
3011- unsigned int slots;
3012-
3013- first = PAGE_ALIGN(mm->start_stack);
3014-
3015- last = TASK_SIZE - (npages << PAGE_SHIFT);
3016-
3017- /* No room after stack? */
3018- if (first > last)
3019- return 0;
3020-
3021- /* Just enough room? */
3022- if (first == last)
3023- return first;
3024-
3025- slots = ((last - first) >> PAGE_SHIFT) + 1;
3026-
3027- offset = get_random_int() % slots;
3028-
3029- addr = first + (offset << PAGE_SHIFT);
3030-
3031- return addr;
3032-}
3033-
3034-static struct page *signal_page;
3035-extern struct page *get_signal_page(void);
3036-
3037-static const struct vm_special_mapping sigpage_mapping = {
3038- .name = "[sigpage]",
3039- .pages = &signal_page,
3040-};
3041-
3042 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3043 {
3044 struct mm_struct *mm = current->mm;
3045- struct vm_area_struct *vma;
3046- unsigned long npages;
3047- unsigned long addr;
3048- unsigned long hint;
3049- int ret = 0;
3050-
3051- if (!signal_page)
3052- signal_page = get_signal_page();
3053- if (!signal_page)
3054- return -ENOMEM;
3055-
3056- npages = 1; /* for sigpage */
3057- npages += vdso_total_pages;
3058
3059 down_write(&mm->mmap_sem);
3060- hint = sigpage_addr(mm, npages);
3061- addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
3062- if (IS_ERR_VALUE(addr)) {
3063- ret = addr;
3064- goto up_fail;
3065- }
3066-
3067- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
3068- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
3069- &sigpage_mapping);
3070-
3071- if (IS_ERR(vma)) {
3072- ret = PTR_ERR(vma);
3073- goto up_fail;
3074- }
3075-
3076- mm->context.sigpage = addr;
3077-
3078- /* Unlike the sigpage, failure to install the vdso is unlikely
3079- * to be fatal to the process, so no error check needed
3080- * here.
3081- */
3082- arm_install_vdso(mm, addr + PAGE_SIZE);
3083-
3084- up_fail:
3085+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
3086 up_write(&mm->mmap_sem);
3087- return ret;
3088+ return 0;
3089 }
3090 #endif
3091diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
3092index f90fdf4..24e8c84 100644
3093--- a/arch/arm/kernel/psci.c
3094+++ b/arch/arm/kernel/psci.c
3095@@ -26,7 +26,7 @@
3096 #include <asm/psci.h>
3097 #include <asm/system_misc.h>
3098
3099-struct psci_operations psci_ops;
3100+struct psci_operations psci_ops __read_only;
3101
3102 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3103 typedef int (*psci_initcall_t)(const struct device_node *);
3104diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3105index ef9119f..31995a3 100644
3106--- a/arch/arm/kernel/ptrace.c
3107+++ b/arch/arm/kernel/ptrace.c
3108@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
3109 regs->ARM_ip = ip;
3110 }
3111
3112+#ifdef CONFIG_GRKERNSEC_SETXID
3113+extern void gr_delayed_cred_worker(void);
3114+#endif
3115+
3116 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3117 {
3118 current_thread_info()->syscall = scno;
3119
3120+#ifdef CONFIG_GRKERNSEC_SETXID
3121+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3122+ gr_delayed_cred_worker();
3123+#endif
3124+
3125 /* Do the secure computing check first; failures should be fast. */
3126 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
3127 if (secure_computing() == -1)
3128diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
3129index 3826935..8ed63ed 100644
3130--- a/arch/arm/kernel/reboot.c
3131+++ b/arch/arm/kernel/reboot.c
3132@@ -122,6 +122,7 @@ void machine_power_off(void)
3133
3134 if (pm_power_off)
3135 pm_power_off();
3136+ while (1);
3137 }
3138
3139 /*
3140diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3141index 36c18b7..0d78292 100644
3142--- a/arch/arm/kernel/setup.c
3143+++ b/arch/arm/kernel/setup.c
3144@@ -108,21 +108,23 @@ EXPORT_SYMBOL(elf_hwcap);
3145 unsigned int elf_hwcap2 __read_mostly;
3146 EXPORT_SYMBOL(elf_hwcap2);
3147
3148+pteval_t __supported_pte_mask __read_only;
3149+pmdval_t __supported_pmd_mask __read_only;
3150
3151 #ifdef MULTI_CPU
3152-struct processor processor __read_mostly;
3153+struct processor processor __read_only;
3154 #endif
3155 #ifdef MULTI_TLB
3156-struct cpu_tlb_fns cpu_tlb __read_mostly;
3157+struct cpu_tlb_fns cpu_tlb __read_only;
3158 #endif
3159 #ifdef MULTI_USER
3160-struct cpu_user_fns cpu_user __read_mostly;
3161+struct cpu_user_fns cpu_user __read_only;
3162 #endif
3163 #ifdef MULTI_CACHE
3164-struct cpu_cache_fns cpu_cache __read_mostly;
3165+struct cpu_cache_fns cpu_cache __read_only;
3166 #endif
3167 #ifdef CONFIG_OUTER_CACHE
3168-struct outer_cache_fns outer_cache __read_mostly;
3169+struct outer_cache_fns outer_cache __read_only;
3170 EXPORT_SYMBOL(outer_cache);
3171 #endif
3172
3173@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
3174 * Register 0 and check for VMSAv7 or PMSAv7 */
3175 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
3176 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3177- (mmfr0 & 0x000000f0) >= 0x00000030)
3178+ (mmfr0 & 0x000000f0) >= 0x00000030) {
3179 cpu_arch = CPU_ARCH_ARMv7;
3180- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3181+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3182+ __supported_pte_mask |= L_PTE_PXN;
3183+ __supported_pmd_mask |= PMD_PXNTABLE;
3184+ }
3185+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3186 (mmfr0 & 0x000000f0) == 0x00000020)
3187 cpu_arch = CPU_ARCH_ARMv6;
3188 else
3189diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3190index 586eef2..61aabd4 100644
3191--- a/arch/arm/kernel/signal.c
3192+++ b/arch/arm/kernel/signal.c
3193@@ -24,8 +24,6 @@
3194
3195 extern const unsigned long sigreturn_codes[7];
3196
3197-static unsigned long signal_return_offset;
3198-
3199 #ifdef CONFIG_CRUNCH
3200 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3201 {
3202@@ -390,8 +388,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3203 * except when the MPU has protected the vectors
3204 * page from PL0
3205 */
3206- retcode = mm->context.sigpage + signal_return_offset +
3207- (idx << 2) + thumb;
3208+ retcode = mm->context.sigpage + (idx << 2) + thumb;
3209 } else
3210 #endif
3211 {
3212@@ -597,33 +594,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3213 } while (thread_flags & _TIF_WORK_MASK);
3214 return 0;
3215 }
3216-
3217-struct page *get_signal_page(void)
3218-{
3219- unsigned long ptr;
3220- unsigned offset;
3221- struct page *page;
3222- void *addr;
3223-
3224- page = alloc_pages(GFP_KERNEL, 0);
3225-
3226- if (!page)
3227- return NULL;
3228-
3229- addr = page_address(page);
3230-
3231- /* Give the signal return code some randomness */
3232- offset = 0x200 + (get_random_int() & 0x7fc);
3233- signal_return_offset = offset;
3234-
3235- /*
3236- * Copy signal return handlers into the vector page, and
3237- * set sigreturn to be a pointer to these.
3238- */
3239- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3240-
3241- ptr = (unsigned long)addr + offset;
3242- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3243-
3244- return page;
3245-}
3246diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3247index 3d6b782..8b3baeb 100644
3248--- a/arch/arm/kernel/smp.c
3249+++ b/arch/arm/kernel/smp.c
3250@@ -76,7 +76,7 @@ enum ipi_msg_type {
3251
3252 static DECLARE_COMPLETION(cpu_running);
3253
3254-static struct smp_operations smp_ops;
3255+static struct smp_operations smp_ops __read_only;
3256
3257 void __init smp_set_ops(struct smp_operations *ops)
3258 {
3259diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
3260index b10e136..cb5edf9 100644
3261--- a/arch/arm/kernel/tcm.c
3262+++ b/arch/arm/kernel/tcm.c
3263@@ -64,7 +64,7 @@ static struct map_desc itcm_iomap[] __initdata = {
3264 .virtual = ITCM_OFFSET,
3265 .pfn = __phys_to_pfn(ITCM_OFFSET),
3266 .length = 0,
3267- .type = MT_MEMORY_RWX_ITCM,
3268+ .type = MT_MEMORY_RX_ITCM,
3269 }
3270 };
3271
3272@@ -362,7 +362,9 @@ no_dtcm:
3273 start = &__sitcm_text;
3274 end = &__eitcm_text;
3275 ram = &__itcm_start;
3276+ pax_open_kernel();
3277 memcpy(start, ram, itcm_code_sz);
3278+ pax_close_kernel();
3279 pr_debug("CPU ITCM: copied code from %p - %p\n",
3280 start, end);
3281 itcm_present = true;
3282diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3283index d358226..bfd4019 100644
3284--- a/arch/arm/kernel/traps.c
3285+++ b/arch/arm/kernel/traps.c
3286@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3287 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3288 {
3289 #ifdef CONFIG_KALLSYMS
3290- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3291+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3292 #else
3293 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3294 #endif
3295@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3296 static int die_owner = -1;
3297 static unsigned int die_nest_count;
3298
3299+extern void gr_handle_kernel_exploit(void);
3300+
3301 static unsigned long oops_begin(void)
3302 {
3303 int cpu;
3304@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3305 panic("Fatal exception in interrupt");
3306 if (panic_on_oops)
3307 panic("Fatal exception");
3308+
3309+ gr_handle_kernel_exploit();
3310+
3311 if (signr)
3312 do_exit(signr);
3313 }
3314@@ -870,7 +875,11 @@ void __init early_trap_init(void *vectors_base)
3315 kuser_init(vectors_base);
3316
3317 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3318- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3319+
3320+#ifndef CONFIG_PAX_MEMORY_UDEREF
3321+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3322+#endif
3323+
3324 #else /* ifndef CONFIG_CPU_V7M */
3325 /*
3326 * on V7-M there is no need to copy the vector table to a dedicated
3327diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3328index 8b60fde..8d986dd 100644
3329--- a/arch/arm/kernel/vmlinux.lds.S
3330+++ b/arch/arm/kernel/vmlinux.lds.S
3331@@ -37,7 +37,7 @@
3332 #endif
3333
3334 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3335- defined(CONFIG_GENERIC_BUG)
3336+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3337 #define ARM_EXIT_KEEP(x) x
3338 #define ARM_EXIT_DISCARD(x)
3339 #else
3340@@ -120,6 +120,8 @@ SECTIONS
3341 #ifdef CONFIG_DEBUG_RODATA
3342 . = ALIGN(1<<SECTION_SHIFT);
3343 #endif
3344+ _etext = .; /* End of text section */
3345+
3346 RO_DATA(PAGE_SIZE)
3347
3348 . = ALIGN(4);
3349@@ -150,8 +152,6 @@ SECTIONS
3350
3351 NOTES
3352
3353- _etext = .; /* End of text and rodata section */
3354-
3355 #ifndef CONFIG_XIP_KERNEL
3356 # ifdef CONFIG_ARM_KERNMEM_PERMS
3357 . = ALIGN(1<<SECTION_SHIFT);
3358diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3359index f9c341c..7430436 100644
3360--- a/arch/arm/kvm/arm.c
3361+++ b/arch/arm/kvm/arm.c
3362@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
3363 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3364
3365 /* The VMID used in the VTTBR */
3366-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3367+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3368 static u8 kvm_next_vmid;
3369 static DEFINE_SPINLOCK(kvm_vmid_lock);
3370
3371@@ -372,7 +372,7 @@ void force_vm_exit(const cpumask_t *mask)
3372 */
3373 static bool need_new_vmid_gen(struct kvm *kvm)
3374 {
3375- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3376+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3377 }
3378
3379 /**
3380@@ -405,7 +405,7 @@ static void update_vttbr(struct kvm *kvm)
3381
3382 /* First user of a new VMID generation? */
3383 if (unlikely(kvm_next_vmid == 0)) {
3384- atomic64_inc(&kvm_vmid_gen);
3385+ atomic64_inc_unchecked(&kvm_vmid_gen);
3386 kvm_next_vmid = 1;
3387
3388 /*
3389@@ -422,7 +422,7 @@ static void update_vttbr(struct kvm *kvm)
3390 kvm_call_hyp(__kvm_flush_vm_context);
3391 }
3392
3393- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3394+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3395 kvm->arch.vmid = kvm_next_vmid;
3396 kvm_next_vmid++;
3397
3398@@ -1110,7 +1110,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
3399 /**
3400 * Initialize Hyp-mode and memory mappings on all CPUs.
3401 */
3402-int kvm_arch_init(void *opaque)
3403+int kvm_arch_init(const void *opaque)
3404 {
3405 int err;
3406 int ret, cpu;
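
The VMID generation counter becomes an atomic64_unchecked_t. Under PAX_REFCOUNT, ordinary atomic_t/atomic64_t arithmetic is instrumented to detect signed overflow (the usual symptom of a reference-count bug about to turn into a use-after-free); counters that are allowed to wrap, such as this generation number, opt out via the _unchecked variants. The standalone program below only models the distinction; the real kernel instrumentation uses architecture-specific overflow-trapping code, not a post-hoc compare.

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { long counter; } atomic64_model_t;		/* "checked" flavour */
typedef struct { long counter; } atomic64_unchecked_model_t;	/* wrap-around is fine */

static void atomic64_inc_model(atomic64_model_t *v)
{
	long new = __atomic_add_fetch(&v->counter, 1, __ATOMIC_RELAXED);

	if (new == LONG_MIN) {	/* wrapped past LONG_MAX: looks like a refcount bug */
		__atomic_sub_fetch(&v->counter, 1, __ATOMIC_RELAXED);	/* undo the increment */
		fprintf(stderr, "refcount overflow detected\n");
		abort();	/* the kernel would trap, report and kill instead */
	}
}

static void atomic64_inc_unchecked_model(atomic64_unchecked_model_t *v)
{
	__atomic_add_fetch(&v->counter, 1, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic64_model_t refcount = { 5 };
	atomic64_unchecked_model_t vmid_gen = { LONG_MAX };

	atomic64_inc_model(&refcount);		/* fine; at LONG_MAX it would abort() */
	atomic64_inc_unchecked_model(&vmid_gen);	/* a generation counter may simply wrap */
	printf("refcount=%ld, generation wrapped to %ld\n",
	       refcount.counter, vmid_gen.counter);
	return 0;
}
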
3407diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3408index 1710fd7..ec3e014 100644
3409--- a/arch/arm/lib/clear_user.S
3410+++ b/arch/arm/lib/clear_user.S
3411@@ -12,14 +12,14 @@
3412
3413 .text
3414
3415-/* Prototype: int __clear_user(void *addr, size_t sz)
3416+/* Prototype: int ___clear_user(void *addr, size_t sz)
3417 * Purpose : clear some user memory
3418 * Params : addr - user memory address to clear
3419 * : sz - number of bytes to clear
3420 * Returns : number of bytes NOT cleared
3421 */
3422 ENTRY(__clear_user_std)
3423-WEAK(__clear_user)
3424+WEAK(___clear_user)
3425 stmfd sp!, {r1, lr}
3426 mov r2, #0
3427 cmp r1, #4
3428@@ -44,7 +44,7 @@ WEAK(__clear_user)
3429 USER( strnebt r2, [r0])
3430 mov r0, #0
3431 ldmfd sp!, {r1, pc}
3432-ENDPROC(__clear_user)
3433+ENDPROC(___clear_user)
3434 ENDPROC(__clear_user_std)
3435
3436 .pushsection .text.fixup,"ax"
3437diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3438index 7a235b9..73a0556 100644
3439--- a/arch/arm/lib/copy_from_user.S
3440+++ b/arch/arm/lib/copy_from_user.S
3441@@ -17,7 +17,7 @@
3442 /*
3443 * Prototype:
3444 *
3445- * size_t __copy_from_user(void *to, const void *from, size_t n)
3446+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3447 *
3448 * Purpose:
3449 *
3450@@ -89,11 +89,11 @@
3451
3452 .text
3453
3454-ENTRY(__copy_from_user)
3455+ENTRY(___copy_from_user)
3456
3457 #include "copy_template.S"
3458
3459-ENDPROC(__copy_from_user)
3460+ENDPROC(___copy_from_user)
3461
3462 .pushsection .fixup,"ax"
3463 .align 0
3464diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3465index 6ee2f67..d1cce76 100644
3466--- a/arch/arm/lib/copy_page.S
3467+++ b/arch/arm/lib/copy_page.S
3468@@ -10,6 +10,7 @@
3469 * ASM optimised string functions
3470 */
3471 #include <linux/linkage.h>
3472+#include <linux/const.h>
3473 #include <asm/assembler.h>
3474 #include <asm/asm-offsets.h>
3475 #include <asm/cache.h>
3476diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3477index 9648b06..19c333c 100644
3478--- a/arch/arm/lib/copy_to_user.S
3479+++ b/arch/arm/lib/copy_to_user.S
3480@@ -17,7 +17,7 @@
3481 /*
3482 * Prototype:
3483 *
3484- * size_t __copy_to_user(void *to, const void *from, size_t n)
3485+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3486 *
3487 * Purpose:
3488 *
3489@@ -93,11 +93,11 @@
3490 .text
3491
3492 ENTRY(__copy_to_user_std)
3493-WEAK(__copy_to_user)
3494+WEAK(___copy_to_user)
3495
3496 #include "copy_template.S"
3497
3498-ENDPROC(__copy_to_user)
3499+ENDPROC(___copy_to_user)
3500 ENDPROC(__copy_to_user_std)
3501
3502 .pushsection .text.fixup,"ax"
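
These assembler files only rename the raw routines (__clear_user, __copy_from_user and __copy_to_user gain a third underscore); the familiar double-underscore entry points are reintroduced elsewhere in this patch as wrappers that run the PAX_USERCOPY object-size checks before falling through to the renamed assembly. A sketch of that wrapper shape, assuming it lives in arch/arm/include/asm/uaccess.h and uses the check_object_size() helper the rest of the patch relies on (the real wrappers may differ in detail):

extern unsigned long ___copy_to_user(void __user *to, const void *from,
				     unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	check_object_size(from, n, true);	/* PAX_USERCOPY: source must fit its slab/stack object */
	return ___copy_to_user(to, from, n);
}
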
3503diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3504index 1d0957e..f708846 100644
3505--- a/arch/arm/lib/csumpartialcopyuser.S
3506+++ b/arch/arm/lib/csumpartialcopyuser.S
3507@@ -57,8 +57,8 @@
3508 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3509 */
3510
3511-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3512-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3513+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3514+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3515
3516 #include "csumpartialcopygeneric.S"
3517
3518diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3519index 8044591..c9b2609 100644
3520--- a/arch/arm/lib/delay.c
3521+++ b/arch/arm/lib/delay.c
3522@@ -29,7 +29,7 @@
3523 /*
3524 * Default to the loop-based delay implementation.
3525 */
3526-struct arm_delay_ops arm_delay_ops = {
3527+struct arm_delay_ops arm_delay_ops __read_only = {
3528 .delay = __loop_delay,
3529 .const_udelay = __loop_const_udelay,
3530 .udelay = __loop_udelay,
3531diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3532index 4b39af2..9ae747d 100644
3533--- a/arch/arm/lib/uaccess_with_memcpy.c
3534+++ b/arch/arm/lib/uaccess_with_memcpy.c
3535@@ -85,7 +85,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
3536 return 1;
3537 }
3538
3539-static unsigned long noinline
3540+static unsigned long noinline __size_overflow(3)
3541 __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
3542 {
3543 int atomic;
3544@@ -136,7 +136,7 @@ out:
3545 }
3546
3547 unsigned long
3548-__copy_to_user(void __user *to, const void *from, unsigned long n)
3549+___copy_to_user(void __user *to, const void *from, unsigned long n)
3550 {
3551 /*
3552 * This test is stubbed out of the main function above to keep
3553@@ -150,7 +150,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
3554 return __copy_to_user_memcpy(to, from, n);
3555 }
3556
3557-static unsigned long noinline
3558+static unsigned long noinline __size_overflow(2)
3559 __clear_user_memset(void __user *addr, unsigned long n)
3560 {
3561 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
3562@@ -190,7 +190,7 @@ out:
3563 return n;
3564 }
3565
3566-unsigned long __clear_user(void __user *addr, unsigned long n)
3567+unsigned long ___clear_user(void __user *addr, unsigned long n)
3568 {
3569 /* See rational for this in __copy_to_user() above. */
3569 	/* See rationale for this in __copy_to_user() above. */
3570 if (n < 64)
3571diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
3572index f572219..2cf36d5 100644
3573--- a/arch/arm/mach-exynos/suspend.c
3574+++ b/arch/arm/mach-exynos/suspend.c
3575@@ -732,8 +732,10 @@ void __init exynos_pm_init(void)
3576 tmp |= pm_data->wake_disable_mask;
3577 pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
3578
3579- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3580- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3581+ pax_open_kernel();
3582+ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
3583+ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
3584+ pax_close_kernel();
3585
3586 register_syscore_ops(&exynos_pm_syscore_ops);
3587 suspend_set_ops(&exynos_suspend_ops);
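
This is the first of many hunks with the same shape: a structure of function pointers (syscore_ops, platform_suspend_ops, power-domain and cpuidle ops, ...) has become read-only data under the constify/KERNEXEC machinery, so a late runtime assignment must bracket the write with pax_open_kernel()/pax_close_kernel() and cast away the const-ness the plugin added. The userspace program below is only a model of that idiom built on mprotect(); in the kernel the open/close pair briefly lifts the kernel's own write protection in an architecture-specific way rather than flipping page permissions for everyone.

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct suspend_ops {
	int (*enter)(int state);
};

static int real_enter(int state)
{
	printf("entering suspend state %d\n", state);
	return 0;
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	struct suspend_ops *ops;

	/* stand-in for an ops structure living in write-protected kernel data */
	ops = mmap(NULL, page, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ops == MAP_FAILED)
		return 1;
	mprotect(ops, page, PROT_READ);			/* now effectively rodata */

	mprotect(ops, page, PROT_READ | PROT_WRITE);	/* pax_open_kernel()  */
	*(void **)&ops->enter = (void *)real_enter;	/* write despite the const type */
	mprotect(ops, page, PROT_READ);			/* pax_close_kernel() */

	return ops->enter(3);
}
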
3588diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
3589index e46e9ea..9141c83 100644
3590--- a/arch/arm/mach-mvebu/coherency.c
3591+++ b/arch/arm/mach-mvebu/coherency.c
3592@@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
3593
3594 /*
3595 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
3596- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
3597+ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
3598 * is needed as a workaround for a deadlock issue between the PCIe
3599 * interface and the cache controller.
3600 */
3601@@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
3602 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
3603
3604 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
3605- mtype = MT_UNCACHED;
3606+ mtype = MT_UNCACHED_RW;
3607
3608 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
3609 }
3610diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3611index b6443a4..20a0b74 100644
3612--- a/arch/arm/mach-omap2/board-n8x0.c
3613+++ b/arch/arm/mach-omap2/board-n8x0.c
3614@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3615 }
3616 #endif
3617
3618-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3619+struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3620 .late_init = n8x0_menelaus_late_init,
3621 };
3622
3623diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3624index 79f49d9..70bf184 100644
3625--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3626+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3627@@ -86,7 +86,7 @@ struct cpu_pm_ops {
3628 void (*resume)(void);
3629 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3630 void (*hotplug_restart)(void);
3631-};
3632+} __no_const;
3633
3634 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3635 static struct powerdomain *mpuss_pd;
3636@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
3637 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3638 {}
3639
3640-struct cpu_pm_ops omap_pm_ops = {
3641+static struct cpu_pm_ops omap_pm_ops __read_only = {
3642 .finish_suspend = default_finish_suspend,
3643 .resume = dummy_cpu_resume,
3644 .scu_prepare = dummy_scu_prepare,
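
__no_const and __read_only (and __do_const, used further down for l2c_init_data) are the annotations that drive the pattern above: the constify plugin turns structure types consisting only of function pointers into const types, __no_const opts a type back out when its fields genuinely have to be assigned at runtime, __do_const forces constification on, and __read_only places an individual object in memory that KERNEXEC write-protects after boot. The fragment below is a sketch of how the pieces combine; the annotation names are the ones used throughout this patch, but the helper names in the sketch are made up for illustration and the macro expansions depend on the PaX plugins being enabled.

/* A SoC-ops type that must stay assignable, with its only instance still
 * kept in write-protected memory. */
struct soc_pm_ops {
	int (*finish_suspend)(unsigned long cpu_state);
	void (*resume)(void);
} __no_const;					/* type may be written to... */

static struct soc_pm_ops soc_pm_ops __read_only;	/* ...but the object is rodata after init */

static int soc5_finish_suspend(unsigned long cpu_state) { return 0; }	/* hypothetical */

static void __init pick_pm_ops(void)
{
	pax_open_kernel();			/* writes to __read_only data need the bracket */
	*(void **)&soc_pm_ops.finish_suspend = soc5_finish_suspend;
	pax_close_kernel();
}
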
3645diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
3646index 5305ec7..6d74045 100644
3647--- a/arch/arm/mach-omap2/omap-smp.c
3648+++ b/arch/arm/mach-omap2/omap-smp.c
3649@@ -19,6 +19,7 @@
3650 #include <linux/device.h>
3651 #include <linux/smp.h>
3652 #include <linux/io.h>
3653+#include <linux/irq.h>
3654 #include <linux/irqchip/arm-gic.h>
3655
3656 #include <asm/smp_scu.h>
3657diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3658index e1d2e99..d9b3177 100644
3659--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3660+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3661@@ -330,7 +330,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3662 return NOTIFY_OK;
3663 }
3664
3665-static struct notifier_block __refdata irq_hotplug_notifier = {
3666+static struct notifier_block irq_hotplug_notifier = {
3667 .notifier_call = irq_cpu_hotplug_notify,
3668 };
3669
3670diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3671index 4cb8fd9..5ce65bc 100644
3672--- a/arch/arm/mach-omap2/omap_device.c
3673+++ b/arch/arm/mach-omap2/omap_device.c
3674@@ -504,7 +504,7 @@ void omap_device_delete(struct omap_device *od)
3675 struct platform_device __init *omap_device_build(const char *pdev_name,
3676 int pdev_id,
3677 struct omap_hwmod *oh,
3678- void *pdata, int pdata_len)
3679+ const void *pdata, int pdata_len)
3680 {
3681 struct omap_hwmod *ohs[] = { oh };
3682
3683@@ -532,7 +532,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3684 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3685 int pdev_id,
3686 struct omap_hwmod **ohs,
3687- int oh_cnt, void *pdata,
3688+ int oh_cnt, const void *pdata,
3689 int pdata_len)
3690 {
3691 int ret = -ENOMEM;
3692diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3693index 78c02b3..c94109a 100644
3694--- a/arch/arm/mach-omap2/omap_device.h
3695+++ b/arch/arm/mach-omap2/omap_device.h
3696@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3697 /* Core code interface */
3698
3699 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3700- struct omap_hwmod *oh, void *pdata,
3701+ struct omap_hwmod *oh, const void *pdata,
3702 int pdata_len);
3703
3704 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3705 struct omap_hwmod **oh, int oh_cnt,
3706- void *pdata, int pdata_len);
3707+ const void *pdata, int pdata_len);
3708
3709 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3710 struct omap_hwmod **ohs, int oh_cnt);
3711diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3712index 486cc4d..8d1a0b7 100644
3713--- a/arch/arm/mach-omap2/omap_hwmod.c
3714+++ b/arch/arm/mach-omap2/omap_hwmod.c
3715@@ -199,10 +199,10 @@ struct omap_hwmod_soc_ops {
3716 int (*init_clkdm)(struct omap_hwmod *oh);
3717 void (*update_context_lost)(struct omap_hwmod *oh);
3718 int (*get_context_lost)(struct omap_hwmod *oh);
3719-};
3720+} __no_const;
3721
3722 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3723-static struct omap_hwmod_soc_ops soc_ops;
3724+static struct omap_hwmod_soc_ops soc_ops __read_only;
3725
3726 /* omap_hwmod_list contains all registered struct omap_hwmods */
3727 static LIST_HEAD(omap_hwmod_list);
3728diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
3729index 95fee54..cfa9cf1 100644
3730--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
3731+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
3732@@ -10,6 +10,7 @@
3733
3734 #include <linux/kernel.h>
3735 #include <linux/init.h>
3736+#include <asm/pgtable.h>
3737
3738 #include "powerdomain.h"
3739
3740@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
3741
3742 void __init am43xx_powerdomains_init(void)
3743 {
3744- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3745+ pax_open_kernel();
3746+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
3747+ pax_close_kernel();
3748 pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
3749 pwrdm_register_pwrdms(powerdomains_am43xx);
3750 pwrdm_complete_init();
3751diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3752index ff0a68c..b312aa0 100644
3753--- a/arch/arm/mach-omap2/wd_timer.c
3754+++ b/arch/arm/mach-omap2/wd_timer.c
3755@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3756 struct omap_hwmod *oh;
3757 char *oh_name = "wd_timer2";
3758 char *dev_name = "omap_wdt";
3759- struct omap_wd_timer_platform_data pdata;
3760+ static struct omap_wd_timer_platform_data pdata = {
3761+ .read_reset_sources = prm_read_reset_sources
3762+ };
3763
3764 if (!cpu_class_is_omap2() || of_have_populated_dt())
3765 return 0;
3766@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3767 return -EINVAL;
3768 }
3769
3770- pdata.read_reset_sources = prm_read_reset_sources;
3771-
3772 pdev = omap_device_build(dev_name, id, oh, &pdata,
3773 sizeof(struct omap_wd_timer_platform_data));
3774 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3775diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
3776index b0790fc..71eb21f 100644
3777--- a/arch/arm/mach-shmobile/platsmp-apmu.c
3778+++ b/arch/arm/mach-shmobile/platsmp-apmu.c
3779@@ -22,6 +22,7 @@
3780 #include <asm/proc-fns.h>
3781 #include <asm/smp_plat.h>
3782 #include <asm/suspend.h>
3783+#include <asm/pgtable.h>
3784 #include "common.h"
3785 #include "platsmp-apmu.h"
3786
3787@@ -233,6 +234,8 @@ static int shmobile_smp_apmu_enter_suspend(suspend_state_t state)
3788
3789 void __init shmobile_smp_apmu_suspend_init(void)
3790 {
3791- shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
3792+ pax_open_kernel();
3793+ *(void **)&shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
3794+ pax_close_kernel();
3795 }
3796 #endif
3797diff --git a/arch/arm/mach-shmobile/pm-r8a7740.c b/arch/arm/mach-shmobile/pm-r8a7740.c
3798index 34608fc..344d7c0 100644
3799--- a/arch/arm/mach-shmobile/pm-r8a7740.c
3800+++ b/arch/arm/mach-shmobile/pm-r8a7740.c
3801@@ -11,6 +11,7 @@
3802 #include <linux/console.h>
3803 #include <linux/io.h>
3804 #include <linux/suspend.h>
3805+#include <asm/pgtable.h>
3806
3807 #include "common.h"
3808 #include "pm-rmobile.h"
3809@@ -117,7 +118,9 @@ static int r8a7740_enter_suspend(suspend_state_t suspend_state)
3810
3811 static void r8a7740_suspend_init(void)
3812 {
3813- shmobile_suspend_ops.enter = r8a7740_enter_suspend;
3814+ pax_open_kernel();
3815+ *(void **)&shmobile_suspend_ops.enter = r8a7740_enter_suspend;
3816+ pax_close_kernel();
3817 }
3818 #else
3819 static void r8a7740_suspend_init(void) {}
3820diff --git a/arch/arm/mach-shmobile/pm-sh73a0.c b/arch/arm/mach-shmobile/pm-sh73a0.c
3821index a7e4668..83334f33 100644
3822--- a/arch/arm/mach-shmobile/pm-sh73a0.c
3823+++ b/arch/arm/mach-shmobile/pm-sh73a0.c
3824@@ -9,6 +9,7 @@
3825 */
3826
3827 #include <linux/suspend.h>
3828+#include <asm/pgtable.h>
3829 #include "common.h"
3830
3831 #ifdef CONFIG_SUSPEND
3832@@ -20,7 +21,9 @@ static int sh73a0_enter_suspend(suspend_state_t suspend_state)
3833
3834 static void sh73a0_suspend_init(void)
3835 {
3836- shmobile_suspend_ops.enter = sh73a0_enter_suspend;
3837+ pax_open_kernel();
3838+ *(void **)&shmobile_suspend_ops.enter = sh73a0_enter_suspend;
3839+ pax_close_kernel();
3840 }
3841 #else
3842 static void sh73a0_suspend_init(void) {}
3843diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3844index 7469347..1ecc350 100644
3845--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3846+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3847@@ -177,7 +177,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3848 bool entered_lp2 = false;
3849
3850 if (tegra_pending_sgi())
3851- ACCESS_ONCE(abort_flag) = true;
3852+ ACCESS_ONCE_RW(abort_flag) = true;
3853
3854 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3855
3856diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
3857index 3b9098d..15b390f 100644
3858--- a/arch/arm/mach-tegra/irq.c
3859+++ b/arch/arm/mach-tegra/irq.c
3860@@ -20,6 +20,7 @@
3861 #include <linux/cpu_pm.h>
3862 #include <linux/interrupt.h>
3863 #include <linux/io.h>
3864+#include <linux/irq.h>
3865 #include <linux/irqchip/arm-gic.h>
3866 #include <linux/irq.h>
3867 #include <linux/kernel.h>
3868diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
3869index 8538910..2f39bc4 100644
3870--- a/arch/arm/mach-ux500/pm.c
3871+++ b/arch/arm/mach-ux500/pm.c
3872@@ -10,6 +10,7 @@
3873 */
3874
3875 #include <linux/kernel.h>
3876+#include <linux/irq.h>
3877 #include <linux/irqchip/arm-gic.h>
3878 #include <linux/delay.h>
3879 #include <linux/io.h>
3880diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
3881index f66816c..228b951 100644
3882--- a/arch/arm/mach-zynq/platsmp.c
3883+++ b/arch/arm/mach-zynq/platsmp.c
3884@@ -24,6 +24,7 @@
3885 #include <linux/io.h>
3886 #include <asm/cacheflush.h>
3887 #include <asm/smp_scu.h>
3888+#include <linux/irq.h>
3889 #include <linux/irqchip/arm-gic.h>
3890 #include "common.h"
3891
3892diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3893index 7c6b976..055db09 100644
3894--- a/arch/arm/mm/Kconfig
3895+++ b/arch/arm/mm/Kconfig
3896@@ -446,6 +446,7 @@ config CPU_32v5
3897
3898 config CPU_32v6
3899 bool
3900+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3901 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3902
3903 config CPU_32v6K
3904@@ -600,6 +601,7 @@ config CPU_CP15_MPU
3905
3906 config CPU_USE_DOMAINS
3907 bool
3908+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3909 help
3910 This option enables or disables the use of domain switching
3911 via the set_fs() function.
3912@@ -818,7 +820,7 @@ config NEED_KUSER_HELPERS
3913
3914 config KUSER_HELPERS
3915 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3916- depends on MMU
3917+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
3918 default y
3919 help
3920 Warning: disabling this option may break user programs.
3921@@ -832,7 +834,7 @@ config KUSER_HELPERS
3922 See Documentation/arm/kernel_user_helpers.txt for details.
3923
3924 However, the fixed address nature of these helpers can be used
3925- by ROP (return orientated programming) authors when creating
3926+ by ROP (Return Oriented Programming) authors when creating
3927 exploits.
3928
3929 If all of the binaries and libraries which run on your platform
3930diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3931index 9769f1e..16aaa55 100644
3932--- a/arch/arm/mm/alignment.c
3933+++ b/arch/arm/mm/alignment.c
3934@@ -216,10 +216,12 @@ union offset_union {
3935 #define __get16_unaligned_check(ins,val,addr) \
3936 do { \
3937 unsigned int err = 0, v, a = addr; \
3938+ pax_open_userland(); \
3939 __get8_unaligned_check(ins,v,a,err); \
3940 val = v << ((BE) ? 8 : 0); \
3941 __get8_unaligned_check(ins,v,a,err); \
3942 val |= v << ((BE) ? 0 : 8); \
3943+ pax_close_userland(); \
3944 if (err) \
3945 goto fault; \
3946 } while (0)
3947@@ -233,6 +235,7 @@ union offset_union {
3948 #define __get32_unaligned_check(ins,val,addr) \
3949 do { \
3950 unsigned int err = 0, v, a = addr; \
3951+ pax_open_userland(); \
3952 __get8_unaligned_check(ins,v,a,err); \
3953 val = v << ((BE) ? 24 : 0); \
3954 __get8_unaligned_check(ins,v,a,err); \
3955@@ -241,6 +244,7 @@ union offset_union {
3956 val |= v << ((BE) ? 8 : 16); \
3957 __get8_unaligned_check(ins,v,a,err); \
3958 val |= v << ((BE) ? 0 : 24); \
3959+ pax_close_userland(); \
3960 if (err) \
3961 goto fault; \
3962 } while (0)
3963@@ -254,6 +258,7 @@ union offset_union {
3964 #define __put16_unaligned_check(ins,val,addr) \
3965 do { \
3966 unsigned int err = 0, v = val, a = addr; \
3967+ pax_open_userland(); \
3968 __asm__( FIRST_BYTE_16 \
3969 ARM( "1: "ins" %1, [%2], #1\n" ) \
3970 THUMB( "1: "ins" %1, [%2]\n" ) \
3971@@ -273,6 +278,7 @@ union offset_union {
3972 " .popsection\n" \
3973 : "=r" (err), "=&r" (v), "=&r" (a) \
3974 : "0" (err), "1" (v), "2" (a)); \
3975+ pax_close_userland(); \
3976 if (err) \
3977 goto fault; \
3978 } while (0)
3979@@ -286,6 +292,7 @@ union offset_union {
3980 #define __put32_unaligned_check(ins,val,addr) \
3981 do { \
3982 unsigned int err = 0, v = val, a = addr; \
3983+ pax_open_userland(); \
3984 __asm__( FIRST_BYTE_32 \
3985 ARM( "1: "ins" %1, [%2], #1\n" ) \
3986 THUMB( "1: "ins" %1, [%2]\n" ) \
3987@@ -315,6 +322,7 @@ union offset_union {
3988 " .popsection\n" \
3989 : "=r" (err), "=&r" (v), "=&r" (a) \
3990 : "0" (err), "1" (v), "2" (a)); \
3991+ pax_close_userland(); \
3992 if (err) \
3993 goto fault; \
3994 } while (0)
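
The alignment fixup handler reads and writes the faulting user access one byte at a time, and under PAX_MEMORY_UDEREF the kernel cannot touch userland at all unless it opens a window first, hence the pax_open_userland()/pax_close_userland() pair around every unaligned-access macro. A sketch of what those helpers amount to on ARM, built on the modify_domain() helper this patch adds in arch/arm/mm/mmu.c; the real definitions live in arch/arm/include/asm/domain.h and may use different domain values.

/* Sketch (assumption): userland sits in its own ARM protection domain, kept
 * at "no access" while in kernel mode; these helpers flip it to client
 * access for the few instructions that legitimately dereference user memory. */
static inline void pax_open_userland(void)
{
	modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);	/* allow user accesses */
}

static inline void pax_close_userland(void)
{
	modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);	/* fault on any user access again */
}
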
3995diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3996index 71b3d33..8af9ade 100644
3997--- a/arch/arm/mm/cache-l2x0.c
3998+++ b/arch/arm/mm/cache-l2x0.c
3999@@ -44,7 +44,7 @@ struct l2c_init_data {
4000 void (*configure)(void __iomem *);
4001 void (*unlock)(void __iomem *, unsigned);
4002 struct outer_cache_fns outer_cache;
4003-};
4004+} __do_const;
4005
4006 #define CACHE_LINE_SIZE 32
4007
4008diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
4009index 845769e..4278fd7 100644
4010--- a/arch/arm/mm/context.c
4011+++ b/arch/arm/mm/context.c
4012@@ -43,7 +43,7 @@
4013 #define NUM_USER_ASIDS ASID_FIRST_VERSION
4014
4015 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
4016-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4017+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
4018 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
4019
4020 static DEFINE_PER_CPU(atomic64_t, active_asids);
4021@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4022 {
4023 static u32 cur_idx = 1;
4024 u64 asid = atomic64_read(&mm->context.id);
4025- u64 generation = atomic64_read(&asid_generation);
4026+ u64 generation = atomic64_read_unchecked(&asid_generation);
4027
4028 if (asid != 0) {
4029 /*
4030@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
4031 */
4032 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
4033 if (asid == NUM_USER_ASIDS) {
4034- generation = atomic64_add_return(ASID_FIRST_VERSION,
4035+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
4036 &asid_generation);
4037 flush_context(cpu);
4038 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
4039@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
4040 cpu_set_reserved_ttbr0();
4041
4042 asid = atomic64_read(&mm->context.id);
4043- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
4044+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
4045 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
4046 goto switch_mm_fastpath;
4047
4048 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
4049 /* Check that our ASID belongs to the current generation. */
4050 asid = atomic64_read(&mm->context.id);
4051- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
4052+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
4053 asid = new_context(mm, cpu);
4054 atomic64_set(&mm->context.id, asid);
4055 }
4056diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
4057index 0d629b8..01867c8 100644
4058--- a/arch/arm/mm/fault.c
4059+++ b/arch/arm/mm/fault.c
4060@@ -25,6 +25,7 @@
4061 #include <asm/system_misc.h>
4062 #include <asm/system_info.h>
4063 #include <asm/tlbflush.h>
4064+#include <asm/sections.h>
4065
4066 #include "fault.h"
4067
4068@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
4069 if (fixup_exception(regs))
4070 return;
4071
4072+#ifdef CONFIG_PAX_MEMORY_UDEREF
4073+ if (addr < TASK_SIZE) {
4074+ if (current->signal->curr_ip)
4075+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4076+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4077+ else
4078+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4079+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4080+ }
4081+#endif
4082+
4083+#ifdef CONFIG_PAX_KERNEXEC
4084+ if ((fsr & FSR_WRITE) &&
4085+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
4086+ (MODULES_VADDR <= addr && addr < MODULES_END)))
4087+ {
4088+ if (current->signal->curr_ip)
4089+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4090+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4091+ else
4092+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
4093+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
4094+ }
4095+#endif
4096+
4097 /*
4098 * No handler, we'll have to terminate things with extreme prejudice.
4099 */
4100@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
4101 }
4102 #endif
4103
4104+#ifdef CONFIG_PAX_PAGEEXEC
4105+ if (fsr & FSR_LNX_PF) {
4106+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
4107+ do_group_exit(SIGKILL);
4108+ }
4109+#endif
4110+
4111 tsk->thread.address = addr;
4112 tsk->thread.error_code = fsr;
4113 tsk->thread.trap_no = 14;
4114@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4115 }
4116 #endif /* CONFIG_MMU */
4117
4118+#ifdef CONFIG_PAX_PAGEEXEC
4119+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4120+{
4121+ long i;
4122+
4123+ printk(KERN_ERR "PAX: bytes at PC: ");
4124+ for (i = 0; i < 20; i++) {
4125+ unsigned char c;
4126+ if (get_user(c, (__force unsigned char __user *)pc+i))
4127+ printk(KERN_CONT "?? ");
4128+ else
4129+ printk(KERN_CONT "%02x ", c);
4130+ }
4131+ printk("\n");
4132+
4133+ printk(KERN_ERR "PAX: bytes at SP-4: ");
4134+ for (i = -1; i < 20; i++) {
4135+ unsigned long c;
4136+ if (get_user(c, (__force unsigned long __user *)sp+i))
4137+ printk(KERN_CONT "???????? ");
4138+ else
4139+ printk(KERN_CONT "%08lx ", c);
4140+ }
4141+ printk("\n");
4142+}
4143+#endif
4144+
4145 /*
4146 * First Level Translation Fault Handler
4147 *
4148@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
4149 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
4150 struct siginfo info;
4151
4152+#ifdef CONFIG_PAX_MEMORY_UDEREF
4153+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
4154+ if (current->signal->curr_ip)
4155+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4156+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4157+ else
4158+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
4159+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
4160+ goto die;
4161+ }
4162+#endif
4163+
4164 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
4165 return;
4166
4167+die:
4168 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
4169 inf->name, fsr, addr);
4170 show_pte(current->mm, addr);
4171@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
4172 ifsr_info[nr].name = name;
4173 }
4174
4175+asmlinkage int sys_sigreturn(struct pt_regs *regs);
4176+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
4177+
4178 asmlinkage void __exception
4179 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
4180 {
4181 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
4182 struct siginfo info;
4183+ unsigned long pc = instruction_pointer(regs);
4184+
4185+ if (user_mode(regs)) {
4186+ unsigned long sigpage = current->mm->context.sigpage;
4187+
4188+ if (sigpage <= pc && pc < sigpage + 7*4) {
4189+ if (pc < sigpage + 3*4)
4190+ sys_sigreturn(regs);
4191+ else
4192+ sys_rt_sigreturn(regs);
4193+ return;
4194+ }
4195+ if (pc == 0xffff0f60UL) {
4196+ /*
4197+ * PaX: __kuser_cmpxchg64 emulation
4198+ */
4199+ // TODO
4200+ //regs->ARM_pc = regs->ARM_lr;
4201+ //return;
4202+ }
4203+ if (pc == 0xffff0fa0UL) {
4204+ /*
4205+ * PaX: __kuser_memory_barrier emulation
4206+ */
4207+ // dmb(); implied by the exception
4208+ regs->ARM_pc = regs->ARM_lr;
4209+ return;
4210+ }
4211+ if (pc == 0xffff0fc0UL) {
4212+ /*
4213+ * PaX: __kuser_cmpxchg emulation
4214+ */
4215+ // TODO
4216+ //long new;
4217+ //int op;
4218+
4219+ //op = FUTEX_OP_SET << 28;
4220+ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
4221+ //regs->ARM_r0 = old != new;
4222+ //regs->ARM_pc = regs->ARM_lr;
4223+ //return;
4224+ }
4225+ if (pc == 0xffff0fe0UL) {
4226+ /*
4227+ * PaX: __kuser_get_tls emulation
4228+ */
4229+ regs->ARM_r0 = current_thread_info()->tp_value[0];
4230+ regs->ARM_pc = regs->ARM_lr;
4231+ return;
4232+ }
4233+ }
4234+
4235+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4236+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
4237+ if (current->signal->curr_ip)
4238+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4239+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4240+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4241+ else
4242+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4243+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4244+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4245+ goto die;
4246+ }
4247+#endif
4248+
4249+#ifdef CONFIG_PAX_REFCOUNT
4250+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4251+#ifdef CONFIG_THUMB2_KERNEL
4252+ unsigned short bkpt;
4253+
4254+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
4255+#else
4256+ unsigned int bkpt;
4257+
4258+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4259+#endif
4260+ current->thread.error_code = ifsr;
4261+ current->thread.trap_no = 0;
4262+ pax_report_refcount_overflow(regs);
4263+ fixup_exception(regs);
4264+ return;
4265+ }
4266+ }
4267+#endif
4268
4269 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4270 return;
4271
4272+die:
4273 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4274 inf->name, ifsr, addr);
4275
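
Most of what is added to do_PrefetchAbort() exists because this patch stops exposing the vector-page helpers to userland on v6/v7 (see the KUSER_HELPERS dependency change in arch/arm/mm/Kconfig above): legacy binaries that still branch to the fixed kuser addresses now take a prefetch abort, and the handler emulates the helper in software (the TLS read and memory barrier are handled, the cmpxchg helpers are left as TODOs), alongside the sigreturn trampolines. The second block recognises the PAX_REFCOUNT trap instruction at the faulting PC and routes it to the refcount-overflow report. For reference, this is how a program that still relies on the kuser interface reaches the helper that is now emulated:

/* Standalone illustration: 0xffff0fe0 is the architecturally fixed address of
 * __kuser_get_tls (Documentation/arm/kernel_user_helpers.txt).  On a kernel
 * built with this patch and without GRKERNSEC_OLD_ARM_USERLAND the call below
 * faults, and do_PrefetchAbort() performs the same job: r0 = TLS, pc = lr. */
typedef void *(*kuser_get_tls_fn)(void);
#define __kuser_get_tls ((kuser_get_tls_fn)0xffff0fe0)

void *read_tls_the_old_way(void)
{
	return __kuser_get_tls();
}
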
4276diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4277index cf08bdf..772656c 100644
4278--- a/arch/arm/mm/fault.h
4279+++ b/arch/arm/mm/fault.h
4280@@ -3,6 +3,7 @@
4281
4282 /*
4283 * Fault status register encodings. We steal bit 31 for our own purposes.
4284+ * Set when the FSR value is from an instruction fault.
4285 */
4286 #define FSR_LNX_PF (1 << 31)
4287 #define FSR_WRITE (1 << 11)
4288@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4289 }
4290 #endif
4291
4292+/* valid for LPAE and !LPAE */
4293+static inline int is_xn_fault(unsigned int fsr)
4294+{
4295+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
4296+}
4297+
4298+static inline int is_domain_fault(unsigned int fsr)
4299+{
4300+ return ((fsr_fs(fsr) & 0xD) == 0x9);
4301+}
4302+
4303 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4304 unsigned long search_exception_table(unsigned long addr);
4305
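
is_xn_fault() and is_domain_fault() let the abort handlers above tell an execute-never violation (KERNEXEC/UDEREF doing its job) and a domain fault (a blocked userland dereference) apart from ordinary translation and permission faults, purely by masking the FSR fault-status field. The mask arithmetic for the domain case can be checked in isolation:

#include <assert.h>

/* Same predicate as is_domain_fault(), applied to the extracted fault status. */
static int is_domain_fs(unsigned int fs)
{
	return (fs & 0xD) == 0x9;
}

int main(void)
{
	/* short-descriptor FSR encodings: 0b01001 section domain fault,
	 * 0b01011 page domain fault -- both match... */
	assert(is_domain_fs(0x9));
	assert(is_domain_fs(0xB));
	/* ...while the neighbouring permission faults (0b01101, 0b01111) do not */
	assert(!is_domain_fs(0xD));
	assert(!is_domain_fs(0xF));
	return 0;
}
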
4306diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4307index 8a63b4c..6b04370 100644
4308--- a/arch/arm/mm/init.c
4309+++ b/arch/arm/mm/init.c
4310@@ -710,7 +710,46 @@ void free_tcmmem(void)
4311 {
4312 #ifdef CONFIG_HAVE_TCM
4313 extern char __tcm_start, __tcm_end;
4314+#endif
4315
4316+#ifdef CONFIG_PAX_KERNEXEC
4317+ unsigned long addr;
4318+ pgd_t *pgd;
4319+ pud_t *pud;
4320+ pmd_t *pmd;
4321+ int cpu_arch = cpu_architecture();
4322+ unsigned int cr = get_cr();
4323+
4324+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4325+ /* make pages tables, etc before .text NX */
4325+	/* make page tables, etc. before .text NX */
4326+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4327+ pgd = pgd_offset_k(addr);
4328+ pud = pud_offset(pgd, addr);
4329+ pmd = pmd_offset(pud, addr);
4330+ __section_update(pmd, addr, PMD_SECT_XN);
4331+ }
4332+ /* make init NX */
4333+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4334+ pgd = pgd_offset_k(addr);
4335+ pud = pud_offset(pgd, addr);
4336+ pmd = pmd_offset(pud, addr);
4337+ __section_update(pmd, addr, PMD_SECT_XN);
4338+ }
4339+ /* make kernel code/rodata RX */
4340+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4341+ pgd = pgd_offset_k(addr);
4342+ pud = pud_offset(pgd, addr);
4343+ pmd = pmd_offset(pud, addr);
4344+#ifdef CONFIG_ARM_LPAE
4345+ __section_update(pmd, addr, PMD_SECT_RDONLY);
4346+#else
4347+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4348+#endif
4349+ }
4350+ }
4351+#endif
4352+
4353+#ifdef CONFIG_HAVE_TCM
4354 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4355 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4356 #endif
4357diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4358index 0c81056..97279f7 100644
4359--- a/arch/arm/mm/ioremap.c
4360+++ b/arch/arm/mm/ioremap.c
4361@@ -405,9 +405,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4362 unsigned int mtype;
4363
4364 if (cached)
4365- mtype = MT_MEMORY_RWX;
4366+ mtype = MT_MEMORY_RX;
4367 else
4368- mtype = MT_MEMORY_RWX_NONCACHED;
4369+ mtype = MT_MEMORY_RX_NONCACHED;
4370
4371 return __arm_ioremap_caller(phys_addr, size, mtype,
4372 __builtin_return_address(0));
4373diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4374index 407dc78..047ce9d 100644
4375--- a/arch/arm/mm/mmap.c
4376+++ b/arch/arm/mm/mmap.c
4377@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4378 struct vm_area_struct *vma;
4379 int do_align = 0;
4380 int aliasing = cache_is_vipt_aliasing();
4381+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4382 struct vm_unmapped_area_info info;
4383
4384 /*
4385@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4386 if (len > TASK_SIZE)
4387 return -ENOMEM;
4388
4389+#ifdef CONFIG_PAX_RANDMMAP
4390+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4391+#endif
4392+
4393 if (addr) {
4394 if (do_align)
4395 addr = COLOUR_ALIGN(addr, pgoff);
4396@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4397 addr = PAGE_ALIGN(addr);
4398
4399 vma = find_vma(mm, addr);
4400- if (TASK_SIZE - len >= addr &&
4401- (!vma || addr + len <= vma->vm_start))
4402+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4403 return addr;
4404 }
4405
4406@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4407 info.high_limit = TASK_SIZE;
4408 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4409 info.align_offset = pgoff << PAGE_SHIFT;
4410+ info.threadstack_offset = offset;
4411 return vm_unmapped_area(&info);
4412 }
4413
4414@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4415 unsigned long addr = addr0;
4416 int do_align = 0;
4417 int aliasing = cache_is_vipt_aliasing();
4418+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4419 struct vm_unmapped_area_info info;
4420
4421 /*
4422@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4423 return addr;
4424 }
4425
4426+#ifdef CONFIG_PAX_RANDMMAP
4427+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4428+#endif
4429+
4430 /* requesting a specific address */
4431 if (addr) {
4432 if (do_align)
4433@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4434 else
4435 addr = PAGE_ALIGN(addr);
4436 vma = find_vma(mm, addr);
4437- if (TASK_SIZE - len >= addr &&
4438- (!vma || addr + len <= vma->vm_start))
4439+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4440 return addr;
4441 }
4442
4443@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4444 info.high_limit = mm->mmap_base;
4445 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4446 info.align_offset = pgoff << PAGE_SHIFT;
4447+ info.threadstack_offset = offset;
4448 addr = vm_unmapped_area(&info);
4449
4450 /*
4451@@ -183,14 +193,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4452 {
4453 unsigned long random_factor = 0UL;
4454
4455+#ifdef CONFIG_PAX_RANDMMAP
4456+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4457+#endif
4458+
4459 if (current->flags & PF_RANDOMIZE)
4460 random_factor = arch_mmap_rnd();
4461
4462 if (mmap_is_legacy()) {
4463 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4464+
4465+#ifdef CONFIG_PAX_RANDMMAP
4466+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4467+ mm->mmap_base += mm->delta_mmap;
4468+#endif
4469+
4470 mm->get_unmapped_area = arch_get_unmapped_area;
4471 } else {
4472 mm->mmap_base = mmap_base(random_factor);
4473+
4474+#ifdef CONFIG_PAX_RANDMMAP
4475+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4476+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4477+#endif
4478+
4479 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4480 }
4481 }
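
Two things change in the ARM mmap paths: the old "addr + len <= vma->vm_start" test becomes check_heap_stack_gap(), which also demands a per-mapping slack (gr_rand_threadstack_offset()) between the new mapping and the next VMA, and under PAX_RANDMMAP the fixed-hint shortcut is skipped and mmap_base is shifted by delta_mmap (plus delta_stack in the top-down case). The gap check reduces to simple interval arithmetic, modeled below; the real helper is more involved and also considers growing stacks.

#include <stdbool.h>
#include <stdio.h>

struct vma_model { unsigned long vm_start, vm_end; };

/* Candidate range [addr, addr+len) must leave `offset` bytes of slack before
 * the next VMA, so a mapping placed near a (thread) stack cannot abut it. */
static bool check_gap_model(const struct vma_model *next, unsigned long addr,
			    unsigned long len, unsigned long offset)
{
	if (!next)
		return true;		/* nothing above the candidate range */
	return addr + len + offset <= next->vm_start;
}

int main(void)
{
	struct vma_model stack = { 0xbf000000UL, 0xbf800000UL };

	/* would have passed the old check, but leaves no randomized gap */
	printf("%d\n", check_gap_model(&stack, 0xbefff000UL, 0x1000, 0x10000));
	/* the same request placed lower passes */
	printf("%d\n", check_gap_model(&stack, 0xbe000000UL, 0x1000, 0x10000));
	return 0;
}
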
4482diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4483index 870838a..070df1d 100644
4484--- a/arch/arm/mm/mmu.c
4485+++ b/arch/arm/mm/mmu.c
4486@@ -41,6 +41,22 @@
4487 #include "mm.h"
4488 #include "tcm.h"
4489
4490+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4491+void modify_domain(unsigned int dom, unsigned int type)
4492+{
4493+ struct thread_info *thread = current_thread_info();
4494+ unsigned int domain = thread->cpu_domain;
4495+ /*
4496+ * DOMAIN_MANAGER might be defined to some other value,
4497+ * use the arch-defined constant
4498+ */
4499+ domain &= ~domain_val(dom, 3);
4500+ thread->cpu_domain = domain | domain_val(dom, type);
4501+ set_domain(thread->cpu_domain);
4502+}
4503+EXPORT_SYMBOL(modify_domain);
4504+#endif
4505+
4506 /*
4507 * empty_zero_page is a special page that is used for
4508 * zero-initialized data and COW.
4509@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
4510 #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
4511 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4512
4513-static struct mem_type mem_types[] = {
4514+#ifdef CONFIG_PAX_KERNEXEC
4515+#define L_PTE_KERNEXEC L_PTE_RDONLY
4516+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4517+#else
4518+#define L_PTE_KERNEXEC L_PTE_DIRTY
4519+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4520+#endif
4521+
4522+static struct mem_type mem_types[] __read_only = {
4523 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4524 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4525 L_PTE_SHARED,
4526@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
4527 .prot_sect = PROT_SECT_DEVICE,
4528 .domain = DOMAIN_IO,
4529 },
4530- [MT_UNCACHED] = {
4531+ [MT_UNCACHED_RW] = {
4532 .prot_pte = PROT_PTE_DEVICE,
4533 .prot_l1 = PMD_TYPE_TABLE,
4534 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4535 .domain = DOMAIN_IO,
4536 },
4537- [MT_CACHECLEAN] = {
4538- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4539+ [MT_CACHECLEAN_RO] = {
4540+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
4541 .domain = DOMAIN_KERNEL,
4542 },
4543 #ifndef CONFIG_ARM_LPAE
4544- [MT_MINICLEAN] = {
4545- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4546+ [MT_MINICLEAN_RO] = {
4547+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
4548 .domain = DOMAIN_KERNEL,
4549 },
4550 #endif
4551@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
4552 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4553 L_PTE_RDONLY,
4554 .prot_l1 = PMD_TYPE_TABLE,
4555- .domain = DOMAIN_USER,
4556+ .domain = DOMAIN_VECTORS,
4557 },
4558 [MT_HIGH_VECTORS] = {
4559 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4560 L_PTE_USER | L_PTE_RDONLY,
4561 .prot_l1 = PMD_TYPE_TABLE,
4562- .domain = DOMAIN_USER,
4563+ .domain = DOMAIN_VECTORS,
4564 },
4565- [MT_MEMORY_RWX] = {
4566+ [__MT_MEMORY_RWX] = {
4567 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4568 .prot_l1 = PMD_TYPE_TABLE,
4569 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4570@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
4571 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4572 .domain = DOMAIN_KERNEL,
4573 },
4574- [MT_ROM] = {
4575- .prot_sect = PMD_TYPE_SECT,
4576+ [MT_MEMORY_RX] = {
4577+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4578+ .prot_l1 = PMD_TYPE_TABLE,
4579+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4580+ .domain = DOMAIN_KERNEL,
4581+ },
4582+ [MT_ROM_RX] = {
4583+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4584 .domain = DOMAIN_KERNEL,
4585 },
4586- [MT_MEMORY_RWX_NONCACHED] = {
4587+ [MT_MEMORY_RW_NONCACHED] = {
4588 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4589 L_PTE_MT_BUFFERABLE,
4590 .prot_l1 = PMD_TYPE_TABLE,
4591 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4592 .domain = DOMAIN_KERNEL,
4593 },
4594+ [MT_MEMORY_RX_NONCACHED] = {
4595+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4596+ L_PTE_MT_BUFFERABLE,
4597+ .prot_l1 = PMD_TYPE_TABLE,
4598+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4599+ .domain = DOMAIN_KERNEL,
4600+ },
4601 [MT_MEMORY_RW_DTCM] = {
4602 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4603 L_PTE_XN,
4604@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
4605 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4606 .domain = DOMAIN_KERNEL,
4607 },
4608- [MT_MEMORY_RWX_ITCM] = {
4609- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4610+ [MT_MEMORY_RX_ITCM] = {
4611+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4612 .prot_l1 = PMD_TYPE_TABLE,
4613+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4614 .domain = DOMAIN_KERNEL,
4615 },
4616 [MT_MEMORY_RW_SO] = {
4617@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
4618 * Mark cache clean areas and XIP ROM read only
4619 * from SVC mode and no access from userspace.
4620 */
4621- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4622- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4623- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4624+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4625+#ifdef CONFIG_PAX_KERNEXEC
4626+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4627+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4628+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4629+#endif
4630+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4631+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4632 #endif
4633
4634 /*
4635@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
4636 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4637 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4638 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4639- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4640- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4641+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4642+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4643 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4644 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4645+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4646+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4647 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4648- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
4649- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
4650+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
4651+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
4652+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
4653+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
4654 }
4655 }
4656
4657@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
4658 if (cpu_arch >= CPU_ARCH_ARMv6) {
4659 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4660 /* Non-cacheable Normal is XCB = 001 */
4661- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4662+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4663+ PMD_SECT_BUFFERED;
4664+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4665 PMD_SECT_BUFFERED;
4666 } else {
4667 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4668- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
4669+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
4670+ PMD_SECT_TEX(1);
4671+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
4672 PMD_SECT_TEX(1);
4673 }
4674 } else {
4675- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4676+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4677+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4678 }
4679
4680 #ifdef CONFIG_ARM_LPAE
4681@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
4682 user_pgprot |= PTE_EXT_PXN;
4683 #endif
4684
4685+ user_pgprot |= __supported_pte_mask;
4686+
4687 for (i = 0; i < 16; i++) {
4688 pteval_t v = pgprot_val(protection_map[i]);
4689 protection_map[i] = __pgprot(v | user_pgprot);
4690@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
4691
4692 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4693 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4694- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4695- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4696+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4697+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4698 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4699 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4700+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4701+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4702 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4703- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
4704- mem_types[MT_ROM].prot_sect |= cp->pmd;
4705+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
4706+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
4707+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
4708
4709 switch (cp->pmd) {
4710 case PMD_SECT_WT:
4711- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
4712+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
4713 break;
4714 case PMD_SECT_WB:
4715 case PMD_SECT_WBWA:
4716- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
4717+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
4718 break;
4719 }
4720 pr_info("Memory policy: %sData cache %s\n",
4721@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
4722 return;
4723 }
4724
4725- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
4726+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
4727 md->virtual >= PAGE_OFFSET &&
4728 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
4729 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
4730@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
4731 * called function. This means you can't use any function or debugging
4732 * method which may touch any device, otherwise the kernel _will_ crash.
4733 */
4734+
4735+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4736+
4737 static void __init devicemaps_init(const struct machine_desc *mdesc)
4738 {
4739 struct map_desc map;
4740 unsigned long addr;
4741- void *vectors;
4742
4743- /*
4744- * Allocate the vector page early.
4745- */
4746- vectors = early_alloc(PAGE_SIZE * 2);
4747-
4748- early_trap_init(vectors);
4749+ early_trap_init(&vectors);
4750
4751 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4752 pmd_clear(pmd_off_k(addr));
4753@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4754 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
4755 map.virtual = MODULES_VADDR;
4756 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
4757- map.type = MT_ROM;
4758+ map.type = MT_ROM_RX;
4759 create_mapping(&map);
4760 #endif
4761
4762@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4763 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
4764 map.virtual = FLUSH_BASE;
4765 map.length = SZ_1M;
4766- map.type = MT_CACHECLEAN;
4767+ map.type = MT_CACHECLEAN_RO;
4768 create_mapping(&map);
4769 #endif
4770 #ifdef FLUSH_BASE_MINICACHE
4771 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
4772 map.virtual = FLUSH_BASE_MINICACHE;
4773 map.length = SZ_1M;
4774- map.type = MT_MINICLEAN;
4775+ map.type = MT_MINICLEAN_RO;
4776 create_mapping(&map);
4777 #endif
4778
4779@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4780 * location (0xffff0000). If we aren't using high-vectors, also
4781 * create a mapping at the low-vectors virtual address.
4782 */
4783- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4784+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4785 map.virtual = 0xffff0000;
4786 map.length = PAGE_SIZE;
4787 #ifdef CONFIG_KUSER_HELPERS
4788@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
4789 static void __init map_lowmem(void)
4790 {
4791 struct memblock_region *reg;
4792+#ifndef CONFIG_PAX_KERNEXEC
4793 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
4794 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
4795+#endif
4796
4797 /* Map all the lowmem memory banks. */
4798 for_each_memblock(memory, reg) {
4799@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
4800 if (start >= end)
4801 break;
4802
4803+#ifdef CONFIG_PAX_KERNEXEC
4804+ map.pfn = __phys_to_pfn(start);
4805+ map.virtual = __phys_to_virt(start);
4806+ map.length = end - start;
4807+
4808+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4809+ struct map_desc kernel;
4810+ struct map_desc initmap;
4811+
4812+ /* when freeing initmem we will make this RW */
4813+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4814+ initmap.virtual = (unsigned long)__init_begin;
4815+ initmap.length = _sdata - __init_begin;
4816+ initmap.type = __MT_MEMORY_RWX;
4817+ create_mapping(&initmap);
4818+
4819+ /* when freeing initmem we will make this RX */
4820+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4821+ kernel.virtual = (unsigned long)_stext;
4822+ kernel.length = __init_begin - _stext;
4823+ kernel.type = __MT_MEMORY_RWX;
4824+ create_mapping(&kernel);
4825+
4826+ if (map.virtual < (unsigned long)_stext) {
4827+ map.length = (unsigned long)_stext - map.virtual;
4828+ map.type = __MT_MEMORY_RWX;
4829+ create_mapping(&map);
4830+ }
4831+
4832+ map.pfn = __phys_to_pfn(__pa(_sdata));
4833+ map.virtual = (unsigned long)_sdata;
4834+ map.length = end - __pa(_sdata);
4835+ }
4836+
4837+ map.type = MT_MEMORY_RW;
4838+ create_mapping(&map);
4839+#else
4840 if (end < kernel_x_start) {
4841 map.pfn = __phys_to_pfn(start);
4842 map.virtual = __phys_to_virt(start);
4843 map.length = end - start;
4844- map.type = MT_MEMORY_RWX;
4845+ map.type = __MT_MEMORY_RWX;
4846
4847 create_mapping(&map);
4848 } else if (start >= kernel_x_end) {
4849@@ -1377,7 +1470,7 @@ static void __init map_lowmem(void)
4850 map.pfn = __phys_to_pfn(kernel_x_start);
4851 map.virtual = __phys_to_virt(kernel_x_start);
4852 map.length = kernel_x_end - kernel_x_start;
4853- map.type = MT_MEMORY_RWX;
4854+ map.type = __MT_MEMORY_RWX;
4855
4856 create_mapping(&map);
4857
4858@@ -1390,6 +1483,7 @@ static void __init map_lowmem(void)
4859 create_mapping(&map);
4860 }
4861 }
4862+#endif
4863 }
4864 }
4865
4866diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
4867index c011e22..92a0260 100644
4868--- a/arch/arm/net/bpf_jit_32.c
4869+++ b/arch/arm/net/bpf_jit_32.c
4870@@ -20,6 +20,7 @@
4871 #include <asm/cacheflush.h>
4872 #include <asm/hwcap.h>
4873 #include <asm/opcodes.h>
4874+#include <asm/pgtable.h>
4875
4876 #include "bpf_jit_32.h"
4877
4878@@ -72,54 +73,38 @@ struct jit_ctx {
4879 #endif
4880 };
4881
4882+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
4883+int bpf_jit_enable __read_only;
4884+#else
4885 int bpf_jit_enable __read_mostly;
4886+#endif
4887
4888-static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
4889- unsigned int size)
4890-{
4891- void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
4892-
4893- if (!ptr)
4894- return -EFAULT;
4895- memcpy(ret, ptr, size);
4896- return 0;
4897-}
4898-
4899-static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
4900+static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
4901 {
4902 u8 ret;
4903 int err;
4904
4905- if (offset < 0)
4906- err = call_neg_helper(skb, offset, &ret, 1);
4907- else
4908- err = skb_copy_bits(skb, offset, &ret, 1);
4909+ err = skb_copy_bits(skb, offset, &ret, 1);
4910
4911 return (u64)err << 32 | ret;
4912 }
4913
4914-static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
4915+static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
4916 {
4917 u16 ret;
4918 int err;
4919
4920- if (offset < 0)
4921- err = call_neg_helper(skb, offset, &ret, 2);
4922- else
4923- err = skb_copy_bits(skb, offset, &ret, 2);
4924+ err = skb_copy_bits(skb, offset, &ret, 2);
4925
4926 return (u64)err << 32 | ntohs(ret);
4927 }
4928
4929-static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
4930+static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
4931 {
4932 u32 ret;
4933 int err;
4934
4935- if (offset < 0)
4936- err = call_neg_helper(skb, offset, &ret, 4);
4937- else
4938- err = skb_copy_bits(skb, offset, &ret, 4);
4939+ err = skb_copy_bits(skb, offset, &ret, 4);
4940
4941 return (u64)err << 32 | ntohl(ret);
4942 }
4943@@ -199,8 +184,10 @@ static void jit_fill_hole(void *area, unsigned int size)
4944 {
4945 u32 *ptr;
4946 /* We are guaranteed to have aligned memory. */
4947+ pax_open_kernel();
4948 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
4949 *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
4950+ pax_close_kernel();
4951 }
4952
4953 static void build_prologue(struct jit_ctx *ctx)
4954@@ -556,6 +543,9 @@ static int build_body(struct jit_ctx *ctx)
4955 case BPF_LD | BPF_B | BPF_ABS:
4956 load_order = 0;
4957 load:
4958+ /* the interpreter will deal with the negative K */
4959+ if ((int)k < 0)
4960+ return -ENOTSUPP;
4961 emit_mov_i(r_off, k, ctx);
4962 load_common:
4963 ctx->seen |= SEEN_DATA | SEEN_CALL;
4964@@ -570,18 +560,6 @@ load_common:
4965 condt = ARM_COND_HI;
4966 }
4967
4968- /*
4969- * test for negative offset, only if we are
4970- * currently scheduled to take the fast
4971- * path. this will update the flags so that
4972- * the slowpath instruction are ignored if the
4973- * offset is negative.
4974- *
4975- * for loard_order == 0 the HI condition will
4976- * make loads at offset 0 take the slow path too.
4977- */
4978- _emit(condt, ARM_CMP_I(r_off, 0), ctx);
4979-
4980 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
4981 ctx);
4982
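
The classic-BPF JIT gets three kinds of hardening here: bpf_jit_enable becomes __read_only under GRKERNSEC_BPF_HARDEN so it keeps its build-time value and cannot be flipped through the sysctl at runtime, jit_fill_hole() writes its trap padding through pax_open_kernel() because the executable JIT buffer is no longer writable by default under KERNEXEC, and the negative-offset fast path is dropped entirely: absolute loads with a negative K are refused by build_body() with -ENOTSUPP, so such programs fall back to the in-kernel interpreter. The fragment below shows the kind of filter that now takes the interpreter path on ARM; SKF_LL_OFF is the standard negative offset for reading the link-layer header.

#include <linux/filter.h>

/* Classic BPF: accept every packet, returning its first link-layer byte as
 * the truncation length.  The negative absolute offset is what the JIT now
 * refuses to compile. */
static struct sock_filter ll_first_byte[] = {
	{ BPF_LD | BPF_B | BPF_ABS, 0, 0, SKF_LL_OFF + 0 },	/* A = ll_header[0] */
	{ BPF_RET | BPF_A,          0, 0, 0 },			/* return A */
};

static const struct sock_fprog ll_prog = {
	.len    = sizeof(ll_first_byte) / sizeof(ll_first_byte[0]),
	.filter = ll_first_byte,
};
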
4983diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
4984index 5b217f4..c23f40e 100644
4985--- a/arch/arm/plat-iop/setup.c
4986+++ b/arch/arm/plat-iop/setup.c
4987@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
4988 .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
4989 .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
4990 .length = IOP3XX_PERIPHERAL_SIZE,
4991- .type = MT_UNCACHED,
4992+ .type = MT_UNCACHED_RW,
4993 },
4994 };
4995
4996diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4997index a5bc92d..0bb4730 100644
4998--- a/arch/arm/plat-omap/sram.c
4999+++ b/arch/arm/plat-omap/sram.c
5000@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
5001 * Looks like we need to preserve some bootloader code at the
5002 * beginning of SRAM for jumping to flash for reboot to work...
5003 */
5004+ pax_open_kernel();
5005 memset_io(omap_sram_base + omap_sram_skip, 0,
5006 omap_sram_size - omap_sram_skip);
5007+ pax_close_kernel();
5008 }
5009diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
5010index d6285ef..b684dac 100644
5011--- a/arch/arm64/Kconfig.debug
5012+++ b/arch/arm64/Kconfig.debug
5013@@ -10,6 +10,7 @@ config ARM64_PTDUMP
5014 bool "Export kernel pagetable layout to userspace via debugfs"
5015 depends on DEBUG_KERNEL
5016 select DEBUG_FS
5017+ depends on !GRKERNSEC_KMEM
5018 help
5019 Say Y here if you want to show the kernel pagetable layout in a
5020 debugfs file. This information is only useful for kernel developers
5021diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
5022index 7047051..44e8675 100644
5023--- a/arch/arm64/include/asm/atomic.h
5024+++ b/arch/arm64/include/asm/atomic.h
5025@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
5026 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
5027 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
5028
5029+#define atomic64_read_unchecked(v) atomic64_read(v)
5030+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5031+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5032+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5033+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5034+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5035+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5036+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5037+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5038+
5039 #endif
5040 #endif
5041diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
5042index 0fa47c4..b167938 100644
5043--- a/arch/arm64/include/asm/barrier.h
5044+++ b/arch/arm64/include/asm/barrier.h
5045@@ -44,7 +44,7 @@
5046 do { \
5047 compiletime_assert_atomic_type(*p); \
5048 barrier(); \
5049- ACCESS_ONCE(*p) = (v); \
5050+ ACCESS_ONCE_RW(*p) = (v); \
5051 } while (0)
5052
5053 #define smp_load_acquire(p) \
5054diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
5055index 4fde8c1..441f84f 100644
5056--- a/arch/arm64/include/asm/percpu.h
5057+++ b/arch/arm64/include/asm/percpu.h
5058@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
5059 {
5060 switch (size) {
5061 case 1:
5062- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
5063+ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
5064 break;
5065 case 2:
5066- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
5067+ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
5068 break;
5069 case 4:
5070- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
5071+ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
5072 break;
5073 case 8:
5074- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
5075+ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
5076 break;
5077 default:
5078 BUILD_BUG();
5079diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
5080index 7642056..bffc904 100644
5081--- a/arch/arm64/include/asm/pgalloc.h
5082+++ b/arch/arm64/include/asm/pgalloc.h
5083@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5084 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
5085 }
5086
5087+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5088+{
5089+ pud_populate(mm, pud, pmd);
5090+}
5091+
5092 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
5093
5094 #if CONFIG_PGTABLE_LEVELS > 3
5095diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
5096index 07e1ba44..ec8cbbb 100644
5097--- a/arch/arm64/include/asm/uaccess.h
5098+++ b/arch/arm64/include/asm/uaccess.h
5099@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
5100 flag; \
5101 })
5102
5103+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5104 #define access_ok(type, addr, size) __range_ok(addr, size)
5105 #define user_addr_max get_fs
5106
5107diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
5108index d16a1ce..a5acc60 100644
5109--- a/arch/arm64/mm/dma-mapping.c
5110+++ b/arch/arm64/mm/dma-mapping.c
5111@@ -134,7 +134,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
5112 phys_to_page(paddr),
5113 size >> PAGE_SHIFT);
5114 if (!freed)
5115- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5116+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5117 }
5118
5119 static void *__dma_alloc(struct device *dev, size_t size,
5120diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
5121index c3a58a1..78fbf54 100644
5122--- a/arch/avr32/include/asm/cache.h
5123+++ b/arch/avr32/include/asm/cache.h
5124@@ -1,8 +1,10 @@
5125 #ifndef __ASM_AVR32_CACHE_H
5126 #define __ASM_AVR32_CACHE_H
5127
5128+#include <linux/const.h>
5129+
5130 #define L1_CACHE_SHIFT 5
5131-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5132+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5133
5134 /*
5135 * Memory returned by kmalloc() may be used for DMA, so we must make
5136diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
5137index 0388ece..87c8df1 100644
5138--- a/arch/avr32/include/asm/elf.h
5139+++ b/arch/avr32/include/asm/elf.h
5140@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
5141 the loader. We need to make sure that it is out of the way of the program
5142 that it will "exec", and that there is sufficient room for the brk. */
5143
5144-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5145+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5146
5147+#ifdef CONFIG_PAX_ASLR
5148+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
5149+
5150+#define PAX_DELTA_MMAP_LEN 15
5151+#define PAX_DELTA_STACK_LEN 15
5152+#endif
5153
5154 /* This yields a mask that user programs can use to figure out what
5155 instruction set this CPU supports. This could be done in user space,
5156diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
5157index 479330b..53717a8 100644
5158--- a/arch/avr32/include/asm/kmap_types.h
5159+++ b/arch/avr32/include/asm/kmap_types.h
5160@@ -2,9 +2,9 @@
5161 #define __ASM_AVR32_KMAP_TYPES_H
5162
5163 #ifdef CONFIG_DEBUG_HIGHMEM
5164-# define KM_TYPE_NR 29
5165+# define KM_TYPE_NR 30
5166 #else
5167-# define KM_TYPE_NR 14
5168+# define KM_TYPE_NR 15
5169 #endif
5170
5171 #endif /* __ASM_AVR32_KMAP_TYPES_H */
5172diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
5173index c035339..e1fa594 100644
5174--- a/arch/avr32/mm/fault.c
5175+++ b/arch/avr32/mm/fault.c
5176@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
5177
5178 int exception_trace = 1;
5179
5180+#ifdef CONFIG_PAX_PAGEEXEC
5181+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5182+{
5183+ unsigned long i;
5184+
5185+ printk(KERN_ERR "PAX: bytes at PC: ");
5186+ for (i = 0; i < 20; i++) {
5187+ unsigned char c;
5188+ if (get_user(c, (unsigned char *)pc+i))
5189+ printk(KERN_CONT "???????? ");
5190+ else
5191+ printk(KERN_CONT "%02x ", c);
5192+ }
5193+ printk("\n");
5194+}
5195+#endif
5196+
5197 /*
5198 * This routine handles page faults. It determines the address and the
5199 * problem, and then passes it off to one of the appropriate routines.
5200@@ -178,6 +195,16 @@ bad_area:
5201 up_read(&mm->mmap_sem);
5202
5203 if (user_mode(regs)) {
5204+
5205+#ifdef CONFIG_PAX_PAGEEXEC
5206+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
5207+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
5208+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
5209+ do_group_exit(SIGKILL);
5210+ }
5211+ }
5212+#endif
5213+
5214 if (exception_trace && printk_ratelimit())
5215 printk("%s%s[%d]: segfault at %08lx pc %08lx "
5216 "sp %08lx ecr %lu\n",
5217diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
5218index f3337ee..15b6f8d 100644
5219--- a/arch/blackfin/Kconfig.debug
5220+++ b/arch/blackfin/Kconfig.debug
5221@@ -18,6 +18,7 @@ config DEBUG_VERBOSE
5222 config DEBUG_MMRS
5223 tristate "Generate Blackfin MMR tree"
5224 select DEBUG_FS
5225+ depends on !GRKERNSEC_KMEM
5226 help
5227 Create a tree of Blackfin MMRs via the debugfs tree. If
5228 you enable this, you will find all MMRs laid out in the
5229diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
5230index 568885a..f8008df 100644
5231--- a/arch/blackfin/include/asm/cache.h
5232+++ b/arch/blackfin/include/asm/cache.h
5233@@ -7,6 +7,7 @@
5234 #ifndef __ARCH_BLACKFIN_CACHE_H
5235 #define __ARCH_BLACKFIN_CACHE_H
5236
5237+#include <linux/const.h>
5238 #include <linux/linkage.h> /* for asmlinkage */
5239
5240 /*
5241@@ -14,7 +15,7 @@
5242 * Blackfin loads 32 bytes for cache
5243 */
5244 #define L1_CACHE_SHIFT 5
5245-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5246+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5247 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5248
5249 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5250diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
5251index aea2718..3639a60 100644
5252--- a/arch/cris/include/arch-v10/arch/cache.h
5253+++ b/arch/cris/include/arch-v10/arch/cache.h
5254@@ -1,8 +1,9 @@
5255 #ifndef _ASM_ARCH_CACHE_H
5256 #define _ASM_ARCH_CACHE_H
5257
5258+#include <linux/const.h>
5259 /* Etrax 100LX have 32-byte cache-lines. */
5260-#define L1_CACHE_BYTES 32
5261 #define L1_CACHE_SHIFT 5
5262+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5263
5264 #endif /* _ASM_ARCH_CACHE_H */
5265diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
5266index 7caf25d..ee65ac5 100644
5267--- a/arch/cris/include/arch-v32/arch/cache.h
5268+++ b/arch/cris/include/arch-v32/arch/cache.h
5269@@ -1,11 +1,12 @@
5270 #ifndef _ASM_CRIS_ARCH_CACHE_H
5271 #define _ASM_CRIS_ARCH_CACHE_H
5272
5273+#include <linux/const.h>
5274 #include <arch/hwregs/dma.h>
5275
5276 /* A cache-line is 32 bytes. */
5277-#define L1_CACHE_BYTES 32
5278 #define L1_CACHE_SHIFT 5
5279+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5280
5281 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5282
5283diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
5284index 102190a..5334cea 100644
5285--- a/arch/frv/include/asm/atomic.h
5286+++ b/arch/frv/include/asm/atomic.h
5287@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
5288 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
5289 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
5290
5291+#define atomic64_read_unchecked(v) atomic64_read(v)
5292+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5293+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5294+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5295+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5296+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5297+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5298+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5299+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5300+
5301 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5302 {
5303 int c, old;
5304diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
5305index 2797163..c2a401df9 100644
5306--- a/arch/frv/include/asm/cache.h
5307+++ b/arch/frv/include/asm/cache.h
5308@@ -12,10 +12,11 @@
5309 #ifndef __ASM_CACHE_H
5310 #define __ASM_CACHE_H
5311
5312+#include <linux/const.h>
5313
5314 /* bytes per L1 cache line */
5315 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
5316-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5317+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5318
5319 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5320 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
5321diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
5322index 43901f2..0d8b865 100644
5323--- a/arch/frv/include/asm/kmap_types.h
5324+++ b/arch/frv/include/asm/kmap_types.h
5325@@ -2,6 +2,6 @@
5326 #ifndef _ASM_KMAP_TYPES_H
5327 #define _ASM_KMAP_TYPES_H
5328
5329-#define KM_TYPE_NR 17
5330+#define KM_TYPE_NR 18
5331
5332 #endif
5333diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
5334index 836f147..4cf23f5 100644
5335--- a/arch/frv/mm/elf-fdpic.c
5336+++ b/arch/frv/mm/elf-fdpic.c
5337@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5338 {
5339 struct vm_area_struct *vma;
5340 struct vm_unmapped_area_info info;
5341+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5342
5343 if (len > TASK_SIZE)
5344 return -ENOMEM;
5345@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5346 if (addr) {
5347 addr = PAGE_ALIGN(addr);
5348 vma = find_vma(current->mm, addr);
5349- if (TASK_SIZE - len >= addr &&
5350- (!vma || addr + len <= vma->vm_start))
5351+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5352 goto success;
5353 }
5354
5355@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5356 info.high_limit = (current->mm->start_stack - 0x00200000);
5357 info.align_mask = 0;
5358 info.align_offset = 0;
5359+ info.threadstack_offset = offset;
5360 addr = vm_unmapped_area(&info);
5361 if (!(addr & ~PAGE_MASK))
5362 goto success;
5363diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
5364index 69952c18..4fa2908 100644
5365--- a/arch/hexagon/include/asm/cache.h
5366+++ b/arch/hexagon/include/asm/cache.h
5367@@ -21,9 +21,11 @@
5368 #ifndef __ASM_CACHE_H
5369 #define __ASM_CACHE_H
5370
5371+#include <linux/const.h>
5372+
5373 /* Bytes per L1 cache line */
5374-#define L1_CACHE_SHIFT (5)
5375-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5376+#define L1_CACHE_SHIFT 5
5377+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5378
5379 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5380
5381diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
5382index 42a91a7..29d446e 100644
5383--- a/arch/ia64/Kconfig
5384+++ b/arch/ia64/Kconfig
5385@@ -518,6 +518,7 @@ source "drivers/sn/Kconfig"
5386 config KEXEC
5387 bool "kexec system call"
5388 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
5389+ depends on !GRKERNSEC_KMEM
5390 help
5391 kexec is a system call that implements the ability to shutdown your
5392 current kernel, and to start another kernel. It is like a reboot
5393diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
5394index 970d0bd..e750b9b 100644
5395--- a/arch/ia64/Makefile
5396+++ b/arch/ia64/Makefile
5397@@ -98,5 +98,6 @@ endef
5398 archprepare: make_nr_irqs_h FORCE
5399 PHONY += make_nr_irqs_h FORCE
5400
5401+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
5402 make_nr_irqs_h: FORCE
5403 $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
5404diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
5405index 0bf0350..2ad1957 100644
5406--- a/arch/ia64/include/asm/atomic.h
5407+++ b/arch/ia64/include/asm/atomic.h
5408@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
5409 #define atomic64_inc(v) atomic64_add(1, (v))
5410 #define atomic64_dec(v) atomic64_sub(1, (v))
5411
5412+#define atomic64_read_unchecked(v) atomic64_read(v)
5413+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5414+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5415+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5416+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5417+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5418+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5419+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5420+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5421+
5422 #endif /* _ASM_IA64_ATOMIC_H */
5423diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
5424index 843ba43..fa118fb 100644
5425--- a/arch/ia64/include/asm/barrier.h
5426+++ b/arch/ia64/include/asm/barrier.h
5427@@ -66,7 +66,7 @@
5428 do { \
5429 compiletime_assert_atomic_type(*p); \
5430 barrier(); \
5431- ACCESS_ONCE(*p) = (v); \
5432+ ACCESS_ONCE_RW(*p) = (v); \
5433 } while (0)
5434
5435 #define smp_load_acquire(p) \
5436diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
5437index 988254a..e1ee885 100644
5438--- a/arch/ia64/include/asm/cache.h
5439+++ b/arch/ia64/include/asm/cache.h
5440@@ -1,6 +1,7 @@
5441 #ifndef _ASM_IA64_CACHE_H
5442 #define _ASM_IA64_CACHE_H
5443
5444+#include <linux/const.h>
5445
5446 /*
5447 * Copyright (C) 1998-2000 Hewlett-Packard Co
5448@@ -9,7 +10,7 @@
5449
5450 /* Bytes per L1 (data) cache line. */
5451 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
5452-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5453+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5454
5455 #ifdef CONFIG_SMP
5456 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5457diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
5458index 5a83c5c..4d7f553 100644
5459--- a/arch/ia64/include/asm/elf.h
5460+++ b/arch/ia64/include/asm/elf.h
5461@@ -42,6 +42,13 @@
5462 */
5463 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
5464
5465+#ifdef CONFIG_PAX_ASLR
5466+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
5467+
5468+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5469+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
5470+#endif
5471+
5472 #define PT_IA_64_UNWIND 0x70000001
5473
5474 /* IA-64 relocations: */
5475diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
5476index f5e70e9..624fad5 100644
5477--- a/arch/ia64/include/asm/pgalloc.h
5478+++ b/arch/ia64/include/asm/pgalloc.h
5479@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5480 pgd_val(*pgd_entry) = __pa(pud);
5481 }
5482
5483+static inline void
5484+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
5485+{
5486+ pgd_populate(mm, pgd_entry, pud);
5487+}
5488+
5489 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
5490 {
5491 return quicklist_alloc(0, GFP_KERNEL, NULL);
5492@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5493 pud_val(*pud_entry) = __pa(pmd);
5494 }
5495
5496+static inline void
5497+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
5498+{
5499+ pud_populate(mm, pud_entry, pmd);
5500+}
5501+
5502 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5503 {
5504 return quicklist_alloc(0, GFP_KERNEL, NULL);
5505diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
5506index 9f3ed9e..c99b418 100644
5507--- a/arch/ia64/include/asm/pgtable.h
5508+++ b/arch/ia64/include/asm/pgtable.h
5509@@ -12,7 +12,7 @@
5510 * David Mosberger-Tang <davidm@hpl.hp.com>
5511 */
5512
5513-
5514+#include <linux/const.h>
5515 #include <asm/mman.h>
5516 #include <asm/page.h>
5517 #include <asm/processor.h>
5518@@ -139,6 +139,17 @@
5519 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5520 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5521 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
5522+
5523+#ifdef CONFIG_PAX_PAGEEXEC
5524+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
5525+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5526+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
5527+#else
5528+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5529+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5530+# define PAGE_COPY_NOEXEC PAGE_COPY
5531+#endif
5532+
5533 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
5534 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
5535 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
5536diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
5537index 45698cd..e8e2dbc 100644
5538--- a/arch/ia64/include/asm/spinlock.h
5539+++ b/arch/ia64/include/asm/spinlock.h
5540@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
5541 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5542
5543 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5544- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5545+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5546 }
5547
5548 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5549diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5550index 4f3fb6cc..254055e 100644
5551--- a/arch/ia64/include/asm/uaccess.h
5552+++ b/arch/ia64/include/asm/uaccess.h
5553@@ -70,6 +70,7 @@
5554 && ((segment).seg == KERNEL_DS.seg \
5555 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
5556 })
5557+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
5558 #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
5559
5560 /*
5561@@ -241,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5562 static inline unsigned long
5563 __copy_to_user (void __user *to, const void *from, unsigned long count)
5564 {
5565+ if (count > INT_MAX)
5566+ return count;
5567+
5568+ if (!__builtin_constant_p(count))
5569+ check_object_size(from, count, true);
5570+
5571 return __copy_user(to, (__force void __user *) from, count);
5572 }
5573
5574 static inline unsigned long
5575 __copy_from_user (void *to, const void __user *from, unsigned long count)
5576 {
5577+ if (count > INT_MAX)
5578+ return count;
5579+
5580+ if (!__builtin_constant_p(count))
5581+ check_object_size(to, count, false);
5582+
5583 return __copy_user((__force void __user *) to, from, count);
5584 }
5585
5586@@ -256,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5587 ({ \
5588 void __user *__cu_to = (to); \
5589 const void *__cu_from = (from); \
5590- long __cu_len = (n); \
5591+ unsigned long __cu_len = (n); \
5592 \
5593- if (__access_ok(__cu_to, __cu_len, get_fs())) \
5594+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5595+ if (!__builtin_constant_p(n)) \
5596+ check_object_size(__cu_from, __cu_len, true); \
5597 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5598+ } \
5599 __cu_len; \
5600 })
5601
5602@@ -267,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5603 ({ \
5604 void *__cu_to = (to); \
5605 const void __user *__cu_from = (from); \
5606- long __cu_len = (n); \
5607+ unsigned long __cu_len = (n); \
5608 \
5609 __chk_user_ptr(__cu_from); \
5610- if (__access_ok(__cu_from, __cu_len, get_fs())) \
5611+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5612+ if (!__builtin_constant_p(n)) \
5613+ check_object_size(__cu_to, __cu_len, false); \
5614 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5615+ } \
5616 __cu_len; \
5617 })
5618
5619diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5620index b15933c..098b1c8 100644
5621--- a/arch/ia64/kernel/module.c
5622+++ b/arch/ia64/kernel/module.c
5623@@ -484,15 +484,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5624 }
5625
5626 static inline int
5627+in_init_rx (const struct module *mod, uint64_t addr)
5628+{
5629+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5630+}
5631+
5632+static inline int
5633+in_init_rw (const struct module *mod, uint64_t addr)
5634+{
5635+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5636+}
5637+
5638+static inline int
5639 in_init (const struct module *mod, uint64_t addr)
5640 {
5641- return addr - (uint64_t) mod->module_init < mod->init_size;
5642+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5643+}
5644+
5645+static inline int
5646+in_core_rx (const struct module *mod, uint64_t addr)
5647+{
5648+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5649+}
5650+
5651+static inline int
5652+in_core_rw (const struct module *mod, uint64_t addr)
5653+{
5654+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5655 }
5656
5657 static inline int
5658 in_core (const struct module *mod, uint64_t addr)
5659 {
5660- return addr - (uint64_t) mod->module_core < mod->core_size;
5661+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5662 }
5663
5664 static inline int
5665@@ -675,7 +699,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5666 break;
5667
5668 case RV_BDREL:
5669- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5670+ if (in_init_rx(mod, val))
5671+ val -= (uint64_t) mod->module_init_rx;
5672+ else if (in_init_rw(mod, val))
5673+ val -= (uint64_t) mod->module_init_rw;
5674+ else if (in_core_rx(mod, val))
5675+ val -= (uint64_t) mod->module_core_rx;
5676+ else if (in_core_rw(mod, val))
5677+ val -= (uint64_t) mod->module_core_rw;
5678 break;
5679
5680 case RV_LTV:
5681@@ -810,15 +841,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5682 * addresses have been selected...
5683 */
5684 uint64_t gp;
5685- if (mod->core_size > MAX_LTOFF)
5686+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5687 /*
5688 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5689 * at the end of the module.
5690 */
5691- gp = mod->core_size - MAX_LTOFF / 2;
5692+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5693 else
5694- gp = mod->core_size / 2;
5695- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5696+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5697+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5698 mod->arch.gp = gp;
5699 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5700 }
5701diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5702index c39c3cd..3c77738 100644
5703--- a/arch/ia64/kernel/palinfo.c
5704+++ b/arch/ia64/kernel/palinfo.c
5705@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5706 return NOTIFY_OK;
5707 }
5708
5709-static struct notifier_block __refdata palinfo_cpu_notifier =
5710+static struct notifier_block palinfo_cpu_notifier =
5711 {
5712 .notifier_call = palinfo_cpu_callback,
5713 .priority = 0,
5714diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5715index 41e33f8..65180b2a 100644
5716--- a/arch/ia64/kernel/sys_ia64.c
5717+++ b/arch/ia64/kernel/sys_ia64.c
5718@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5719 unsigned long align_mask = 0;
5720 struct mm_struct *mm = current->mm;
5721 struct vm_unmapped_area_info info;
5722+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5723
5724 if (len > RGN_MAP_LIMIT)
5725 return -ENOMEM;
5726@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5727 if (REGION_NUMBER(addr) == RGN_HPAGE)
5728 addr = 0;
5729 #endif
5730+
5731+#ifdef CONFIG_PAX_RANDMMAP
5732+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5733+ addr = mm->free_area_cache;
5734+ else
5735+#endif
5736+
5737 if (!addr)
5738 addr = TASK_UNMAPPED_BASE;
5739
5740@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5741 info.high_limit = TASK_SIZE;
5742 info.align_mask = align_mask;
5743 info.align_offset = 0;
5744+ info.threadstack_offset = offset;
5745 return vm_unmapped_area(&info);
5746 }
5747
5748diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5749index dc506b0..39baade 100644
5750--- a/arch/ia64/kernel/vmlinux.lds.S
5751+++ b/arch/ia64/kernel/vmlinux.lds.S
5752@@ -171,7 +171,7 @@ SECTIONS {
5753 /* Per-cpu data: */
5754 . = ALIGN(PERCPU_PAGE_SIZE);
5755 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5756- __phys_per_cpu_start = __per_cpu_load;
5757+ __phys_per_cpu_start = per_cpu_load;
5758 /*
5759 * ensure percpu data fits
5760 * into percpu page size
5761diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5762index 70b40d1..01a9a28 100644
5763--- a/arch/ia64/mm/fault.c
5764+++ b/arch/ia64/mm/fault.c
5765@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5766 return pte_present(pte);
5767 }
5768
5769+#ifdef CONFIG_PAX_PAGEEXEC
5770+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5771+{
5772+ unsigned long i;
5773+
5774+ printk(KERN_ERR "PAX: bytes at PC: ");
5775+ for (i = 0; i < 8; i++) {
5776+ unsigned int c;
5777+ if (get_user(c, (unsigned int *)pc+i))
5778+ printk(KERN_CONT "???????? ");
5779+ else
5780+ printk(KERN_CONT "%08x ", c);
5781+ }
5782+ printk("\n");
5783+}
5784+#endif
5785+
5786 # define VM_READ_BIT 0
5787 # define VM_WRITE_BIT 1
5788 # define VM_EXEC_BIT 2
5789@@ -151,8 +168,21 @@ retry:
5790 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5791 goto bad_area;
5792
5793- if ((vma->vm_flags & mask) != mask)
5794+ if ((vma->vm_flags & mask) != mask) {
5795+
5796+#ifdef CONFIG_PAX_PAGEEXEC
5797+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5798+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5799+ goto bad_area;
5800+
5801+ up_read(&mm->mmap_sem);
5802+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5803+ do_group_exit(SIGKILL);
5804+ }
5805+#endif
5806+
5807 goto bad_area;
5808+ }
5809
5810 /*
5811 * If for any reason at all we couldn't handle the fault, make
5812diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5813index f50d4b3..c7975ee 100644
5814--- a/arch/ia64/mm/hugetlbpage.c
5815+++ b/arch/ia64/mm/hugetlbpage.c
5816@@ -138,6 +138,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5817 unsigned long pgoff, unsigned long flags)
5818 {
5819 struct vm_unmapped_area_info info;
5820+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5821
5822 if (len > RGN_MAP_LIMIT)
5823 return -ENOMEM;
5824@@ -161,6 +162,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5825 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5826 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5827 info.align_offset = 0;
5828+ info.threadstack_offset = offset;
5829 return vm_unmapped_area(&info);
5830 }
5831
5832diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5833index 97e48b0..fc59c36 100644
5834--- a/arch/ia64/mm/init.c
5835+++ b/arch/ia64/mm/init.c
5836@@ -119,6 +119,19 @@ ia64_init_addr_space (void)
5837 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5838 vma->vm_end = vma->vm_start + PAGE_SIZE;
5839 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5840+
5841+#ifdef CONFIG_PAX_PAGEEXEC
5842+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5843+ vma->vm_flags &= ~VM_EXEC;
5844+
5845+#ifdef CONFIG_PAX_MPROTECT
5846+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5847+ vma->vm_flags &= ~VM_MAYEXEC;
5848+#endif
5849+
5850+ }
5851+#endif
5852+
5853 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5854 down_write(&current->mm->mmap_sem);
5855 if (insert_vm_struct(current->mm, vma)) {
5856@@ -279,7 +292,7 @@ static int __init gate_vma_init(void)
5857 gate_vma.vm_start = FIXADDR_USER_START;
5858 gate_vma.vm_end = FIXADDR_USER_END;
5859 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
5860- gate_vma.vm_page_prot = __P101;
5861+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
5862
5863 return 0;
5864 }
5865diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5866index 40b3ee98..8c2c112 100644
5867--- a/arch/m32r/include/asm/cache.h
5868+++ b/arch/m32r/include/asm/cache.h
5869@@ -1,8 +1,10 @@
5870 #ifndef _ASM_M32R_CACHE_H
5871 #define _ASM_M32R_CACHE_H
5872
5873+#include <linux/const.h>
5874+
5875 /* L1 cache line size */
5876 #define L1_CACHE_SHIFT 4
5877-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5878+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5879
5880 #endif /* _ASM_M32R_CACHE_H */
5881diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5882index 82abd15..d95ae5d 100644
5883--- a/arch/m32r/lib/usercopy.c
5884+++ b/arch/m32r/lib/usercopy.c
5885@@ -14,6 +14,9 @@
5886 unsigned long
5887 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5888 {
5889+ if ((long)n < 0)
5890+ return n;
5891+
5892 prefetch(from);
5893 if (access_ok(VERIFY_WRITE, to, n))
5894 __copy_user(to,from,n);
5895@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5896 unsigned long
5897 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5898 {
5899+ if ((long)n < 0)
5900+ return n;
5901+
5902 prefetchw(to);
5903 if (access_ok(VERIFY_READ, from, n))
5904 __copy_user_zeroing(to,from,n);
5905diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5906index 0395c51..5f26031 100644
5907--- a/arch/m68k/include/asm/cache.h
5908+++ b/arch/m68k/include/asm/cache.h
5909@@ -4,9 +4,11 @@
5910 #ifndef __ARCH_M68K_CACHE_H
5911 #define __ARCH_M68K_CACHE_H
5912
5913+#include <linux/const.h>
5914+
5915 /* bytes per L1 cache line */
5916 #define L1_CACHE_SHIFT 4
5917-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5918+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5919
5920 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5921
5922diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
5923index 5a696e5..070490d 100644
5924--- a/arch/metag/include/asm/barrier.h
5925+++ b/arch/metag/include/asm/barrier.h
5926@@ -90,7 +90,7 @@ static inline void fence(void)
5927 do { \
5928 compiletime_assert_atomic_type(*p); \
5929 smp_mb(); \
5930- ACCESS_ONCE(*p) = (v); \
5931+ ACCESS_ONCE_RW(*p) = (v); \
5932 } while (0)
5933
5934 #define smp_load_acquire(p) \
5935diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5936index 53f0f6c..2dc07fd 100644
5937--- a/arch/metag/mm/hugetlbpage.c
5938+++ b/arch/metag/mm/hugetlbpage.c
5939@@ -189,6 +189,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5940 info.high_limit = TASK_SIZE;
5941 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5942 info.align_offset = 0;
5943+ info.threadstack_offset = 0;
5944 return vm_unmapped_area(&info);
5945 }
5946
5947diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5948index 4efe96a..60e8699 100644
5949--- a/arch/microblaze/include/asm/cache.h
5950+++ b/arch/microblaze/include/asm/cache.h
5951@@ -13,11 +13,12 @@
5952 #ifndef _ASM_MICROBLAZE_CACHE_H
5953 #define _ASM_MICROBLAZE_CACHE_H
5954
5955+#include <linux/const.h>
5956 #include <asm/registers.h>
5957
5958 #define L1_CACHE_SHIFT 5
5959 /* word-granular cache in microblaze */
5960-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5961+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5962
5963 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5964
5965diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5966index 199a835..822b487 100644
5967--- a/arch/mips/Kconfig
5968+++ b/arch/mips/Kconfig
5969@@ -2591,6 +2591,7 @@ source "kernel/Kconfig.preempt"
5970
5971 config KEXEC
5972 bool "Kexec system call"
5973+ depends on !GRKERNSEC_KMEM
5974 help
5975 kexec is a system call that implements the ability to shutdown your
5976 current kernel, and to start another kernel. It is like a reboot
5977diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
5978index d8960d4..77dbd31 100644
5979--- a/arch/mips/cavium-octeon/dma-octeon.c
5980+++ b/arch/mips/cavium-octeon/dma-octeon.c
5981@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
5982 if (dma_release_from_coherent(dev, order, vaddr))
5983 return;
5984
5985- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
5986+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
5987 }
5988
5989 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
5990diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5991index 26d4363..3c9a82e 100644
5992--- a/arch/mips/include/asm/atomic.h
5993+++ b/arch/mips/include/asm/atomic.h
5994@@ -22,15 +22,39 @@
5995 #include <asm/cmpxchg.h>
5996 #include <asm/war.h>
5997
5998+#ifdef CONFIG_GENERIC_ATOMIC64
5999+#include <asm-generic/atomic64.h>
6000+#endif
6001+
6002 #define ATOMIC_INIT(i) { (i) }
6003
6004+#ifdef CONFIG_64BIT
6005+#define _ASM_EXTABLE(from, to) \
6006+" .section __ex_table,\"a\"\n" \
6007+" .dword " #from ", " #to"\n" \
6008+" .previous\n"
6009+#else
6010+#define _ASM_EXTABLE(from, to) \
6011+" .section __ex_table,\"a\"\n" \
6012+" .word " #from ", " #to"\n" \
6013+" .previous\n"
6014+#endif
6015+
6016 /*
6017 * atomic_read - read atomic variable
6018 * @v: pointer of type atomic_t
6019 *
6020 * Atomically reads the value of @v.
6021 */
6022-#define atomic_read(v) ACCESS_ONCE((v)->counter)
6023+static inline int atomic_read(const atomic_t *v)
6024+{
6025+ return ACCESS_ONCE(v->counter);
6026+}
6027+
6028+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6029+{
6030+ return ACCESS_ONCE(v->counter);
6031+}
6032
6033 /*
6034 * atomic_set - set atomic variable
6035@@ -39,47 +63,77 @@
6036 *
6037 * Atomically sets the value of @v to @i.
6038 */
6039-#define atomic_set(v, i) ((v)->counter = (i))
6040+static inline void atomic_set(atomic_t *v, int i)
6041+{
6042+ v->counter = i;
6043+}
6044
6045-#define ATOMIC_OP(op, c_op, asm_op) \
6046-static __inline__ void atomic_##op(int i, atomic_t * v) \
6047+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6048+{
6049+ v->counter = i;
6050+}
6051+
6052+#ifdef CONFIG_PAX_REFCOUNT
6053+#define __OVERFLOW_POST \
6054+ " b 4f \n" \
6055+ " .set noreorder \n" \
6056+ "3: b 5f \n" \
6057+ " move %0, %1 \n" \
6058+ " .set reorder \n"
6059+#define __OVERFLOW_EXTABLE \
6060+ "3:\n" \
6061+ _ASM_EXTABLE(2b, 3b)
6062+#else
6063+#define __OVERFLOW_POST
6064+#define __OVERFLOW_EXTABLE
6065+#endif
6066+
6067+#define __ATOMIC_OP(op, suffix, asm_op, extable) \
6068+static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
6069 { \
6070 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6071 int temp; \
6072 \
6073 __asm__ __volatile__( \
6074- " .set arch=r4000 \n" \
6075- "1: ll %0, %1 # atomic_" #op " \n" \
6076- " " #asm_op " %0, %2 \n" \
6077+ " .set mips3 \n" \
6078+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
6079+ "2: " #asm_op " %0, %2 \n" \
6080 " sc %0, %1 \n" \
6081 " beqzl %0, 1b \n" \
6082+ extable \
6083 " .set mips0 \n" \
6084 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6085 : "Ir" (i)); \
6086 } else if (kernel_uses_llsc) { \
6087 int temp; \
6088 \
6089- do { \
6090- __asm__ __volatile__( \
6091- " .set "MIPS_ISA_LEVEL" \n" \
6092- " ll %0, %1 # atomic_" #op "\n" \
6093- " " #asm_op " %0, %2 \n" \
6094- " sc %0, %1 \n" \
6095- " .set mips0 \n" \
6096- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6097- : "Ir" (i)); \
6098- } while (unlikely(!temp)); \
6099+ __asm__ __volatile__( \
6100+ " .set "MIPS_ISA_LEVEL" \n" \
6101+ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
6102+ "2: " #asm_op " %0, %2 \n" \
6103+ " sc %0, %1 \n" \
6104+ " beqz %0, 1b \n" \
6105+ extable \
6106+ " .set mips0 \n" \
6107+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6108+ : "Ir" (i)); \
6109 } else { \
6110 unsigned long flags; \
6111 \
6112 raw_local_irq_save(flags); \
6113- v->counter c_op i; \
6114+ __asm__ __volatile__( \
6115+ "2: " #asm_op " %0, %1 \n" \
6116+ extable \
6117+ : "+r" (v->counter) : "Ir" (i)); \
6118 raw_local_irq_restore(flags); \
6119 } \
6120 }
6121
6122-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
6123-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
6124+#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
6125+ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6126+
6127+#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6128+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
6129 { \
6130 int result; \
6131 \
6132@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
6133 int temp; \
6134 \
6135 __asm__ __volatile__( \
6136- " .set arch=r4000 \n" \
6137- "1: ll %1, %2 # atomic_" #op "_return \n" \
6138- " " #asm_op " %0, %1, %3 \n" \
6139+ " .set mips3 \n" \
6140+ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
6141+ "2: " #asm_op " %0, %1, %3 \n" \
6142 " sc %0, %2 \n" \
6143 " beqzl %0, 1b \n" \
6144- " " #asm_op " %0, %1, %3 \n" \
6145+ post_op \
6146+ extable \
6147+ "4: " #asm_op " %0, %1, %3 \n" \
6148+ "5: \n" \
6149 " .set mips0 \n" \
6150 : "=&r" (result), "=&r" (temp), \
6151 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6152@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
6153 } else if (kernel_uses_llsc) { \
6154 int temp; \
6155 \
6156- do { \
6157- __asm__ __volatile__( \
6158- " .set "MIPS_ISA_LEVEL" \n" \
6159- " ll %1, %2 # atomic_" #op "_return \n" \
6160- " " #asm_op " %0, %1, %3 \n" \
6161- " sc %0, %2 \n" \
6162- " .set mips0 \n" \
6163- : "=&r" (result), "=&r" (temp), \
6164- "+" GCC_OFF_SMALL_ASM() (v->counter) \
6165- : "Ir" (i)); \
6166- } while (unlikely(!result)); \
6167+ __asm__ __volatile__( \
6168+ " .set "MIPS_ISA_LEVEL" \n" \
6169+ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
6170+ "2: " #asm_op " %0, %1, %3 \n" \
6171+ " sc %0, %2 \n" \
6172+ post_op \
6173+ extable \
6174+ "4: " #asm_op " %0, %1, %3 \n" \
6175+ "5: \n" \
6176+ " .set mips0 \n" \
6177+ : "=&r" (result), "=&r" (temp), \
6178+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
6179+ : "Ir" (i)); \
6180 \
6181 result = temp; result c_op i; \
6182 } else { \
6183 unsigned long flags; \
6184 \
6185 raw_local_irq_save(flags); \
6186- result = v->counter; \
6187- result c_op i; \
6188- v->counter = result; \
6189+ __asm__ __volatile__( \
6190+ " lw %0, %1 \n" \
6191+ "2: " #asm_op " %0, %1, %2 \n" \
6192+ " sw %0, %1 \n" \
6193+ "3: \n" \
6194+ extable \
6195+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6196+ : "Ir" (i)); \
6197 raw_local_irq_restore(flags); \
6198 } \
6199 \
6200@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
6201 return result; \
6202 }
6203
6204-#define ATOMIC_OPS(op, c_op, asm_op) \
6205- ATOMIC_OP(op, c_op, asm_op) \
6206- ATOMIC_OP_RETURN(op, c_op, asm_op)
6207+#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6208+ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6209
6210-ATOMIC_OPS(add, +=, addu)
6211-ATOMIC_OPS(sub, -=, subu)
6212+#define ATOMIC_OPS(op, asm_op) \
6213+ ATOMIC_OP(op, asm_op) \
6214+ ATOMIC_OP_RETURN(op, asm_op)
6215+
6216+ATOMIC_OPS(add, add)
6217+ATOMIC_OPS(sub, sub)
6218
6219 #undef ATOMIC_OPS
6220 #undef ATOMIC_OP_RETURN
6221+#undef __ATOMIC_OP_RETURN
6222 #undef ATOMIC_OP
6223+#undef __ATOMIC_OP
6224
6225 /*
6226 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
6227@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
6228 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6229 * The function returns the old value of @v minus @i.
6230 */
6231-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6232+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
6233 {
6234 int result;
6235
6236@@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6237 int temp;
6238
6239 __asm__ __volatile__(
6240- " .set arch=r4000 \n"
6241+ " .set "MIPS_ISA_LEVEL" \n"
6242 "1: ll %1, %2 # atomic_sub_if_positive\n"
6243 " subu %0, %1, %3 \n"
6244 " bltz %0, 1f \n"
6245@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
6246 return result;
6247 }
6248
6249-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
6250-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
6251+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6252+{
6253+ return cmpxchg(&v->counter, old, new);
6254+}
6255+
6256+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
6257+ int new)
6258+{
6259+ return cmpxchg(&(v->counter), old, new);
6260+}
6261+
6262+static inline int atomic_xchg(atomic_t *v, int new)
6263+{
6264+ return xchg(&v->counter, new);
6265+}
6266+
6267+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6268+{
6269+ return xchg(&(v->counter), new);
6270+}
6271
6272 /**
6273 * __atomic_add_unless - add unless the number is a given value
6274@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6275
6276 #define atomic_dec_return(v) atomic_sub_return(1, (v))
6277 #define atomic_inc_return(v) atomic_add_return(1, (v))
6278+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6279+{
6280+ return atomic_add_return_unchecked(1, v);
6281+}
6282
6283 /*
6284 * atomic_sub_and_test - subtract value from variable and test result
6285@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6286 * other cases.
6287 */
6288 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
6289+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6290+{
6291+ return atomic_add_return_unchecked(1, v) == 0;
6292+}
6293
6294 /*
6295 * atomic_dec_and_test - decrement by 1 and test
6296@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6297 * Atomically increments @v by 1.
6298 */
6299 #define atomic_inc(v) atomic_add(1, (v))
6300+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
6301+{
6302+ atomic_add_unchecked(1, v);
6303+}
6304
6305 /*
6306 * atomic_dec - decrement and test
6307@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6308 * Atomically decrements @v by 1.
6309 */
6310 #define atomic_dec(v) atomic_sub(1, (v))
6311+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
6312+{
6313+ atomic_sub_unchecked(1, v);
6314+}
6315
6316 /*
6317 * atomic_add_negative - add and test if negative
6318@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
6319 * @v: pointer of type atomic64_t
6320 *
6321 */
6322-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
6323+static inline long atomic64_read(const atomic64_t *v)
6324+{
6325+ return ACCESS_ONCE(v->counter);
6326+}
6327+
6328+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6329+{
6330+ return ACCESS_ONCE(v->counter);
6331+}
6332
6333 /*
6334 * atomic64_set - set atomic variable
6335 * @v: pointer of type atomic64_t
6336 * @i: required value
6337 */
6338-#define atomic64_set(v, i) ((v)->counter = (i))
6339+static inline void atomic64_set(atomic64_t *v, long i)
6340+{
6341+ v->counter = i;
6342+}
6343
6344-#define ATOMIC64_OP(op, c_op, asm_op) \
6345-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
6346+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6347+{
6348+ v->counter = i;
6349+}
6350+
6351+#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
6352+static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
6353 { \
6354 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
6355 long temp; \
6356 \
6357 __asm__ __volatile__( \
6358- " .set arch=r4000 \n" \
6359- "1: lld %0, %1 # atomic64_" #op " \n" \
6360- " " #asm_op " %0, %2 \n" \
6361+ " .set "MIPS_ISA_LEVEL" \n" \
6362+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6363+ "2: " #asm_op " %0, %2 \n" \
6364 " scd %0, %1 \n" \
6365 " beqzl %0, 1b \n" \
6366+ extable \
6367 " .set mips0 \n" \
6368 : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6369 : "Ir" (i)); \
6370 } else if (kernel_uses_llsc) { \
6371 long temp; \
6372 \
6373- do { \
6374- __asm__ __volatile__( \
6375- " .set "MIPS_ISA_LEVEL" \n" \
6376- " lld %0, %1 # atomic64_" #op "\n" \
6377- " " #asm_op " %0, %2 \n" \
6378- " scd %0, %1 \n" \
6379- " .set mips0 \n" \
6380- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6381- : "Ir" (i)); \
6382- } while (unlikely(!temp)); \
6383+ __asm__ __volatile__( \
6384+ " .set "MIPS_ISA_LEVEL" \n" \
6385+ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
6386+ "2: " #asm_op " %0, %2 \n" \
6387+ " scd %0, %1 \n" \
6388+ " beqz %0, 1b \n" \
6389+ extable \
6390+ " .set mips0 \n" \
6391+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6392+ : "Ir" (i)); \
6393 } else { \
6394 unsigned long flags; \
6395 \
6396 raw_local_irq_save(flags); \
6397- v->counter c_op i; \
6398+ __asm__ __volatile__( \
6399+ "2: " #asm_op " %0, %1 \n" \
6400+ extable \
6401+ : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
6402 raw_local_irq_restore(flags); \
6403 } \
6404 }
6405
6406-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
6407-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6408+#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
6409+ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
6410+
6411+#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
6412+static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
6413 { \
6414 long result; \
6415 \
6416@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6417 long temp; \
6418 \
6419 __asm__ __volatile__( \
6420- " .set arch=r4000 \n" \
6421+ " .set mips3 \n" \
6422 "1: lld %1, %2 # atomic64_" #op "_return\n" \
6423- " " #asm_op " %0, %1, %3 \n" \
6424+ "2: " #asm_op " %0, %1, %3 \n" \
6425 " scd %0, %2 \n" \
6426 " beqzl %0, 1b \n" \
6427- " " #asm_op " %0, %1, %3 \n" \
6428+ post_op \
6429+ extable \
6430+ "4: " #asm_op " %0, %1, %3 \n" \
6431+ "5: \n" \
6432 " .set mips0 \n" \
6433 : "=&r" (result), "=&r" (temp), \
6434 "+" GCC_OFF_SMALL_ASM() (v->counter) \
6435@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6436 } else if (kernel_uses_llsc) { \
6437 long temp; \
6438 \
6439- do { \
6440- __asm__ __volatile__( \
6441- " .set "MIPS_ISA_LEVEL" \n" \
6442- " lld %1, %2 # atomic64_" #op "_return\n" \
6443- " " #asm_op " %0, %1, %3 \n" \
6444- " scd %0, %2 \n" \
6445- " .set mips0 \n" \
6446- : "=&r" (result), "=&r" (temp), \
6447- "=" GCC_OFF_SMALL_ASM() (v->counter) \
6448- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6449- : "memory"); \
6450- } while (unlikely(!result)); \
6451+ __asm__ __volatile__( \
6452+ " .set "MIPS_ISA_LEVEL" \n" \
6453+ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
6454+ "2: " #asm_op " %0, %1, %3 \n" \
6455+ " scd %0, %2 \n" \
6456+ " beqz %0, 1b \n" \
6457+ post_op \
6458+ extable \
6459+ "4: " #asm_op " %0, %1, %3 \n" \
6460+ "5: \n" \
6461+ " .set mips0 \n" \
6462+ : "=&r" (result), "=&r" (temp), \
6463+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
6464+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
6465+ : "memory"); \
6466 \
6467 result = temp; result c_op i; \
6468 } else { \
6469 unsigned long flags; \
6470 \
6471 raw_local_irq_save(flags); \
6472- result = v->counter; \
6473- result c_op i; \
6474- v->counter = result; \
6475+ __asm__ __volatile__( \
6476+ " ld %0, %1 \n" \
6477+ "2: " #asm_op " %0, %1, %2 \n" \
6478+ " sd %0, %1 \n" \
6479+ "3: \n" \
6480+ extable \
6481+ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
6482+ : "Ir" (i)); \
6483 raw_local_irq_restore(flags); \
6484 } \
6485 \
6486@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
6487 return result; \
6488 }
6489
6490-#define ATOMIC64_OPS(op, c_op, asm_op) \
6491- ATOMIC64_OP(op, c_op, asm_op) \
6492- ATOMIC64_OP_RETURN(op, c_op, asm_op)
6493+#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
6494+ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
6495
6496-ATOMIC64_OPS(add, +=, daddu)
6497-ATOMIC64_OPS(sub, -=, dsubu)
6498+#define ATOMIC64_OPS(op, asm_op) \
6499+ ATOMIC64_OP(op, asm_op) \
6500+ ATOMIC64_OP_RETURN(op, asm_op)
6501+
6502+ATOMIC64_OPS(add, dadd)
6503+ATOMIC64_OPS(sub, dsub)
6504
6505 #undef ATOMIC64_OPS
6506 #undef ATOMIC64_OP_RETURN
6507+#undef __ATOMIC64_OP_RETURN
6508 #undef ATOMIC64_OP
6509+#undef __ATOMIC64_OP
6510+#undef __OVERFLOW_EXTABLE
6511+#undef __OVERFLOW_POST
6512
6513 /*
6514 * atomic64_sub_if_positive - conditionally subtract integer from atomic
6515@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
6516 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6517 * The function returns the old value of @v minus @i.
6518 */
6519-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6520+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6521 {
6522 long result;
6523
6524@@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6525 long temp;
6526
6527 __asm__ __volatile__(
6528- " .set arch=r4000 \n"
6529+ " .set "MIPS_ISA_LEVEL" \n"
6530 "1: lld %1, %2 # atomic64_sub_if_positive\n"
6531 " dsubu %0, %1, %3 \n"
6532 " bltz %0, 1f \n"
6533@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6534 return result;
6535 }
6536
6537-#define atomic64_cmpxchg(v, o, n) \
6538- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6539-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6540+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6541+{
6542+ return cmpxchg(&v->counter, old, new);
6543+}
6544+
6545+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6546+ long new)
6547+{
6548+ return cmpxchg(&(v->counter), old, new);
6549+}
6550+
6551+static inline long atomic64_xchg(atomic64_t *v, long new)
6552+{
6553+ return xchg(&v->counter, new);
6554+}
6555+
6556+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6557+{
6558+ return xchg(&(v->counter), new);
6559+}
6560
6561 /**
6562 * atomic64_add_unless - add unless the number is a given value
6563@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6564
6565 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6566 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6567+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6568
6569 /*
6570 * atomic64_sub_and_test - subtract value from variable and test result
6571@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6572 * other cases.
6573 */
6574 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6575+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6576
6577 /*
6578 * atomic64_dec_and_test - decrement by 1 and test
6579@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6580 * Atomically increments @v by 1.
6581 */
6582 #define atomic64_inc(v) atomic64_add(1, (v))
6583+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6584
6585 /*
6586 * atomic64_dec - decrement and test
6587@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6588 * Atomically decrements @v by 1.
6589 */
6590 #define atomic64_dec(v) atomic64_sub(1, (v))
6591+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6592
6593 /*
6594 * atomic64_add_negative - add and test if negative
6595diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
6596index 7ecba84..21774af 100644
6597--- a/arch/mips/include/asm/barrier.h
6598+++ b/arch/mips/include/asm/barrier.h
6599@@ -133,7 +133,7 @@
6600 do { \
6601 compiletime_assert_atomic_type(*p); \
6602 smp_mb(); \
6603- ACCESS_ONCE(*p) = (v); \
6604+ ACCESS_ONCE_RW(*p) = (v); \
6605 } while (0)
6606
6607 #define smp_load_acquire(p) \
6608diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6609index b4db69f..8f3b093 100644
6610--- a/arch/mips/include/asm/cache.h
6611+++ b/arch/mips/include/asm/cache.h
6612@@ -9,10 +9,11 @@
6613 #ifndef _ASM_CACHE_H
6614 #define _ASM_CACHE_H
6615
6616+#include <linux/const.h>
6617 #include <kmalloc.h>
6618
6619 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6620-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6621+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6622
6623 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6624 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6625diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6626index f19e890..a4f8177 100644
6627--- a/arch/mips/include/asm/elf.h
6628+++ b/arch/mips/include/asm/elf.h
6629@@ -417,6 +417,13 @@ extern const char *__elf_platform;
6630 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6631 #endif
6632
6633+#ifdef CONFIG_PAX_ASLR
6634+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6635+
6636+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6637+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6638+#endif
6639+
6640 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6641 struct linux_binprm;
6642 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6643diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6644index c1f6afa..38cc6e9 100644
6645--- a/arch/mips/include/asm/exec.h
6646+++ b/arch/mips/include/asm/exec.h
6647@@ -12,6 +12,6 @@
6648 #ifndef _ASM_EXEC_H
6649 #define _ASM_EXEC_H
6650
6651-extern unsigned long arch_align_stack(unsigned long sp);
6652+#define arch_align_stack(x) ((x) & ~0xfUL)
6653
6654 #endif /* _ASM_EXEC_H */
6655diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
6656index 9e8ef59..1139d6b 100644
6657--- a/arch/mips/include/asm/hw_irq.h
6658+++ b/arch/mips/include/asm/hw_irq.h
6659@@ -10,7 +10,7 @@
6660
6661 #include <linux/atomic.h>
6662
6663-extern atomic_t irq_err_count;
6664+extern atomic_unchecked_t irq_err_count;
6665
6666 /*
6667 * interrupt-retrigger: NOP for now. This may not be appropriate for all
6668diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6669index 8feaed6..1bd8a64 100644
6670--- a/arch/mips/include/asm/local.h
6671+++ b/arch/mips/include/asm/local.h
6672@@ -13,15 +13,25 @@ typedef struct
6673 atomic_long_t a;
6674 } local_t;
6675
6676+typedef struct {
6677+ atomic_long_unchecked_t a;
6678+} local_unchecked_t;
6679+
6680 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6681
6682 #define local_read(l) atomic_long_read(&(l)->a)
6683+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6684 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6685+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6686
6687 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6688+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6689 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6690+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6691 #define local_inc(l) atomic_long_inc(&(l)->a)
6692+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6693 #define local_dec(l) atomic_long_dec(&(l)->a)
6694+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6695
6696 /*
6697 * Same as above, but return the result value
6698@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6699 return result;
6700 }
6701
6702+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6703+{
6704+ unsigned long result;
6705+
6706+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
6707+ unsigned long temp;
6708+
6709+ __asm__ __volatile__(
6710+ " .set mips3 \n"
6711+ "1:" __LL "%1, %2 # local_add_return \n"
6712+ " addu %0, %1, %3 \n"
6713+ __SC "%0, %2 \n"
6714+ " beqzl %0, 1b \n"
6715+ " addu %0, %1, %3 \n"
6716+ " .set mips0 \n"
6717+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6718+ : "Ir" (i), "m" (l->a.counter)
6719+ : "memory");
6720+ } else if (kernel_uses_llsc) {
6721+ unsigned long temp;
6722+
6723+ __asm__ __volatile__(
6724+ " .set mips3 \n"
6725+ "1:" __LL "%1, %2 # local_add_return \n"
6726+ " addu %0, %1, %3 \n"
6727+ __SC "%0, %2 \n"
6728+ " beqz %0, 1b \n"
6729+ " addu %0, %1, %3 \n"
6730+ " .set mips0 \n"
6731+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6732+ : "Ir" (i), "m" (l->a.counter)
6733+ : "memory");
6734+ } else {
6735+ unsigned long flags;
6736+
6737+ local_irq_save(flags);
6738+ result = l->a.counter;
6739+ result += i;
6740+ l->a.counter = result;
6741+ local_irq_restore(flags);
6742+ }
6743+
6744+ return result;
6745+}
6746+
6747 static __inline__ long local_sub_return(long i, local_t * l)
6748 {
6749 unsigned long result;
6750@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6751
6752 #define local_cmpxchg(l, o, n) \
6753 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6754+#define local_cmpxchg_unchecked(l, o, n) \
6755+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6756 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6757
6758 /**
6759diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6760index 89dd7fe..a123c97 100644
6761--- a/arch/mips/include/asm/page.h
6762+++ b/arch/mips/include/asm/page.h
6763@@ -118,7 +118,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6764 #ifdef CONFIG_CPU_MIPS32
6765 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6766 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6767- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6768+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6769 #else
6770 typedef struct { unsigned long long pte; } pte_t;
6771 #define pte_val(x) ((x).pte)
6772diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6773index b336037..5b874cc 100644
6774--- a/arch/mips/include/asm/pgalloc.h
6775+++ b/arch/mips/include/asm/pgalloc.h
6776@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6777 {
6778 set_pud(pud, __pud((unsigned long)pmd));
6779 }
6780+
6781+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6782+{
6783+ pud_populate(mm, pud, pmd);
6784+}
6785 #endif
6786
6787 /*
6788diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
6789index ae85694..4cdbba8 100644
6790--- a/arch/mips/include/asm/pgtable.h
6791+++ b/arch/mips/include/asm/pgtable.h
6792@@ -20,6 +20,9 @@
6793 #include <asm/io.h>
6794 #include <asm/pgtable-bits.h>
6795
6796+#define ktla_ktva(addr) (addr)
6797+#define ktva_ktla(addr) (addr)
6798+
6799 struct mm_struct;
6800 struct vm_area_struct;
6801
6802diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6803index 9c0014e..5101ef5 100644
6804--- a/arch/mips/include/asm/thread_info.h
6805+++ b/arch/mips/include/asm/thread_info.h
6806@@ -100,6 +100,9 @@ static inline struct thread_info *current_thread_info(void)
6807 #define TIF_SECCOMP 4 /* secure computing */
6808 #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
6809 #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
6810+/* li takes a 32-bit immediate */
6811+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
6812+
6813 #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
6814 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
6815 #define TIF_NOHZ 19 /* in adaptive nohz mode */
6816@@ -135,14 +138,16 @@ static inline struct thread_info *current_thread_info(void)
6817 #define _TIF_USEDMSA (1<<TIF_USEDMSA)
6818 #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
6819 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6820+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6821
6822 #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6823 _TIF_SYSCALL_AUDIT | \
6824- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
6825+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
6826+ _TIF_GRSEC_SETXID)
6827
6828 /* work to do in syscall_trace_leave() */
6829 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6830- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
6831+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6832
6833 /* work to do on interrupt/exception return */
6834 #define _TIF_WORK_MASK \
6835@@ -150,7 +155,7 @@ static inline struct thread_info *current_thread_info(void)
6836 /* work to do on any return to u-space */
6837 #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6838 _TIF_WORK_SYSCALL_EXIT | \
6839- _TIF_SYSCALL_TRACEPOINT)
6840+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
6841
6842 /*
6843 * We stash processor id into a COP0 register to retrieve it fast
6844diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
6845index 5305d69..1da2bf5 100644
6846--- a/arch/mips/include/asm/uaccess.h
6847+++ b/arch/mips/include/asm/uaccess.h
6848@@ -146,6 +146,7 @@ static inline bool eva_kernel_access(void)
6849 __ok == 0; \
6850 })
6851
6852+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
6853 #define access_ok(type, addr, size) \
6854 likely(__access_ok((addr), (size), __access_mask))
6855
6856diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6857index 1188e00..41cf144 100644
6858--- a/arch/mips/kernel/binfmt_elfn32.c
6859+++ b/arch/mips/kernel/binfmt_elfn32.c
6860@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6861 #undef ELF_ET_DYN_BASE
6862 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6863
6864+#ifdef CONFIG_PAX_ASLR
6865+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6866+
6867+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6868+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6869+#endif
6870+
6871 #include <asm/processor.h>
6872 #include <linux/module.h>
6873 #include <linux/elfcore.h>
6874diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6875index 9287678..f870e47 100644
6876--- a/arch/mips/kernel/binfmt_elfo32.c
6877+++ b/arch/mips/kernel/binfmt_elfo32.c
6878@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6879 #undef ELF_ET_DYN_BASE
6880 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6881
6882+#ifdef CONFIG_PAX_ASLR
6883+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6884+
6885+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6886+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6887+#endif
6888+
6889 #include <asm/processor.h>
6890
6891 #include <linux/module.h>
6892diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
6893index 74f6752..f3d7a47 100644
6894--- a/arch/mips/kernel/i8259.c
6895+++ b/arch/mips/kernel/i8259.c
6896@@ -205,7 +205,7 @@ spurious_8259A_irq:
6897 printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
6898 spurious_irq_mask |= irqmask;
6899 }
6900- atomic_inc(&irq_err_count);
6901+ atomic_inc_unchecked(&irq_err_count);
6902 /*
6903 * Theoretically we do not have to handle this IRQ,
6904 * but in Linux this does not cause problems and is
6905diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
6906index 44a1f79..2bd6aa3 100644
6907--- a/arch/mips/kernel/irq-gt641xx.c
6908+++ b/arch/mips/kernel/irq-gt641xx.c
6909@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
6910 }
6911 }
6912
6913- atomic_inc(&irq_err_count);
6914+ atomic_inc_unchecked(&irq_err_count);
6915 }
6916
6917 void __init gt641xx_irq_init(void)
6918diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6919index 8eb5af8..2baf465 100644
6920--- a/arch/mips/kernel/irq.c
6921+++ b/arch/mips/kernel/irq.c
6922@@ -34,17 +34,17 @@ void ack_bad_irq(unsigned int irq)
6923 printk("unexpected IRQ # %d\n", irq);
6924 }
6925
6926-atomic_t irq_err_count;
6927+atomic_unchecked_t irq_err_count;
6928
6929 int arch_show_interrupts(struct seq_file *p, int prec)
6930 {
6931- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6932+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6933 return 0;
6934 }
6935
6936 asmlinkage void spurious_interrupt(void)
6937 {
6938- atomic_inc(&irq_err_count);
6939+ atomic_inc_unchecked(&irq_err_count);
6940 }
6941
6942 void __init init_IRQ(void)
6943@@ -58,6 +58,8 @@ void __init init_IRQ(void)
6944 }
6945
6946 #ifdef CONFIG_DEBUG_STACKOVERFLOW
6947+
6948+extern void gr_handle_kernel_exploit(void);
6949 static inline void check_stack_overflow(void)
6950 {
6951 unsigned long sp;
6952@@ -73,6 +75,7 @@ static inline void check_stack_overflow(void)
6953 printk("do_IRQ: stack overflow: %ld\n",
6954 sp - sizeof(struct thread_info));
6955 dump_stack();
6956+ gr_handle_kernel_exploit();
6957 }
6958 }
6959 #else
6960diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
6961index 0614717..002fa43 100644
6962--- a/arch/mips/kernel/pm-cps.c
6963+++ b/arch/mips/kernel/pm-cps.c
6964@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
6965 nc_core_ready_count = nc_addr;
6966
6967 /* Ensure ready_count is zero-initialised before the assembly runs */
6968- ACCESS_ONCE(*nc_core_ready_count) = 0;
6969+ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
6970 coupled_barrier(&per_cpu(pm_barrier, core), online);
6971
6972 /* Run the generated entry code */
6973diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6974index f2975d4..f61d355 100644
6975--- a/arch/mips/kernel/process.c
6976+++ b/arch/mips/kernel/process.c
6977@@ -541,18 +541,6 @@ out:
6978 return pc;
6979 }
6980
6981-/*
6982- * Don't forget that the stack pointer must be aligned on a 8 bytes
6983- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6984- */
6985-unsigned long arch_align_stack(unsigned long sp)
6986-{
6987- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6988- sp -= get_random_int() & ~PAGE_MASK;
6989-
6990- return sp & ALMASK;
6991-}
6992-
6993 static void arch_dump_stack(void *info)
6994 {
6995 struct pt_regs *regs;
6996diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6997index e933a30..0d02625 100644
6998--- a/arch/mips/kernel/ptrace.c
6999+++ b/arch/mips/kernel/ptrace.c
7000@@ -785,6 +785,10 @@ long arch_ptrace(struct task_struct *child, long request,
7001 return ret;
7002 }
7003
7004+#ifdef CONFIG_GRKERNSEC_SETXID
7005+extern void gr_delayed_cred_worker(void);
7006+#endif
7007+
7008 /*
7009 * Notification of system call entry/exit
7010 * - triggered by current->work.syscall_trace
7011@@ -803,6 +807,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
7012 tracehook_report_syscall_entry(regs))
7013 ret = -1;
7014
7015+#ifdef CONFIG_GRKERNSEC_SETXID
7016+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
7017+ gr_delayed_cred_worker();
7018+#endif
7019+
7020 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
7021 trace_sys_enter(regs, regs->regs[2]);
7022
7023diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
7024index 2242bdd..b284048 100644
7025--- a/arch/mips/kernel/sync-r4k.c
7026+++ b/arch/mips/kernel/sync-r4k.c
7027@@ -18,8 +18,8 @@
7028 #include <asm/mipsregs.h>
7029
7030 static atomic_t count_start_flag = ATOMIC_INIT(0);
7031-static atomic_t count_count_start = ATOMIC_INIT(0);
7032-static atomic_t count_count_stop = ATOMIC_INIT(0);
7033+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
7034+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
7035 static atomic_t count_reference = ATOMIC_INIT(0);
7036
7037 #define COUNTON 100
7038@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
7039
7040 for (i = 0; i < NR_LOOPS; i++) {
7041 /* slaves loop on '!= 2' */
7042- while (atomic_read(&count_count_start) != 1)
7043+ while (atomic_read_unchecked(&count_count_start) != 1)
7044 mb();
7045- atomic_set(&count_count_stop, 0);
7046+ atomic_set_unchecked(&count_count_stop, 0);
7047 smp_wmb();
7048
7049 /* this lets the slaves write their count register */
7050- atomic_inc(&count_count_start);
7051+ atomic_inc_unchecked(&count_count_start);
7052
7053 /*
7054 * Everyone initialises count in the last loop:
7055@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
7056 /*
7057 * Wait for all slaves to leave the synchronization point:
7058 */
7059- while (atomic_read(&count_count_stop) != 1)
7060+ while (atomic_read_unchecked(&count_count_stop) != 1)
7061 mb();
7062- atomic_set(&count_count_start, 0);
7063+ atomic_set_unchecked(&count_count_start, 0);
7064 smp_wmb();
7065- atomic_inc(&count_count_stop);
7066+ atomic_inc_unchecked(&count_count_stop);
7067 }
7068 /* Arrange for an interrupt in a short while */
7069 write_c0_compare(read_c0_count() + COUNTON);
7070@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
7071 initcount = atomic_read(&count_reference);
7072
7073 for (i = 0; i < NR_LOOPS; i++) {
7074- atomic_inc(&count_count_start);
7075- while (atomic_read(&count_count_start) != 2)
7076+ atomic_inc_unchecked(&count_count_start);
7077+ while (atomic_read_unchecked(&count_count_start) != 2)
7078 mb();
7079
7080 /*
7081@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
7082 if (i == NR_LOOPS-1)
7083 write_c0_count(initcount);
7084
7085- atomic_inc(&count_count_stop);
7086- while (atomic_read(&count_count_stop) != 2)
7087+ atomic_inc_unchecked(&count_count_stop);
7088+ while (atomic_read_unchecked(&count_count_stop) != 2)
7089 mb();
7090 }
7091 /* Arrange for an interrupt in a short while */
7092diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
7093index 8ea28e6..c8873d5 100644
7094--- a/arch/mips/kernel/traps.c
7095+++ b/arch/mips/kernel/traps.c
7096@@ -697,7 +697,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
7097 siginfo_t info;
7098
7099 prev_state = exception_enter();
7100- die_if_kernel("Integer overflow", regs);
7101+ if (unlikely(!user_mode(regs))) {
7102+
7103+#ifdef CONFIG_PAX_REFCOUNT
7104+ if (fixup_exception(regs)) {
7105+ pax_report_refcount_overflow(regs);
7106+ exception_exit(prev_state);
7107+ return;
7108+ }
7109+#endif
7110+
7111+ die("Integer overflow", regs);
7112+ }
7113
7114 info.si_code = FPE_INTOVF;
7115 info.si_signo = SIGFPE;
7116diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
7117index cd4c129..290c518 100644
7118--- a/arch/mips/kvm/mips.c
7119+++ b/arch/mips/kvm/mips.c
7120@@ -1016,7 +1016,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
7121 return r;
7122 }
7123
7124-int kvm_arch_init(void *opaque)
7125+int kvm_arch_init(const void *opaque)
7126 {
7127 if (kvm_mips_callbacks) {
7128 kvm_err("kvm: module already exists\n");
7129diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
7130index 852a41c..75b9d38 100644
7131--- a/arch/mips/mm/fault.c
7132+++ b/arch/mips/mm/fault.c
7133@@ -31,6 +31,23 @@
7134
7135 int show_unhandled_signals = 1;
7136
7137+#ifdef CONFIG_PAX_PAGEEXEC
7138+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7139+{
7140+ unsigned long i;
7141+
7142+ printk(KERN_ERR "PAX: bytes at PC: ");
7143+ for (i = 0; i < 5; i++) {
7144+ unsigned int c;
7145+ if (get_user(c, (unsigned int *)pc+i))
7146+ printk(KERN_CONT "???????? ");
7147+ else
7148+ printk(KERN_CONT "%08x ", c);
7149+ }
7150+ printk(KERN_CONT "\n");
7151+}
7152+#endif
7153+
7154 /*
7155 * This routine handles page faults. It determines the address,
7156 * and the problem, and then passes it off to one of the appropriate
7157@@ -207,6 +224,14 @@ bad_area:
7158 bad_area_nosemaphore:
7159 /* User mode accesses just cause a SIGSEGV */
7160 if (user_mode(regs)) {
7161+
7162+#ifdef CONFIG_PAX_PAGEEXEC
7163+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
7164+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
7165+ do_group_exit(SIGKILL);
7166+ }
7167+#endif
7168+
7169 tsk->thread.cp0_badvaddr = address;
7170 tsk->thread.error_code = write;
7171 if (show_unhandled_signals &&
7172diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
7173index 5c81fdd..db158d3 100644
7174--- a/arch/mips/mm/mmap.c
7175+++ b/arch/mips/mm/mmap.c
7176@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7177 struct vm_area_struct *vma;
7178 unsigned long addr = addr0;
7179 int do_color_align;
7180+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7181 struct vm_unmapped_area_info info;
7182
7183 if (unlikely(len > TASK_SIZE))
7184@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7185 do_color_align = 1;
7186
7187 /* requesting a specific address */
7188+
7189+#ifdef CONFIG_PAX_RANDMMAP
7190+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
7191+#endif
7192+
7193 if (addr) {
7194 if (do_color_align)
7195 addr = COLOUR_ALIGN(addr, pgoff);
7196@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
7197 addr = PAGE_ALIGN(addr);
7198
7199 vma = find_vma(mm, addr);
7200- if (TASK_SIZE - len >= addr &&
7201- (!vma || addr + len <= vma->vm_start))
7202+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7203 return addr;
7204 }
7205
7206 info.length = len;
7207 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
7208 info.align_offset = pgoff << PAGE_SHIFT;
7209+ info.threadstack_offset = offset;
7210
7211 if (dir == DOWN) {
7212 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
7213@@ -160,45 +166,34 @@ void arch_pic