grub: disable pax mprotect for grub.
src/patches/grsecurity-2.9.1-3.10.9-201308202015.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index b89a739..79768fb 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38 -.*
39 +.[^g]*
40 +.gen*
41 .*.d
42 .mm
43 53c700_d.h
44 @@ -69,9 +75,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48 +PERF*
49 SCCS
50 System.map*
51 TAGS
52 +TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56 @@ -80,6 +88,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60 +ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64 @@ -92,19 +101,24 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68 +builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74 +clut_vga16.c
75 +common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82 +config.c
83 config.mak
84 config.mak.autogen
85 +config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89 @@ -115,9 +129,11 @@ devlist.h*
90 dnotify_test
91 docproc
92 dslm
93 +dtc-lexer.lex.c
94 elf2ecoff
95 elfconfig.h*
96 evergreen_reg_safe.h
97 +exception_policy.conf
98 fixdep
99 flask.h
100 fore200e_mkfirm
101 @@ -125,12 +141,15 @@ fore200e_pca_fw.c*
102 gconf
103 gconf.glade.h
104 gen-devlist
105 +gen-kdb_cmds.c
106 gen_crc32table
107 gen_init_cpio
108 generated
109 genheaders
110 genksyms
111 *_gray256.c
112 +hash
113 +hid-example
114 hpet_example
115 hugepage-mmap
116 hugepage-shm
117 @@ -145,14 +164,14 @@ int32.c
118 int4.c
119 int8.c
120 kallsyms
121 -kconfig
122 +kern_constants.h
123 keywords.c
124 ksym.c*
125 ksym.h*
126 kxgettext
127 lex.c
128 lex.*.c
129 -linux
130 +lib1funcs.S
131 logo_*.c
132 logo_*_clut224.c
133 logo_*_mono.c
134 @@ -162,14 +181,15 @@ mach-types.h
135 machtypes.h
136 map
137 map_hugetlb
138 -media
139 mconf
140 +mdp
141 miboot*
142 mk_elfconfig
143 mkboot
144 mkbugboot
145 mkcpustr
146 mkdep
147 +mkpiggy
148 mkprep
149 mkregtable
150 mktables
151 @@ -185,6 +205,8 @@ oui.c*
152 page-types
153 parse.c
154 parse.h
155 +parse-events*
156 +pasyms.h
157 patches*
158 pca200e.bin
159 pca200e_ecd.bin2
160 @@ -194,6 +216,7 @@ perf-archive
161 piggyback
162 piggy.gzip
163 piggy.S
164 +pmu-*
165 pnmtologo
166 ppc_defs.h*
167 pss_boot.h
168 @@ -203,7 +226,10 @@ r200_reg_safe.h
169 r300_reg_safe.h
170 r420_reg_safe.h
171 r600_reg_safe.h
172 +realmode.lds
173 +realmode.relocs
174 recordmcount
175 +regdb.c
176 relocs
177 rlim_names.h
178 rn50_reg_safe.h
179 @@ -213,8 +239,12 @@ series
180 setup
181 setup.bin
182 setup.elf
183 +signing_key*
184 +size_overflow_hash.h
185 sImage
186 +slabinfo
187 sm_tbl*
188 +sortextable
189 split-include
190 syscalltab.h
191 tables.c
192 @@ -224,6 +254,7 @@ tftpboot.img
193 timeconst.h
194 times.h*
195 trix_boot.h
196 +user_constants.h
197 utsrelease.h*
198 vdso-syms.lds
199 vdso.lds
200 @@ -235,13 +266,17 @@ vdso32.lds
201 vdso32.so.dbg
202 vdso64.lds
203 vdso64.so.dbg
204 +vdsox32.lds
205 +vdsox32-syms.lds
206 version.h*
207 vmImage
208 vmlinux
209 vmlinux-*
210 vmlinux.aout
211 vmlinux.bin.all
212 +vmlinux.bin.bz2
213 vmlinux.lds
214 +vmlinux.relocs
215 vmlinuz
216 voffset.h
217 vsyscall.lds
218 @@ -249,9 +284,12 @@ vsyscall_32.lds
219 wanxlfw.inc
220 uImage
221 unifdef
222 +utsrelease.h
223 wakeup.bin
224 wakeup.elf
225 wakeup.lds
226 +x509*
227 zImage*
228 zconf.hash.c
229 +zconf.lex.c
230 zoffset.h
231 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
232 index 2fe6e76..889ee23 100644
233 --- a/Documentation/kernel-parameters.txt
234 +++ b/Documentation/kernel-parameters.txt
235 @@ -976,6 +976,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
236 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
237 Default: 1024
238
239 + grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
240 + ignore grsecurity's /proc restrictions
241 +
242 +
243 hashdist= [KNL,NUMA] Large hashes allocated during boot
244 are distributed across NUMA nodes. Defaults on
245 for 64-bit NUMA, off otherwise.
246 @@ -1928,6 +1932,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
247 noexec=on: enable non-executable mappings (default)
248 noexec=off: disable non-executable mappings
249
250 + nopcid [X86-64]
251 + Disable PCID (Process-Context IDentifier) even if it
252 + is supported by the processor.
253 +
254 nosmap [X86]
255 Disable SMAP (Supervisor Mode Access Prevention)
256 even if it is supported by processor.
257 @@ -2195,6 +2203,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
258 the specified number of seconds. This is to be used if
259 your oopses keep scrolling off the screen.
260
261 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
262 + virtualization environments that don't cope well with the
263 + expand down segment used by UDEREF on X86-32 or the frequent
264 + page table updates on X86-64.
265 +
266 + pax_sanitize_slab=
267 + 0/1 to disable/enable slab object sanitization (enabled by
268 + default).
269 +
270 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
271 +
272 + pax_extra_latent_entropy
273 + Enable a very simple form of latent entropy extraction
274 + from the first 4GB of memory as the bootmem allocator
275 + passes the memory pages to the buddy allocator.
276 +
277 + pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
278 + when the processor supports PCID.
279 +
280 pcbit= [HW,ISDN]
281
282 pcd. [PARIDE]
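
The pax_softmode= parameter above flips PaX into soft mode from the kernel command line. As a rough sketch of how such a parameter is typically wired up (the handler below follows standard __setup conventions; its name and the bare pax_softmode variable are illustrative, not lines quoted from this patch):

    #include <linux/init.h>
    #include <linux/kernel.h>

    int pax_softmode;

    /* parses "pax_softmode=0" / "pax_softmode=1" from the command line */
    static int __init setup_pax_softmode(char *str)
    {
            pax_softmode = simple_strtol(str, NULL, 0) != 0;
            return 1;
    }
    __setup("pax_softmode=", setup_pax_softmode);
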
283 diff --git a/Makefile b/Makefile
284 index 4b31d62..ac99d49 100644
285 --- a/Makefile
286 +++ b/Makefile
287 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
288
289 HOSTCC = gcc
290 HOSTCXX = g++
291 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
292 -HOSTCXXFLAGS = -O2
293 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
294 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
295 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
296
297 # Decide whether to build built-in, modular, or both.
298 # Normally, just do built-in.
299 @@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
300 # Rules shared between *config targets and build targets
301
302 # Basic helpers built in scripts/
303 -PHONY += scripts_basic
304 -scripts_basic:
305 +PHONY += scripts_basic gcc-plugins
306 +scripts_basic: gcc-plugins
307 $(Q)$(MAKE) $(build)=scripts/basic
308 $(Q)rm -f .tmp_quiet_recordmcount
309
310 @@ -576,6 +577,65 @@ else
311 KBUILD_CFLAGS += -O2
312 endif
313
314 +ifndef DISABLE_PAX_PLUGINS
315 +ifeq ($(call cc-ifversion, -ge, 0408, y), y)
316 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
317 +else
318 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
319 +endif
320 +ifneq ($(PLUGINCC),)
321 +ifdef CONFIG_PAX_CONSTIFY_PLUGIN
322 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
323 +endif
324 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
325 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
326 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
327 +endif
328 +ifdef CONFIG_KALLOCSTAT_PLUGIN
329 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
330 +endif
331 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
332 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
333 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
334 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
335 +endif
336 +ifdef CONFIG_CHECKER_PLUGIN
337 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
338 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
339 +endif
340 +endif
341 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
342 +ifdef CONFIG_PAX_SIZE_OVERFLOW
343 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
344 +endif
345 +ifdef CONFIG_PAX_LATENT_ENTROPY
346 +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
347 +endif
348 +ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
349 +STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
350 +endif
351 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
352 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
353 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
354 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
355 +export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
356 +ifeq ($(KBUILD_EXTMOD),)
357 +gcc-plugins:
358 + $(Q)$(MAKE) $(build)=tools/gcc
359 +else
360 +gcc-plugins: ;
361 +endif
362 +else
363 +gcc-plugins:
364 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
365 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
366 +else
367 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
368 +endif
369 + $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
370 +endif
371 +endif
372 +
373 include $(srctree)/arch/$(SRCARCH)/Makefile
374
375 ifdef CONFIG_READABLE_ASM
376 @@ -733,7 +793,7 @@ export mod_sign_cmd
377
378
379 ifeq ($(KBUILD_EXTMOD),)
380 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
381 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
382
383 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
384 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
385 @@ -782,6 +842,8 @@ endif
386
387 # The actual objects are generated when descending,
388 # make sure no implicit rule kicks in
389 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
390 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
391 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
392
393 # Handle descending into subdirectories listed in $(vmlinux-dirs)
394 @@ -791,7 +853,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
395 # Error messages still appears in the original language
396
397 PHONY += $(vmlinux-dirs)
398 -$(vmlinux-dirs): prepare scripts
399 +$(vmlinux-dirs): gcc-plugins prepare scripts
400 $(Q)$(MAKE) $(build)=$@
401
402 # Store (new) KERNELRELASE string in include/config/kernel.release
403 @@ -835,6 +897,7 @@ prepare0: archprepare FORCE
404 $(Q)$(MAKE) $(build)=.
405
406 # All the preparing..
407 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
408 prepare: prepare0
409
410 # Generate some files
411 @@ -942,6 +1005,8 @@ all: modules
412 # using awk while concatenating to the final file.
413
414 PHONY += modules
415 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
416 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
417 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
418 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
419 @$(kecho) ' Building modules, stage 2.';
420 @@ -957,7 +1022,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
421
422 # Target to prepare building external modules
423 PHONY += modules_prepare
424 -modules_prepare: prepare scripts
425 +modules_prepare: gcc-plugins prepare scripts
426
427 # Target to install modules
428 PHONY += modules_install
429 @@ -1023,7 +1088,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
430 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
431 signing_key.priv signing_key.x509 x509.genkey \
432 extra_certificates signing_key.x509.keyid \
433 - signing_key.x509.signer
434 + signing_key.x509.signer tools/gcc/size_overflow_hash.h
435
436 # clean - Delete most, but leave enough to build external modules
437 #
438 @@ -1063,6 +1128,7 @@ distclean: mrproper
439 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
440 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
441 -o -name '.*.rej' \
442 + -o -name '.*.rej' -o -name '*.so' \
443 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
444 -type f -print | xargs rm -f
445
446 @@ -1223,6 +1289,8 @@ PHONY += $(module-dirs) modules
447 $(module-dirs): crmodverdir $(objtree)/Module.symvers
448 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
449
450 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
451 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
452 modules: $(module-dirs)
453 @$(kecho) ' Building modules, stage 2.';
454 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
455 @@ -1359,17 +1427,21 @@ else
456 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
457 endif
458
459 -%.s: %.c prepare scripts FORCE
460 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
461 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
462 +%.s: %.c gcc-plugins prepare scripts FORCE
463 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
464 %.i: %.c prepare scripts FORCE
465 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
466 -%.o: %.c prepare scripts FORCE
467 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
468 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
469 +%.o: %.c gcc-plugins prepare scripts FORCE
470 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
471 %.lst: %.c prepare scripts FORCE
472 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
473 -%.s: %.S prepare scripts FORCE
474 +%.s: %.S gcc-plugins prepare scripts FORCE
475 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
476 -%.o: %.S prepare scripts FORCE
477 +%.o: %.S gcc-plugins prepare scripts FORCE
478 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
479 %.symtypes: %.c prepare scripts FORCE
480 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
481 @@ -1379,11 +1451,15 @@ endif
482 $(cmd_crmodverdir)
483 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
484 $(build)=$(build-dir)
485 -%/: prepare scripts FORCE
486 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
487 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
488 +%/: gcc-plugins prepare scripts FORCE
489 $(cmd_crmodverdir)
490 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
491 $(build)=$(build-dir)
492 -%.ko: prepare scripts FORCE
493 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
494 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
495 +%.ko: gcc-plugins prepare scripts FORCE
496 $(cmd_crmodverdir)
497 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
498 $(build)=$(build-dir) $(@:.ko=.o)
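
Each -fplugin=$(objtree)/tools/gcc/*.so flag assembled above loads a shared object built against gcc's plugin API, which is why PLUGINCC is probed with HOSTCXX for gcc >= 4.8 (gcc itself became a C++ program there) and with HOSTCC for older releases. For orientation, the entry point every such plugin must provide looks roughly like this (standard gcc plugin boilerplate, not code from this patch):

    #include "gcc-plugin.h"
    #include "plugin-version.h"

    int plugin_is_GPL_compatible;   /* gcc refuses to load plugins lacking this */

    int plugin_init(struct plugin_name_args *plugin_info,
                    struct plugin_gcc_version *version)
    {
            /* reject loading into a gcc other than the one we were built for */
            if (!plugin_default_version_check(version, &gcc_version))
                    return 1;

            /* a real plugin (constify, stackleak, ...) registers its passes here */
            return 0;
    }
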
499 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
500 index c2cbe4f..f7264b4 100644
501 --- a/arch/alpha/include/asm/atomic.h
502 +++ b/arch/alpha/include/asm/atomic.h
503 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
504 #define atomic_dec(v) atomic_sub(1,(v))
505 #define atomic64_dec(v) atomic64_sub(1,(v))
506
507 +#define atomic64_read_unchecked(v) atomic64_read(v)
508 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
509 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
510 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
511 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
512 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
513 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
514 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
515 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
516 +
517 #define smp_mb__before_atomic_dec() smp_mb()
518 #define smp_mb__after_atomic_dec() smp_mb()
519 #define smp_mb__before_atomic_inc() smp_mb()
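
On alpha the *_unchecked variants are plain aliases: the architecture gets no PAX_REFCOUNT instrumentation, so checked and unchecked operations coincide. The aliases still matter so that generic code can mark counters that are allowed to wrap, e.g. (the counter below is illustrative; atomic64_unchecked_t is typedef'd elsewhere in this patch):

    /* a statistics counter that may legitimately wrap around */
    static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0);

    static void account_rx(long len)
    {
            atomic64_add_unchecked(len, &rx_bytes);  /* never meant to trap on overflow */
    }
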
520 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
521 index ad368a9..fbe0f25 100644
522 --- a/arch/alpha/include/asm/cache.h
523 +++ b/arch/alpha/include/asm/cache.h
524 @@ -4,19 +4,19 @@
525 #ifndef __ARCH_ALPHA_CACHE_H
526 #define __ARCH_ALPHA_CACHE_H
527
528 +#include <linux/const.h>
529
530 /* Bytes per L1 (data) cache line. */
531 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
532 -# define L1_CACHE_BYTES 64
533 # define L1_CACHE_SHIFT 6
534 #else
535 /* Both EV4 and EV5 are write-through, read-allocate,
536 direct-mapped, physical.
537 */
538 -# define L1_CACHE_BYTES 32
539 # define L1_CACHE_SHIFT 5
540 #endif
541
542 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
543 #define SMP_CACHE_BYTES L1_CACHE_BYTES
544
545 #endif
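
Deriving L1_CACHE_BYTES from L1_CACHE_SHIFT via _AC(1,UL) (from <linux/const.h>) keeps the two constants from drifting apart and makes the byte count an unsigned long in C while remaining usable from assembly, where a UL suffix would not parse. On EV6 the result is (illustrative expansion):

    /* C view:        (1UL << 6) == 64UL
     * assembly view: (1 << 6), since _AC(X,Y) drops the suffix under __ASSEMBLY__ */
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
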
546 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
547 index 968d999..d36b2df 100644
548 --- a/arch/alpha/include/asm/elf.h
549 +++ b/arch/alpha/include/asm/elf.h
550 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
551
552 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
553
554 +#ifdef CONFIG_PAX_ASLR
555 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
556 +
557 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
558 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
559 +#endif
560 +
561 /* $0 is set by ld.so to a pointer to a function which might be
562 registered using atexit. This provides a mean for the dynamic
563 linker to call DT_FINI functions for shared libraries that have
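
As I read the PaX convention, PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of randomized bits applied to the respective base, counted in page-sized units. A quick back-of-envelope check for a full 64-bit alpha process (assuming alpha's 8 KB pages, i.e. PAGE_SHIFT = 13):

    #include <stdio.h>

    int main(void)
    {
            /* 28 random bits of page offset on top of a 13-bit page shift */
            unsigned long mmap_span = 1UL << (28 + 13);

            printf("mmap base randomized across %lu bytes (2 TB)\n", mmap_span);
            return 0;
    }
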
564 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
565 index bc2a0da..8ad11ee 100644
566 --- a/arch/alpha/include/asm/pgalloc.h
567 +++ b/arch/alpha/include/asm/pgalloc.h
568 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
569 pgd_set(pgd, pmd);
570 }
571
572 +static inline void
573 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
574 +{
575 + pgd_populate(mm, pgd, pmd);
576 +}
577 +
578 extern pgd_t *pgd_alloc(struct mm_struct *mm);
579
580 static inline void
581 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
582 index 81a4342..348b927 100644
583 --- a/arch/alpha/include/asm/pgtable.h
584 +++ b/arch/alpha/include/asm/pgtable.h
585 @@ -102,6 +102,17 @@ struct vm_area_struct;
586 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
587 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
588 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
589 +
590 +#ifdef CONFIG_PAX_PAGEEXEC
591 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
592 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
593 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
594 +#else
595 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
596 +# define PAGE_COPY_NOEXEC PAGE_COPY
597 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
598 +#endif
599 +
600 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
601
602 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
603 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
604 index 2fd00b7..cfd5069 100644
605 --- a/arch/alpha/kernel/module.c
606 +++ b/arch/alpha/kernel/module.c
607 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
608
609 /* The small sections were sorted to the end of the segment.
610 The following should definitely cover them. */
611 - gp = (u64)me->module_core + me->core_size - 0x8000;
612 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
613 got = sechdrs[me->arch.gotsecindex].sh_addr;
614
615 for (i = 0; i < n; i++) {
616 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
617 index b9e37ad..44c24e7 100644
618 --- a/arch/alpha/kernel/osf_sys.c
619 +++ b/arch/alpha/kernel/osf_sys.c
620 @@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
621 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
622
623 static unsigned long
624 -arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
625 - unsigned long limit)
626 +arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
627 + unsigned long limit, unsigned long flags)
628 {
629 struct vm_unmapped_area_info info;
630 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
631
632 info.flags = 0;
633 info.length = len;
634 @@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
635 info.high_limit = limit;
636 info.align_mask = 0;
637 info.align_offset = 0;
638 + info.threadstack_offset = offset;
639 return vm_unmapped_area(&info);
640 }
641
642 @@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
643 merely specific addresses, but regions of memory -- perhaps
644 this feature should be incorporated into all ports? */
645
646 +#ifdef CONFIG_PAX_RANDMMAP
647 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
648 +#endif
649 +
650 if (addr) {
651 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
652 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
653 if (addr != (unsigned long) -ENOMEM)
654 return addr;
655 }
656
657 /* Next, try allocating at TASK_UNMAPPED_BASE. */
658 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
659 - len, limit);
660 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
661 +
662 if (addr != (unsigned long) -ENOMEM)
663 return addr;
664
665 /* Finally, try allocating in low memory. */
666 - addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
667 + addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
668
669 return addr;
670 }
671 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
672 index 0c4132d..88f0d53 100644
673 --- a/arch/alpha/mm/fault.c
674 +++ b/arch/alpha/mm/fault.c
675 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
676 __reload_thread(pcb);
677 }
678
679 +#ifdef CONFIG_PAX_PAGEEXEC
680 +/*
681 + * PaX: decide what to do with offenders (regs->pc = fault address)
682 + *
683 + * returns 1 when task should be killed
684 + * 2 when patched PLT trampoline was detected
685 + * 3 when unpatched PLT trampoline was detected
686 + */
687 +static int pax_handle_fetch_fault(struct pt_regs *regs)
688 +{
689 +
690 +#ifdef CONFIG_PAX_EMUPLT
691 + int err;
692 +
693 + do { /* PaX: patched PLT emulation #1 */
694 + unsigned int ldah, ldq, jmp;
695 +
696 + err = get_user(ldah, (unsigned int *)regs->pc);
697 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
698 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
699 +
700 + if (err)
701 + break;
702 +
703 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
704 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
705 + jmp == 0x6BFB0000U)
706 + {
707 + unsigned long r27, addr;
708 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
709 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
710 +
711 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
712 + err = get_user(r27, (unsigned long *)addr);
713 + if (err)
714 + break;
715 +
716 + regs->r27 = r27;
717 + regs->pc = r27;
718 + return 2;
719 + }
720 + } while (0);
721 +
722 + do { /* PaX: patched PLT emulation #2 */
723 + unsigned int ldah, lda, br;
724 +
725 + err = get_user(ldah, (unsigned int *)regs->pc);
726 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
727 + err |= get_user(br, (unsigned int *)(regs->pc+8));
728 +
729 + if (err)
730 + break;
731 +
732 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
733 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
734 + (br & 0xFFE00000U) == 0xC3E00000U)
735 + {
736 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
737 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
738 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
739 +
740 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
741 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
742 + return 2;
743 + }
744 + } while (0);
745 +
746 + do { /* PaX: unpatched PLT emulation */
747 + unsigned int br;
748 +
749 + err = get_user(br, (unsigned int *)regs->pc);
750 +
751 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
752 + unsigned int br2, ldq, nop, jmp;
753 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
754 +
755 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
756 + err = get_user(br2, (unsigned int *)addr);
757 + err |= get_user(ldq, (unsigned int *)(addr+4));
758 + err |= get_user(nop, (unsigned int *)(addr+8));
759 + err |= get_user(jmp, (unsigned int *)(addr+12));
760 + err |= get_user(resolver, (unsigned long *)(addr+16));
761 +
762 + if (err)
763 + break;
764 +
765 + if (br2 == 0xC3600000U &&
766 + ldq == 0xA77B000CU &&
767 + nop == 0x47FF041FU &&
768 + jmp == 0x6B7B0000U)
769 + {
770 + regs->r28 = regs->pc+4;
771 + regs->r27 = addr+16;
772 + regs->pc = resolver;
773 + return 3;
774 + }
775 + }
776 + } while (0);
777 +#endif
778 +
779 + return 1;
780 +}
781 +
782 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
783 +{
784 + unsigned long i;
785 +
786 + printk(KERN_ERR "PAX: bytes at PC: ");
787 + for (i = 0; i < 5; i++) {
788 + unsigned int c;
789 + if (get_user(c, (unsigned int *)pc+i))
790 + printk(KERN_CONT "???????? ");
791 + else
792 + printk(KERN_CONT "%08x ", c);
793 + }
794 + printk("\n");
795 +}
796 +#endif
797
798 /*
799 * This routine handles page faults. It determines the address,
800 @@ -133,8 +251,29 @@ retry:
801 good_area:
802 si_code = SEGV_ACCERR;
803 if (cause < 0) {
804 - if (!(vma->vm_flags & VM_EXEC))
805 + if (!(vma->vm_flags & VM_EXEC)) {
806 +
807 +#ifdef CONFIG_PAX_PAGEEXEC
808 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
809 + goto bad_area;
810 +
811 + up_read(&mm->mmap_sem);
812 + switch (pax_handle_fetch_fault(regs)) {
813 +
814 +#ifdef CONFIG_PAX_EMUPLT
815 + case 2:
816 + case 3:
817 + return;
818 +#endif
819 +
820 + }
821 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
822 + do_group_exit(SIGKILL);
823 +#else
824 goto bad_area;
825 +#endif
826 +
827 + }
828 } else if (!cause) {
829 /* Allow reads even for write-only mappings */
830 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
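
The opcode masks in emulation #1 match the canonical three-instruction patched PLT stub — ldah $27,hi($27); ldq $27,lo($27); jmp $31,($27) — and the 0x8000/0x80000000 add-xor pairs sign-extend the two 16-bit displacements when reconstructing the GOT slot address. Restated as a stand-alone predicate (a sketch mirroring the masks above):

    /* does the instruction triple at the faulting PC look like a patched PLT stub? */
    static int is_patched_plt1(unsigned int ldah, unsigned int ldq, unsigned int jmp)
    {
            return (ldah & 0xFFFF0000U) == 0x277B0000U &&  /* ldah $27,hi($27) */
                   (ldq  & 0xFFFF0000U) == 0xA77B0000U &&  /* ldq  $27,lo($27) */
                   jmp == 0x6BFB0000U;                     /* jmp  $31,($27) */
    }
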
831 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
832 index 18a9f5e..ca910b7 100644
833 --- a/arch/arm/Kconfig
834 +++ b/arch/arm/Kconfig
835 @@ -1766,7 +1766,7 @@ config ALIGNMENT_TRAP
836
837 config UACCESS_WITH_MEMCPY
838 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
839 - depends on MMU
840 + depends on MMU && !PAX_MEMORY_UDEREF
841 default y if CPU_FEROCEON
842 help
843 Implement faster copy_to_user and clear_user methods for CPU
844 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
845 index da1c77d..2ee6056 100644
846 --- a/arch/arm/include/asm/atomic.h
847 +++ b/arch/arm/include/asm/atomic.h
848 @@ -17,17 +17,35 @@
849 #include <asm/barrier.h>
850 #include <asm/cmpxchg.h>
851
852 +#ifdef CONFIG_GENERIC_ATOMIC64
853 +#include <asm-generic/atomic64.h>
854 +#endif
855 +
856 #define ATOMIC_INIT(i) { (i) }
857
858 #ifdef __KERNEL__
859
860 +#define _ASM_EXTABLE(from, to) \
861 +" .pushsection __ex_table,\"a\"\n"\
862 +" .align 3\n" \
863 +" .long " #from ", " #to"\n" \
864 +" .popsection"
865 +
866 /*
867 * On ARM, ordinary assignment (str instruction) doesn't clear the local
868 * strex/ldrex monitor on some implementations. The reason we can use it for
869 * atomic_set() is the clrex or dummy strex done on every exception return.
870 */
871 #define atomic_read(v) (*(volatile int *)&(v)->counter)
872 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
873 +{
874 + return v->counter;
875 +}
876 #define atomic_set(v,i) (((v)->counter) = (i))
877 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
878 +{
879 + v->counter = i;
880 +}
881
882 #if __LINUX_ARM_ARCH__ >= 6
883
884 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
885 int result;
886
887 __asm__ __volatile__("@ atomic_add\n"
888 +"1: ldrex %1, [%3]\n"
889 +" adds %0, %1, %4\n"
890 +
891 +#ifdef CONFIG_PAX_REFCOUNT
892 +" bvc 3f\n"
893 +"2: bkpt 0xf103\n"
894 +"3:\n"
895 +#endif
896 +
897 +" strex %1, %0, [%3]\n"
898 +" teq %1, #0\n"
899 +" bne 1b"
900 +
901 +#ifdef CONFIG_PAX_REFCOUNT
902 +"\n4:\n"
903 + _ASM_EXTABLE(2b, 4b)
904 +#endif
905 +
906 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
907 + : "r" (&v->counter), "Ir" (i)
908 + : "cc");
909 +}
910 +
911 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
912 +{
913 + unsigned long tmp;
914 + int result;
915 +
916 + __asm__ __volatile__("@ atomic_add_unchecked\n"
917 "1: ldrex %0, [%3]\n"
918 " add %0, %0, %4\n"
919 " strex %1, %0, [%3]\n"
920 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
921 smp_mb();
922
923 __asm__ __volatile__("@ atomic_add_return\n"
924 +"1: ldrex %1, [%3]\n"
925 +" adds %0, %1, %4\n"
926 +
927 +#ifdef CONFIG_PAX_REFCOUNT
928 +" bvc 3f\n"
929 +" mov %0, %1\n"
930 +"2: bkpt 0xf103\n"
931 +"3:\n"
932 +#endif
933 +
934 +" strex %1, %0, [%3]\n"
935 +" teq %1, #0\n"
936 +" bne 1b"
937 +
938 +#ifdef CONFIG_PAX_REFCOUNT
939 +"\n4:\n"
940 + _ASM_EXTABLE(2b, 4b)
941 +#endif
942 +
943 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
944 + : "r" (&v->counter), "Ir" (i)
945 + : "cc");
946 +
947 + smp_mb();
948 +
949 + return result;
950 +}
951 +
952 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
953 +{
954 + unsigned long tmp;
955 + int result;
956 +
957 + smp_mb();
958 +
959 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
960 "1: ldrex %0, [%3]\n"
961 " add %0, %0, %4\n"
962 " strex %1, %0, [%3]\n"
963 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
964 int result;
965
966 __asm__ __volatile__("@ atomic_sub\n"
967 +"1: ldrex %1, [%3]\n"
968 +" subs %0, %1, %4\n"
969 +
970 +#ifdef CONFIG_PAX_REFCOUNT
971 +" bvc 3f\n"
972 +"2: bkpt 0xf103\n"
973 +"3:\n"
974 +#endif
975 +
976 +" strex %1, %0, [%3]\n"
977 +" teq %1, #0\n"
978 +" bne 1b"
979 +
980 +#ifdef CONFIG_PAX_REFCOUNT
981 +"\n4:\n"
982 + _ASM_EXTABLE(2b, 4b)
983 +#endif
984 +
985 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
986 + : "r" (&v->counter), "Ir" (i)
987 + : "cc");
988 +}
989 +
990 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
991 +{
992 + unsigned long tmp;
993 + int result;
994 +
995 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
996 "1: ldrex %0, [%3]\n"
997 " sub %0, %0, %4\n"
998 " strex %1, %0, [%3]\n"
999 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1000 smp_mb();
1001
1002 __asm__ __volatile__("@ atomic_sub_return\n"
1003 -"1: ldrex %0, [%3]\n"
1004 -" sub %0, %0, %4\n"
1005 +"1: ldrex %1, [%3]\n"
1006 +" subs %0, %1, %4\n"
1007 +
1008 +#ifdef CONFIG_PAX_REFCOUNT
1009 +" bvc 3f\n"
1010 +" mov %0, %1\n"
1011 +"2: bkpt 0xf103\n"
1012 +"3:\n"
1013 +#endif
1014 +
1015 " strex %1, %0, [%3]\n"
1016 " teq %1, #0\n"
1017 " bne 1b"
1018 +
1019 +#ifdef CONFIG_PAX_REFCOUNT
1020 +"\n4:\n"
1021 + _ASM_EXTABLE(2b, 4b)
1022 +#endif
1023 +
1024 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1025 : "r" (&v->counter), "Ir" (i)
1026 : "cc");
1027 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1028 return oldval;
1029 }
1030
1031 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1032 +{
1033 + unsigned long oldval, res;
1034 +
1035 + smp_mb();
1036 +
1037 + do {
1038 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1039 + "ldrex %1, [%3]\n"
1040 + "mov %0, #0\n"
1041 + "teq %1, %4\n"
1042 + "strexeq %0, %5, [%3]\n"
1043 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1044 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
1045 + : "cc");
1046 + } while (res);
1047 +
1048 + smp_mb();
1049 +
1050 + return oldval;
1051 +}
1052 +
1053 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1054 {
1055 unsigned long tmp, tmp2;
1056 @@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1057
1058 return val;
1059 }
1060 +
1061 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1062 +{
1063 + return atomic_add_return(i, v);
1064 +}
1065 +
1066 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1067 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1068 +{
1069 + (void) atomic_add_return(i, v);
1070 +}
1071
1072 static inline int atomic_sub_return(int i, atomic_t *v)
1073 {
1074 @@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1075 return val;
1076 }
1077 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1078 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1079 +{
1080 + (void) atomic_sub_return(i, v);
1081 +}
1082
1083 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1084 {
1085 @@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1086 return ret;
1087 }
1088
1089 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1090 +{
1091 + return atomic_cmpxchg(v, old, new);
1092 +}
1093 +
1094 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1095 {
1096 unsigned long flags;
1097 @@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1098 #endif /* __LINUX_ARM_ARCH__ */
1099
1100 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1101 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1102 +{
1103 + return xchg(&v->counter, new);
1104 +}
1105
1106 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1107 {
1108 @@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1109 }
1110
1111 #define atomic_inc(v) atomic_add(1, v)
1112 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1113 +{
1114 + atomic_add_unchecked(1, v);
1115 +}
1116 #define atomic_dec(v) atomic_sub(1, v)
1117 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1118 +{
1119 + atomic_sub_unchecked(1, v);
1120 +}
1121
1122 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1123 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1124 +{
1125 + return atomic_add_return_unchecked(1, v) == 0;
1126 +}
1127 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1128 #define atomic_inc_return(v) (atomic_add_return(1, v))
1129 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1130 +{
1131 + return atomic_add_return_unchecked(1, v);
1132 +}
1133 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1134 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1135
1136 @@ -241,6 +428,14 @@ typedef struct {
1137 u64 __aligned(8) counter;
1138 } atomic64_t;
1139
1140 +#ifdef CONFIG_PAX_REFCOUNT
1141 +typedef struct {
1142 + u64 __aligned(8) counter;
1143 +} atomic64_unchecked_t;
1144 +#else
1145 +typedef atomic64_t atomic64_unchecked_t;
1146 +#endif
1147 +
1148 #define ATOMIC64_INIT(i) { (i) }
1149
1150 #ifdef CONFIG_ARM_LPAE
1151 @@ -257,6 +452,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1152 return result;
1153 }
1154
1155 +static inline u64 atomic64_read_unchecked(const atomic64_unchecked_t *v)
1156 +{
1157 + u64 result;
1158 +
1159 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1160 +" ldrd %0, %H0, [%1]"
1161 + : "=&r" (result)
1162 + : "r" (&v->counter), "Qo" (v->counter)
1163 + );
1164 +
1165 + return result;
1166 +}
1167 +
1168 static inline void atomic64_set(atomic64_t *v, u64 i)
1169 {
1170 __asm__ __volatile__("@ atomic64_set\n"
1171 @@ -265,6 +473,15 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1172 : "r" (&v->counter), "r" (i)
1173 );
1174 }
1175 +
1176 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1177 +{
1178 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1179 +" strd %2, %H2, [%1]"
1180 + : "=Qo" (v->counter)
1181 + : "r" (&v->counter), "r" (i)
1182 + );
1183 +}
1184 #else
1185 static inline u64 atomic64_read(const atomic64_t *v)
1186 {
1187 @@ -279,6 +496,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1188 return result;
1189 }
1190
1191 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1192 +{
1193 + u64 result;
1194 +
1195 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1196 +" ldrexd %0, %H0, [%1]"
1197 + : "=&r" (result)
1198 + : "r" (&v->counter), "Qo" (v->counter)
1199 + );
1200 +
1201 + return result;
1202 +}
1203 +
1204 static inline void atomic64_set(atomic64_t *v, u64 i)
1205 {
1206 u64 tmp;
1207 @@ -292,6 +522,21 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1208 : "r" (&v->counter), "r" (i)
1209 : "cc");
1210 }
1211 +
1212 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1213 +{
1214 + u64 tmp;
1215 +
1216 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1217 +"1: ldrexd %0, %H0, [%2]\n"
1218 +" strexd %0, %3, %H3, [%2]\n"
1219 +" teq %0, #0\n"
1220 +" bne 1b"
1221 + : "=&r" (tmp), "=Qo" (v->counter)
1222 + : "r" (&v->counter), "r" (i)
1223 + : "cc");
1224 +}
1225 +
1226 #endif
1227
1228 static inline void atomic64_add(u64 i, atomic64_t *v)
1229 @@ -302,6 +547,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1230 __asm__ __volatile__("@ atomic64_add\n"
1231 "1: ldrexd %0, %H0, [%3]\n"
1232 " adds %0, %0, %4\n"
1233 +" adcs %H0, %H0, %H4\n"
1234 +
1235 +#ifdef CONFIG_PAX_REFCOUNT
1236 +" bvc 3f\n"
1237 +"2: bkpt 0xf103\n"
1238 +"3:\n"
1239 +#endif
1240 +
1241 +" strexd %1, %0, %H0, [%3]\n"
1242 +" teq %1, #0\n"
1243 +" bne 1b"
1244 +
1245 +#ifdef CONFIG_PAX_REFCOUNT
1246 +"\n4:\n"
1247 + _ASM_EXTABLE(2b, 4b)
1248 +#endif
1249 +
1250 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1251 + : "r" (&v->counter), "r" (i)
1252 + : "cc");
1253 +}
1254 +
1255 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1256 +{
1257 + u64 result;
1258 + unsigned long tmp;
1259 +
1260 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1261 +"1: ldrexd %0, %H0, [%3]\n"
1262 +" adds %0, %0, %4\n"
1263 " adc %H0, %H0, %H4\n"
1264 " strexd %1, %0, %H0, [%3]\n"
1265 " teq %1, #0\n"
1266 @@ -313,12 +588,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1267
1268 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1269 {
1270 - u64 result;
1271 - unsigned long tmp;
1272 + u64 result, tmp;
1273
1274 smp_mb();
1275
1276 __asm__ __volatile__("@ atomic64_add_return\n"
1277 +"1: ldrexd %1, %H1, [%3]\n"
1278 +" adds %0, %1, %4\n"
1279 +" adcs %H0, %H1, %H4\n"
1280 +
1281 +#ifdef CONFIG_PAX_REFCOUNT
1282 +" bvc 3f\n"
1283 +" mov %0, %1\n"
1284 +" mov %H0, %H1\n"
1285 +"2: bkpt 0xf103\n"
1286 +"3:\n"
1287 +#endif
1288 +
1289 +" strexd %1, %0, %H0, [%3]\n"
1290 +" teq %1, #0\n"
1291 +" bne 1b"
1292 +
1293 +#ifdef CONFIG_PAX_REFCOUNT
1294 +"\n4:\n"
1295 + _ASM_EXTABLE(2b, 4b)
1296 +#endif
1297 +
1298 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1299 + : "r" (&v->counter), "r" (i)
1300 + : "cc");
1301 +
1302 + smp_mb();
1303 +
1304 + return result;
1305 +}
1306 +
1307 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1308 +{
1309 + u64 result;
1310 + unsigned long tmp;
1311 +
1312 + smp_mb();
1313 +
1314 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1315 "1: ldrexd %0, %H0, [%3]\n"
1316 " adds %0, %0, %4\n"
1317 " adc %H0, %H0, %H4\n"
1318 @@ -342,6 +654,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1319 __asm__ __volatile__("@ atomic64_sub\n"
1320 "1: ldrexd %0, %H0, [%3]\n"
1321 " subs %0, %0, %4\n"
1322 +" sbcs %H0, %H0, %H4\n"
1323 +
1324 +#ifdef CONFIG_PAX_REFCOUNT
1325 +" bvc 3f\n"
1326 +"2: bkpt 0xf103\n"
1327 +"3:\n"
1328 +#endif
1329 +
1330 +" strexd %1, %0, %H0, [%3]\n"
1331 +" teq %1, #0\n"
1332 +" bne 1b"
1333 +
1334 +#ifdef CONFIG_PAX_REFCOUNT
1335 +"\n4:\n"
1336 + _ASM_EXTABLE(2b, 4b)
1337 +#endif
1338 +
1339 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1340 + : "r" (&v->counter), "r" (i)
1341 + : "cc");
1342 +}
1343 +
1344 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1345 +{
1346 + u64 result;
1347 + unsigned long tmp;
1348 +
1349 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1350 +"1: ldrexd %0, %H0, [%3]\n"
1351 +" subs %0, %0, %4\n"
1352 " sbc %H0, %H0, %H4\n"
1353 " strexd %1, %0, %H0, [%3]\n"
1354 " teq %1, #0\n"
1355 @@ -353,18 +695,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1356
1357 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1358 {
1359 - u64 result;
1360 - unsigned long tmp;
1361 + u64 result, tmp;
1362
1363 smp_mb();
1364
1365 __asm__ __volatile__("@ atomic64_sub_return\n"
1366 -"1: ldrexd %0, %H0, [%3]\n"
1367 -" subs %0, %0, %4\n"
1368 -" sbc %H0, %H0, %H4\n"
1369 +"1: ldrexd %1, %H1, [%3]\n"
1370 +" subs %0, %1, %4\n"
1371 +" sbcs %H0, %H1, %H4\n"
1372 +
1373 +#ifdef CONFIG_PAX_REFCOUNT
1374 +" bvc 3f\n"
1375 +" mov %0, %1\n"
1376 +" mov %H0, %H1\n"
1377 +"2: bkpt 0xf103\n"
1378 +"3:\n"
1379 +#endif
1380 +
1381 " strexd %1, %0, %H0, [%3]\n"
1382 " teq %1, #0\n"
1383 " bne 1b"
1384 +
1385 +#ifdef CONFIG_PAX_REFCOUNT
1386 +"\n4:\n"
1387 + _ASM_EXTABLE(2b, 4b)
1388 +#endif
1389 +
1390 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1391 : "r" (&v->counter), "r" (i)
1392 : "cc");
1393 @@ -398,6 +754,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1394 return oldval;
1395 }
1396
1397 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1398 +{
1399 + u64 oldval;
1400 + unsigned long res;
1401 +
1402 + smp_mb();
1403 +
1404 + do {
1405 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1406 + "ldrexd %1, %H1, [%3]\n"
1407 + "mov %0, #0\n"
1408 + "teq %1, %4\n"
1409 + "teqeq %H1, %H4\n"
1410 + "strexdeq %0, %5, %H5, [%3]"
1411 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1412 + : "r" (&ptr->counter), "r" (old), "r" (new)
1413 + : "cc");
1414 + } while (res);
1415 +
1416 + smp_mb();
1417 +
1418 + return oldval;
1419 +}
1420 +
1421 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1422 {
1423 u64 result;
1424 @@ -421,21 +801,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1425
1426 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1427 {
1428 - u64 result;
1429 - unsigned long tmp;
1430 + u64 result, tmp;
1431
1432 smp_mb();
1433
1434 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1435 -"1: ldrexd %0, %H0, [%3]\n"
1436 -" subs %0, %0, #1\n"
1437 -" sbc %H0, %H0, #0\n"
1438 +"1: ldrexd %1, %H1, [%3]\n"
1439 +" subs %0, %1, #1\n"
1440 +" sbcs %H0, %H1, #0\n"
1441 +
1442 +#ifdef CONFIG_PAX_REFCOUNT
1443 +" bvc 3f\n"
1444 +" mov %0, %1\n"
1445 +" mov %H0, %H1\n"
1446 +"2: bkpt 0xf103\n"
1447 +"3:\n"
1448 +#endif
1449 +
1450 " teq %H0, #0\n"
1451 -" bmi 2f\n"
1452 +" bmi 4f\n"
1453 " strexd %1, %0, %H0, [%3]\n"
1454 " teq %1, #0\n"
1455 " bne 1b\n"
1456 -"2:"
1457 +"4:\n"
1458 +
1459 +#ifdef CONFIG_PAX_REFCOUNT
1460 + _ASM_EXTABLE(2b, 4b)
1461 +#endif
1462 +
1463 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1464 : "r" (&v->counter)
1465 : "cc");
1466 @@ -458,13 +851,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1467 " teq %0, %5\n"
1468 " teqeq %H0, %H5\n"
1469 " moveq %1, #0\n"
1470 -" beq 2f\n"
1471 +" beq 4f\n"
1472 " adds %0, %0, %6\n"
1473 -" adc %H0, %H0, %H6\n"
1474 +" adcs %H0, %H0, %H6\n"
1475 +
1476 +#ifdef CONFIG_PAX_REFCOUNT
1477 +" bvc 3f\n"
1478 +"2: bkpt 0xf103\n"
1479 +"3:\n"
1480 +#endif
1481 +
1482 " strexd %2, %0, %H0, [%4]\n"
1483 " teq %2, #0\n"
1484 " bne 1b\n"
1485 -"2:"
1486 +"4:\n"
1487 +
1488 +#ifdef CONFIG_PAX_REFCOUNT
1489 + _ASM_EXTABLE(2b, 4b)
1490 +#endif
1491 +
1492 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1493 : "r" (&v->counter), "r" (u), "r" (a)
1494 : "cc");
1495 @@ -477,10 +882,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1496
1497 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1498 #define atomic64_inc(v) atomic64_add(1LL, (v))
1499 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1500 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1501 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1502 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1503 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1504 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1505 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1506 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1507 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1508 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
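
In the instrumented variants above, adds/adcs set the ARM overflow (V) flag, bvc 3f falls through to the strex when no signed overflow occurred, and bkpt 0xf103 traps before the overflowed value can be published; the _ASM_EXTABLE(2b, 4b) entry then lets the breakpoint handler resume execution past the operation. The invariant in plain C (illustrative only, not the kernel implementation):

    /* sketch: refuse to publish a signed-overflowed reference count */
    static inline int refcount_add_sketch(int i, int *counter)
    {
            int result;

            if (__builtin_add_overflow(*counter, i, &result))
                    __builtin_trap();   /* the kernel's bkpt 0xf103 */

            *counter = result;          /* the kernel's strex (retried on contention) */
            return result;
    }
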
1509 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1510 index 75fe66b..ba3dee4 100644
1511 --- a/arch/arm/include/asm/cache.h
1512 +++ b/arch/arm/include/asm/cache.h
1513 @@ -4,8 +4,10 @@
1514 #ifndef __ASMARM_CACHE_H
1515 #define __ASMARM_CACHE_H
1516
1517 +#include <linux/const.h>
1518 +
1519 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1520 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1521 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1522
1523 /*
1524 * Memory returned by kmalloc() may be used for DMA, so we must make
1525 @@ -24,5 +26,6 @@
1526 #endif
1527
1528 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1529 +#define __read_only __attribute__ ((__section__(".data..read_only")))
1530
1531 #endif
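
The new __read_only attribute drops an object into .data..read_only; under KERNEXEC this patch maps that section read-only after boot, so such variables can only be written inside an explicit pax_open_kernel()/pax_close_kernel() window (the same bracketing the fncpy.h hunk below applies around its memcpy). Intended usage, sketched:

    /* set up during init, immutable afterwards except through an open window */
    static int sysctl_something __read_only = 1;

    static void update_it(int val)
    {
            pax_open_kernel();      /* temporarily lift the write protection */
            sysctl_something = val;
            pax_close_kernel();
    }
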
1532 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1533 index 17d0ae8..014e350 100644
1534 --- a/arch/arm/include/asm/cacheflush.h
1535 +++ b/arch/arm/include/asm/cacheflush.h
1536 @@ -116,7 +116,7 @@ struct cpu_cache_fns {
1537 void (*dma_unmap_area)(const void *, size_t, int);
1538
1539 void (*dma_flush_range)(const void *, const void *);
1540 -};
1541 +} __no_const;
1542
1543 /*
1544 * Select the calling method
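
__no_const is the opt-out for the constify gcc plugin enabled via the Makefile hunk earlier: the plugin makes structures consisting only of function pointers const by default, and cpu_cache_fns has members that are assigned at run time, hence the opt-out. Its counterpart __do_const, used on struct dma_ops below, forces constification instead. The distinction, sketched:

    struct ops_fixed {
            void (*handler)(void);
    } __do_const;           /* constified: must be initialized statically */

    struct ops_patched {
            void (*handler)(void);
    } __no_const;           /* stays writable: members filled in at run time */
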
1545 diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1546 index 6dcc164..b14d917 100644
1547 --- a/arch/arm/include/asm/checksum.h
1548 +++ b/arch/arm/include/asm/checksum.h
1549 @@ -37,7 +37,19 @@ __wsum
1550 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1551
1552 __wsum
1553 -csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1554 +__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1555 +
1556 +static inline __wsum
1557 +csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1558 +{
1559 + __wsum ret;
1560 + pax_open_userland();
1561 + ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1562 + pax_close_userland();
1563 + return ret;
1564 +}
1565 +
1566 +
1567
1568 /*
1569 * Fold a partial checksum without adding pseudo headers
1570 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1571 index 4f009c1..466c59b 100644
1572 --- a/arch/arm/include/asm/cmpxchg.h
1573 +++ b/arch/arm/include/asm/cmpxchg.h
1574 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1575
1576 #define xchg(ptr,x) \
1577 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1578 +#define xchg_unchecked(ptr,x) \
1579 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1580
1581 #include <asm-generic/cmpxchg-local.h>
1582
1583 diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1584 index 6ddbe44..b5e38b1 100644
1585 --- a/arch/arm/include/asm/domain.h
1586 +++ b/arch/arm/include/asm/domain.h
1587 @@ -48,18 +48,37 @@
1588 * Domain types
1589 */
1590 #define DOMAIN_NOACCESS 0
1591 -#define DOMAIN_CLIENT 1
1592 #ifdef CONFIG_CPU_USE_DOMAINS
1593 +#define DOMAIN_USERCLIENT 1
1594 +#define DOMAIN_KERNELCLIENT 1
1595 #define DOMAIN_MANAGER 3
1596 +#define DOMAIN_VECTORS DOMAIN_USER
1597 #else
1598 +
1599 +#ifdef CONFIG_PAX_KERNEXEC
1600 #define DOMAIN_MANAGER 1
1601 +#define DOMAIN_KERNEXEC 3
1602 +#else
1603 +#define DOMAIN_MANAGER 1
1604 +#endif
1605 +
1606 +#ifdef CONFIG_PAX_MEMORY_UDEREF
1607 +#define DOMAIN_USERCLIENT 0
1608 +#define DOMAIN_UDEREF 1
1609 +#define DOMAIN_VECTORS DOMAIN_KERNEL
1610 +#else
1611 +#define DOMAIN_USERCLIENT 1
1612 +#define DOMAIN_VECTORS DOMAIN_USER
1613 +#endif
1614 +#define DOMAIN_KERNELCLIENT 1
1615 +
1616 #endif
1617
1618 #define domain_val(dom,type) ((type) << (2*(dom)))
1619
1620 #ifndef __ASSEMBLY__
1621
1622 -#ifdef CONFIG_CPU_USE_DOMAINS
1623 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1624 static inline void set_domain(unsigned val)
1625 {
1626 asm volatile(
1627 @@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1628 isb();
1629 }
1630
1631 -#define modify_domain(dom,type) \
1632 - do { \
1633 - struct thread_info *thread = current_thread_info(); \
1634 - unsigned int domain = thread->cpu_domain; \
1635 - domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1636 - thread->cpu_domain = domain | domain_val(dom, type); \
1637 - set_domain(thread->cpu_domain); \
1638 - } while (0)
1639 -
1640 +extern void modify_domain(unsigned int dom, unsigned int type);
1641 #else
1642 static inline void set_domain(unsigned val) { }
1643 static inline void modify_domain(unsigned dom, unsigned type) { }
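
domain_val(dom, type) shifts a 2-bit access type into the DACR field for domain dom, so the definitions above choose, per configuration, how the userland and kernel domains behave. Under UDEREF, DOMAIN_USERCLIENT is 0 (no access), which keeps userland unreachable from kernel mode by default; assembled by hand (a sketch; DOMAIN_USER, DOMAIN_KERNEL and DOMAIN_IO are the standard domain indices from this header):

    /* kernel-mode DACR under UDEREF: userland not accessible by default */
    unsigned int dacr = domain_val(DOMAIN_USER,   DOMAIN_USERCLIENT)   |
                        domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) |
                        domain_val(DOMAIN_IO,     DOMAIN_KERNELCLIENT);

    set_domain(dacr);   /* loaded into the Domain Access Control Register */
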
1644 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1645 index 56211f2..17e8a25 100644
1646 --- a/arch/arm/include/asm/elf.h
1647 +++ b/arch/arm/include/asm/elf.h
1648 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1649 the loader. We need to make sure that it is out of the way of the program
1650 that it will "exec", and that there is sufficient room for the brk. */
1651
1652 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1653 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1654 +
1655 +#ifdef CONFIG_PAX_ASLR
1656 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1657 +
1658 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1659 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1660 +#endif
1661
1662 /* When the program starts, a1 contains a pointer to a function to be
1663 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1664 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1665 extern void elf_set_personality(const struct elf32_hdr *);
1666 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1667
1668 -struct mm_struct;
1669 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1670 -#define arch_randomize_brk arch_randomize_brk
1671 -
1672 #ifdef CONFIG_MMU
1673 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1674 struct linux_binprm;
1675 diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1676 index de53547..52b9a28 100644
1677 --- a/arch/arm/include/asm/fncpy.h
1678 +++ b/arch/arm/include/asm/fncpy.h
1679 @@ -81,7 +81,9 @@
1680 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1681 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1682 \
1683 + pax_open_kernel(); \
1684 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1685 + pax_close_kernel(); \
1686 flush_icache_range((unsigned long)(dest_buf), \
1687 (unsigned long)(dest_buf) + (size)); \
1688 \
1689 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1690 index e42cf59..7b94b8f 100644
1691 --- a/arch/arm/include/asm/futex.h
1692 +++ b/arch/arm/include/asm/futex.h
1693 @@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1694 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1695 return -EFAULT;
1696
1697 + pax_open_userland();
1698 +
1699 smp_mb();
1700 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1701 "1: ldrex %1, [%4]\n"
1702 @@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1703 : "cc", "memory");
1704 smp_mb();
1705
1706 + pax_close_userland();
1707 +
1708 *uval = val;
1709 return ret;
1710 }
1711 @@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1712 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1713 return -EFAULT;
1714
1715 + pax_open_userland();
1716 +
1717 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1718 "1: " TUSER(ldr) " %1, [%4]\n"
1719 " teq %1, %2\n"
1720 @@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1721 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1722 : "cc", "memory");
1723
1724 + pax_close_userland();
1725 +
1726 *uval = val;
1727 return ret;
1728 }
1729 @@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1730 return -EFAULT;
1731
1732 pagefault_disable(); /* implies preempt_disable() */
1733 + pax_open_userland();
1734
1735 switch (op) {
1736 case FUTEX_OP_SET:
1737 @@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1738 ret = -ENOSYS;
1739 }
1740
1741 + pax_close_userland();
1742 pagefault_enable(); /* subsumes preempt_enable() */
1743
1744 if (!ret) {
1745 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1746 index 83eb2f7..ed77159 100644
1747 --- a/arch/arm/include/asm/kmap_types.h
1748 +++ b/arch/arm/include/asm/kmap_types.h
1749 @@ -4,6 +4,6 @@
1750 /*
1751 * This is the "bare minimum". AIO seems to require this.
1752 */
1753 -#define KM_TYPE_NR 16
1754 +#define KM_TYPE_NR 17
1755
1756 #endif
1757 diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1758 index 9e614a1..3302cca 100644
1759 --- a/arch/arm/include/asm/mach/dma.h
1760 +++ b/arch/arm/include/asm/mach/dma.h
1761 @@ -22,7 +22,7 @@ struct dma_ops {
1762 int (*residue)(unsigned int, dma_t *); /* optional */
1763 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1764 const char *type;
1765 -};
1766 +} __do_const;
1767
1768 struct dma_struct {
1769 void *addr; /* single DMA address */
1770 diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1771 index 2fe141f..192dc01 100644
1772 --- a/arch/arm/include/asm/mach/map.h
1773 +++ b/arch/arm/include/asm/mach/map.h
1774 @@ -27,13 +27,16 @@ struct map_desc {
1775 #define MT_MINICLEAN 6
1776 #define MT_LOW_VECTORS 7
1777 #define MT_HIGH_VECTORS 8
1778 -#define MT_MEMORY 9
1779 +#define MT_MEMORY_RWX 9
1780 #define MT_ROM 10
1781 -#define MT_MEMORY_NONCACHED 11
1782 +#define MT_MEMORY_NONCACHED_RX 11
1783 #define MT_MEMORY_DTCM 12
1784 #define MT_MEMORY_ITCM 13
1785 #define MT_MEMORY_SO 14
1786 #define MT_MEMORY_DMA_READY 15
1787 +#define MT_MEMORY_RW 16
1788 +#define MT_MEMORY_RX 17
1789 +#define MT_MEMORY_NONCACHED_RW 18
1790
1791 #ifdef CONFIG_MMU
1792 extern void iotable_init(struct map_desc *, int);
1793 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1794 index 12f71a1..04e063c 100644
1795 --- a/arch/arm/include/asm/outercache.h
1796 +++ b/arch/arm/include/asm/outercache.h
1797 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1798 #endif
1799 void (*set_debug)(unsigned long);
1800 void (*resume)(void);
1801 -};
1802 +} __no_const;
1803
1804 #ifdef CONFIG_OUTER_CACHE
1805
1806 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1807 index cbdc7a2..32f44fe 100644
1808 --- a/arch/arm/include/asm/page.h
1809 +++ b/arch/arm/include/asm/page.h
1810 @@ -114,7 +114,7 @@ struct cpu_user_fns {
1811 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1812 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1813 unsigned long vaddr, struct vm_area_struct *vma);
1814 -};
1815 +} __no_const;
1816
1817 #ifdef MULTI_USER
1818 extern struct cpu_user_fns cpu_user;
1819 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1820 index 943504f..c37a730 100644
1821 --- a/arch/arm/include/asm/pgalloc.h
1822 +++ b/arch/arm/include/asm/pgalloc.h
1823 @@ -17,6 +17,7 @@
1824 #include <asm/processor.h>
1825 #include <asm/cacheflush.h>
1826 #include <asm/tlbflush.h>
1827 +#include <asm/system_info.h>
1828
1829 #define check_pgt_cache() do { } while (0)
1830
1831 @@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1832 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1833 }
1834
1835 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1836 +{
1837 + pud_populate(mm, pud, pmd);
1838 +}
1839 +
1840 #else /* !CONFIG_ARM_LPAE */
1841
1842 /*
1843 @@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1844 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1845 #define pmd_free(mm, pmd) do { } while (0)
1846 #define pud_populate(mm,pmd,pte) BUG()
1847 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1848
1849 #endif /* CONFIG_ARM_LPAE */
1850
1851 @@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1852 __free_page(pte);
1853 }
1854
1855 +static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1856 +{
1857 +#ifdef CONFIG_ARM_LPAE
1858 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1859 +#else
1860 + if (addr & SECTION_SIZE)
1861 + pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1862 + else
1863 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1864 +#endif
1865 + flush_pmd_entry(pmdp);
1866 +}
1867 +
1868 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1869 pmdval_t prot)
1870 {
1871 @@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1872 static inline void
1873 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1874 {
1875 - __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1876 + __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1877 }
1878 #define pmd_pgtable(pmd) pmd_page(pmd)
1879
1880 diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1881 index 5cfba15..f415e1a 100644
1882 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1883 +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1884 @@ -20,12 +20,15 @@
1885 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1886 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1887 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1888 +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1889 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1890 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1891 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1892 +
1893 /*
1894 * - section
1895 */
1896 +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1897 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1898 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1899 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1900 @@ -37,6 +40,7 @@
1901 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1902 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1903 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1904 +#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1905
1906 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1907 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1908 @@ -66,6 +70,7 @@
1909 * - extended small page/tiny page
1910 */
1911 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1912 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1913 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1914 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1915 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
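
For orientation, the PXN (Privileged eXecute-Never) encodings added above sit in three different places in the v7 short-descriptor format: bit 2 of a first-level table descriptor (PMD_PXNTABLE), bit 0 of a section descriptor (PMD_SECT_PXN), and bit 2 of an extended small page (PTE_EXT_PXN). Expanded numerically, with _AT() reduced to the plain cast it is outside assembly:

    #include <stdio.h>

    typedef unsigned int pmdval_t;
    typedef unsigned int pteval_t;
    #define _AT(T, x) ((T)(x))

    #define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2)  /* first-level table: 0x4 */
    #define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0)  /* section: 0x1 */
    #define PTE_EXT_PXN  (_AT(pteval_t, 1) << 2)  /* extended small page: 0x4 */

    int main(void)
    {
        printf("PMD_PXNTABLE=%#x PMD_SECT_PXN=%#x PTE_EXT_PXN=%#x\n",
               PMD_PXNTABLE, PMD_SECT_PXN, PTE_EXT_PXN);
        return 0;
    }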
1916 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1917 index f97ee02..cc9fe9e 100644
1918 --- a/arch/arm/include/asm/pgtable-2level.h
1919 +++ b/arch/arm/include/asm/pgtable-2level.h
1920 @@ -126,6 +126,9 @@
1921 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1922 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1923
1924 +/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1925 +#define L_PTE_PXN (_AT(pteval_t, 0))
1926 +
1927 /*
1928 * These are the memory types, defined to be compatible with
1929 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1930 diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1931 index 18f5cef..25b8f43 100644
1932 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1933 +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1934 @@ -41,6 +41,7 @@
1935 */
1936 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1937 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1938 +#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1939 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1940 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1941 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1942 @@ -71,6 +72,7 @@
1943 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1944 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1945 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1946 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1947 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1948
1949 /*
1950 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1951 index 86b8fe3..e25f975 100644
1952 --- a/arch/arm/include/asm/pgtable-3level.h
1953 +++ b/arch/arm/include/asm/pgtable-3level.h
1954 @@ -74,6 +74,7 @@
1955 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1956 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1957 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1958 +#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1959 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1960 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1961 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1962 @@ -82,6 +83,7 @@
1963 /*
1964 * To be used in assembly code with the upper page attributes.
1965 */
1966 +#define L_PTE_PXN_HIGH (1 << (53 - 32))
1967 #define L_PTE_XN_HIGH (1 << (54 - 32))
1968 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1969
1970 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1971 index 9bcd262..fba731c 100644
1972 --- a/arch/arm/include/asm/pgtable.h
1973 +++ b/arch/arm/include/asm/pgtable.h
1974 @@ -30,6 +30,9 @@
1975 #include <asm/pgtable-2level.h>
1976 #endif
1977
1978 +#define ktla_ktva(addr) (addr)
1979 +#define ktva_ktla(addr) (addr)
1980 +
1981 /*
1982 * Just any arbitrary offset to the start of the vmalloc VM area: the
1983 * current 8MB value just means that there will be a 8MB "hole" after the
1984 @@ -45,6 +48,9 @@
1985 #define LIBRARY_TEXT_START 0x0c000000
1986
1987 #ifndef __ASSEMBLY__
1988 +extern pteval_t __supported_pte_mask;
1989 +extern pmdval_t __supported_pmd_mask;
1990 +
1991 extern void __pte_error(const char *file, int line, pte_t);
1992 extern void __pmd_error(const char *file, int line, pmd_t);
1993 extern void __pgd_error(const char *file, int line, pgd_t);
1994 @@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1995 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1996 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1997
1998 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
1999 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2000 +
2001 +#ifdef CONFIG_PAX_KERNEXEC
2002 +#include <asm/domain.h>
2003 +#include <linux/thread_info.h>
2004 +#include <linux/preempt.h>
2005 +#endif
2006 +
2007 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2008 +static inline int test_domain(int domain, int domaintype)
2009 +{
2010 + return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2011 +}
2012 +#endif
2013 +
2014 +#ifdef CONFIG_PAX_KERNEXEC
2015 +static inline unsigned long pax_open_kernel(void) {
2016 +#ifdef CONFIG_ARM_LPAE
2017 + /* TODO */
2018 +#else
2019 + preempt_disable();
2020 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2021 + modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2022 +#endif
2023 + return 0;
2024 +}
2025 +
2026 +static inline unsigned long pax_close_kernel(void) {
2027 +#ifdef CONFIG_ARM_LPAE
2028 + /* TODO */
2029 +#else
2030 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2031 + /* DOMAIN_MANAGER = "client" under KERNEXEC */
2032 + modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2033 + preempt_enable_no_resched();
2034 +#endif
2035 + return 0;
2036 +}
2037 +#else
2038 +static inline unsigned long pax_open_kernel(void) { return 0; }
2039 +static inline unsigned long pax_close_kernel(void) { return 0; }
2040 +#endif
2041 +
2042 /*
2043 * This is the lowest virtual address we can permit any user space
2044 * mapping to be mapped at. This is particularly important for
2045 @@ -72,8 +122,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2046 /*
2047 * The pgprot_* and protection_map entries will be fixed up in runtime
2048 * to include the cachable and bufferable bits based on memory policy,
2049 - * as well as any architecture dependent bits like global/ASID and SMP
2050 - * shared mapping bits.
2051 + * as well as any architecture dependent bits like global/ASID, PXN,
2052 + * and SMP shared mapping bits.
2053 */
2054 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2055
2056 @@ -257,7 +307,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2057 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2058 {
2059 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2060 - L_PTE_NONE | L_PTE_VALID;
2061 + L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2062 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2063 return pte;
2064 }
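
pax_open_kernel()/pax_close_kernel() above form the KERNEXEC write window: DOMAIN_KERNEL normally runs as a checked client and is retyped to a permissive setting, with preemption disabled, only for the duration of a deliberate write to otherwise read-only kernel memory (the LPAE branch is still a TODO in this patch). A user-space model of the window; the two-bits-per-domain DACR encoding is real, the domain numbers and type values are illustrative:

    #include <assert.h>
    #include <stdio.h>

    #define DOMAIN_KERNEL  2        /* illustrative domain number */
    #define DOMAIN_CLIENT  1        /* writes checked against the page tables */
    #define DOMAIN_MANAGER 3        /* permission checks bypassed */
    #define domain_val(dom, type) ((unsigned int)(type) << (2 * (dom)))

    static unsigned int dacr = domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT);
    static int preempt_count;

    static unsigned long pax_open_kernel(void)
    {
        ++preempt_count;            /* preempt_disable() */
        /* opening twice would be a bug, as the BUG_ON above asserts */
        assert((dacr & domain_val(DOMAIN_KERNEL, 3)) !=
               domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER));
        dacr = (dacr & ~domain_val(DOMAIN_KERNEL, 3)) |
               domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER);
        return 0;
    }

    static unsigned long pax_close_kernel(void)
    {
        dacr = (dacr & ~domain_val(DOMAIN_KERNEL, 3)) |
               domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT);
        --preempt_count;            /* preempt_enable_no_resched() */
        return 0;
    }

    int main(void)
    {
        pax_open_kernel();
        /* a patch to read-only kernel text/data would happen here */
        pax_close_kernel();
        printf("dacr=%#x preempt_count=%d\n", dacr, preempt_count);
        return 0;
    }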
2065 diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2066 index f3628fb..a0672dd 100644
2067 --- a/arch/arm/include/asm/proc-fns.h
2068 +++ b/arch/arm/include/asm/proc-fns.h
2069 @@ -75,7 +75,7 @@ extern struct processor {
2070 unsigned int suspend_size;
2071 void (*do_suspend)(void *);
2072 void (*do_resume)(void *);
2073 -} processor;
2074 +} __do_const processor;
2075
2076 #ifndef MULTI_CPU
2077 extern void cpu_proc_init(void);
2078 diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2079 index ce0dbe7..c085b6f 100644
2080 --- a/arch/arm/include/asm/psci.h
2081 +++ b/arch/arm/include/asm/psci.h
2082 @@ -29,7 +29,7 @@ struct psci_operations {
2083 int (*cpu_off)(struct psci_power_state state);
2084 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2085 int (*migrate)(unsigned long cpuid);
2086 -};
2087 +} __no_const;
2088
2089 extern struct psci_operations psci_ops;
2090
2091 diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2092 index d3a22be..3a69ad5 100644
2093 --- a/arch/arm/include/asm/smp.h
2094 +++ b/arch/arm/include/asm/smp.h
2095 @@ -107,7 +107,7 @@ struct smp_operations {
2096 int (*cpu_disable)(unsigned int cpu);
2097 #endif
2098 #endif
2099 -};
2100 +} __no_const;
2101
2102 /*
2103 * set platform specific SMP operations
2104 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2105 index f00b569..aa5bb41 100644
2106 --- a/arch/arm/include/asm/thread_info.h
2107 +++ b/arch/arm/include/asm/thread_info.h
2108 @@ -77,9 +77,9 @@ struct thread_info {
2109 .flags = 0, \
2110 .preempt_count = INIT_PREEMPT_COUNT, \
2111 .addr_limit = KERNEL_DS, \
2112 - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2113 - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2114 - domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2115 + .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2116 + domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2117 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2118 .restart_block = { \
2119 .fn = do_no_restart_syscall, \
2120 }, \
2121 @@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2122 #define TIF_SYSCALL_AUDIT 9
2123 #define TIF_SYSCALL_TRACEPOINT 10
2124 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2125 -#define TIF_NOHZ 12 /* in adaptive nohz mode */
2126 +/* TIF_GRSEC_SETXID must stay within 8 bits of TIF_SYSCALL_TRACE
2127 + * so the combined mask still fits an ARM flexible second operand
2128 + */
2129 +#define TIF_GRSEC_SETXID 12
2130 +#define TIF_NOHZ 13 /* in adaptive nohz mode */
2131 #define TIF_USING_IWMMXT 17
2132 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2133 #define TIF_RESTORE_SIGMASK 20
2134 @@ -165,10 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2135 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2136 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2137 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2138 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2139
2140 /* Checks for any syscall work in entry-common.S */
2141 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2142 - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2143 + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2144
2145 /*
2146 * Change these and you break ASM code in entry-common.S
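
The comment added in the TIF hunk above refers to ARM's "flexible second operand": a data-processing immediate must be an 8-bit value rotated right by an even amount, so the _TIF_SYSCALL_WORK mask tested from assembly only encodes if all of its flag bits fall inside one such window. That is why TIF_GRSEC_SETXID is squeezed in at bit 12, next to TIF_SYSCALL_TRACE at bit 8, and TIF_NOHZ moves to 13. A stand-alone checker for the encoding (flag positions as defined above):

    #include <stdio.h>
    #include <stdint.h>

    /* valid iff some even left-rotation of v fits in 8 bits */
    static int is_arm_immediate(uint32_t v)
    {
        if (v <= 0xff)
            return 1;
        for (int rot = 2; rot < 32; rot += 2) {
            uint32_t r = (v << rot) | (v >> (32 - rot));
            if (r <= 0xff)
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        /* TRACE..GRSEC_SETXID: bits 8-12, all inside one 8-bit window */
        uint32_t work = (1u << 8) | (1u << 9) | (1u << 10) | (1u << 11) | (1u << 12);
        uint32_t bad  = (1u << 8) | (1u << 17);   /* bits too far apart */

        printf("0x%08x encodable: %d\n", work, is_arm_immediate(work));
        printf("0x%08x encodable: %d\n", bad,  is_arm_immediate(bad));
        return 0;
    }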
2147 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2148 index 7e1f760..de33b13 100644
2149 --- a/arch/arm/include/asm/uaccess.h
2150 +++ b/arch/arm/include/asm/uaccess.h
2151 @@ -18,6 +18,7 @@
2152 #include <asm/domain.h>
2153 #include <asm/unified.h>
2154 #include <asm/compiler.h>
2155 +#include <asm/pgtable.h>
2156
2157 #define VERIFY_READ 0
2158 #define VERIFY_WRITE 1
2159 @@ -63,11 +64,38 @@ extern int __put_user_bad(void);
2160 static inline void set_fs(mm_segment_t fs)
2161 {
2162 current_thread_info()->addr_limit = fs;
2163 - modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2164 + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2165 }
2166
2167 #define segment_eq(a,b) ((a) == (b))
2168
2169 +#define __HAVE_ARCH_PAX_OPEN_USERLAND
2170 +#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2171 +
2172 +static inline void pax_open_userland(void)
2173 +{
2174 +
2175 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2176 + if (segment_eq(get_fs(), USER_DS)) {
2177 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2178 + modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2179 + }
2180 +#endif
2181 +
2182 +}
2183 +
2184 +static inline void pax_close_userland(void)
2185 +{
2186 +
2187 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2188 + if (segment_eq(get_fs(), USER_DS)) {
2189 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2190 + modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2191 + }
2192 +#endif
2193 +
2194 +}
2195 +
2196 #define __addr_ok(addr) ({ \
2197 unsigned long flag; \
2198 __asm__("cmp %2, %0; movlo %0, #0" \
2199 @@ -143,8 +171,12 @@ extern int __get_user_4(void *);
2200
2201 #define get_user(x,p) \
2202 ({ \
2203 + int __e; \
2204 might_fault(); \
2205 - __get_user_check(x,p); \
2206 + pax_open_userland(); \
2207 + __e = __get_user_check(x,p); \
2208 + pax_close_userland(); \
2209 + __e; \
2210 })
2211
2212 extern int __put_user_1(void *, unsigned int);
2213 @@ -188,8 +220,12 @@ extern int __put_user_8(void *, unsigned long long);
2214
2215 #define put_user(x,p) \
2216 ({ \
2217 + int __e; \
2218 might_fault(); \
2219 - __put_user_check(x,p); \
2220 + pax_open_userland(); \
2221 + __e = __put_user_check(x,p); \
2222 + pax_close_userland(); \
2223 + __e; \
2224 })
2225
2226 #else /* CONFIG_MMU */
2227 @@ -230,13 +266,17 @@ static inline void set_fs(mm_segment_t fs)
2228 #define __get_user(x,ptr) \
2229 ({ \
2230 long __gu_err = 0; \
2231 + pax_open_userland(); \
2232 __get_user_err((x),(ptr),__gu_err); \
2233 + pax_close_userland(); \
2234 __gu_err; \
2235 })
2236
2237 #define __get_user_error(x,ptr,err) \
2238 ({ \
2239 + pax_open_userland(); \
2240 __get_user_err((x),(ptr),err); \
2241 + pax_close_userland(); \
2242 (void) 0; \
2243 })
2244
2245 @@ -312,13 +352,17 @@ do { \
2246 #define __put_user(x,ptr) \
2247 ({ \
2248 long __pu_err = 0; \
2249 + pax_open_userland(); \
2250 __put_user_err((x),(ptr),__pu_err); \
2251 + pax_close_userland(); \
2252 __pu_err; \
2253 })
2254
2255 #define __put_user_error(x,ptr,err) \
2256 ({ \
2257 + pax_open_userland(); \
2258 __put_user_err((x),(ptr),err); \
2259 + pax_close_userland(); \
2260 (void) 0; \
2261 })
2262
2263 @@ -418,11 +462,44 @@ do { \
2264
2265
2266 #ifdef CONFIG_MMU
2267 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2268 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2269 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2270 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2271 +
2272 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2273 +{
2274 + unsigned long ret;
2275 +
2276 + check_object_size(to, n, false);
2277 + pax_open_userland();
2278 + ret = ___copy_from_user(to, from, n);
2279 + pax_close_userland();
2280 + return ret;
2281 +}
2282 +
2283 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2284 +{
2285 + unsigned long ret;
2286 +
2287 + check_object_size(from, n, true);
2288 + pax_open_userland();
2289 + ret = ___copy_to_user(to, from, n);
2290 + pax_close_userland();
2291 + return ret;
2292 +}
2293 +
2294 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2295 -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2296 +extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2297 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2298 +
2299 +static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2300 +{
2301 + unsigned long ret;
2302 + pax_open_userland();
2303 + ret = ___clear_user(addr, n);
2304 + pax_close_userland();
2305 + return ret;
2306 +}
2307 +
2308 #else
2309 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2310 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2311 @@ -431,6 +508,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2312
2313 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2314 {
2315 + if ((long)n < 0)
2316 + return n;
2317 +
2318 if (access_ok(VERIFY_READ, from, n))
2319 n = __copy_from_user(to, from, n);
2320 else /* security hole - plug it */
2321 @@ -440,6 +520,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2322
2323 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2324 {
2325 + if ((long)n < 0)
2326 + return n;
2327 +
2328 if (access_ok(VERIFY_WRITE, to, n))
2329 n = __copy_to_user(to, from, n);
2330 return n;
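
The (long)n < 0 guards added to copy_from_user()/copy_to_user() above reject "negative", i.e. absurdly large, lengths before access_ok() is even consulted, catching (size_t)-1 style length bugs outright. A minimal model of the convention that the return value is the number of bytes left uncopied:

    #include <stdio.h>
    #include <string.h>

    static unsigned long copy_checked(void *to, const void *from, unsigned long n)
    {
        if ((long)n < 0)        /* top bit set: refuse the whole request */
            return n;
        memcpy(to, from, n);    /* stands in for the real uaccess copy */
        return 0;               /* zero bytes left uncopied */
    }

    int main(void)
    {
        char src[8] = "grsec", dst[8] = { 0 };

        printf("ok:  %lu left\n", copy_checked(dst, src, sizeof(src)));
        printf("bad: %lu left\n", copy_checked(dst, src, (unsigned long)-1));
        return 0;
    }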
2331 diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2332 index 96ee092..37f1844 100644
2333 --- a/arch/arm/include/uapi/asm/ptrace.h
2334 +++ b/arch/arm/include/uapi/asm/ptrace.h
2335 @@ -73,7 +73,7 @@
2336 * ARMv7 groups of PSR bits
2337 */
2338 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2339 -#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2340 +#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2341 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2342 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2343
2344 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2345 index 60d3b73..e5a0f22 100644
2346 --- a/arch/arm/kernel/armksyms.c
2347 +++ b/arch/arm/kernel/armksyms.c
2348 @@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2349
2350 /* networking */
2351 EXPORT_SYMBOL(csum_partial);
2352 -EXPORT_SYMBOL(csum_partial_copy_from_user);
2353 +EXPORT_SYMBOL(__csum_partial_copy_from_user);
2354 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2355 EXPORT_SYMBOL(__csum_ipv6_magic);
2356
2357 @@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2358 #ifdef CONFIG_MMU
2359 EXPORT_SYMBOL(copy_page);
2360
2361 -EXPORT_SYMBOL(__copy_from_user);
2362 -EXPORT_SYMBOL(__copy_to_user);
2363 -EXPORT_SYMBOL(__clear_user);
2364 +EXPORT_SYMBOL(___copy_from_user);
2365 +EXPORT_SYMBOL(___copy_to_user);
2366 +EXPORT_SYMBOL(___clear_user);
2367
2368 EXPORT_SYMBOL(__get_user_1);
2369 EXPORT_SYMBOL(__get_user_2);
2370 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2371 index d43c7e5..257c050 100644
2372 --- a/arch/arm/kernel/entry-armv.S
2373 +++ b/arch/arm/kernel/entry-armv.S
2374 @@ -47,6 +47,87 @@
2375 9997:
2376 .endm
2377
2378 + .macro pax_enter_kernel
2379 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2380 + @ make aligned space for saved DACR
2381 + sub sp, sp, #8
2382 + @ save regs
2383 + stmdb sp!, {r1, r2}
2384 + @ read DACR from cpu_domain into r1
2385 + mov r2, sp
2386 + @ assume 8K pages, since we have to split the immediate in two
2387 + bic r2, r2, #(0x1fc0)
2388 + bic r2, r2, #(0x3f)
2389 + ldr r1, [r2, #TI_CPU_DOMAIN]
2390 + @ store old DACR on stack
2391 + str r1, [sp, #8]
2392 +#ifdef CONFIG_PAX_KERNEXEC
2393 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2394 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2395 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2396 +#endif
2397 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2398 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2399 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2400 +#endif
2401 + @ write r1 to current_thread_info()->cpu_domain
2402 + str r1, [r2, #TI_CPU_DOMAIN]
2403 + @ write r1 to DACR
2404 + mcr p15, 0, r1, c3, c0, 0
2405 + @ instruction sync
2406 + instr_sync
2407 + @ restore regs
2408 + ldmia sp!, {r1, r2}
2409 +#endif
2410 + .endm
2411 +
2412 + .macro pax_open_userland
2413 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2414 + @ save regs
2415 + stmdb sp!, {r0, r1}
2416 + @ read DACR from cpu_domain into r1
2417 + mov r0, sp
2418 + @ assume 8K pages, since we have to split the immediate in two
2419 + bic r0, r0, #(0x1fc0)
2420 + bic r0, r0, #(0x3f)
2421 + ldr r1, [r0, #TI_CPU_DOMAIN]
2422 + @ set current DOMAIN_USER to DOMAIN_UDEREF
2423 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2424 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2425 + @ write r1 to current_thread_info()->cpu_domain
2426 + str r1, [r0, #TI_CPU_DOMAIN]
2427 + @ write r1 to DACR
2428 + mcr p15, 0, r1, c3, c0, 0
2429 + @ instruction sync
2430 + instr_sync
2431 + @ restore regs
2432 + ldmia sp!, {r0, r1}
2433 +#endif
2434 + .endm
2435 +
2436 + .macro pax_close_userland
2437 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2438 + @ save regs
2439 + stmdb sp!, {r0, r1}
2440 + @ read DACR from cpu_domain into r1
2441 + mov r0, sp
2442 + @ assume 8K pages, since we have to split the immediate in two
2443 + bic r0, r0, #(0x1fc0)
2444 + bic r0, r0, #(0x3f)
2445 + ldr r1, [r0, #TI_CPU_DOMAIN]
2446 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2447 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2448 + @ write r1 to current_thread_info()->cpu_domain
2449 + str r1, [r0, #TI_CPU_DOMAIN]
2450 + @ write r1 to DACR
2451 + mcr p15, 0, r1, c3, c0, 0
2452 + @ instruction sync
2453 + instr_sync
2454 + @ restore regs
2455 + ldmia sp!, {r0, r1}
2456 +#endif
2457 + .endm
2458 +
2459 .macro pabt_helper
2460 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2461 #ifdef MULTI_PABORT
2462 @@ -89,11 +170,15 @@
2463 * Invalid mode handlers
2464 */
2465 .macro inv_entry, reason
2466 +
2467 + pax_enter_kernel
2468 +
2469 sub sp, sp, #S_FRAME_SIZE
2470 ARM( stmib sp, {r1 - lr} )
2471 THUMB( stmia sp, {r0 - r12} )
2472 THUMB( str sp, [sp, #S_SP] )
2473 THUMB( str lr, [sp, #S_LR] )
2474 +
2475 mov r1, #\reason
2476 .endm
2477
2478 @@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2479 .macro svc_entry, stack_hole=0
2480 UNWIND(.fnstart )
2481 UNWIND(.save {r0 - pc} )
2482 +
2483 + pax_enter_kernel
2484 +
2485 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2486 +
2487 #ifdef CONFIG_THUMB2_KERNEL
2488 SPFIX( str r0, [sp] ) @ temporarily saved
2489 SPFIX( mov r0, sp )
2490 @@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2491 ldmia r0, {r3 - r5}
2492 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2493 mov r6, #-1 @ "" "" "" ""
2494 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2495 + @ offset sp by 8 as done in pax_enter_kernel
2496 + add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2497 +#else
2498 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2499 +#endif
2500 SPFIX( addeq r2, r2, #4 )
2501 str r3, [sp, #-4]! @ save the "real" r0 copied
2502 @ from the exception stack
2503 @@ -316,6 +410,9 @@ ENDPROC(__pabt_svc)
2504 .macro usr_entry
2505 UNWIND(.fnstart )
2506 UNWIND(.cantunwind ) @ don't unwind the user space
2507 +
2508 + pax_enter_kernel_user
2509 +
2510 sub sp, sp, #S_FRAME_SIZE
2511 ARM( stmib sp, {r1 - r12} )
2512 THUMB( stmia sp, {r0 - r12} )
2513 @@ -357,7 +454,8 @@ ENDPROC(__pabt_svc)
2514 .endm
2515
2516 .macro kuser_cmpxchg_check
2517 -#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2518 +#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
2519 + !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
2520 #ifndef CONFIG_MMU
2521 #warning "NPTL on non MMU needs fixing"
2522 #else
2523 @@ -414,7 +512,9 @@ __und_usr:
2524 tst r3, #PSR_T_BIT @ Thumb mode?
2525 bne __und_usr_thumb
2526 sub r4, r2, #4 @ ARM instr at LR - 4
2527 + pax_open_userland
2528 1: ldrt r0, [r4]
2529 + pax_close_userland
2530 #ifdef CONFIG_CPU_ENDIAN_BE8
2531 rev r0, r0 @ little endian instruction
2532 #endif
2533 @@ -449,10 +549,14 @@ __und_usr_thumb:
2534 */
2535 .arch armv6t2
2536 #endif
2537 + pax_open_userland
2538 2: ldrht r5, [r4]
2539 + pax_close_userland
2540 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2541 blo __und_usr_fault_16 @ 16bit undefined instruction
2542 + pax_open_userland
2543 3: ldrht r0, [r2]
2544 + pax_close_userland
2545 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2546 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2547 orr r0, r0, r5, lsl #16
2548 @@ -481,7 +585,8 @@ ENDPROC(__und_usr)
2549 */
2550 .pushsection .fixup, "ax"
2551 .align 2
2552 -4: mov pc, r9
2553 +4: pax_close_userland
2554 + mov pc, r9
2555 .popsection
2556 .pushsection __ex_table,"a"
2557 .long 1b, 4b
2558 @@ -690,7 +795,7 @@ ENTRY(__switch_to)
2559 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2560 THUMB( str sp, [ip], #4 )
2561 THUMB( str lr, [ip], #4 )
2562 -#ifdef CONFIG_CPU_USE_DOMAINS
2563 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2564 ldr r6, [r2, #TI_CPU_DOMAIN]
2565 #endif
2566 set_tls r3, r4, r5
2567 @@ -699,7 +804,7 @@ ENTRY(__switch_to)
2568 ldr r8, =__stack_chk_guard
2569 ldr r7, [r7, #TSK_STACK_CANARY]
2570 #endif
2571 -#ifdef CONFIG_CPU_USE_DOMAINS
2572 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2573 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2574 #endif
2575 mov r5, r0
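
All of the pax_* entry macros above locate current_thread_info() the same way the stock entry code does: by clearing the low 13 bits of sp (8 KiB kernel stacks). Because 0x1fff is not itself an encodable ARM immediate, the mask is applied as two bic instructions with the immediates 0x1fc0 and 0x3f. The arithmetic, checked in C:

    #include <stdio.h>

    int main(void)
    {
        unsigned long sp = 0xc1234f7cUL;    /* illustrative kernel stack pointer */
        unsigned long ti = sp;

        ti &= ~0x1fc0UL;                    /* bic rN, rN, #0x1fc0 */
        ti &= ~0x3fUL;                      /* bic rN, rN, #0x3f   */
        printf("sp=%#lx -> thread_info=%#lx\n", sp, ti);
        return ti == (sp & ~0x1fffUL) ? 0 : 1;  /* same as one 13-bit mask */
    }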
2576 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2577 index bc5bc0a..d0998ca 100644
2578 --- a/arch/arm/kernel/entry-common.S
2579 +++ b/arch/arm/kernel/entry-common.S
2580 @@ -10,18 +10,46 @@
2581
2582 #include <asm/unistd.h>
2583 #include <asm/ftrace.h>
2584 +#include <asm/domain.h>
2585 #include <asm/unwind.h>
2586
2587 +#include "entry-header.S"
2588 +
2589 #ifdef CONFIG_NEED_RET_TO_USER
2590 #include <mach/entry-macro.S>
2591 #else
2592 .macro arch_ret_to_user, tmp1, tmp2
2593 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2594 + @ save regs
2595 + stmdb sp!, {r1, r2}
2596 + @ read DACR from cpu_domain into r1
2597 + mov r2, sp
2598 + @ assume 8K pages, since we have to split the immediate in two
2599 + bic r2, r2, #(0x1fc0)
2600 + bic r2, r2, #(0x3f)
2601 + ldr r1, [r2, #TI_CPU_DOMAIN]
2602 +#ifdef CONFIG_PAX_KERNEXEC
2603 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2604 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2605 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2606 +#endif
2607 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2608 + @ set current DOMAIN_USER to DOMAIN_UDEREF
2609 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2610 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2611 +#endif
2612 + @ write r1 to current_thread_info()->cpu_domain
2613 + str r1, [r2, #TI_CPU_DOMAIN]
2614 + @ write r1 to DACR
2615 + mcr p15, 0, r1, c3, c0, 0
2616 + @ instruction sync
2617 + instr_sync
2618 + @ restore regs
2619 + ldmia sp!, {r1, r2}
2620 +#endif
2621 .endm
2622 #endif
2623
2624 -#include "entry-header.S"
2625 -
2626 -
2627 .align 5
2628 /*
2629 * This is the fast syscall return path. We do as little as
2630 @@ -350,6 +378,7 @@ ENDPROC(ftrace_stub)
2631
2632 .align 5
2633 ENTRY(vector_swi)
2634 +
2635 sub sp, sp, #S_FRAME_SIZE
2636 stmia sp, {r0 - r12} @ Calling r0 - r12
2637 ARM( add r8, sp, #S_PC )
2638 @@ -399,6 +428,12 @@ ENTRY(vector_swi)
2639 ldr scno, [lr, #-4] @ get SWI instruction
2640 #endif
2641
2642 + /*
2643 + * do this here to avoid the performance hit of wrapping the code above
2644 + * that directly dereferences userland to parse the SWI instruction
2645 + */
2646 + pax_enter_kernel_user
2647 +
2648 #ifdef CONFIG_ALIGNMENT_TRAP
2649 ldr ip, __cr_alignment
2650 ldr ip, [ip]
2651 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2652 index 160f337..db67ee4 100644
2653 --- a/arch/arm/kernel/entry-header.S
2654 +++ b/arch/arm/kernel/entry-header.S
2655 @@ -73,6 +73,60 @@
2656 msr cpsr_c, \rtemp @ switch back to the SVC mode
2657 .endm
2658
2659 + .macro pax_enter_kernel_user
2660 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2661 + @ save regs
2662 + stmdb sp!, {r0, r1}
2663 + @ read DACR from cpu_domain into r1
2664 + mov r0, sp
2665 + @ assume 8K pages, since we have to split the immediate in two
2666 + bic r0, r0, #(0x1fc0)
2667 + bic r0, r0, #(0x3f)
2668 + ldr r1, [r0, #TI_CPU_DOMAIN]
2669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2670 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2671 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2672 +#endif
2673 +#ifdef CONFIG_PAX_KERNEXEC
2674 + @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2675 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2676 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2677 +#endif
2678 + @ write r1 to current_thread_info()->cpu_domain
2679 + str r1, [r0, #TI_CPU_DOMAIN]
2680 + @ write r1 to DACR
2681 + mcr p15, 0, r1, c3, c0, 0
2682 + @ instruction sync
2683 + instr_sync
2684 + @ restore regs
2685 + ldmia sp!, {r0, r1}
2686 +#endif
2687 + .endm
2688 +
2689 + .macro pax_exit_kernel
2690 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2691 + @ save regs
2692 + stmdb sp!, {r0, r1}
2693 + @ read old DACR from stack into r1
2694 + ldr r1, [sp, #(8 + S_SP)]
2695 + sub r1, r1, #8
2696 + ldr r1, [r1]
2697 +
2698 + @ write r1 to current_thread_info()->cpu_domain
2699 + mov r0, sp
2700 + @ assume 8K pages, since we have to split the immediate in two
2701 + bic r0, r0, #(0x1fc0)
2702 + bic r0, r0, #(0x3f)
2703 + str r1, [r0, #TI_CPU_DOMAIN]
2704 + @ write r1 to DACR
2705 + mcr p15, 0, r1, c3, c0, 0
2706 + @ instruction sync
2707 + instr_sync
2708 + @ restore regs
2709 + ldmia sp!, {r0, r1}
2710 +#endif
2711 + .endm
2712 +
2713 #ifndef CONFIG_THUMB2_KERNEL
2714 .macro svc_exit, rpsr, irq = 0
2715 .if \irq != 0
2716 @@ -92,6 +146,9 @@
2717 blne trace_hardirqs_off
2718 #endif
2719 .endif
2720 +
2721 + pax_exit_kernel
2722 +
2723 msr spsr_cxsf, \rpsr
2724 #if defined(CONFIG_CPU_V6)
2725 ldr r0, [sp]
2726 @@ -155,6 +212,9 @@
2727 blne trace_hardirqs_off
2728 #endif
2729 .endif
2730 +
2731 + pax_exit_kernel
2732 +
2733 ldr lr, [sp, #S_SP] @ top of the stack
2734 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2735 clrex @ clear the exclusive monitor
2736 diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2737 index 25442f4..d4948fc 100644
2738 --- a/arch/arm/kernel/fiq.c
2739 +++ b/arch/arm/kernel/fiq.c
2740 @@ -84,17 +84,16 @@ int show_fiq_list(struct seq_file *p, int prec)
2741
2742 void set_fiq_handler(void *start, unsigned int length)
2743 {
2744 -#if defined(CONFIG_CPU_USE_DOMAINS)
2745 - void *base = (void *)0xffff0000;
2746 -#else
2747 void *base = vectors_page;
2748 -#endif
2749 unsigned offset = FIQ_OFFSET;
2750
2751 + pax_open_kernel();
2752 memcpy(base + offset, start, length);
2753 + pax_close_kernel();
2754 +
2755 + if (!cache_is_vipt_nonaliasing())
2756 + flush_icache_range(base + offset, offset + length);
2757 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
2758 - if (!vectors_high())
2759 - flush_icache_range(offset, offset + length);
2760 }
2761
2762 int claim_fiq(struct fiq_handler *f)
2763 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2764 index 8bac553..caee108 100644
2765 --- a/arch/arm/kernel/head.S
2766 +++ b/arch/arm/kernel/head.S
2767 @@ -52,7 +52,9 @@
2768 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2769
2770 .macro pgtbl, rd, phys
2771 - add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2772 + mov \rd, #TEXT_OFFSET
2773 + sub \rd, #PG_DIR_SIZE
2774 + add \rd, \rd, \phys
2775 .endm
2776
2777 /*
2778 @@ -434,7 +436,7 @@ __enable_mmu:
2779 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2780 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2781 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2782 - domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2783 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2784 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2785 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2786 #endif
2787 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2788 index 1fd749e..47adb08 100644
2789 --- a/arch/arm/kernel/hw_breakpoint.c
2790 +++ b/arch/arm/kernel/hw_breakpoint.c
2791 @@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2792 return NOTIFY_OK;
2793 }
2794
2795 -static struct notifier_block __cpuinitdata dbg_reset_nb = {
2796 +static struct notifier_block dbg_reset_nb = {
2797 .notifier_call = dbg_reset_notify,
2798 };
2799
2800 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2801 index 1e9be5d..03edbc2 100644
2802 --- a/arch/arm/kernel/module.c
2803 +++ b/arch/arm/kernel/module.c
2804 @@ -37,12 +37,37 @@
2805 #endif
2806
2807 #ifdef CONFIG_MMU
2808 -void *module_alloc(unsigned long size)
2809 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2810 {
2811 + if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2812 + return NULL;
2813 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2814 - GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2815 + GFP_KERNEL, prot, -1,
2816 __builtin_return_address(0));
2817 }
2818 +
2819 +void *module_alloc(unsigned long size)
2820 +{
2821 +
2822 +#ifdef CONFIG_PAX_KERNEXEC
2823 + return __module_alloc(size, PAGE_KERNEL);
2824 +#else
2825 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2826 +#endif
2827 +
2828 +}
2829 +
2830 +#ifdef CONFIG_PAX_KERNEXEC
2831 +void module_free_exec(struct module *mod, void *module_region)
2832 +{
2833 + module_free(mod, module_region);
2834 +}
2835 +
2836 +void *module_alloc_exec(unsigned long size)
2837 +{
2838 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2839 +}
2840 +#endif
2841 #endif
2842
2843 int
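
Under KERNEXEC the module loader above keeps code and data apart: module_alloc() now returns non-executable PAGE_KERNEL memory for module data, while module_alloc_exec() hands out PAGE_KERNEL_EXEC memory for module text. A rough user-space analogy of the split, with mprotect() standing in for the pgprot choice (sizes and flow are hypothetical):

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    static void *module_alloc(size_t size)       /* data: RW, never executable */
    {
        return mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }

    static void *module_alloc_exec(size_t size)  /* text: ends up RX */
    {
        void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* code would be copied and relocated here, then the W bit dropped */
        if (p != MAP_FAILED)
            mprotect(p, size, PROT_READ | PROT_EXEC);
        return p;
    }

    int main(void)
    {
        printf("data arena %p, text arena %p\n",
               module_alloc(4096), module_alloc_exec(4096));
        return 0;
    }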
2844 diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2845 index 07314af..c46655c 100644
2846 --- a/arch/arm/kernel/patch.c
2847 +++ b/arch/arm/kernel/patch.c
2848 @@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2849 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2850 int size;
2851
2852 + pax_open_kernel();
2853 if (thumb2 && __opcode_is_thumb16(insn)) {
2854 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2855 size = sizeof(u16);
2856 @@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2857 *(u32 *)addr = insn;
2858 size = sizeof(u32);
2859 }
2860 + pax_close_kernel();
2861
2862 flush_icache_range((uintptr_t)(addr),
2863 (uintptr_t)(addr) + size);
2864 diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
2865 index e19edc6..e186ee1 100644
2866 --- a/arch/arm/kernel/perf_event.c
2867 +++ b/arch/arm/kernel/perf_event.c
2868 @@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
2869 int mapping;
2870
2871 if (config >= PERF_COUNT_HW_MAX)
2872 - return -ENOENT;
2873 + return -EINVAL;
2874
2875 mapping = (*event_map)[config];
2876 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
2877 diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2878 index 1f2740e..b36e225 100644
2879 --- a/arch/arm/kernel/perf_event_cpu.c
2880 +++ b/arch/arm/kernel/perf_event_cpu.c
2881 @@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2882 return NOTIFY_OK;
2883 }
2884
2885 -static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2886 +static struct notifier_block cpu_pmu_hotplug_notifier = {
2887 .notifier_call = cpu_pmu_notify,
2888 };
2889
2890 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2891 index 5bc2615..dcd439f 100644
2892 --- a/arch/arm/kernel/process.c
2893 +++ b/arch/arm/kernel/process.c
2894 @@ -223,6 +223,7 @@ void machine_power_off(void)
2895
2896 if (pm_power_off)
2897 pm_power_off();
2898 + BUG();
2899 }
2900
2901 /*
2902 @@ -236,7 +237,7 @@ void machine_power_off(void)
2903 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2904 * to use. Implementing such co-ordination would be essentially impossible.
2905 */
2906 -void machine_restart(char *cmd)
2907 +__noreturn void machine_restart(char *cmd)
2908 {
2909 smp_send_stop();
2910
2911 @@ -258,8 +259,8 @@ void __show_regs(struct pt_regs *regs)
2912
2913 show_regs_print_info(KERN_DEFAULT);
2914
2915 - print_symbol("PC is at %s\n", instruction_pointer(regs));
2916 - print_symbol("LR is at %s\n", regs->ARM_lr);
2917 + printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2918 + printk("LR is at %pA\n", (void *)regs->ARM_lr);
2919 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2920 "sp : %08lx ip : %08lx fp : %08lx\n",
2921 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2922 @@ -426,12 +427,6 @@ unsigned long get_wchan(struct task_struct *p)
2923 return 0;
2924 }
2925
2926 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2927 -{
2928 - unsigned long range_end = mm->brk + 0x02000000;
2929 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2930 -}
2931 -
2932 #ifdef CONFIG_MMU
2933 #ifdef CONFIG_KUSER_HELPERS
2934 /*
2935 @@ -447,7 +442,7 @@ static struct vm_area_struct gate_vma = {
2936
2937 static int __init gate_vma_init(void)
2938 {
2939 - gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2940 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2941 return 0;
2942 }
2943 arch_initcall(gate_vma_init);
2944 @@ -466,48 +461,23 @@ int in_gate_area_no_mm(unsigned long addr)
2945 {
2946 return in_gate_area(NULL, addr);
2947 }
2948 -#define is_gate_vma(vma) ((vma) = &gate_vma)
2949 +#define is_gate_vma(vma) ((vma) == &gate_vma)
2950 #else
2951 #define is_gate_vma(vma) 0
2952 #endif
2953
2954 const char *arch_vma_name(struct vm_area_struct *vma)
2955 {
2956 - return is_gate_vma(vma) ? "[vectors]" :
2957 - (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
2958 - "[sigpage]" : NULL;
2959 + return is_gate_vma(vma) ? "[vectors]" : NULL;
2960 }
2961
2962 -static struct page *signal_page;
2963 -extern struct page *get_signal_page(void);
2964 -
2965 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2966 {
2967 struct mm_struct *mm = current->mm;
2968 - unsigned long addr;
2969 - int ret;
2970 -
2971 - if (!signal_page)
2972 - signal_page = get_signal_page();
2973 - if (!signal_page)
2974 - return -ENOMEM;
2975
2976 down_write(&mm->mmap_sem);
2977 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
2978 - if (IS_ERR_VALUE(addr)) {
2979 - ret = addr;
2980 - goto up_fail;
2981 - }
2982 -
2983 - ret = install_special_mapping(mm, addr, PAGE_SIZE,
2984 - VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2985 - &signal_page);
2986 -
2987 - if (ret == 0)
2988 - mm->context.sigpage = addr;
2989 -
2990 - up_fail:
2991 + mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2992 up_write(&mm->mmap_sem);
2993 - return ret;
2994 + return 0;
2995 }
2996 #endif
2997 diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2998 index 3653164..d83e55d 100644
2999 --- a/arch/arm/kernel/psci.c
3000 +++ b/arch/arm/kernel/psci.c
3001 @@ -24,7 +24,7 @@
3002 #include <asm/opcodes-virt.h>
3003 #include <asm/psci.h>
3004
3005 -struct psci_operations psci_ops;
3006 +struct psci_operations psci_ops __read_only;
3007
3008 static int (*invoke_psci_fn)(u32, u32, u32, u32);
3009
3010 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
3011 index 03deeff..741ce88 100644
3012 --- a/arch/arm/kernel/ptrace.c
3013 +++ b/arch/arm/kernel/ptrace.c
3014 @@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
3015 return current_thread_info()->syscall;
3016 }
3017
3018 +#ifdef CONFIG_GRKERNSEC_SETXID
3019 +extern void gr_delayed_cred_worker(void);
3020 +#endif
3021 +
3022 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
3023 {
3024 current_thread_info()->syscall = scno;
3025
3026 +#ifdef CONFIG_GRKERNSEC_SETXID
3027 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3028 + gr_delayed_cred_worker();
3029 +#endif
3030 +
3031 /* Do the secure computing check first; failures should be fast. */
3032 if (secure_computing(scno) == -1)
3033 return -1;
3034 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
3035 index b4b1d39..efdc9be 100644
3036 --- a/arch/arm/kernel/setup.c
3037 +++ b/arch/arm/kernel/setup.c
3038 @@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
3039 unsigned int elf_hwcap __read_mostly;
3040 EXPORT_SYMBOL(elf_hwcap);
3041
3042 +pteval_t __supported_pte_mask __read_only;
3043 +pmdval_t __supported_pmd_mask __read_only;
3044
3045 #ifdef MULTI_CPU
3046 -struct processor processor __read_mostly;
3047 +struct processor processor;
3048 #endif
3049 #ifdef MULTI_TLB
3050 -struct cpu_tlb_fns cpu_tlb __read_mostly;
3051 +struct cpu_tlb_fns cpu_tlb __read_only;
3052 #endif
3053 #ifdef MULTI_USER
3054 -struct cpu_user_fns cpu_user __read_mostly;
3055 +struct cpu_user_fns cpu_user __read_only;
3056 #endif
3057 #ifdef MULTI_CACHE
3058 -struct cpu_cache_fns cpu_cache __read_mostly;
3059 +struct cpu_cache_fns cpu_cache __read_only;
3060 #endif
3061 #ifdef CONFIG_OUTER_CACHE
3062 -struct outer_cache_fns outer_cache __read_mostly;
3063 +struct outer_cache_fns outer_cache __read_only;
3064 EXPORT_SYMBOL(outer_cache);
3065 #endif
3066
3067 @@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
3068 asm("mrc p15, 0, %0, c0, c1, 4"
3069 : "=r" (mmfr0));
3070 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3071 - (mmfr0 & 0x000000f0) >= 0x00000030)
3072 + (mmfr0 & 0x000000f0) >= 0x00000030) {
3073 cpu_arch = CPU_ARCH_ARMv7;
3074 - else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3075 + if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3076 + __supported_pte_mask |= L_PTE_PXN;
3077 + __supported_pmd_mask |= PMD_PXNTABLE;
3078 + }
3079 + } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3080 (mmfr0 & 0x000000f0) == 0x00000020)
3081 cpu_arch = CPU_ARCH_ARMv6;
3082 else
3083 @@ -479,7 +485,7 @@ static void __init setup_processor(void)
3084 __cpu_architecture = __get_cpu_architecture();
3085
3086 #ifdef MULTI_CPU
3087 - processor = *list->proc;
3088 + memcpy((void *)&processor, list->proc, sizeof processor);
3089 #endif
3090 #ifdef MULTI_TLB
3091 cpu_tlb = *list->tlb;
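
The setup.c hunk above derives PXN support at boot from ID_MMFR0: a VMSA field (low nibble) of 4 or 5 indicates a v7 MMU with the PXN extension, and only then are the PXN bits folded into __supported_pte_mask/__supported_pmd_mask, which the pgtable code OR-s into new entries. A sketch of the decode (mmfr0 value illustrative; bit definitions as earlier in this patch):

    #include <stdio.h>
    #include <stdint.h>

    #define PMD_PXNTABLE (UINT64_C(1) << 2)   /* v7 first-level table bit */
    #define L_PTE_PXN    (UINT64_C(1) << 53)  /* LPAE value; 0 on 2-level tables */

    int main(void)
    {
        uint32_t mmfr0 = 0x00000034;          /* illustrative: VMSA field = 4 */
        uint64_t supported_pte_mask = 0, supported_pmd_mask = 0;

        if ((mmfr0 & 0x0000000f) == 0x00000004 ||
            (mmfr0 & 0x0000000f) == 0x00000005) {
            supported_pte_mask |= L_PTE_PXN;
            supported_pmd_mask |= PMD_PXNTABLE;
        }
        printf("pte mask %#llx, pmd mask %#llx\n",
               (unsigned long long)supported_pte_mask,
               (unsigned long long)supported_pmd_mask);
        return 0;
    }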
3092 diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3093 index 5a42c12..a2bb7c6 100644
3094 --- a/arch/arm/kernel/signal.c
3095 +++ b/arch/arm/kernel/signal.c
3096 @@ -45,8 +45,6 @@ static const unsigned long sigreturn_codes[7] = {
3097 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
3098 };
3099
3100 -static unsigned long signal_return_offset;
3101 -
3102 #ifdef CONFIG_CRUNCH
3103 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3104 {
3105 @@ -406,8 +404,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3106 * except when the MPU has protected the vectors
3107 * page from PL0
3108 */
3109 - retcode = mm->context.sigpage + signal_return_offset +
3110 - (idx << 2) + thumb;
3111 + retcode = mm->context.sigpage + (idx << 2) + thumb;
3112 } else
3113 #endif
3114 {
3115 @@ -611,33 +608,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3116 } while (thread_flags & _TIF_WORK_MASK);
3117 return 0;
3118 }
3119 -
3120 -struct page *get_signal_page(void)
3121 -{
3122 - unsigned long ptr;
3123 - unsigned offset;
3124 - struct page *page;
3125 - void *addr;
3126 -
3127 - page = alloc_pages(GFP_KERNEL, 0);
3128 -
3129 - if (!page)
3130 - return NULL;
3131 -
3132 - addr = page_address(page);
3133 -
3134 - /* Give the signal return code some randomness */
3135 - offset = 0x200 + (get_random_int() & 0x7fc);
3136 - signal_return_offset = offset;
3137 -
3138 - /*
3139 - * Copy signal return handlers into the vector page, and
3140 - * set sigreturn to be a pointer to these.
3141 - */
3142 - memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3143 -
3144 - ptr = (unsigned long)addr + offset;
3145 - flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3146 -
3147 - return page;
3148 -}
3149 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3150 index 5919eb4..b5d6dfe 100644
3151 --- a/arch/arm/kernel/smp.c
3152 +++ b/arch/arm/kernel/smp.c
3153 @@ -70,7 +70,7 @@ enum ipi_msg_type {
3154
3155 static DECLARE_COMPLETION(cpu_running);
3156
3157 -static struct smp_operations smp_ops;
3158 +static struct smp_operations smp_ops __read_only;
3159
3160 void __init smp_set_ops(struct smp_operations *ops)
3161 {
3162 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3163 index 6b9567e..b8af2d6 100644
3164 --- a/arch/arm/kernel/traps.c
3165 +++ b/arch/arm/kernel/traps.c
3166 @@ -55,7 +55,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3167 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3168 {
3169 #ifdef CONFIG_KALLSYMS
3170 - printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3171 + printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3172 #else
3173 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3174 #endif
3175 @@ -257,6 +257,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3176 static int die_owner = -1;
3177 static unsigned int die_nest_count;
3178
3179 +extern void gr_handle_kernel_exploit(void);
3180 +
3181 static unsigned long oops_begin(void)
3182 {
3183 int cpu;
3184 @@ -299,6 +301,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3185 panic("Fatal exception in interrupt");
3186 if (panic_on_oops)
3187 panic("Fatal exception");
3188 +
3189 + gr_handle_kernel_exploit();
3190 +
3191 if (signr)
3192 do_exit(signr);
3193 }
3194 @@ -592,7 +597,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3195 * The user helper at 0xffff0fe0 must be used instead.
3196 * (see entry-armv.S for details)
3197 */
3198 + pax_open_kernel();
3199 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3200 + pax_close_kernel();
3201 }
3202 return 0;
3203
3204 @@ -848,5 +855,9 @@ void __init early_trap_init(void *vectors_base)
3205 kuser_init(vectors_base);
3206
3207 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3208 - modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3209 +
3210 +#ifndef CONFIG_PAX_MEMORY_UDEREF
3211 + modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3212 +#endif
3213 +
3214 }
3215 diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3216 index 33f2ea3..0b91824 100644
3217 --- a/arch/arm/kernel/vmlinux.lds.S
3218 +++ b/arch/arm/kernel/vmlinux.lds.S
3219 @@ -8,7 +8,11 @@
3220 #include <asm/thread_info.h>
3221 #include <asm/memory.h>
3222 #include <asm/page.h>
3223 -
3224 +
3225 +#ifdef CONFIG_PAX_KERNEXEC
3226 +#include <asm/pgtable.h>
3227 +#endif
3228 +
3229 #define PROC_INFO \
3230 . = ALIGN(4); \
3231 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3232 @@ -94,6 +98,11 @@ SECTIONS
3233 _text = .;
3234 HEAD_TEXT
3235 }
3236 +
3237 +#ifdef CONFIG_PAX_KERNEXEC
3238 + . = ALIGN(1<<SECTION_SHIFT);
3239 +#endif
3240 +
3241 .text : { /* Real text segment */
3242 _stext = .; /* Text and read-only data */
3243 __exception_text_start = .;
3244 @@ -116,6 +125,8 @@ SECTIONS
3245 ARM_CPU_KEEP(PROC_INFO)
3246 }
3247
3248 + _etext = .; /* End of text section */
3249 +
3250 RO_DATA(PAGE_SIZE)
3251
3252 . = ALIGN(4);
3253 @@ -146,7 +157,9 @@ SECTIONS
3254
3255 NOTES
3256
3257 - _etext = .; /* End of text and rodata section */
3258 +#ifdef CONFIG_PAX_KERNEXEC
3259 + . = ALIGN(1<<SECTION_SHIFT);
3260 +#endif
3261
3262 #ifndef CONFIG_XIP_KERNEL
3263 . = ALIGN(PAGE_SIZE);
3264 @@ -224,6 +237,11 @@ SECTIONS
3265 . = PAGE_OFFSET + TEXT_OFFSET;
3266 #else
3267 __init_end = .;
3268 +
3269 +#ifdef CONFIG_PAX_KERNEXEC
3270 + . = ALIGN(1<<SECTION_SHIFT);
3271 +#endif
3272 +
3273 . = ALIGN(THREAD_SIZE);
3274 __data_loc = .;
3275 #endif
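
The ALIGN(1 << SECTION_SHIFT) directives sprinkled through the linker script above round segment boundaries up to 1 MiB, so that under KERNEXEC the text, rodata and data regions each start on a section boundary and can be mapped with distinct RX/RO/RW section descriptors. The rounding itself is the usual power-of-two formula:

    #include <stdio.h>

    #define SECTION_SHIFT 20
    #define SECTION_SIZE  (1UL << SECTION_SHIFT)

    static unsigned long align_up(unsigned long x, unsigned long a)
    {
        return (x + a - 1) & ~(a - 1);      /* a must be a power of two */
    }

    int main(void)
    {
        unsigned long etext = 0xc05f2a40UL; /* illustrative end of text */

        printf("%#lx rounds up to %#lx\n", etext, align_up(etext, SECTION_SIZE));
        return 0;
    }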
3276 diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3277 index 14a0d98..7771a7d 100644
3278 --- a/arch/arm/lib/clear_user.S
3279 +++ b/arch/arm/lib/clear_user.S
3280 @@ -12,14 +12,14 @@
3281
3282 .text
3283
3284 -/* Prototype: int __clear_user(void *addr, size_t sz)
3285 +/* Prototype: int ___clear_user(void *addr, size_t sz)
3286 * Purpose : clear some user memory
3287 * Params : addr - user memory address to clear
3288 * : sz - number of bytes to clear
3289 * Returns : number of bytes NOT cleared
3290 */
3291 ENTRY(__clear_user_std)
3292 -WEAK(__clear_user)
3293 +WEAK(___clear_user)
3294 stmfd sp!, {r1, lr}
3295 mov r2, #0
3296 cmp r1, #4
3297 @@ -44,7 +44,7 @@ WEAK(__clear_user)
3298 USER( strnebt r2, [r0])
3299 mov r0, #0
3300 ldmfd sp!, {r1, pc}
3301 -ENDPROC(__clear_user)
3302 +ENDPROC(___clear_user)
3303 ENDPROC(__clear_user_std)
3304
3305 .pushsection .fixup,"ax"
3306 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3307 index 66a477a..bee61d3 100644
3308 --- a/arch/arm/lib/copy_from_user.S
3309 +++ b/arch/arm/lib/copy_from_user.S
3310 @@ -16,7 +16,7 @@
3311 /*
3312 * Prototype:
3313 *
3314 - * size_t __copy_from_user(void *to, const void *from, size_t n)
3315 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
3316 *
3317 * Purpose:
3318 *
3319 @@ -84,11 +84,11 @@
3320
3321 .text
3322
3323 -ENTRY(__copy_from_user)
3324 +ENTRY(___copy_from_user)
3325
3326 #include "copy_template.S"
3327
3328 -ENDPROC(__copy_from_user)
3329 +ENDPROC(___copy_from_user)
3330
3331 .pushsection .fixup,"ax"
3332 .align 0
3333 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3334 index 6ee2f67..d1cce76 100644
3335 --- a/arch/arm/lib/copy_page.S
3336 +++ b/arch/arm/lib/copy_page.S
3337 @@ -10,6 +10,7 @@
3338 * ASM optimised string functions
3339 */
3340 #include <linux/linkage.h>
3341 +#include <linux/const.h>
3342 #include <asm/assembler.h>
3343 #include <asm/asm-offsets.h>
3344 #include <asm/cache.h>
3345 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3346 index d066df6..df28194 100644
3347 --- a/arch/arm/lib/copy_to_user.S
3348 +++ b/arch/arm/lib/copy_to_user.S
3349 @@ -16,7 +16,7 @@
3350 /*
3351 * Prototype:
3352 *
3353 - * size_t __copy_to_user(void *to, const void *from, size_t n)
3354 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
3355 *
3356 * Purpose:
3357 *
3358 @@ -88,11 +88,11 @@
3359 .text
3360
3361 ENTRY(__copy_to_user_std)
3362 -WEAK(__copy_to_user)
3363 +WEAK(___copy_to_user)
3364
3365 #include "copy_template.S"
3366
3367 -ENDPROC(__copy_to_user)
3368 +ENDPROC(___copy_to_user)
3369 ENDPROC(__copy_to_user_std)
3370
3371 .pushsection .fixup,"ax"
3372 diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3373 index 7d08b43..f7ca7ea 100644
3374 --- a/arch/arm/lib/csumpartialcopyuser.S
3375 +++ b/arch/arm/lib/csumpartialcopyuser.S
3376 @@ -57,8 +57,8 @@
3377 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3378 */
3379
3380 -#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3381 -#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3382 +#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3383 +#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3384
3385 #include "csumpartialcopygeneric.S"
3386
3387 diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3388 index 64dbfa5..84a3fd9 100644
3389 --- a/arch/arm/lib/delay.c
3390 +++ b/arch/arm/lib/delay.c
3391 @@ -28,7 +28,7 @@
3392 /*
3393 * Default to the loop-based delay implementation.
3394 */
3395 -struct arm_delay_ops arm_delay_ops = {
3396 +struct arm_delay_ops arm_delay_ops __read_only = {
3397 .delay = __loop_delay,
3398 .const_udelay = __loop_const_udelay,
3399 .udelay = __loop_udelay,
3400 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3401 index 025f742..8432b08 100644
3402 --- a/arch/arm/lib/uaccess_with_memcpy.c
3403 +++ b/arch/arm/lib/uaccess_with_memcpy.c
3404 @@ -104,7 +104,7 @@ out:
3405 }
3406
3407 unsigned long
3408 -__copy_to_user(void __user *to, const void *from, unsigned long n)
3409 +___copy_to_user(void __user *to, const void *from, unsigned long n)
3410 {
3411 /*
3412 * This test is stubbed out of the main function above to keep
3413 diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3414 index f389228..592ef66 100644
3415 --- a/arch/arm/mach-kirkwood/common.c
3416 +++ b/arch/arm/mach-kirkwood/common.c
3417 @@ -149,7 +149,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3418 clk_gate_ops.disable(hw);
3419 }
3420
3421 -static struct clk_ops clk_gate_fn_ops;
3422 +static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3423 +{
3424 + return clk_gate_ops.is_enabled(hw);
3425 +}
3426 +
3427 +static struct clk_ops clk_gate_fn_ops = {
3428 + .enable = clk_gate_fn_enable,
3429 + .disable = clk_gate_fn_disable,
3430 + .is_enabled = clk_gate_fn_is_enabled,
3431 +};
3432
3433 static struct clk __init *clk_register_gate_fn(struct device *dev,
3434 const char *name,
3435 @@ -183,14 +192,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3436 gate_fn->fn_en = fn_en;
3437 gate_fn->fn_dis = fn_dis;
3438
3439 - /* ops is the gate ops, but with our enable/disable functions */
3440 - if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3441 - clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3442 - clk_gate_fn_ops = clk_gate_ops;
3443 - clk_gate_fn_ops.enable = clk_gate_fn_enable;
3444 - clk_gate_fn_ops.disable = clk_gate_fn_disable;
3445 - }
3446 -
3447 clk = clk_register(dev, &gate_fn->gate.hw);
3448
3449 if (IS_ERR(clk))
3450 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3451 index f6eeb87..cc90868 100644
3452 --- a/arch/arm/mach-omap2/board-n8x0.c
3453 +++ b/arch/arm/mach-omap2/board-n8x0.c
3454 @@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3455 }
3456 #endif
3457
3458 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3459 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3460 .late_init = n8x0_menelaus_late_init,
3461 };
3462
3463 diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3464 index 6c4da12..d9ca72d 100644
3465 --- a/arch/arm/mach-omap2/gpmc.c
3466 +++ b/arch/arm/mach-omap2/gpmc.c
3467 @@ -147,7 +147,6 @@ struct omap3_gpmc_regs {
3468 };
3469
3470 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3471 -static struct irq_chip gpmc_irq_chip;
3472 static unsigned gpmc_irq_start;
3473
3474 static struct resource gpmc_mem_root;
3475 @@ -711,6 +710,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3476
3477 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3478
3479 +static struct irq_chip gpmc_irq_chip = {
3480 + .name = "gpmc",
3481 + .irq_startup = gpmc_irq_noop_ret,
3482 + .irq_enable = gpmc_irq_enable,
3483 + .irq_disable = gpmc_irq_disable,
3484 + .irq_shutdown = gpmc_irq_noop,
3485 + .irq_ack = gpmc_irq_noop,
3486 + .irq_mask = gpmc_irq_noop,
3487 + .irq_unmask = gpmc_irq_noop,
3488 +
3489 +};
3490 +
3491 static int gpmc_setup_irq(void)
3492 {
3493 int i;
3494 @@ -725,15 +736,6 @@ static int gpmc_setup_irq(void)
3495 return gpmc_irq_start;
3496 }
3497
3498 - gpmc_irq_chip.name = "gpmc";
3499 - gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3500 - gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3501 - gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3502 - gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3503 - gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3504 - gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3505 - gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3506 -
3507 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3508 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3509
3510 diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3511 index f8bb3b9..831e7b8 100644
3512 --- a/arch/arm/mach-omap2/omap-wakeupgen.c
3513 +++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3514 @@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3515 return NOTIFY_OK;
3516 }
3517
3518 -static struct notifier_block __refdata irq_hotplug_notifier = {
3519 +static struct notifier_block irq_hotplug_notifier = {
3520 .notifier_call = irq_cpu_hotplug_notify,
3521 };
3522
3523 diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3524 index e6d2307..d057195 100644
3525 --- a/arch/arm/mach-omap2/omap_device.c
3526 +++ b/arch/arm/mach-omap2/omap_device.c
3527 @@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od)
3528 struct platform_device __init *omap_device_build(const char *pdev_name,
3529 int pdev_id,
3530 struct omap_hwmod *oh,
3531 - void *pdata, int pdata_len)
3532 + const void *pdata, int pdata_len)