grsecurity-3.1-4.6.3-201607060823.patch
(test/4.6.3 in thirdparty/grsecurity-scrape.git, via git.ipfire.org)

1diff --git a/.gitignore b/.gitignore
2index fd3a355..c47e86a 100644
3--- a/.gitignore
4+++ b/.gitignore
5@@ -37,6 +37,7 @@ modules.builtin
6 Module.symvers
7 *.dwo
8 *.su
9+*.c.[012]*.*
10
11 #
12 # Top-level generic files
13diff --git a/Documentation/dontdiff b/Documentation/dontdiff
14index 8ea834f..1462492 100644
15--- a/Documentation/dontdiff
16+++ b/Documentation/dontdiff
17@@ -3,9 +3,11 @@
18 *.bc
19 *.bin
20 *.bz2
21+*.c.[012]*.*
22 *.cis
23 *.cpio
24 *.csp
25+*.dbg
26 *.dsp
27 *.dvi
28 *.elf
29@@ -15,6 +17,7 @@
30 *.gcov
31 *.gen.S
32 *.gif
33+*.gmo
34 *.grep
35 *.grp
36 *.gz
37@@ -51,14 +54,17 @@
38 *.tab.h
39 *.tex
40 *.ver
41+*.vim
42 *.xml
43 *.xz
44 *_MODULES
45+*_reg_safe.h
46 *_vga16.c
47 *~
48 \#*#
49 *.9
50-.*
51+.[^g]*
52+.gen*
53 .*.d
54 .mm
55 53c700_d.h
56@@ -72,9 +78,11 @@ Image
57 Module.markers
58 Module.symvers
59 PENDING
60+PERF*
61 SCCS
62 System.map*
63 TAGS
64+TRACEEVENT-CFLAGS
65 aconf
66 af_names.h
67 aic7*reg.h*
68@@ -83,6 +91,7 @@ aic7*seq.h*
69 aicasm
70 aicdb.h*
71 altivec*.c
72+ashldi3.S
73 asm-offsets.h
74 asm_offsets.h
75 autoconf.h*
76@@ -95,32 +104,40 @@ bounds.h
77 bsetup
78 btfixupprep
79 build
80+builtin-policy.h
81 bvmlinux
82 bzImage*
83 capability_names.h
84 capflags.c
85 classlist.h*
86+clut_vga16.c
87+common-cmds.h
88 comp*.log
89 compile.h*
90 conf
91 config
92 config-*
93 config_data.h*
94+config.c
95 config.mak
96 config.mak.autogen
97+config.tmp
98 conmakehash
99 consolemap_deftbl.c*
100 cpustr.h
101 crc32table.h*
102 cscope.*
103 defkeymap.c
104+devicetable-offsets.h
105 devlist.h*
106 dnotify_test
107 docproc
108 dslm
109+dtc-lexer.lex.c
110 elf2ecoff
111 elfconfig.h*
112 evergreen_reg_safe.h
113+exception_policy.conf
114 fixdep
115 flask.h
116 fore200e_mkfirm
117@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
118 gconf
119 gconf.glade.h
120 gen-devlist
121+gen-kdb_cmds.c
122 gen_crc32table
123 gen_init_cpio
124 generated
125 genheaders
126 genksyms
127 *_gray256.c
128+hash
129+hid-example
130 hpet_example
131 hugepage-mmap
132 hugepage-shm
133@@ -148,14 +168,14 @@ int32.c
134 int4.c
135 int8.c
136 kallsyms
137-kconfig
138+kern_constants.h
139 keywords.c
140 ksym.c*
141 ksym.h*
142 kxgettext
143 lex.c
144 lex.*.c
145-linux
146+lib1funcs.S
147 logo_*.c
148 logo_*_clut224.c
149 logo_*_mono.c
150@@ -166,12 +186,14 @@ machtypes.h
151 map
152 map_hugetlb
153 mconf
154+mdp
155 miboot*
156 mk_elfconfig
157 mkboot
158 mkbugboot
159 mkcpustr
160 mkdep
161+mkpiggy
162 mkprep
163 mkregtable
164 mktables
165@@ -187,6 +209,8 @@ oui.c*
166 page-types
167 parse.c
168 parse.h
169+parse-events*
170+pasyms.h
171 patches*
172 pca200e.bin
173 pca200e_ecd.bin2
174@@ -196,6 +220,7 @@ perf-archive
175 piggyback
176 piggy.gzip
177 piggy.S
178+pmu-*
179 pnmtologo
180 ppc_defs.h*
181 pss_boot.h
182@@ -205,7 +230,12 @@ r200_reg_safe.h
183 r300_reg_safe.h
184 r420_reg_safe.h
185 r600_reg_safe.h
186+randomize_layout_hash.h
187+randomize_layout_seed.h
188+realmode.lds
189+realmode.relocs
190 recordmcount
191+regdb.c
192 relocs
193 rlim_names.h
194 rn50_reg_safe.h
195@@ -215,8 +245,12 @@ series
196 setup
197 setup.bin
198 setup.elf
199+signing_key*
200+size_overflow_hash.h
201 sImage
202+slabinfo
203 sm_tbl*
204+sortextable
205 split-include
206 syscalltab.h
207 tables.c
208@@ -226,6 +260,7 @@ tftpboot.img
209 timeconst.h
210 times.h*
211 trix_boot.h
212+user_constants.h
213 utsrelease.h*
214 vdso-syms.lds
215 vdso.lds
216@@ -237,13 +272,17 @@ vdso32.lds
217 vdso32.so.dbg
218 vdso64.lds
219 vdso64.so.dbg
220+vdsox32.lds
221+vdsox32-syms.lds
222 version.h*
223 vmImage
224 vmlinux
225 vmlinux-*
226 vmlinux.aout
227 vmlinux.bin.all
228+vmlinux.bin.bz2
229 vmlinux.lds
230+vmlinux.relocs
231 vmlinuz
232 voffset.h
233 vsyscall.lds
234@@ -251,9 +290,12 @@ vsyscall_32.lds
235 wanxlfw.inc
236 uImage
237 unifdef
238+utsrelease.h
239 wakeup.bin
240 wakeup.elf
241 wakeup.lds
242+x509*
243 zImage*
244 zconf.hash.c
245+zconf.lex.c
246 zoffset.h
247diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
248index 13f888a..250729b 100644
249--- a/Documentation/kbuild/makefiles.txt
250+++ b/Documentation/kbuild/makefiles.txt
251@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
252 === 4 Host Program support
253 --- 4.1 Simple Host Program
254 --- 4.2 Composite Host Programs
255- --- 4.3 Using C++ for host programs
256- --- 4.4 Controlling compiler options for host programs
257- --- 4.5 When host programs are actually built
258- --- 4.6 Using hostprogs-$(CONFIG_FOO)
259+ --- 4.3 Defining shared libraries
260+ --- 4.4 Using C++ for host programs
261+ --- 4.5 Controlling compiler options for host programs
262+ --- 4.6 When host programs are actually built
263+ --- 4.7 Using hostprogs-$(CONFIG_FOO)
264
265 === 5 Kbuild clean infrastructure
266
267@@ -643,7 +644,29 @@ Both possibilities are described in the following.
268 Finally, the two .o files are linked to the executable, lxdialog.
269 Note: The syntax <executable>-y is not permitted for host-programs.
270
271---- 4.3 Using C++ for host programs
272+--- 4.3 Defining shared libraries
273+
274+ Objects with extension .so are considered shared libraries, and
275+ will be compiled as position independent objects.
276+	Kbuild provides support for shared libraries, but their use
277+	should be restricted.
278+ In the following example the libkconfig.so shared library is used
279+ to link the executable conf.
280+
281+ Example:
282+ #scripts/kconfig/Makefile
283+ hostprogs-y := conf
284+ conf-objs := conf.o libkconfig.so
285+ libkconfig-objs := expr.o type.o
286+
287+ Shared libraries always require a corresponding -objs line, and
288+	in the example above the shared library libkconfig is composed of
289+ the two objects expr.o and type.o.
290+ expr.o and type.o will be built as position independent code and
291+ linked as a shared library libkconfig.so. C++ is not supported for
292+ shared libraries.
293+
294+--- 4.4 Using C++ for host programs
295
296 kbuild offers support for host programs written in C++. This was
297 introduced solely to support kconfig, and is not recommended
298@@ -666,7 +689,7 @@ Both possibilities are described in the following.
299 qconf-cxxobjs := qconf.o
300 qconf-objs := check.o
301
302---- 4.4 Controlling compiler options for host programs
303+--- 4.5 Controlling compiler options for host programs
304
305 When compiling host programs, it is possible to set specific flags.
306 The programs will always be compiled utilising $(HOSTCC) passed
307@@ -694,7 +717,7 @@ Both possibilities are described in the following.
308 When linking qconf, it will be passed the extra option
309 "-L$(QTDIR)/lib".
310
311---- 4.5 When host programs are actually built
312+--- 4.6 When host programs are actually built
313
314 Kbuild will only build host-programs when they are referenced
315 as a prerequisite.
316@@ -725,7 +748,7 @@ Both possibilities are described in the following.
317 This will tell kbuild to build lxdialog even if not referenced in
318 any rule.
319
320---- 4.6 Using hostprogs-$(CONFIG_FOO)
321+--- 4.7 Using hostprogs-$(CONFIG_FOO)
322
323 A typical pattern in a Kbuild file looks like this:
324
325diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
326index 0b3de80..550d8e8 100644
327--- a/Documentation/kernel-parameters.txt
328+++ b/Documentation/kernel-parameters.txt
329@@ -1320,6 +1320,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
330 [KNL] Should the hard-lockup detector generate
331 backtraces on all cpus.
332 Format: <integer>
333+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
334+ ignore grsecurity's /proc restrictions
335+
336+ grsec_sysfs_restrict= Format: 0 | 1
337+ Default: 1
338+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
339
340 hashdist= [KNL,NUMA] Large hashes allocated during boot
341 are distributed across NUMA nodes. Defaults on
342@@ -2515,6 +2521,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
343 noexec=on: enable non-executable mappings (default)
344 noexec=off: disable non-executable mappings
345
346+ nopcid [X86-64]
347+ Disable PCID (Process-Context IDentifier) even if it
348+ is supported by the processor.
349+
350 nosmap [X86]
351 Disable SMAP (Supervisor Mode Access Prevention)
352 even if it is supported by processor.
353@@ -2818,6 +2828,35 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
354 the specified number of seconds. This is to be used if
355 your oopses keep scrolling off the screen.
356
357+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
358+ virtualization environments that don't cope well with the
359+ expand down segment used by UDEREF on X86-32 or the frequent
360+ page table updates on X86-64.
361+
362+ pax_sanitize_slab=
363+ Format: { 0 | 1 | off | fast | full }
364+ Options '0' and '1' are only provided for backward
365+	compatibility; 'off' or 'fast' should be used instead.
366+ 0|off : disable slab object sanitization
367+ 1|fast: enable slab object sanitization excluding
368+ whitelisted slabs (default)
369+ full : sanitize all slabs, even the whitelisted ones
370+
371+	pax_softmode=	0/1 to disable/enable PaX softmode at boot time.
372+
373+ pax_extra_latent_entropy
374+ Enable a very simple form of latent entropy extraction
375+ from the first 4GB of memory as the bootmem allocator
376+ passes the memory pages to the buddy allocator.
377+
378+ pax_size_overflow_report_only
379+ Enables rate-limited logging of size_overflow plugin
380+	violations instead of killing the violating
381+ task.
382+
383+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
384+ when the processor supports PCID.
385+
386 pcbit= [HW,ISDN]
387
388 pcd.		[PARIDE]
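
The pax_* entries above are plain boot options, so they are parsed before most
of the kernel is up. As a rough illustration of how a multi-valued option such
as pax_sanitize_slab= can be consumed with the kernel's early_param() hook,
here is a sketch in C; the enum, variable, and function names are hypothetical,
not grsecurity's actual implementation:

	#include <linux/init.h>
	#include <linux/string.h>

	enum sanitize_mode { SANITIZE_OFF, SANITIZE_FAST, SANITIZE_FULL };
	static enum sanitize_mode sanitize_slab = SANITIZE_FAST;  /* default: fast */

	static int __init sanitize_slab_setup(char *str)
	{
		if (!str)
			return 0;
		/* '0'/'off' disable, '1'/'fast' keep the default, 'full' covers all slabs */
		if (!strcmp(str, "0") || !strcmp(str, "off"))
			sanitize_slab = SANITIZE_OFF;
		else if (!strcmp(str, "1") || !strcmp(str, "fast"))
			sanitize_slab = SANITIZE_FAST;
		else if (!strcmp(str, "full"))
			sanitize_slab = SANITIZE_FULL;
		return 0;
	}
	early_param("pax_sanitize_slab", sanitize_slab_setup);

Booting with pax_sanitize_slab=full would then select full sanitization before
the slab allocators initialize.
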
389diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
390index fcddfd5..71afd6b 100644
391--- a/Documentation/sysctl/kernel.txt
392+++ b/Documentation/sysctl/kernel.txt
393@@ -42,6 +42,7 @@ show up in /proc/sys/kernel:
394 - kptr_restrict
395 - kstack_depth_to_print [ X86 only ]
396 - l2cr [ PPC only ]
397+- modify_ldt [ X86 only ]
398 - modprobe ==> Documentation/debugging-modules.txt
399 - modules_disabled
400 - msg_next_id [ sysv ipc ]
401@@ -406,6 +407,20 @@ This flag controls the L2 cache of G3 processor boards. If
402
403 ==============================================================
404
405+modify_ldt: (X86 only)
406+
407+Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
408+(Local Descriptor Table) may be needed to run 16-bit or segmented code
409+such as Dosemu or Wine. This is done via a system call which is not needed
410+to run portable applications, and which can sometimes be abused to exploit
411+some weaknesses of the architecture, opening new vulnerabilities.
412+
413+This sysctl allows one to increase the system's security by disabling the
414+system call, or to restore compatibility with specific applications if it
415+has been disabled.
416+
417+==============================================================
418+
419 modules_disabled:
420
421 A toggle value indicating if modules are allowed to be loaded
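
For context, an integer sysctl such as the modify_ldt knob documented above is
conventionally backed by a ctl_table entry whose handler clamps writes to the
valid range. A minimal sketch with hypothetical names (the real x86
implementation differs; registration of the table is omitted):

	#include <linux/sysctl.h>

	static int ldt_allowed = 1;		/* hypothetical backing variable */
	static int zero = 0, one = 1;

	static struct ctl_table ldt_table[] = {
		{
			.procname	= "modify_ldt",
			.data		= &ldt_allowed,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_minmax,
			.extra1		= &zero,	/* reject values outside 0..1 */
			.extra2		= &one,
		},
		{ }
	};

Userspace then flips it with "sysctl kernel.modify_ldt=0" or by writing to
/proc/sys/kernel/modify_ldt.
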
422diff --git a/Makefile b/Makefile
423index c62b531..e158b54 100644
424--- a/Makefile
425+++ b/Makefile
426@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
427 HOSTCC = gcc
428 HOSTCXX = g++
429 HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
430-HOSTCXXFLAGS = -O2
431+HOSTCFLAGS += -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
432+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
433+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
434
435 ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
436 HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
437@@ -548,7 +550,7 @@ ifeq ($(KBUILD_EXTMOD),)
438 # in parallel
439 PHONY += scripts
440 scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
441- asm-generic
442+ asm-generic gcc-plugins
443 $(Q)$(MAKE) $(build)=$(@)
444
445 # Objects we will link into vmlinux / subdirs we need to visit
446@@ -623,6 +625,8 @@ endif
447 # Tell gcc to never replace conditional load with a non-conditional one
448 KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
449
450+include scripts/Makefile.gcc-plugins
451+
452 ifdef CONFIG_READABLE_ASM
453 # Disable optimizations that make assembler listings hard to read.
454 # reorder blocks reorders the control in the function
455@@ -724,7 +728,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
456 else
457 KBUILD_CFLAGS += -g
458 endif
459-KBUILD_AFLAGS += -Wa,-gdwarf-2
460+KBUILD_AFLAGS += -Wa,--gdwarf-2
461 endif
462 ifdef CONFIG_DEBUG_INFO_DWARF4
463 KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
464@@ -899,7 +903,7 @@ export mod_sign_cmd
465
466
467 ifeq ($(KBUILD_EXTMOD),)
468-core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
469+core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
470
471 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
472 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
473@@ -1002,7 +1006,7 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
474
475 archprepare: archheaders archscripts prepare1 scripts_basic
476
477-prepare0: archprepare FORCE
478+prepare0: archprepare gcc-plugins FORCE
479 $(Q)$(MAKE) $(build)=.
480
481 # All the preparing..
482@@ -1220,7 +1224,11 @@ MRPROPER_FILES += .config .config.old .version .old_version \
483 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
484 signing_key.pem signing_key.priv signing_key.x509 \
485 x509.genkey extra_certificates signing_key.x509.keyid \
486- signing_key.x509.signer vmlinux-gdb.py
487+ signing_key.x509.signer vmlinux-gdb.py \
488+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
489+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
490+ tools/gcc/size_overflow_plugin/disable_size_overflow_hash.h \
491+ tools/gcc/randomize_layout_seed.h
492
493 # clean - Delete most, but leave enough to build external modules
494 #
495@@ -1259,7 +1267,7 @@ distclean: mrproper
496 @find $(srctree) $(RCS_FIND_IGNORE) \
497 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
498 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
499- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
500+ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
501 -type f -print | xargs rm -f
502
503
504@@ -1480,6 +1488,7 @@ clean: $(clean-dirs)
505 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
506 -o -name '*.symtypes' -o -name 'modules.order' \
507 -o -name modules.builtin -o -name '.tmp_*.o.*' \
508+ -o -name '*.c.[012]*.*' \
509 -o -name '*.gcno' \) -type f -print | xargs rm -f
510
511 # Generate tags for editors
512diff --git a/arch/Kconfig b/arch/Kconfig
513index 81869a5..b10fc6c 100644
514--- a/arch/Kconfig
515+++ b/arch/Kconfig
516@@ -353,6 +353,20 @@ config SECCOMP_FILTER
517
518 See Documentation/prctl/seccomp_filter.txt for details.
519
520+config HAVE_GCC_PLUGINS
521+ bool
522+ help
523+ An arch should select this symbol if it supports building with
524+ GCC plugins.
525+
526+menuconfig GCC_PLUGINS
527+ bool "GCC plugins"
528+ depends on HAVE_GCC_PLUGINS
529+ default y
530+ help
531+ GCC plugins are loadable modules that provide extra features to the
532+ compiler. They are useful for runtime instrumentation and static analysis.
533+
534 config HAVE_CC_STACKPROTECTOR
535 bool
536 help
537diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
538index 572b228..e03acdd 100644
539--- a/arch/alpha/include/asm/atomic.h
540+++ b/arch/alpha/include/asm/atomic.h
541@@ -251,4 +251,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
542 #define atomic_dec(v) atomic_sub(1,(v))
543 #define atomic64_dec(v) atomic64_sub(1,(v))
544
545+#define atomic64_read_unchecked(v) atomic64_read(v)
546+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
547+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
548+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
549+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
550+#define atomic64_inc_unchecked(v) atomic64_inc(v)
551+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
552+#define atomic64_dec_unchecked(v) atomic64_dec(v)
553+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
554+
555 #endif /* _ALPHA_ATOMIC_H */
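
The block above is the degenerate form of the PaX REFCOUNT interface: this
patch adds no overflow-trapping atomics for alpha, so every *_unchecked
operation simply aliases its checked counterpart. A condensed sketch of the
pattern, assuming the PaX design in which the unchecked type exists to opt
legitimately wrapping counters out of overflow trapping (the ARM code later in
this patch shows the diverging case):

	#ifdef CONFIG_PAX_REFCOUNT
	typedef struct { long long counter; } atomic64_unchecked_t; /* distinct type */
	#else
	typedef atomic64_t atomic64_unchecked_t;                    /* plain alias */
	#endif
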
556diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
557index ad368a9..fbe0f25 100644
558--- a/arch/alpha/include/asm/cache.h
559+++ b/arch/alpha/include/asm/cache.h
560@@ -4,19 +4,19 @@
561 #ifndef __ARCH_ALPHA_CACHE_H
562 #define __ARCH_ALPHA_CACHE_H
563
564+#include <linux/const.h>
565
566 /* Bytes per L1 (data) cache line. */
567 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
568-# define L1_CACHE_BYTES 64
569 # define L1_CACHE_SHIFT 6
570 #else
571 /* Both EV4 and EV5 are write-through, read-allocate,
572 direct-mapped, physical.
573 */
574-# define L1_CACHE_BYTES 32
575 # define L1_CACHE_SHIFT 5
576 #endif
577
578+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
579 #define SMP_CACHE_BYTES L1_CACHE_BYTES
580
581 #endif
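
The rewrite above leaves the per-CPU shift as the single source of truth and
derives the byte count from it, so the two can no longer drift apart. _AC(1,UL)
expands to 1UL in C but to a bare 1 in assembly, which is why it appears in
headers shared with asm. A condensed sketch using the EV6 value:

	#define L1_CACHE_SHIFT	6
	#define L1_CACHE_BYTES	(1UL << L1_CACHE_SHIFT)	/* 64; _AC(1,UL) in shared headers */
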
582diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
583index 968d999..d36b2df 100644
584--- a/arch/alpha/include/asm/elf.h
585+++ b/arch/alpha/include/asm/elf.h
586@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
587
588 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
589
590+#ifdef CONFIG_PAX_ASLR
591+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
592+
593+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
594+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
595+#endif
596+
597 /* $0 is set by ld.so to a pointer to a function which might be
598 registered using atexit. This provides a means for the dynamic
599 linker to call DT_FINI functions for shared libraries that have
600diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
601index aab14a0..b4fa3e7 100644
602--- a/arch/alpha/include/asm/pgalloc.h
603+++ b/arch/alpha/include/asm/pgalloc.h
604@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
605 pgd_set(pgd, pmd);
606 }
607
608+static inline void
609+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
610+{
611+ pgd_populate(mm, pgd, pmd);
612+}
613+
614 extern pgd_t *pgd_alloc(struct mm_struct *mm);
615
616 static inline void
617diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
618index a9a1195..e9b8417 100644
619--- a/arch/alpha/include/asm/pgtable.h
620+++ b/arch/alpha/include/asm/pgtable.h
621@@ -101,6 +101,17 @@ struct vm_area_struct;
622 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
623 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
624 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
625+
626+#ifdef CONFIG_PAX_PAGEEXEC
627+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
628+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
629+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
630+#else
631+# define PAGE_SHARED_NOEXEC PAGE_SHARED
632+# define PAGE_COPY_NOEXEC PAGE_COPY
633+# define PAGE_READONLY_NOEXEC PAGE_READONLY
634+#endif
635+
636 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
637
638 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
639diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
640index 936bc8f..bb1859f 100644
641--- a/arch/alpha/kernel/module.c
642+++ b/arch/alpha/kernel/module.c
643@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
644
645 /* The small sections were sorted to the end of the segment.
646 The following should definitely cover them. */
647- gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
648+ gp = (u64)me->core_layout.base_rw + me->core_layout.size_rw - 0x8000;
649 got = sechdrs[me->arch.gotsecindex].sh_addr;
650
651 for (i = 0; i < n; i++) {
652diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
653index 6cc0816..3dd424d 100644
654--- a/arch/alpha/kernel/osf_sys.c
655+++ b/arch/alpha/kernel/osf_sys.c
656@@ -1300,10 +1300,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
657 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
658
659 static unsigned long
660-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
661- unsigned long limit)
662+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
663+ unsigned long limit, unsigned long flags)
664 {
665 struct vm_unmapped_area_info info;
666+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
667
668 info.flags = 0;
669 info.length = len;
670@@ -1311,6 +1312,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
671 info.high_limit = limit;
672 info.align_mask = 0;
673 info.align_offset = 0;
674+ info.threadstack_offset = offset;
675 return vm_unmapped_area(&info);
676 }
677
678@@ -1343,20 +1345,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
679 merely specific addresses, but regions of memory -- perhaps
680 this feature should be incorporated into all ports? */
681
682+#ifdef CONFIG_PAX_RANDMMAP
683+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
684+#endif
685+
686 if (addr) {
687- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
688+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
689 if (addr != (unsigned long) -ENOMEM)
690 return addr;
691 }
692
693 /* Next, try allocating at TASK_UNMAPPED_BASE. */
694- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
695- len, limit);
696+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
697+
698 if (addr != (unsigned long) -ENOMEM)
699 return addr;
700
701 /* Finally, try allocating in low memory. */
702- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
703+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
704
705 return addr;
706 }
707diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
708index 4a905bd..0a4da53 100644
709--- a/arch/alpha/mm/fault.c
710+++ b/arch/alpha/mm/fault.c
711@@ -52,6 +52,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
712 __reload_thread(pcb);
713 }
714
715+#ifdef CONFIG_PAX_PAGEEXEC
716+/*
717+ * PaX: decide what to do with offenders (regs->pc = fault address)
718+ *
719+ * returns 1 when task should be killed
720+ * 2 when patched PLT trampoline was detected
721+ * 3 when unpatched PLT trampoline was detected
722+ */
723+static int pax_handle_fetch_fault(struct pt_regs *regs)
724+{
725+
726+#ifdef CONFIG_PAX_EMUPLT
727+ int err;
728+
729+ do { /* PaX: patched PLT emulation #1 */
730+ unsigned int ldah, ldq, jmp;
731+
732+ err = get_user(ldah, (unsigned int *)regs->pc);
733+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
734+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
735+
736+ if (err)
737+ break;
738+
739+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
740+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
741+ jmp == 0x6BFB0000U)
742+ {
743+ unsigned long r27, addr;
744+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
745+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
746+
747+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
748+ err = get_user(r27, (unsigned long *)addr);
749+ if (err)
750+ break;
751+
752+ regs->r27 = r27;
753+ regs->pc = r27;
754+ return 2;
755+ }
756+ } while (0);
757+
758+ do { /* PaX: patched PLT emulation #2 */
759+ unsigned int ldah, lda, br;
760+
761+ err = get_user(ldah, (unsigned int *)regs->pc);
762+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
763+ err |= get_user(br, (unsigned int *)(regs->pc+8));
764+
765+ if (err)
766+ break;
767+
768+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
769+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
770+ (br & 0xFFE00000U) == 0xC3E00000U)
771+ {
772+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
773+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
774+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
775+
776+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
777+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
778+ return 2;
779+ }
780+ } while (0);
781+
782+ do { /* PaX: unpatched PLT emulation */
783+ unsigned int br;
784+
785+ err = get_user(br, (unsigned int *)regs->pc);
786+
787+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
788+ unsigned int br2, ldq, nop, jmp;
789+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
790+
791+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
792+ err = get_user(br2, (unsigned int *)addr);
793+ err |= get_user(ldq, (unsigned int *)(addr+4));
794+ err |= get_user(nop, (unsigned int *)(addr+8));
795+ err |= get_user(jmp, (unsigned int *)(addr+12));
796+ err |= get_user(resolver, (unsigned long *)(addr+16));
797+
798+ if (err)
799+ break;
800+
801+ if (br2 == 0xC3600000U &&
802+ ldq == 0xA77B000CU &&
803+ nop == 0x47FF041FU &&
804+ jmp == 0x6B7B0000U)
805+ {
806+ regs->r28 = regs->pc+4;
807+ regs->r27 = addr+16;
808+ regs->pc = resolver;
809+ return 3;
810+ }
811+ }
812+ } while (0);
813+#endif
814+
815+ return 1;
816+}
817+
818+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
819+{
820+ unsigned long i;
821+
822+ printk(KERN_ERR "PAX: bytes at PC: ");
823+ for (i = 0; i < 5; i++) {
824+ unsigned int c;
825+ if (get_user(c, (unsigned int *)pc+i))
826+ printk(KERN_CONT "???????? ");
827+ else
828+ printk(KERN_CONT "%08x ", c);
829+ }
830+ printk("\n");
831+}
832+#endif
833
834 /*
835 * This routine handles page faults. It determines the address,
836@@ -132,8 +250,29 @@ retry:
837 good_area:
838 si_code = SEGV_ACCERR;
839 if (cause < 0) {
840- if (!(vma->vm_flags & VM_EXEC))
841+ if (!(vma->vm_flags & VM_EXEC)) {
842+
843+#ifdef CONFIG_PAX_PAGEEXEC
844+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
845+ goto bad_area;
846+
847+ up_read(&mm->mmap_sem);
848+ switch (pax_handle_fetch_fault(regs)) {
849+
850+#ifdef CONFIG_PAX_EMUPLT
851+ case 2:
852+ case 3:
853+ return;
854+#endif
855+
856+ }
857+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
858+ do_group_exit(SIGKILL);
859+#else
860 goto bad_area;
861+#endif
862+
863+ }
864 } else if (!cause) {
865 /* Allow reads even for write-only mappings */
866 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
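
One detail worth unpacking from pax_handle_fetch_fault() above: expressions of
the form ((x ^ 0x8000UL) + 0x8000UL), applied after OR-ing the upper bits to
ones, are a branch-free sign extension of the 16-bit displacement fields in the
ldah/lda/ldq encodings. A standalone sketch of the idiom (helper name
hypothetical):

	#include <assert.h>

	/* Sign-extend a 16-bit immediate the way the PLT-emulation code does:
	 * force the upper bits to ones, then xor/add the sign bit.  A negative
	 * field keeps the all-ones top; a positive one makes the +0x8000 carry
	 * ripple through and clear the upper bits. */
	static long sext16(unsigned short imm)
	{
		unsigned long x = imm | ~0xFFFFUL;
		return (long)((x ^ 0x8000UL) + 0x8000UL);
	}

	int main(void)
	{
		assert(sext16(0x0004) == 4);
		assert(sext16(0xFFFC) == -4);
		assert(sext16(0x8000) == -32768);
		return 0;
	}
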
867diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
868index a876743..fe2a193 100644
869--- a/arch/arc/Kconfig
870+++ b/arch/arc/Kconfig
871@@ -549,6 +549,7 @@ config ARC_DBG_TLB_MISS_COUNT
872 bool "Profile TLB Misses"
873 default n
874 select DEBUG_FS
875+ depends on !GRKERNSEC_KMEM
876 help
877 Counts number of I and D TLB Misses and exports them via Debugfs
878 The counters can be cleared via Debugfs as well
879diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
880index cdfa6c2..aba8354 100644
881--- a/arch/arm/Kconfig
882+++ b/arch/arm/Kconfig
883@@ -53,6 +53,7 @@ config ARM
884 select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
885 select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
886 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
887+ select HAVE_GCC_PLUGINS
888 select HAVE_GENERIC_DMA_COHERENT
889 select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
890 select HAVE_IDE if PCI || ISA || PCMCIA
891@@ -1629,6 +1630,7 @@ config HIGHPTE
892 config CPU_SW_DOMAIN_PAN
893 bool "Enable use of CPU domains to implement privileged no-access"
894 depends on MMU && !ARM_LPAE
895+ depends on !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
896 default y
897 help
898 Increase kernel security by ensuring that normal kernel accesses
899@@ -1705,7 +1707,7 @@ config ALIGNMENT_TRAP
900
901 config UACCESS_WITH_MEMCPY
902 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
903- depends on MMU
904+ depends on MMU && !PAX_MEMORY_UDEREF
905 default y if CPU_FEROCEON
906 help
907 Implement faster copy_to_user and clear_user methods for CPU
908@@ -1960,6 +1962,7 @@ config KEXEC
909 depends on (!SMP || PM_SLEEP_SMP)
910 depends on !CPU_V7M
911 select KEXEC_CORE
912+ depends on !GRKERNSEC_KMEM
913 help
914 kexec is a system call that implements the ability to shutdown your
915 current kernel, and to start another kernel. It is like a reboot
916@@ -2004,7 +2007,7 @@ config EFI_STUB
917
918 config EFI
919 bool "UEFI runtime support"
920- depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL
921+ depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL && !PAX_KERNEXEC
922 select UCS2_STRING
923 select EFI_PARAMS_FROM_FDT
924 select EFI_STUB
925diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
926index 1098e91..d6415c8 100644
927--- a/arch/arm/Kconfig.debug
928+++ b/arch/arm/Kconfig.debug
929@@ -7,6 +7,7 @@ config ARM_PTDUMP
930 depends on DEBUG_KERNEL
931 depends on MMU
932 select DEBUG_FS
933+ depends on !GRKERNSEC_KMEM
934 ---help---
935 Say Y here if you want to show the kernel pagetable layout in a
936 debugfs file. This information is only useful for kernel developers
937diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
938index d50430c..01cc53b 100644
939--- a/arch/arm/boot/compressed/Makefile
940+++ b/arch/arm/boot/compressed/Makefile
941@@ -103,6 +103,8 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
942 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
943 endif
944
945+KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
946+
947 # -fstack-protector-strong triggers protection checks in this code,
948 # but it is being used too early to link to meaningful stack_chk logic.
949 nossp_flags := $(call cc-option, -fno-stack-protector)
950diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
951index 9e10c45..24a14ce 100644
952--- a/arch/arm/include/asm/atomic.h
953+++ b/arch/arm/include/asm/atomic.h
954@@ -18,17 +18,41 @@
955 #include <asm/barrier.h>
956 #include <asm/cmpxchg.h>
957
958+#ifdef CONFIG_GENERIC_ATOMIC64
959+#include <asm-generic/atomic64.h>
960+#endif
961+
962 #define ATOMIC_INIT(i) { (i) }
963
964 #ifdef __KERNEL__
965
966+#ifdef CONFIG_THUMB2_KERNEL
967+#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
968+#else
969+#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
970+#endif
971+
972+#define _ASM_EXTABLE(from, to) \
973+" .pushsection __ex_table,\"a\"\n"\
974+" .align 3\n" \
975+" .long " #from ", " #to"\n" \
976+" .popsection"
977+
978 /*
979 * On ARM, ordinary assignment (str instruction) doesn't clear the local
980 * strex/ldrex monitor on some implementations. The reason we can use it for
981 * atomic_set() is the clrex or dummy strex done on every exception return.
982 */
983 #define atomic_read(v) READ_ONCE((v)->counter)
984+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
985+{
986+ return READ_ONCE(v->counter);
987+}
988 #define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
989+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
990+{
991+ WRITE_ONCE(v->counter, i);
992+}
993
994 #if __LINUX_ARM_ARCH__ >= 6
995
996@@ -38,38 +62,64 @@
997 * to ensure that the update happens.
998 */
999
1000-#define ATOMIC_OP(op, c_op, asm_op) \
1001-static inline void atomic_##op(int i, atomic_t *v) \
1002+#ifdef CONFIG_PAX_REFCOUNT
1003+#define __OVERFLOW_POST \
1004+ " bvc 3f\n" \
1005+ "2: " REFCOUNT_TRAP_INSN "\n"\
1006+ "3:\n"
1007+#define __OVERFLOW_POST_RETURN \
1008+ " bvc 3f\n" \
1009+" mov %0, %1\n" \
1010+ "2: " REFCOUNT_TRAP_INSN "\n"\
1011+ "3:\n"
1012+#define __OVERFLOW_EXTABLE \
1013+ "4:\n" \
1014+ _ASM_EXTABLE(2b, 4b)
1015+#else
1016+#define __OVERFLOW_POST
1017+#define __OVERFLOW_POST_RETURN
1018+#define __OVERFLOW_EXTABLE
1019+#endif
1020+
1021+#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
1022+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1023 { \
1024 unsigned long tmp; \
1025 int result; \
1026 \
1027 prefetchw(&v->counter); \
1028- __asm__ __volatile__("@ atomic_" #op "\n" \
1029+ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
1030 "1: ldrex %0, [%3]\n" \
1031 " " #asm_op " %0, %0, %4\n" \
1032+ post_op \
1033 " strex %1, %0, [%3]\n" \
1034 " teq %1, #0\n" \
1035-" bne 1b" \
1036+" bne 1b\n" \
1037+ extable \
1038 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1039 : "r" (&v->counter), "Ir" (i) \
1040 : "cc"); \
1041 } \
1042
1043-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1044-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
1045+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
1046+ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1047+
1048+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
1049+static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t *v)\
1050 { \
1051 unsigned long tmp; \
1052 int result; \
1053 \
1054 prefetchw(&v->counter); \
1055 \
1056- __asm__ __volatile__("@ atomic_" #op "_return\n" \
1057+ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
1058 "1: ldrex %0, [%3]\n" \
1059 " " #asm_op " %0, %0, %4\n" \
1060+ post_op \
1061 " strex %1, %0, [%3]\n" \
1062 " teq %1, #0\n" \
1063-" bne 1b" \
1064+" bne 1b\n" \
1065+ extable \
1066 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1067 : "r" (&v->counter), "Ir" (i) \
1068 : "cc"); \
1069@@ -78,8 +128,12 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
1070 }
1071
1072 #define atomic_add_return_relaxed atomic_add_return_relaxed
1073+#define atomic_add_return_unchecked atomic_add_return_unchecked_relaxed
1074 #define atomic_sub_return_relaxed atomic_sub_return_relaxed
1075
1076+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
1077+ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1078+
1079 static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
1080 {
1081 int oldval;
1082@@ -113,12 +167,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1083 __asm__ __volatile__ ("@ atomic_add_unless\n"
1084 "1: ldrex %0, [%4]\n"
1085 " teq %0, %5\n"
1086-" beq 2f\n"
1087-" add %1, %0, %6\n"
1088+" beq 4f\n"
1089+" adds %1, %0, %6\n"
1090+
1091+#ifdef CONFIG_PAX_REFCOUNT
1092+" bvc 3f\n"
1093+"2: " REFCOUNT_TRAP_INSN "\n"
1094+"3:\n"
1095+#endif
1096+
1097 " strex %2, %1, [%4]\n"
1098 " teq %2, #0\n"
1099 " bne 1b\n"
1100-"2:"
1101+"4:"
1102+
1103+#ifdef CONFIG_PAX_REFCOUNT
1104+ _ASM_EXTABLE(2b, 4b)
1105+#endif
1106+
1107 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
1108 : "r" (&v->counter), "r" (u), "r" (a)
1109 : "cc");
1110@@ -129,14 +195,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1111 return oldval;
1112 }
1113
1114+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1115+{
1116+ unsigned long oldval, res;
1117+
1118+ smp_mb();
1119+
1120+ do {
1121+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1122+ "ldrex %1, [%3]\n"
1123+ "mov %0, #0\n"
1124+ "teq %1, %4\n"
1125+ "strexeq %0, %5, [%3]\n"
1126+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1127+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1128+ : "cc");
1129+ } while (res);
1130+
1131+ smp_mb();
1132+
1133+ return oldval;
1134+}
1135+
1136 #else /* ARM_ARCH_6 */
1137
1138 #ifdef CONFIG_SMP
1139 #error SMP not supported on pre-ARMv6 CPUs
1140 #endif
1141
1142-#define ATOMIC_OP(op, c_op, asm_op) \
1143-static inline void atomic_##op(int i, atomic_t *v) \
1144+#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
1145+static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
1146 { \
1147 unsigned long flags; \
1148 \
1149@@ -145,8 +233,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
1150 raw_local_irq_restore(flags); \
1151 } \
1152
1153-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
1154-static inline int atomic_##op##_return(int i, atomic_t *v) \
1155+#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
1156+ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
1157+
1158+#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
1159+static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
1160 { \
1161 unsigned long flags; \
1162 int val; \
1163@@ -159,6 +250,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
1164 return val; \
1165 }
1166
1167+#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
1168+ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
1169+
1170 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1171 {
1172 int ret;
1173@@ -173,6 +267,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1174 return ret;
1175 }
1176
1177+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1178+{
1179+ return atomic_cmpxchg((atomic_t *)v, old, new);
1180+}
1181+
1182 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1183 {
1184 int c, old;
1185@@ -201,16 +300,38 @@ ATOMIC_OP(xor, ^=, eor)
1186
1187 #undef ATOMIC_OPS
1188 #undef ATOMIC_OP_RETURN
1189+#undef __ATOMIC_OP_RETURN
1190 #undef ATOMIC_OP
1191+#undef __ATOMIC_OP
1192
1193 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1194+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1195+{
1196+ return xchg_relaxed(&v->counter, new);
1197+}
1198
1199 #define atomic_inc(v) atomic_add(1, v)
1200+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1201+{
1202+ atomic_add_unchecked(1, v);
1203+}
1204 #define atomic_dec(v) atomic_sub(1, v)
1205+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1206+{
1207+ atomic_sub_unchecked(1, v);
1208+}
1209
1210 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1211+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1212+{
1213+ return atomic_add_return_unchecked(1, v) == 0;
1214+}
1215 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1216 #define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v))
1217+static inline int atomic_inc_return_unchecked_relaxed(atomic_unchecked_t *v)
1218+{
1219+ return atomic_add_return_unchecked_relaxed(1, v);
1220+}
1221 #define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v))
1222 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1223
1224@@ -221,6 +342,14 @@ typedef struct {
1225 long long counter;
1226 } atomic64_t;
1227
1228+#ifdef CONFIG_PAX_REFCOUNT
1229+typedef struct {
1230+ long long counter;
1231+} atomic64_unchecked_t;
1232+#else
1233+typedef atomic64_t atomic64_unchecked_t;
1234+#endif
1235+
1236 #define ATOMIC64_INIT(i) { (i) }
1237
1238 #ifdef CONFIG_ARM_LPAE
1239@@ -237,6 +366,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1240 return result;
1241 }
1242
1243+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1244+{
1245+ long long result;
1246+
1247+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1248+" ldrd %0, %H0, [%1]"
1249+ : "=&r" (result)
1250+ : "r" (&v->counter), "Qo" (v->counter)
1251+ );
1252+
1253+ return result;
1254+}
1255+
1256 static inline void atomic64_set(atomic64_t *v, long long i)
1257 {
1258 __asm__ __volatile__("@ atomic64_set\n"
1259@@ -245,6 +387,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1260 : "r" (&v->counter), "r" (i)
1261 );
1262 }
1263+
1264+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1265+{
1266+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1267+" strd %2, %H2, [%1]"
1268+ : "=Qo" (v->counter)
1269+ : "r" (&v->counter), "r" (i)
1270+ );
1271+}
1272 #else
1273 static inline long long atomic64_read(const atomic64_t *v)
1274 {
1275@@ -259,6 +410,19 @@ static inline long long atomic64_read(const atomic64_t *v)
1276 return result;
1277 }
1278
1279+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
1280+{
1281+ long long result;
1282+
1283+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1284+" ldrexd %0, %H0, [%1]"
1285+ : "=&r" (result)
1286+ : "r" (&v->counter), "Qo" (v->counter)
1287+ );
1288+
1289+ return result;
1290+}
1291+
1292 static inline void atomic64_set(atomic64_t *v, long long i)
1293 {
1294 long long tmp;
1295@@ -273,43 +437,73 @@ static inline void atomic64_set(atomic64_t *v, long long i)
1296 : "r" (&v->counter), "r" (i)
1297 : "cc");
1298 }
1299+
1300+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
1301+{
1302+ long long tmp;
1303+
1304+ prefetchw(&v->counter);
1305+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1306+"1: ldrexd %0, %H0, [%2]\n"
1307+" strexd %0, %3, %H3, [%2]\n"
1308+" teq %0, #0\n"
1309+" bne 1b"
1310+ : "=&r" (tmp), "=Qo" (v->counter)
1311+ : "r" (&v->counter), "r" (i)
1312+ : "cc");
1313+}
1314 #endif
1315
1316-#define ATOMIC64_OP(op, op1, op2) \
1317-static inline void atomic64_##op(long long i, atomic64_t *v) \
1318+#undef __OVERFLOW_POST_RETURN
1319+#define __OVERFLOW_POST_RETURN \
1320+ " bvc 3f\n" \
1321+" mov %0, %1\n" \
1322+" mov %H0, %H1\n" \
1323+ "2: " REFCOUNT_TRAP_INSN "\n"\
1324+ "3:\n"
1325+
1326+#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
1327+static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
1328 { \
1329 long long result; \
1330 unsigned long tmp; \
1331 \
1332 prefetchw(&v->counter); \
1333- __asm__ __volatile__("@ atomic64_" #op "\n" \
1334+ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
1335 "1: ldrexd %0, %H0, [%3]\n" \
1336 " " #op1 " %Q0, %Q0, %Q4\n" \
1337 " " #op2 " %R0, %R0, %R4\n" \
1338+ post_op \
1339 " strexd %1, %0, %H0, [%3]\n" \
1340 " teq %1, #0\n" \
1341-" bne 1b" \
1342+" bne 1b\n" \
1343+ extable \
1344 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1345 : "r" (&v->counter), "r" (i) \
1346 : "cc"); \
1347 } \
1348
1349-#define ATOMIC64_OP_RETURN(op, op1, op2) \
1350+#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
1351+ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
1352+
1353+#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
1354 static inline long long \
1355-atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
1356+atomic64_##op##_return##suffix##_relaxed(long long i, atomic64##suffix##_t *v) \
1357 { \
1358 long long result; \
1359 unsigned long tmp; \
1360 \
1361 prefetchw(&v->counter); \
1362 \
1363- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
1364+ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
1365 "1: ldrexd %0, %H0, [%3]\n" \
1366 " " #op1 " %Q0, %Q0, %Q4\n" \
1367 " " #op2 " %R0, %R0, %R4\n" \
1368+ post_op \
1369 " strexd %1, %0, %H0, [%3]\n" \
1370 " teq %1, #0\n" \
1371-" bne 1b" \
1372+" bne 1b\n" \
1373+ extable \
1374 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
1375 : "r" (&v->counter), "r" (i) \
1376 : "cc"); \
1377@@ -317,6 +511,9 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
1378 return result; \
1379 }
1380
1381+#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
1382+ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
1383+
1384 #define ATOMIC64_OPS(op, op1, op2) \
1385 ATOMIC64_OP(op, op1, op2) \
1386 ATOMIC64_OP_RETURN(op, op1, op2)
1387@@ -325,6 +522,7 @@ ATOMIC64_OPS(add, adds, adc)
1388 ATOMIC64_OPS(sub, subs, sbc)
1389
1390 #define atomic64_add_return_relaxed atomic64_add_return_relaxed
1391+#define atomic64_add_return_unchecked atomic64_add_return_unchecked_relaxed
1392 #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
1393
1394 #define atomic64_andnot atomic64_andnot
1395@@ -336,7 +534,12 @@ ATOMIC64_OP(xor, eor, eor)
1396
1397 #undef ATOMIC64_OPS
1398 #undef ATOMIC64_OP_RETURN
1399+#undef __ATOMIC64_OP_RETURN
1400 #undef ATOMIC64_OP
1401+#undef __ATOMIC64_OP
1402+#undef __OVERFLOW_EXTABLE
1403+#undef __OVERFLOW_POST_RETURN
1404+#undef __OVERFLOW_POST
1405
1406 static inline long long
1407 atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
1408@@ -361,6 +564,33 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
1409 return oldval;
1410 }
1411 #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
1412+#define atomic64_cmpxchg_unchecked atomic64_cmpxchg_unchecked_relaxed
1413+
1414+static inline long long
1415+atomic64_cmpxchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long old,
1416+ long long new)
1417+{
1418+ long long oldval;
1419+ unsigned long res;
1420+
1421+ smp_mb();
1422+
1423+ do {
1424+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1425+ "ldrexd %1, %H1, [%3]\n"
1426+ "mov %0, #0\n"
1427+ "teq %1, %4\n"
1428+ "teqeq %H1, %H4\n"
1429+ "strexdeq %0, %5, %H5, [%3]"
1430+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1431+ : "r" (&ptr->counter), "r" (old), "r" (new)
1432+ : "cc");
1433+ } while (res);
1434+
1435+ smp_mb();
1436+
1437+ return oldval;
1438+}
1439
1440 static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
1441 {
1442@@ -385,21 +615,35 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
1443 static inline long long atomic64_dec_if_positive(atomic64_t *v)
1444 {
1445 long long result;
1446- unsigned long tmp;
1447+ u64 tmp;
1448
1449 smp_mb();
1450 prefetchw(&v->counter);
1451
1452 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1453-"1: ldrexd %0, %H0, [%3]\n"
1454-" subs %Q0, %Q0, #1\n"
1455-" sbc %R0, %R0, #0\n"
1456+"1: ldrexd %1, %H1, [%3]\n"
1457+" subs %Q0, %Q1, #1\n"
1458+" sbcs %R0, %R1, #0\n"
1459+
1460+#ifdef CONFIG_PAX_REFCOUNT
1461+" bvc 3f\n"
1462+" mov %Q0, %Q1\n"
1463+" mov %R0, %R1\n"
1464+"2: " REFCOUNT_TRAP_INSN "\n"
1465+"3:\n"
1466+#endif
1467+
1468 " teq %R0, #0\n"
1469-" bmi 2f\n"
1470+" bmi 4f\n"
1471 " strexd %1, %0, %H0, [%3]\n"
1472 " teq %1, #0\n"
1473 " bne 1b\n"
1474-"2:"
1475+"4:\n"
1476+
1477+#ifdef CONFIG_PAX_REFCOUNT
1478+ _ASM_EXTABLE(2b, 4b)
1479+#endif
1480+
1481 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1482 : "r" (&v->counter)
1483 : "cc");
1484@@ -423,13 +667,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1485 " teq %0, %5\n"
1486 " teqeq %H0, %H5\n"
1487 " moveq %1, #0\n"
1488-" beq 2f\n"
1489+" beq 4f\n"
1490 " adds %Q0, %Q0, %Q6\n"
1491-" adc %R0, %R0, %R6\n"
1492+" adcs %R0, %R0, %R6\n"
1493+
1494+#ifdef CONFIG_PAX_REFCOUNT
1495+" bvc 3f\n"
1496+"2: " REFCOUNT_TRAP_INSN "\n"
1497+"3:\n"
1498+#endif
1499+
1500 " strexd %2, %0, %H0, [%4]\n"
1501 " teq %2, #0\n"
1502 " bne 1b\n"
1503-"2:"
1504+"4:\n"
1505+
1506+#ifdef CONFIG_PAX_REFCOUNT
1507+ _ASM_EXTABLE(2b, 4b)
1508+#endif
1509+
1510 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1511 : "r" (&v->counter), "r" (u), "r" (a)
1512 : "cc");
1513@@ -442,10 +698,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
1514
1515 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1516 #define atomic64_inc(v) atomic64_add(1LL, (v))
1517+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1518 #define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
1519+#define atomic64_inc_return_unchecked_relaxed(v) atomic64_add_return_unchecked_relaxed(1LL, (v))
1520 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1521 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1522 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1523+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1524 #define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
1525 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1526 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
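
The PAX_REFCOUNT machinery threaded through this file swaps the plain add/adc
for the flag-setting adds/adcs and branches to a trapping bkpt when the V
(signed overflow) flag is set, so a reference count can never silently wrap.
Stripped of the ldrex/strex retry loop, the check amounts to this portable
sketch (a hypothetical helper relying on the GCC/clang __builtin_add_overflow
intrinsic, not code from the patch):

	/* Non-atomic sketch; the real code runs the equivalent inside an
	 * ldrex/strex loop and traps instead of returning an error. */
	static inline int checked_add(int i, int *counter)
	{
		int newval;

		if (__builtin_add_overflow(*counter, i, &newval))
			return -1;	/* the asm branches to REFCOUNT_TRAP_INSN here */
		*counter = newval;	/* the asm publishes this with strex */
		return 0;
	}
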
1527diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1528index 75fe66b..2255c86 100644
1529--- a/arch/arm/include/asm/cache.h
1530+++ b/arch/arm/include/asm/cache.h
1531@@ -4,8 +4,10 @@
1532 #ifndef __ASMARM_CACHE_H
1533 #define __ASMARM_CACHE_H
1534
1535+#include <linux/const.h>
1536+
1537 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1538-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1539+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1540
1541 /*
1542 * Memory returned by kmalloc() may be used for DMA, so we must make
1543diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1544index 9156fc3..9791d17 100644
1545--- a/arch/arm/include/asm/cacheflush.h
1546+++ b/arch/arm/include/asm/cacheflush.h
1547@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1548 void (*dma_unmap_area)(const void *, size_t, int);
1549
1550 void (*dma_flush_range)(const void *, const void *);
1551-};
1552+} __no_const;
1553
1554 /*
1555 * Select the calling method
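
The __no_const annotation added above is consumed by the PaX constify GCC
plugin, which (per its documented behavior) makes structures consisting of
function pointers const by default; __no_const opts a structure out because
its members are assigned at runtime, e.g. during CPU probing. Modeled without
the plugin, the markers reduce to empty attributes:

	/* Sketch of the markers when the constify plugin is absent. */
	#define __do_const	/* ops table forced read-only after init */
	#define __no_const	/* left writable: members are set at runtime */

	struct example_cache_fns {
		void (*flush_range)(const void *start, const void *end);
	} __no_const;	/* filled in during CPU probe, so it must stay writable */
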
1556diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1557index 524692f..a8871ec 100644
1558--- a/arch/arm/include/asm/checksum.h
1559+++ b/arch/arm/include/asm/checksum.h
1560@@ -37,7 +37,19 @@ __wsum
1561 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1562
1563 __wsum
1564-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1565+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1566+
1567+static inline __wsum
1568+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1569+{
1570+ __wsum ret;
1571+ pax_open_userland();
1572+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1573+ pax_close_userland();
1574+ return ret;
1575+}
1576+
1577+
1578
1579 /*
1580 * Fold a partial checksum without adding pseudo headers
1581diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1582index 97882f9..0cc6ef1 100644
1583--- a/arch/arm/include/asm/cmpxchg.h
1584+++ b/arch/arm/include/asm/cmpxchg.h
1585@@ -117,6 +117,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1586 (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
1587 sizeof(*(ptr))); \
1588 })
1589+#define xchg_unchecked(ptr, x) ({ \
1590+ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
1591+ sizeof(*(ptr))); \
1592+})
1593
1594 #include <asm-generic/cmpxchg-local.h>
1595
1596diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
1597index 3848259..bee9d84 100644
1598--- a/arch/arm/include/asm/cpuidle.h
1599+++ b/arch/arm/include/asm/cpuidle.h
1600@@ -32,7 +32,7 @@ struct device_node;
1601 struct cpuidle_ops {
1602 int (*suspend)(unsigned long arg);
1603 int (*init)(struct device_node *, int cpu);
1604-};
1605+} __no_const;
1606
1607 struct of_cpuidle_method {
1608 const char *method;
1609diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1610index 99d9f63..e3e4da6 100644
1611--- a/arch/arm/include/asm/domain.h
1612+++ b/arch/arm/include/asm/domain.h
1613@@ -42,7 +42,6 @@
1614 #define DOMAIN_USER 1
1615 #define DOMAIN_IO 0
1616 #endif
1617-#define DOMAIN_VECTORS 3
1618
1619 /*
1620 * Domain types
1621@@ -51,9 +50,27 @@
1622 #define DOMAIN_CLIENT 1
1623 #ifdef CONFIG_CPU_USE_DOMAINS
1624 #define DOMAIN_MANAGER 3
1625+#define DOMAIN_VECTORS 3
1626 #else
1627+
1628+#ifdef CONFIG_PAX_KERNEXEC
1629 #define DOMAIN_MANAGER 1
1630+#define DOMAIN_KERNEXEC 3
1631+#else
1632+#define DOMAIN_MANAGER 1
1633+#endif
1634+
1635+#ifdef CONFIG_PAX_MEMORY_UDEREF
1636+#define DOMAIN_USERCLIENT 0
1637+#define DOMAIN_UDEREF 1
1638+#define DOMAIN_VECTORS DOMAIN_KERNEL
1639+#else
1640+#define DOMAIN_USERCLIENT 1
1641+#define DOMAIN_VECTORS DOMAIN_USER
1642+#endif
1643+
1644 #endif
1645+#define DOMAIN_KERNELCLIENT 1
1646
1647 #define domain_mask(dom) ((3) << (2 * (dom)))
1648 #define domain_val(dom,type) ((type) << (2 * (dom)))
1649@@ -62,13 +79,19 @@
1650 #define DACR_INIT \
1651 (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
1652 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
1653- domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
1654+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
1655 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
1656+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
1657+ /* DOMAIN_VECTORS is defined to DOMAIN_KERNEL */
1658+#define DACR_INIT \
1659+ (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
1660+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
1661+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
1662 #else
1663 #define DACR_INIT \
1664- (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
1665+ (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
1666 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
1667- domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
1668+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
1669 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
1670 #endif
1671
1672@@ -124,6 +147,17 @@ static inline void set_domain(unsigned val)
1673 set_domain(domain); \
1674 } while (0)
1675
1676+#elif defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1677+#define modify_domain(dom,type) \
1678+ do { \
1679+ struct thread_info *thread = current_thread_info(); \
1680+ unsigned int domain = get_domain(); \
1681+ domain &= ~domain_mask(dom); \
1682+ domain = domain | domain_val(dom, type); \
1683+ thread->cpu_domain = domain; \
1684+ set_domain(domain); \
1685+ } while (0)
1686+
1687 #else
1688 static inline void modify_domain(unsigned dom, unsigned type) { }
1689 #endif
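
Since domain_mask() and domain_val() above are just 2-bit shifts, a DACR value
can be computed by hand. A small worked example (the domain numbers are
illustrative; the patch remaps several of them depending on PAX_KERNEXEC and
PAX_MEMORY_UDEREF):

	#include <stdio.h>

	#define domain_val(dom, type)	((unsigned)(type) << (2 * (dom)))

	enum { DOMAIN_IO = 0, DOMAIN_USER = 1, DOMAIN_KERNEL = 2 };	/* illustrative */
	enum { DOMAIN_NOACCESS = 0, DOMAIN_CLIENT = 1, DOMAIN_MANAGER = 3 };

	int main(void)
	{
		unsigned dacr = domain_val(DOMAIN_USER, DOMAIN_NOACCESS) |
				domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
				domain_val(DOMAIN_IO, DOMAIN_CLIENT);

		printf("DACR = 0x%08x\n", dacr);	/* 0x31: kernel=manager, IO=client */
		return 0;
	}
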
1690diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1691index d2315ff..f60b47b 100644
1692--- a/arch/arm/include/asm/elf.h
1693+++ b/arch/arm/include/asm/elf.h
1694@@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1695 the loader. We need to make sure that it is out of the way of the program
1696 that it will "exec", and that there is sufficient room for the brk. */
1697
1698-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1699+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1700+
1701+#ifdef CONFIG_PAX_ASLR
1702+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1703+
1704+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1705+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1706+#endif
1707
1708 /* When the program starts, a1 contains a pointer to a function to be
1709 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1710diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1711index de53547..52b9a28 100644
1712--- a/arch/arm/include/asm/fncpy.h
1713+++ b/arch/arm/include/asm/fncpy.h
1714@@ -81,7 +81,9 @@
1715 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1716 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1717 \
1718+ pax_open_kernel(); \
1719 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1720+ pax_close_kernel(); \
1721 flush_icache_range((unsigned long)(dest_buf), \
1722 (unsigned long)(dest_buf) + (size)); \
1723 \
1724diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1725index 6795368..6c4d749 100644
1726--- a/arch/arm/include/asm/futex.h
1727+++ b/arch/arm/include/asm/futex.h
1728@@ -107,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1729 return -EFAULT;
1730
1731 preempt_disable();
1732+
1733 __ua_flags = uaccess_save_and_enable();
1734 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1735 "1: " TUSER(ldr) " %1, [%4]\n"
1736diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1737index 83eb2f7..ed77159 100644
1738--- a/arch/arm/include/asm/kmap_types.h
1739+++ b/arch/arm/include/asm/kmap_types.h
1740@@ -4,6 +4,6 @@
1741 /*
1742 * This is the "bare minimum". AIO seems to require this.
1743 */
1744-#define KM_TYPE_NR 16
1745+#define KM_TYPE_NR 17
1746
1747 #endif
1748diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1749index 9e614a1..3302cca 100644
1750--- a/arch/arm/include/asm/mach/dma.h
1751+++ b/arch/arm/include/asm/mach/dma.h
1752@@ -22,7 +22,7 @@ struct dma_ops {
1753 int (*residue)(unsigned int, dma_t *); /* optional */
1754 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1755 const char *type;
1756-};
1757+} __do_const;
1758
1759 struct dma_struct {
1760 void *addr; /* single DMA address */
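
__do_const comes from the PaX constify gcc plugin; as best understood, it forces a function-pointer-only structure to be treated as const and placed in read-only memory, while __no_const (applied to outer_cache_fns, cpu_user_fns and smp_operations below) opts out structures that are legitimately written at run time, e.g. filled in during boot. A self-contained illustration, with the attribute stubbed out since the real semantics live in the plugin:

	/* Stub only: the actual enforcement is done by the constify plugin. */
	#ifndef __do_const
	#define __do_const
	#endif

	struct example_ops {
		int (*start)(void *ctx);
		int (*stop)(void *ctx);
	} __do_const;	/* plugin: instances become effectively const / read-only */
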
1761diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1762index 9b7c328..2dfe68b 100644
1763--- a/arch/arm/include/asm/mach/map.h
1764+++ b/arch/arm/include/asm/mach/map.h
1765@@ -23,17 +23,19 @@ struct map_desc {
1766
1767 /* types 0-3 are defined in asm/io.h */
1768 enum {
1769- MT_UNCACHED = 4,
1770- MT_CACHECLEAN,
1771- MT_MINICLEAN,
1772+ MT_UNCACHED_RW = 4,
1773+ MT_CACHECLEAN_RO,
1774+ MT_MINICLEAN_RO,
1775 MT_LOW_VECTORS,
1776 MT_HIGH_VECTORS,
1777- MT_MEMORY_RWX,
1778+ __MT_MEMORY_RWX,
1779 MT_MEMORY_RW,
1780- MT_ROM,
1781- MT_MEMORY_RWX_NONCACHED,
1782+ MT_MEMORY_RX,
1783+ MT_ROM_RX,
1784+ MT_MEMORY_RW_NONCACHED,
1785+ MT_MEMORY_RX_NONCACHED,
1786 MT_MEMORY_RW_DTCM,
1787- MT_MEMORY_RWX_ITCM,
1788+ MT_MEMORY_RX_ITCM,
1789 MT_MEMORY_RW_SO,
1790 MT_MEMORY_DMA_READY,
1791 };
1792diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1793index c2bf24f..69e437c 100644
1794--- a/arch/arm/include/asm/outercache.h
1795+++ b/arch/arm/include/asm/outercache.h
1796@@ -39,7 +39,7 @@ struct outer_cache_fns {
1797 /* This is an ARM L2C thing */
1798 void (*write_sec)(unsigned long, unsigned);
1799 void (*configure)(const struct l2x0_regs *);
1800-};
1801+} __no_const;
1802
1803 extern struct outer_cache_fns outer_cache;
1804
1805diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1806index 4355f0e..cd9168e 100644
1807--- a/arch/arm/include/asm/page.h
1808+++ b/arch/arm/include/asm/page.h
1809@@ -23,6 +23,7 @@
1810
1811 #else
1812
1813+#include <linux/compiler.h>
1814 #include <asm/glue.h>
1815
1816 /*
1817@@ -114,7 +115,7 @@ struct cpu_user_fns {
1818 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1819 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1820 unsigned long vaddr, struct vm_area_struct *vma);
1821-};
1822+} __no_const;
1823
1824 #ifdef MULTI_USER
1825 extern struct cpu_user_fns cpu_user;
1826diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1827index 19cfab5..3f5c7e9 100644
1828--- a/arch/arm/include/asm/pgalloc.h
1829+++ b/arch/arm/include/asm/pgalloc.h
1830@@ -17,6 +17,7 @@
1831 #include <asm/processor.h>
1832 #include <asm/cacheflush.h>
1833 #include <asm/tlbflush.h>
1834+#include <asm/system_info.h>
1835
1836 #define check_pgt_cache() do { } while (0)
1837
1838@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1839 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1840 }
1841
1842+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1843+{
1844+ pud_populate(mm, pud, pmd);
1845+}
1846+
1847 #else /* !CONFIG_ARM_LPAE */
1848
1849 /*
1850@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1851 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1852 #define pmd_free(mm, pmd) do { } while (0)
1853 #define pud_populate(mm,pmd,pte) BUG()
1854+#define pud_populate_kernel(mm,pmd,pte) BUG()
1855
1856 #endif /* CONFIG_ARM_LPAE */
1857
1858@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1859 __free_page(pte);
1860 }
1861
1862+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1863+{
1864+#ifdef CONFIG_ARM_LPAE
1865+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1866+#else
1867+ if (addr & SECTION_SIZE)
1868+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1869+ else
1870+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1871+#endif
1872+ flush_pmd_entry(pmdp);
1873+}
1874+
1875 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1876 pmdval_t prot)
1877 {
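
A note on __section_update() above: on classic (non-LPAE) ARM, Linux manages the first-level table in pairs of 1 MiB hardware sections, so one pmd_t[2] spans 2 MiB and `addr & SECTION_SIZE` selects which half to touch. A hedged usage sketch, modelled on how later hunks in this patch apply it (marking a section non-executable with PMD_SECT_XN):

	/* Sketch, assuming the non-LPAE layout described above. */
	static void make_section_nx(unsigned long addr)
	{
		pgd_t *pgd = pgd_offset_k(addr);
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);

		/* addr & SECTION_SIZE picks pmdp[1] (odd MiB) over pmdp[0] */
		__section_update(pmd, addr, PMD_SECT_XN);
	}
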
1878diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1879index d0131ee..23a0939 100644
1880--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1881+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1882@@ -28,7 +28,7 @@
1883 /*
1884 * - section
1885 */
1886-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1887+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1888 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1889 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1890 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1891@@ -40,6 +40,7 @@
1892 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1893 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1894 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1895+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1896
1897 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1898 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1899@@ -69,6 +70,7 @@
1900 * - extended small page/tiny page
1901 */
1902 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1903+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1904 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1905 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1906 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1907diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1908index aeddd28..207745c 100644
1909--- a/arch/arm/include/asm/pgtable-2level.h
1910+++ b/arch/arm/include/asm/pgtable-2level.h
1911@@ -127,6 +127,9 @@
1912 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1913 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1914
1915+/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1916+#define L_PTE_PXN (_AT(pteval_t, 0))
1917+
1918 /*
1919 * These are the memory types, defined to be compatible with
1920 * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
1921diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1922index dc46398..70dab92 100644
1923--- a/arch/arm/include/asm/pgtable-3level.h
1924+++ b/arch/arm/include/asm/pgtable-3level.h
1925@@ -80,6 +80,7 @@
1926 #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
1927 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1928 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1929+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1930 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1931 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
1932 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
1933@@ -90,10 +91,12 @@
1934 #define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
1935 #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
1936 #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
1937+#define PMD_SECT_RDONLY PMD_SECT_AP2
1938
1939 /*
1940 * To be used in assembly code with the upper page attributes.
1941 */
1942+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1943 #define L_PTE_XN_HIGH (1 << (54 - 32))
1944 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1945
1946diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1947index 348caab..306b62d 100644
1948--- a/arch/arm/include/asm/pgtable.h
1949+++ b/arch/arm/include/asm/pgtable.h
1950@@ -33,6 +33,9 @@
1951 #include <asm/pgtable-2level.h>
1952 #endif
1953
1954+#define ktla_ktva(addr) (addr)
1955+#define ktva_ktla(addr) (addr)
1956+
1957 /*
1958 * Just any arbitrary offset to the start of the vmalloc VM area: the
1959 * current 8MB value just means that there will be a 8MB "hole" after the
1960@@ -48,6 +51,9 @@
1961 #define LIBRARY_TEXT_START 0x0c000000
1962
1963 #ifndef __ASSEMBLY__
1964+extern pteval_t __supported_pte_mask;
1965+extern pmdval_t __supported_pmd_mask;
1966+
1967 extern void __pte_error(const char *file, int line, pte_t);
1968 extern void __pmd_error(const char *file, int line, pmd_t);
1969 extern void __pgd_error(const char *file, int line, pgd_t);
1970@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1971 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1972 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1973
1974+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1975+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
1976+
1977+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1978+#include <asm/domain.h>
1979+#include <linux/thread_info.h>
1980+#include <linux/preempt.h>
1981+
1982+static inline int test_domain(int domain, int domaintype)
1983+{
1984+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
1985+}
1986+#endif
1987+
1988+#ifdef CONFIG_PAX_KERNEXEC
1989+static inline unsigned long pax_open_kernel(void) {
1990+#ifdef CONFIG_ARM_LPAE
1991+ /* TODO */
1992+#else
1993+ preempt_disable();
1994+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
1995+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
1996+#endif
1997+ return 0;
1998+}
1999+
2000+static inline unsigned long pax_close_kernel(void) {
2001+#ifdef CONFIG_ARM_LPAE
2002+ /* TODO */
2003+#else
2004+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2005+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2006+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2007+ preempt_enable_no_resched();
2008+#endif
2009+ return 0;
2010+}
2011+#else
2012+static inline unsigned long pax_open_kernel(void) { return 0; }
2013+static inline unsigned long pax_close_kernel(void) { return 0; }
2014+#endif
2015+
2016 /*
2017 * This is the lowest virtual address we can permit any user space
2018 * mapping to be mapped at. This is particularly important for
2019@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2020 /*
2021 * The pgprot_* and protection_map entries will be fixed up in runtime
2022 * to include the cachable and bufferable bits based on memory policy,
2023- * as well as any architecture dependent bits like global/ASID and SMP
2024- * shared mapping bits.
2025+ * as well as any architecture dependent bits like global/ASID, PXN,
2026+ * and SMP shared mapping bits.
2027 */
2028 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2029
2030@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
2031 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2032 {
2033 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2034- L_PTE_NONE | L_PTE_VALID;
2035+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2036 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2037 return pte;
2038 }
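
The pax_open_kernel()/pax_close_kernel() pair defined above is the calling convention used throughout this patch (fncpy.h and tls.h earlier are two callers): open disables preemption and switches DOMAIN_KERNEL so a write to normally read-only kernel memory is permitted, close restores the protection. A minimal sketch; ro_addr is a hypothetical target that is read-only under KERNEXEC:

	static void patch_kernel_word(unsigned int *ro_addr, unsigned int val)
	{
		pax_open_kernel();	/* preempt off, DOMAIN_KERNEL made writable */
		*ro_addr = val;
		pax_close_kernel();	/* protection restored, preempt re-enabled  */
	}
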
2039diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2040index 3d6dc8b..1262ad3 100644
2041--- a/arch/arm/include/asm/smp.h
2042+++ b/arch/arm/include/asm/smp.h
2043@@ -108,7 +108,7 @@ struct smp_operations {
2044 int (*cpu_disable)(unsigned int cpu);
2045 #endif
2046 #endif
2047-};
2048+} __no_const;
2049
2050 struct of_cpu_method {
2051 const char *method;
2052diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
2053index cf4f3aa..3f68273 100644
2054--- a/arch/arm/include/asm/string.h
2055+++ b/arch/arm/include/asm/string.h
2056@@ -7,19 +7,19 @@
2057 */
2058
2059 #define __HAVE_ARCH_STRRCHR
2060-extern char * strrchr(const char * s, int c);
2061+extern char * strrchr(const char * s, int c) __nocapture(1);
2062
2063 #define __HAVE_ARCH_STRCHR
2064-extern char * strchr(const char * s, int c);
2065+extern char * strchr(const char * s, int c) __nocapture(1);
2066
2067 #define __HAVE_ARCH_MEMCPY
2068-extern void * memcpy(void *, const void *, __kernel_size_t);
2069+extern void * memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
2070
2071 #define __HAVE_ARCH_MEMMOVE
2072-extern void * memmove(void *, const void *, __kernel_size_t);
2073+extern void * memmove(void *, const void *, __kernel_size_t) __nocapture(2);
2074
2075 #define __HAVE_ARCH_MEMCHR
2076-extern void * memchr(const void *, int, __kernel_size_t);
2077+extern void * memchr(const void *, int, __kernel_size_t) __nocapture(1);
2078
2079 #define __HAVE_ARCH_MEMSET
2080 extern void * memset(void *, int, __kernel_size_t);
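
__nocapture(n) appears to be a PaX plugin annotation asserting that the n-th pointer argument is only dereferenced for the duration of the call and no pointer derived from it survives the call, which lets the plugins prove, for instance, that pointers to __init data passed to these helpers never outlive initialisation. Without the plugin it would expand to nothing, roughly:

	/* Hedged stub: the real checking is performed by the gcc plugin. */
	#ifndef __nocapture
	#define __nocapture(...)
	#endif
	/* memcpy's source (argument 2) is read during the call, never stored: */
	extern void *memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
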
2081diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2082index 776757d..a552c1d 100644
2083--- a/arch/arm/include/asm/thread_info.h
2084+++ b/arch/arm/include/asm/thread_info.h
2085@@ -73,6 +73,9 @@ struct thread_info {
2086 .flags = 0, \
2087 .preempt_count = INIT_PREEMPT_COUNT, \
2088 .addr_limit = KERNEL_DS, \
2089+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2090+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2091+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2092 }
2093
2094 #define init_thread_info (init_thread_union.thread_info)
2095@@ -143,6 +146,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2096 #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
2097 #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
2098 #define TIF_SECCOMP 7 /* seccomp syscall filtering active */
2099+/* within 8 bits of TIF_SYSCALL_TRACE
2100+ * to meet flexible second operand requirements
2101+ */
2102+#define TIF_GRSEC_SETXID 8
2103
2104 #define TIF_NOHZ 12 /* in adaptive nohz mode */
2105 #define TIF_USING_IWMMXT 17
2106@@ -158,10 +165,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2107 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2108 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2109 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2110+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2111
2112 /* Checks for any syscall work in entry-common.S */
2113 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2114- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2115+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2116
2117 /*
2118 * Change these and you break ASM code in entry-common.S
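
The "within 8 bits ... flexible second operand" comment above refers to ARM immediate encoding: a data-processing immediate is an 8-bit value rotated right by an even amount, so a single `tst` against _TIF_SYSCALL_WORK only assembles if every flag fits one such window. With TIF_SYSCALL_TRACE at bit 4, as in this kernel, the arithmetic works out:

	/* Worked check of the encoding (flag positions from the hunk above):
	 * bits 4..8 set: TRACE, AUDIT, TRACEPOINT, SECCOMP, GRSEC_SETXID */
	#define SYSCALL_WORK_MASK ((1U << 4) | (1U << 5) | (1U << 6) | \
				   (1U << 7) | (1U << 8))	/* = 0x1F0 */
	/* 0x1F0 = 0x1F rotated right by 28: imm8 = 0x1F fits in 8 bits and the
	 * rotation is even, so "tst rX, #0x1F0" encodes in one instruction.
	 * A flag above bit 11 would fall outside the window and cost another. */
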
2119diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
2120index 5f833f7..76e6644 100644
2121--- a/arch/arm/include/asm/tls.h
2122+++ b/arch/arm/include/asm/tls.h
2123@@ -3,6 +3,7 @@
2124
2125 #include <linux/compiler.h>
2126 #include <asm/thread_info.h>
2127+#include <asm/pgtable.h>
2128
2129 #ifdef __ASSEMBLY__
2130 #include <asm/asm-offsets.h>
2131@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
2132 * at 0xffff0fe0 must be used instead. (see
2133 * entry-armv.S for details)
2134 */
2135+ pax_open_kernel();
2136 *((unsigned int *)0xffff0ff0) = val;
2137+ pax_close_kernel();
2138 #endif
2139 }
2140
2141diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2142index 35c9db8..400e490 100644
2143--- a/arch/arm/include/asm/uaccess.h
2144+++ b/arch/arm/include/asm/uaccess.h
2145@@ -18,6 +18,7 @@
2146 #include <asm/domain.h>
2147 #include <asm/unified.h>
2148 #include <asm/compiler.h>
2149+#include <asm/pgtable.h>
2150
2151 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2152 #include <asm-generic/uaccess-unaligned.h>
2153@@ -50,6 +51,59 @@ struct exception_table_entry
2154 extern int fixup_exception(struct pt_regs *regs);
2155
2156 /*
2157+ * These two are intentionally not defined anywhere - if the kernel
2158+ * code generates any references to them, that's a bug.
2159+ */
2160+extern int __get_user_bad(void);
2161+extern int __put_user_bad(void);
2162+
2163+/*
2164+ * Note that this is actually 0x1,0000,0000
2165+ */
2166+#define KERNEL_DS 0x00000000
2167+#define get_ds() (KERNEL_DS)
2168+
2169+#ifdef CONFIG_MMU
2170+
2171+#define USER_DS TASK_SIZE
2172+#define get_fs() (current_thread_info()->addr_limit)
2173+
2174+static inline void set_fs(mm_segment_t fs)
2175+{
2176+ current_thread_info()->addr_limit = fs;
2177+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2178+}
2179+
2180+#define segment_eq(a, b) ((a) == (b))
2181+
2182+#define __HAVE_ARCH_PAX_OPEN_USERLAND
2183+#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2184+
2185+static inline void pax_open_userland(void)
2186+{
2187+
2188+#ifdef CONFIG_PAX_MEMORY_UDEREF
2189+ if (segment_eq(get_fs(), USER_DS)) {
2190+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2191+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2192+ }
2193+#endif
2194+
2195+}
2196+
2197+static inline void pax_close_userland(void)
2198+{
2199+
2200+#ifdef CONFIG_PAX_MEMORY_UDEREF
2201+ if (segment_eq(get_fs(), USER_DS)) {
2202+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2203+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2204+ }
2205+#endif
2206+
2207+}
2208+
2209+/*
2210 * These two functions allow hooking accesses to userspace to increase
2211 * system integrity by ensuring that the kernel can not inadvertantly
2212 * perform such accesses (eg, via list poison values) which could then
2213@@ -66,6 +120,7 @@ static inline unsigned int uaccess_save_and_enable(void)
2214
2215 return old_domain;
2216 #else
2217+ pax_open_userland();
2218 return 0;
2219 #endif
2220 }
2221@@ -75,35 +130,11 @@ static inline void uaccess_restore(unsigned int flags)
2222 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
2223 /* Restore the user access mask */
2224 set_domain(flags);
2225+#else
2226+ pax_close_userland();
2227 #endif
2228 }
2229
2230-/*
2231- * These two are intentionally not defined anywhere - if the kernel
2232- * code generates any references to them, that's a bug.
2233- */
2234-extern int __get_user_bad(void);
2235-extern int __put_user_bad(void);
2236-
2237-/*
2238- * Note that this is actually 0x1,0000,0000
2239- */
2240-#define KERNEL_DS 0x00000000
2241-#define get_ds() (KERNEL_DS)
2242-
2243-#ifdef CONFIG_MMU
2244-
2245-#define USER_DS TASK_SIZE
2246-#define get_fs() (current_thread_info()->addr_limit)
2247-
2248-static inline void set_fs(mm_segment_t fs)
2249-{
2250- current_thread_info()->addr_limit = fs;
2251- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2252-}
2253-
2254-#define segment_eq(a, b) ((a) == (b))
2255-
2256 #define __addr_ok(addr) ({ \
2257 unsigned long flag; \
2258 __asm__("cmp %2, %0; movlo %0, #0" \
2259@@ -302,6 +333,7 @@ static inline void set_fs(mm_segment_t fs)
2260
2261 #endif /* CONFIG_MMU */
2262
2263+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
2264 #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
2265
2266 #define user_addr_max() \
2267@@ -490,39 +522,46 @@ do { \
2268
2269
2270 #ifdef CONFIG_MMU
2271-extern unsigned long __must_check
2272+extern unsigned long __must_check __size_overflow(3)
2273 arm_copy_from_user(void *to, const void __user *from, unsigned long n);
2274
2275-static inline unsigned long __must_check
2276+static inline unsigned long __must_check __size_overflow(3)
2277 __copy_from_user(void *to, const void __user *from, unsigned long n)
2278 {
2279- unsigned int __ua_flags = uaccess_save_and_enable();
2280+ unsigned int __ua_flags;
2281+
2282+ check_object_size(to, n, false);
2283+ __ua_flags = uaccess_save_and_enable();
2284 n = arm_copy_from_user(to, from, n);
2285 uaccess_restore(__ua_flags);
2286 return n;
2287 }
2288
2289-extern unsigned long __must_check
2290+extern unsigned long __must_check __size_overflow(3)
2291 arm_copy_to_user(void __user *to, const void *from, unsigned long n);
2292-extern unsigned long __must_check
2293+extern unsigned long __must_check __size_overflow(3)
2294 __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2295
2296 static inline unsigned long __must_check
2297 __copy_to_user(void __user *to, const void *from, unsigned long n)
2298 {
2299 #ifndef CONFIG_UACCESS_WITH_MEMCPY
2300- unsigned int __ua_flags = uaccess_save_and_enable();
2301+ unsigned int __ua_flags;
2302+
2303+ check_object_size(from, n, true);
2304+ __ua_flags = uaccess_save_and_enable();
2305 n = arm_copy_to_user(to, from, n);
2306 uaccess_restore(__ua_flags);
2307 return n;
2308 #else
2309+ check_object_size(from, n, true);
2310 return arm_copy_to_user(to, from, n);
2311 #endif
2312 }
2313
2314-extern unsigned long __must_check
2315+extern unsigned long __must_check __size_overflow(2)
2316 arm_clear_user(void __user *addr, unsigned long n);
2317-extern unsigned long __must_check
2318+extern unsigned long __must_check __size_overflow(2)
2319 __clear_user_std(void __user *addr, unsigned long n);
2320
2321 static inline unsigned long __must_check
2322@@ -542,6 +581,9 @@ __clear_user(void __user *addr, unsigned long n)
2323
2324 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2325 {
2326+ if ((long)n < 0)
2327+ return n;
2328+
2329 if (access_ok(VERIFY_READ, from, n))
2330 n = __copy_from_user(to, from, n);
2331 else /* security hole - plug it */
2332@@ -551,6 +593,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2333
2334 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2335 {
2336+ if ((long)n < 0)
2337+ return n;
2338+
2339 if (access_ok(VERIFY_WRITE, to, n))
2340 n = __copy_to_user(to, from, n);
2341 return n;
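
The added `(long)n < 0` guards reject lengths with the top bit set before any copying is attempted: a length computed as `end - start` that underflows becomes a huge unsigned value, and returning it unchanged makes the copy report that all n bytes failed. An illustrative caller (names hypothetical):

	static unsigned long read_range(void *buf, const char __user *start,
					const char __user *end)
	{
		unsigned long n = end - start;	/* underflows if end < start */

		/* (long)n < 0 inside copy_from_user() catches the underflow
		 * and returns n ("n bytes not copied") instead of copying
		 * nearly 4 GiB. */
		return copy_from_user(buf, start, n);
	}
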
2342diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2343index 5af0ed1..cea83883 100644
2344--- a/arch/arm/include/uapi/asm/ptrace.h
2345+++ b/arch/arm/include/uapi/asm/ptrace.h
2346@@ -92,7 +92,7 @@
2347 * ARMv7 groups of PSR bits
2348 */
2349 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2350-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2351+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2352 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2353 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2354
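
The PSR_ISET_MASK correction above is pure bit arithmetic on the CPSR layout:

	/* CPSR instruction-set state bits:
	 *   J (Jazelle) = bit 24 -> 0x01000000
	 *   T (Thumb)   = bit  5 -> 0x00000020
	 *   J | T                -> 0x01000020
	 * The old mask, 0x01000010, set bit 4 instead of bit 5 -- that is
	 * M[4] of the processor-mode field, not the Thumb state bit. */
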
2355diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2356index 7e45f69..2c047db 100644
2357--- a/arch/arm/kernel/armksyms.c
2358+++ b/arch/arm/kernel/armksyms.c
2359@@ -59,7 +59,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2360
2361 /* networking */
2362 EXPORT_SYMBOL(csum_partial);
2363-EXPORT_SYMBOL(csum_partial_copy_from_user);
2364+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2365 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2366 EXPORT_SYMBOL(__csum_ipv6_magic);
2367
2368diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
2369index 703926e..39aa432 100644
2370--- a/arch/arm/kernel/cpuidle.c
2371+++ b/arch/arm/kernel/cpuidle.c
2372@@ -19,7 +19,7 @@ extern struct of_cpuidle_method __cpuidle_method_of_table[];
2373 static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
2374 __used __section(__cpuidle_method_of_table_end);
2375
2376-static struct cpuidle_ops cpuidle_ops[NR_CPUS];
2377+static struct cpuidle_ops cpuidle_ops[NR_CPUS] __read_only;
2378
2379 /**
2380 * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
2381diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2382index e255050..51e1b59 100644
2383--- a/arch/arm/kernel/entry-armv.S
2384+++ b/arch/arm/kernel/entry-armv.S
2385@@ -50,6 +50,87 @@
2386 9997:
2387 .endm
2388
2389+ .macro pax_enter_kernel
2390+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2391+ @ make aligned space for saved DACR
2392+ sub sp, sp, #8
2393+ @ save regs
2394+ stmdb sp!, {r1, r2}
2395+ @ read DACR from cpu_domain into r1
2396+ mov r2, sp
2397+ @ assume 8K pages, since we have to split the immediate in two
2398+ bic r2, r2, #(0x1fc0)
2399+ bic r2, r2, #(0x3f)
2400+ ldr r1, [r2, #TI_CPU_DOMAIN]
2401+ @ store old DACR on stack
2402+ str r1, [sp, #8]
2403+#ifdef CONFIG_PAX_KERNEXEC
2404+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2405+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2406+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2407+#endif
2408+#ifdef CONFIG_PAX_MEMORY_UDEREF
2409+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2410+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2411+#endif
2412+ @ write r1 to current_thread_info()->cpu_domain
2413+ str r1, [r2, #TI_CPU_DOMAIN]
2414+ @ write r1 to DACR
2415+ mcr p15, 0, r1, c3, c0, 0
2416+ @ instruction sync
2417+ instr_sync
2418+ @ restore regs
2419+ ldmia sp!, {r1, r2}
2420+#endif
2421+ .endm
2422+
2423+ .macro pax_open_userland
2424+#ifdef CONFIG_PAX_MEMORY_UDEREF
2425+ @ save regs
2426+ stmdb sp!, {r0, r1}
2427+ @ read DACR from cpu_domain into r1
2428+ mov r0, sp
2429+ @ assume 8K pages, since we have to split the immediate in two
2430+ bic r0, r0, #(0x1fc0)
2431+ bic r0, r0, #(0x3f)
2432+ ldr r1, [r0, #TI_CPU_DOMAIN]
2433+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2434+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2435+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2436+ @ write r1 to current_thread_info()->cpu_domain
2437+ str r1, [r0, #TI_CPU_DOMAIN]
2438+ @ write r1 to DACR
2439+ mcr p15, 0, r1, c3, c0, 0
2440+ @ instruction sync
2441+ instr_sync
2442+ @ restore regs
2443+ ldmia sp!, {r0, r1}
2444+#endif
2445+ .endm
2446+
2447+ .macro pax_close_userland
2448+#ifdef CONFIG_PAX_MEMORY_UDEREF
2449+ @ save regs
2450+ stmdb sp!, {r0, r1}
2451+ @ read DACR from cpu_domain into r1
2452+ mov r0, sp
2453+ @ assume 8K pages, since we have to split the immediate in two
2454+ bic r0, r0, #(0x1fc0)
2455+ bic r0, r0, #(0x3f)
2456+ ldr r1, [r0, #TI_CPU_DOMAIN]
2457+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2458+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2459+ @ write r1 to current_thread_info()->cpu_domain
2460+ str r1, [r0, #TI_CPU_DOMAIN]
2461+ @ write r1 to DACR
2462+ mcr p15, 0, r1, c3, c0, 0
2463+ @ instruction sync
2464+ instr_sync
2465+ @ restore regs
2466+ ldmia sp!, {r0, r1}
2467+#endif
2468+ .endm
2469+
2470 .macro pabt_helper
2471 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2472 #ifdef MULTI_PABORT
2473@@ -92,11 +173,15 @@
2474 * Invalid mode handlers
2475 */
2476 .macro inv_entry, reason
2477+
2478+ pax_enter_kernel
2479+
2480 sub sp, sp, #S_FRAME_SIZE
2481 ARM( stmib sp, {r1 - lr} )
2482 THUMB( stmia sp, {r0 - r12} )
2483 THUMB( str sp, [sp, #S_SP] )
2484 THUMB( str lr, [sp, #S_LR] )
2485+
2486 mov r1, #\reason
2487 .endm
2488
2489@@ -152,6 +237,9 @@ ENDPROC(__und_invalid)
2490 .macro svc_entry, stack_hole=0, trace=1, uaccess=1
2491 UNWIND(.fnstart )
2492 UNWIND(.save {r0 - pc} )
2493+
2494+ pax_enter_kernel
2495+
2496 sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
2497 #ifdef CONFIG_THUMB2_KERNEL
2498 SPFIX( str r0, [sp] ) @ temporarily saved
2499@@ -167,7 +255,12 @@ ENDPROC(__und_invalid)
2500 ldmia r0, {r3 - r5}
2501 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2502 mov r6, #-1 @ "" "" "" ""
2503+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2504+ @ offset sp by 8 as done in pax_enter_kernel
2505+ add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole + 4)
2506+#else
2507 add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
2508+#endif
2509 SPFIX( addeq r2, r2, #4 )
2510 str r3, [sp, #-4]! @ save the "real" r0 copied
2511 @ from the exception stack
2512@@ -376,6 +469,9 @@ ENDPROC(__fiq_abt)
2513 .macro usr_entry, trace=1, uaccess=1
2514 UNWIND(.fnstart )
2515 UNWIND(.cantunwind ) @ don't unwind the user space
2516+
2517+ pax_enter_kernel_user
2518+
2519 sub sp, sp, #S_FRAME_SIZE
2520 ARM( stmib sp, {r1 - r12} )
2521 THUMB( stmia sp, {r0 - r12} )
2522@@ -489,7 +585,9 @@ __und_usr:
2523 tst r3, #PSR_T_BIT @ Thumb mode?
2524 bne __und_usr_thumb
2525 sub r4, r2, #4 @ ARM instr at LR - 4
2526+ pax_open_userland
2527 1: ldrt r0, [r4]
2528+ pax_close_userland
2529 ARM_BE8(rev r0, r0) @ little endian instruction
2530
2531 uaccess_disable ip
2532@@ -525,11 +623,15 @@ __und_usr_thumb:
2533 */
2534 .arch armv6t2
2535 #endif
2536+ pax_open_userland
2537 2: ldrht r5, [r4]
2538+ pax_close_userland
2539 ARM_BE8(rev16 r5, r5) @ little endian instruction
2540 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2541 blo __und_usr_fault_16_pan @ 16bit undefined instruction
2542+ pax_open_userland
2543 3: ldrht r0, [r2]
2544+ pax_close_userland
2545 ARM_BE8(rev16 r0, r0) @ little endian instruction
2546 uaccess_disable ip
2547 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2548@@ -560,7 +662,8 @@ ENDPROC(__und_usr)
2549 */
2550 .pushsection .text.fixup, "ax"
2551 .align 2
2552-4: str r4, [sp, #S_PC] @ retry current instruction
2553+4: pax_close_userland
2554+ str r4, [sp, #S_PC] @ retry current instruction
2555 ret r9
2556 .popsection
2557 .pushsection __ex_table,"a"
2558@@ -782,7 +885,7 @@ ENTRY(__switch_to)
2559 THUMB( str lr, [ip], #4 )
2560 ldr r4, [r2, #TI_TP_VALUE]
2561 ldr r5, [r2, #TI_TP_VALUE + 4]
2562-#ifdef CONFIG_CPU_USE_DOMAINS
2563+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2564 mrc p15, 0, r6, c3, c0, 0 @ Get domain register
2565 str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
2566 ldr r6, [r2, #TI_CPU_DOMAIN]
2567@@ -793,7 +896,7 @@ ENTRY(__switch_to)
2568 ldr r8, =__stack_chk_guard
2569 ldr r7, [r7, #TSK_STACK_CANARY]
2570 #endif
2571-#ifdef CONFIG_CPU_USE_DOMAINS
2572+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2573 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2574 #endif
2575 mov r5, r0
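
The "@ assume 8K pages" comments in the macros above refer to the stack-masking trick for locating thread_info: with 8 KiB kernel stacks, thread_info sits at the stack base, so it is recovered by clearing the low 13 bits of sp. 0x1FFF is not an encodable ARM rotated immediate, hence the split into two `bic` instructions (0x1FC0 | 0x3F == 0x1FFF). A C-level sketch of the same computation, assuming the 8 KiB THREAD_SIZE the comments rely on:

	#define EXAMPLE_THREAD_SIZE 8192UL

	struct thread_info;	/* layout irrelevant to the address arithmetic */

	static inline struct thread_info *ti_from_sp(unsigned long sp)
	{
		return (struct thread_info *)(sp & ~(EXAMPLE_THREAD_SIZE - 1));
	}
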
2576diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2577index 30a7228..d071196 100644
2578--- a/arch/arm/kernel/entry-common.S
2579+++ b/arch/arm/kernel/entry-common.S
2580@@ -11,18 +11,46 @@